2 This file is part of GNUnet.
3 Copyright (C) 2010, 2013 GNUnet e.V.
5 GNUnet is free software: you can redistribute it and/or modify it
6 under the terms of the GNU Affero General Public License as published
7 by the Free Software Foundation, either version 3 of the License,
8 or (at your option) any later version.
10 GNUnet is distributed in the hope that it will be useful, but
11 WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Affero General Public License for more details.
15 You should have received a copy of the GNU Affero General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>.
SPDX-License-Identifier: AGPL-3.0-or-later
 * @file util/load.c
 * @brief functions related to load calculations
 * @author Christian Grothoff
27 #include "gnunet_util_lib.h"
/**
 * Log helper: emit a message tagged with the "util-load" component name.
 */
#define LOG(kind, ...) GNUNET_log_from (kind, "util-load", __VA_ARGS__)
33 * Values we track for load calculations.
35 struct GNUNET_LOAD_Value {
37 * How fast should the load decline if no values are added?
39 struct GNUNET_TIME_Relative autodecline;
42 * Last time this load value was updated by an event.
44 struct GNUNET_TIME_Absolute last_update;
47 * Sum of all datastore delays ever observed (in ms). Note that
48 * delays above 64k ms are excluded (to avoid overflow within
49 * first 4 billion requests).
51 uint64_t cummulative_delay;
54 * Sum of squares of all datastore delays ever observed (in ms). Note that
55 * delays above 64k ms are excluded (to avoid overflow within
56 * first 4 billion requests).
58 uint64_t cummulative_squared_delay;
61 * Total number of requests included in the cummulative datastore delay values.
63 uint64_t cummulative_request_count;
66 * Current running average datastore delay. Its relation to the
67 * average datastore delay and it std. dev. (as calcualted from the
68 * cummulative values) tells us our current load.
73 * How high is the load? 0 for below average, otherwise
74 * the number of std. devs we are above average, or 100 if the
75 * load is so high that we currently cannot calculate it.
82 internal_update(struct GNUNET_LOAD_Value *load)
84 struct GNUNET_TIME_Relative delta;
87 if (load->autodecline.rel_value_us == GNUNET_TIME_UNIT_FOREVER_REL.rel_value_us)
89 delta = GNUNET_TIME_absolute_get_duration(load->last_update);
90 if (delta.rel_value_us < load->autodecline.rel_value_us)
92 if (0 == load->autodecline.rel_value_us)
94 load->runavg_delay = 0.0;
98 n = delta.rel_value_us / load->autodecline.rel_value_us;
101 load->runavg_delay = 0.0;
108 load->runavg_delay = (load->runavg_delay * 7.0) / 8.0;
114 * Create a new load value.
116 * @param autodecline speed at which this value should automatically
117 * decline in the absence of external events; at the given
118 * frequency, 0-load values will be added to the load
119 * @return the new load value
121 struct GNUNET_LOAD_Value *
122 GNUNET_LOAD_value_init(struct GNUNET_TIME_Relative autodecline)
124 struct GNUNET_LOAD_Value *ret;
126 ret = GNUNET_new(struct GNUNET_LOAD_Value);
127 ret->autodecline = autodecline;
128 ret->last_update = GNUNET_TIME_absolute_get();
134 * Change the value by which the load automatically declines.
136 * @param load load to update
137 * @param autodecline frequency of load decline
140 GNUNET_LOAD_value_set_decline(struct GNUNET_LOAD_Value *load,
141 struct GNUNET_TIME_Relative autodecline)
143 internal_update(load);
144 load->autodecline = autodecline;
149 * Recalculate our load value.
151 * @param load load to update
154 calculate_load(struct GNUNET_LOAD_Value *load)
162 if (load->cummulative_request_count <= 1)
164 /* calcuate std dev of latency; we have for n values of "i" that:
166 * avg = (sum val_i) / n
167 * stddev = (sum (val_i - avg)^2) / (n-1)
168 * = (sum (val_i^2 - 2 avg val_i + avg^2) / (n-1)
169 * = (sum (val_i^2) - 2 avg sum (val_i) + n * avg^2) / (n-1)
171 sum_val_i = (double)load->cummulative_delay;
172 n = ((double)load->cummulative_request_count);
174 avgdel = sum_val_i / n;
176 (((double)load->cummulative_squared_delay) - 2.0 * avgdel * sum_val_i +
177 n * avgdel * avgdel) / nm1;
179 stddev = 0.01; /* must have been rounding error or zero; prevent division by zero */
180 /* now calculate load based on how far out we are from
181 * std dev; or if we are below average, simply assume load zero */
182 if (load->runavg_delay < avgdel)
185 load->load = (load->runavg_delay - avgdel) / stddev;
190 * Get the current load.
192 * @param load load handle
193 * @return zero for below-average load, otherwise
194 * number of std. devs we are above average;
195 * 100 if the latest updates were so large
196 * that we could not do proper calculations
199 GNUNET_LOAD_get_load(struct GNUNET_LOAD_Value *load)
201 internal_update(load);
202 calculate_load(load);
208 * Get the average value given to update so far.
210 * @param load load handle
211 * @return zero if update was never called
214 GNUNET_LOAD_get_average(struct GNUNET_LOAD_Value *load)
219 internal_update(load);
220 if (load->cummulative_request_count == 0)
222 n = ((double)load->cummulative_request_count);
223 sum_val_i = (double)load->cummulative_delay;
224 return sum_val_i / n;
229 * Update the current load.
231 * @param load to update
232 * @param data latest measurement value (for example, delay)
235 GNUNET_LOAD_update(struct GNUNET_LOAD_Value *load, uint64_t data)
239 internal_update(load);
240 load->last_update = GNUNET_TIME_absolute_get();
241 if (data > 64 * 1024)
248 load->cummulative_delay += dv;
249 load->cummulative_squared_delay += dv * dv;
250 load->cummulative_request_count++;
251 load->runavg_delay = ((load->runavg_delay * 7.0) + dv) / 8.0;