/*
     This file is part of GNUnet.
     (C) 2010 Christian Grothoff (and other contributing authors)

     GNUnet is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published
     by the Free Software Foundation; either version 2, or (at your
     option) any later version.

     GNUnet is distributed in the hope that it will be useful, but
     WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     General Public License for more details.

     You should have received a copy of the GNU General Public License
     along with GNUnet; see the file COPYING.  If not, write to the
     Free Software Foundation, Inc., 59 Temple Place - Suite 330,
     Boston, MA 02111-1307, USA.
*/
/**
 * @brief functions related to load calculations
 * @author Christian Grothoff
 */
27 #include "gnunet_load_lib.h"
29 #define DEBUG_LOAD GNUNET_NO
32 * Values we track for load calculations.
34 struct GNUNET_LOAD_Value
38 * How fast should the load decline if no values are added?
40 struct GNUNET_TIME_Relative autodecline;
43 * Last time this load value was updated by an event.
45 struct GNUNET_TIME_Absolute last_update;
48 * Sum of all datastore delays ever observed (in ms). Note that
49 * delays above 64k ms are excluded (to avoid overflow within
50 * first 4 billion requests).
52 uint64_t cummulative_delay;
55 * Sum of squares of all datastore delays ever observed (in ms). Note that
56 * delays above 64k ms are excluded (to avoid overflow within
57 * first 4 billion requests).
59 uint64_t cummulative_squared_delay;
62 * Total number of requests included in the cummulative datastore delay values.
64 uint64_t cummulative_request_count;
67 * Current running average datastore delay. Its relation to the
68 * average datastore delay and it std. dev. (as calcualted from the
69 * cummulative values) tells us our current load.
74 * How high is the load? 0 for below average, otherwise
75 * the number of std. devs we are above average, or 100 if the
76 * load is so high that we currently cannot calculate it.
84 internal_update (struct GNUNET_LOAD_Value *load)
86 struct GNUNET_TIME_Relative delta;
89 if (load->autodecline.value == GNUNET_TIME_UNIT_FOREVER_REL.value)
91 delta = GNUNET_TIME_absolute_get_duration (load->last_update);
92 if (delta.value < load->autodecline.value)
94 n = delta.value / load->autodecline.value;
97 load->runavg_delay = 0.0;
104 load->runavg_delay = (load->runavg_delay * 7.0) / 8.0;
110 * Create a new load value.
112 * @param autodecline speed at which this value should automatically
113 * decline in the absence of external events; at the given
114 * frequency, 0-load values will be added to the load
115 * @return the new load value
117 struct GNUNET_LOAD_Value *
118 GNUNET_LOAD_value_init (struct GNUNET_TIME_Relative autodecline)
120 struct GNUNET_LOAD_Value *ret;
122 ret = GNUNET_malloc (sizeof (struct GNUNET_LOAD_Value));
123 ret->autodecline = autodecline;
124 if (ret->autodecline.value == 0)
125 ret->autodecline.value = 1;
126 ret->last_update = GNUNET_TIME_absolute_get ();
132 * Change the value by which the load automatically declines.
134 * @param load load to update
135 * @param autodecline frequency of load decline
138 GNUNET_LOAD_value_set_decline (struct GNUNET_LOAD_Value *load,
139 struct GNUNET_TIME_Relative autodecline)
141 internal_update (load);
142 load->autodecline = autodecline;
143 if (load->autodecline.value == 0)
144 load->autodecline.value = 1;
149 * Recalculate our load value.
151 * @param load load to update
154 calculate_load (struct GNUNET_LOAD_Value *load)
162 if (load->cummulative_request_count <= 1)
164 /* calcuate std dev of latency; we have for n values of "i" that:
166 avg = (sum val_i) / n
167 stddev = (sum (val_i - avg)^2) / (n-1)
168 = (sum (val_i^2 - 2 avg val_i + avg^2) / (n-1)
169 = (sum (val_i^2) - 2 avg sum (val_i) + n * avg^2) / (n-1)
171 sum_val_i = (double) load->cummulative_delay;
172 n = ((double) load->cummulative_request_count);
174 avgdel = sum_val_i / n;
175 stddev = (((double) load->cummulative_squared_delay) - 2.0 * avgdel * sum_val_i + n * avgdel * avgdel) / nm1;
177 stddev = 0.01; /* must have been rounding error or zero; prevent division by zero */
178 /* now calculate load based on how far out we are from
179 std dev; or if we are below average, simply assume load zero */
180 if (load->runavg_delay < avgdel)
183 load->load = (load->runavg_delay - avgdel) / stddev;
188 * Get the current load.
190 * @param load load handle
191 * @return zero for below-average load, otherwise
192 * number of std. devs we are above average;
193 * 100 if the latest updates were so large
194 * that we could not do proper calculations
197 GNUNET_LOAD_get_load (struct GNUNET_LOAD_Value *load)
199 internal_update (load);
200 calculate_load (load);
206 * Get the average value given to update so far.
208 * @param load load handle
209 * @return zero if update was never called
212 GNUNET_LOAD_get_average (struct GNUNET_LOAD_Value *load)
217 internal_update (load);
218 if (load->cummulative_request_count == 0)
220 n = ((double) load->cummulative_request_count);
221 sum_val_i = (double) load->cummulative_delay;
222 return sum_val_i / n;
227 * Update the current load.
229 * @param load to update
230 * @param data latest measurement value (for example, delay)
233 GNUNET_LOAD_update (struct GNUNET_LOAD_Value *load,
238 internal_update (load);
239 load->last_update = GNUNET_TIME_absolute_get ();
240 if (data > 64 * 1024)
246 dv = (uint32_t) data;
247 load->cummulative_delay += dv;
248 load->cummulative_squared_delay += dv * dv;
249 load->cummulative_request_count++;
250 load->runavg_delay = ((load->runavg_delay * 7.0) + dv) / 8.0;