X-Git-Url: https://git.librecmc.org/?a=blobdiff_plain;f=src%2Futil%2Fload.c;h=146e8509512382efcf8d87cbdf9e47fe9cd13953;hb=8f654f30c3c4987c9ca1b564d6e6f2d75ae24862;hp=b7ab01540b18c540921872dea78abc55a07da71d;hpb=f6a270ac2ec89ed94ef00b97dd2f18b16fd6c306;p=oweals%2Fgnunet.git

diff --git a/src/util/load.c b/src/util/load.c
index b7ab01540..146e85095 100644
--- a/src/util/load.c
+++ b/src/util/load.c
@@ -26,12 +26,13 @@
 #include "platform.h"
 #include "gnunet_load_lib.h"
 
-#define DEBUG_LOAD GNUNET_NO
+
+#define LOG(kind,...) GNUNET_log_from (kind, "util", __VA_ARGS__)
 
 /**
  * Values we track for load calculations.
  */
-struct GNUNET_LOAD_Value 
+struct GNUNET_LOAD_Value
 {
 
   /**
@@ -41,7 +42,7 @@ struct GNUNET_LOAD_Value
 
   /**
    * Last time this load value was updated by an event.
-   */ 
+   */
   struct GNUNET_TIME_Absolute last_update;
 
   /**
@@ -50,19 +51,19 @@ struct GNUNET_LOAD_Value
    * first 4 billion requests).
    */
   uint64_t cummulative_delay;
-  
+
   /**
    * Sum of squares of all datastore delays ever observed (in ms).  Note that
    * delays above 64k ms are excluded (to avoid overflow within
    * first 4 billion requests).
   */
   uint64_t cummulative_squared_delay;
-  
+
   /**
   * Total number of requests included in the cummulative datastore delay values.
   */
   uint64_t cummulative_request_count;
-  
+
   /**
    * Current running average datastore delay.  Its relation to the
    * average datastore delay and it std. dev. (as calcualted from the
@@ -86,23 +87,29 @@ internal_update (struct GNUNET_LOAD_Value *load)
   struct GNUNET_TIME_Relative delta;
   unsigned int n;
 
-  if (load->autodecline.value == GNUNET_TIME_UNIT_FOREVER_REL.value)
+  if (load->autodecline.rel_value == GNUNET_TIME_UNIT_FOREVER_REL.rel_value)
     return;
   delta = GNUNET_TIME_absolute_get_duration (load->last_update);
-  if (delta.value < load->autodecline.value)
+  if (delta.rel_value < load->autodecline.rel_value)
     return;
-  n = delta.value / load->autodecline.value;
+  if (load->autodecline.rel_value == 0)
+  {
+    load->runavg_delay = 0.0;
+    load->load = 0;
+    return;
+  }
+  n = delta.rel_value / load->autodecline.rel_value;
   if (n > 16)
-    {
-      load->runavg_delay = 0.0;
-      load->load = 0;
-      return;
-    }
+  {
+    load->runavg_delay = 0.0;
+    load->load = 0;
+    return;
+  }
   while (n > 0)
-    {
-      n--;
-      load->runavg_delay = (load->runavg_delay * 7.0) / 8.0;
-    }
+  {
+    n--;
+    load->runavg_delay = (load->runavg_delay * 7.0) / 8.0;
+  }
 }
 
 
@@ -119,7 +126,6 @@ GNUNET_LOAD_value_init (struct GNUNET_TIME_Relative autodecline)
 {
   struct GNUNET_LOAD_Value *ret;
 
-  GNUNET_assert (autodecline.value != 0);
   ret = GNUNET_malloc (sizeof (struct GNUNET_LOAD_Value));
   ret->autodecline = autodecline;
   ret->last_update = GNUNET_TIME_absolute_get ();
@@ -135,10 +141,10 @@ GNUNET_LOAD_value_init (struct GNUNET_TIME_Relative autodecline)
  */
 void
 GNUNET_LOAD_value_set_decline (struct GNUNET_LOAD_Value *load,
-                               struct GNUNET_TIME_Relative autodecline) 
+                               struct GNUNET_TIME_Relative autodecline)
 {
   internal_update (load);
-  load->autodecline = autodecline;  
+  load->autodecline = autodecline;
 }
 
 
@@ -159,25 +165,27 @@ calculate_load (struct GNUNET_LOAD_Value *load)
   if (load->cummulative_request_count <= 1)
     return;
   /* calcuate std dev of latency; we have for n values of "i" that:
-     
-     avg = (sum val_i) / n
-     stddev = (sum (val_i - avg)^2) / (n-1)
-            = (sum (val_i^2 - 2 avg val_i + avg^2) / (n-1)
-            = (sum (val_i^2) - 2 avg sum (val_i) + n * avg^2) / (n-1)
-  */
+   *
+   * avg = (sum val_i) / n
+   * stddev = (sum (val_i - avg)^2) / (n-1)
+   *        = (sum (val_i^2 - 2 avg val_i + avg^2) / (n-1)
+   *        = (sum (val_i^2) - 2 avg sum (val_i) + n * avg^2) / (n-1)
+   */
   sum_val_i = (double) load->cummulative_delay;
   n = ((double) load->cummulative_request_count);
   nm1 = n - 1.0;
   avgdel = sum_val_i / n;
-  stddev = (((double) load->cummulative_squared_delay) - 2.0 * avgdel * sum_val_i + n * avgdel * avgdel) / nm1;
+  stddev =
+      (((double) load->cummulative_squared_delay) - 2.0 * avgdel * sum_val_i +
+       n * avgdel * avgdel) / nm1;
   if (stddev <= 0)
-    stddev = 0.01; /* must have been rounding error or zero; prevent division by zero */
+    stddev = 0.01;              /* must have been rounding error or zero; prevent division by zero */
   /* now calculate load based on how far out we are from
-     std dev; or if we are below average, simply assume load zero */
+   * std dev; or if we are below average, simply assume load zero */
   if (load->runavg_delay < avgdel)
     load->load = 0.0;
   else
-    load->load = (load->runavg_delay - avgdel) / stddev;    
+    load->load = (load->runavg_delay - avgdel) / stddev;
 }
 
 
@@ -227,22 +235,21 @@ GNUNET_LOAD_get_average (struct GNUNET_LOAD_Value *load)
  * @param data latest measurement value (for example, delay)
  */
 void
-GNUNET_LOAD_update (struct GNUNET_LOAD_Value *load,
-                    uint64_t data)
+GNUNET_LOAD_update (struct GNUNET_LOAD_Value *load, uint64_t data)
 {
   uint32_t dv;
 
   internal_update (load);
   load->last_update = GNUNET_TIME_absolute_get ();
   if (data > 64 * 1024)
-    {
-      /* very large */
-      load->load = 100.0;
-      return;
-    }
+  {
+    /* very large */
+    load->load = 100.0;
+    return;
+  }
   dv = (uint32_t) data;
   load->cummulative_delay += dv;
-  load->cummulative_squared_delay += dv * dv;  
+  load->cummulative_squared_delay += dv * dv;
   load->cummulative_request_count++;
   load->runavg_delay = ((load->runavg_delay * 7.0) + dv) / 8.0;
 }
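
Note on the load formula touched by the calculate_load() hunk above: the value keeps only three cumulative counters (sum of delays, sum of squared delays, request count), derives the mean and the (n-1)-normalized spread from them, and expresses the load as how far the running average sits above the mean. The standalone C sketch below mirrors that arithmetic outside the GNUnet tree; sample_load and its parameters are illustrative names only, not part of the GNUnet API, and, as in load.c itself, no square root is taken, so the divisor is the variance-style estimator rather than a true standard deviation.

/* Standalone sketch of the load computation in calculate_load().
 * All names here are illustrative; only the arithmetic follows load.c. */
#include <stdint.h>
#include <stdio.h>

static double
sample_load (uint64_t cum_delay,         /* sum of all observed delays (ms) */
             uint64_t cum_squared_delay, /* sum of squared delays (ms^2) */
             uint64_t request_count,     /* number of observations */
             double runavg_delay)        /* current running average (ms) */
{
  if (request_count <= 1)
    return 0.0;                 /* not enough data for a spread estimate */
  double sum_val_i = (double) cum_delay;
  double n = (double) request_count;
  double nm1 = n - 1.0;
  double avgdel = sum_val_i / n;
  /* (sum val_i^2 - 2*avg*sum val_i + n*avg^2) / (n-1); used directly as
   * the scale, without a square root, as in the original code */
  double stddev = ((double) cum_squared_delay - 2.0 * avgdel * sum_val_i +
                   n * avgdel * avgdel) / nm1;
  if (stddev <= 0)
    stddev = 0.01;              /* guard against rounding to zero */
  if (runavg_delay < avgdel)
    return 0.0;                 /* below average: report no load */
  return (runavg_delay - avgdel) / stddev;
}

int
main (void)
{
  /* Example: 10 requests summing to 1000 ms (mean 100 ms) with some spread;
   * a current running average of 150 ms yields a small positive load. */
  printf ("load: %f\n", sample_load (1000, 104000, 10, 150.0));
  return 0;
}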
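
For the hunks touching internal_update() and GNUNET_LOAD_update(): each new measurement is folded into runavg_delay with weight 1/8, and for every autodecline period that elapses without an event the average is multiplied by 7/8, with a full reset after more than 16 such periods and, per the new rel_value == 0 branch, an immediate reset when the decline period is zero. The toy sketch below reproduces that bookkeeping with plain integer timestamps; toy_load, toy_decline and toy_update are made-up names, not GNUnet functions, and the "never decline" (FOREVER) branch is omitted.

/* Standalone sketch of the running-average decay and update logic. */
#include <stdint.h>
#include <stdio.h>

struct toy_load
{
  double runavg_delay;          /* exponentially weighted running average */
  uint64_t autodecline_ms;      /* decay period; 0 means reset immediately */
  uint64_t last_update_ms;      /* timestamp of the last recorded event */
};

static void
toy_decline (struct toy_load *l, uint64_t now_ms)
{
  /* the FOREVER ("never decline") case of internal_update() is omitted */
  if (0 == l->autodecline_ms)
  {
    l->runavg_delay = 0.0;      /* mirrors the rel_value == 0 special case */
    return;
  }
  uint64_t delta = now_ms - l->last_update_ms;
  if (delta < l->autodecline_ms)
    return;
  uint64_t n = delta / l->autodecline_ms;
  if (n > 16)
  {
    l->runavg_delay = 0.0;      /* too long ago: forget the history */
    return;
  }
  while (n-- > 0)
    l->runavg_delay = (l->runavg_delay * 7.0) / 8.0;
}

static void
toy_update (struct toy_load *l, uint64_t now_ms, uint32_t delay_ms)
{
  toy_decline (l, now_ms);
  l->last_update_ms = now_ms;
  /* new sample enters with weight 1/8, history keeps weight 7/8 */
  l->runavg_delay = ((l->runavg_delay * 7.0) + delay_ms) / 8.0;
}

int
main (void)
{
  struct toy_load l = { 0.0, 1000, 0 };
  toy_update (&l, 1000, 80);    /* one 80 ms sample at t = 1 s */
  toy_update (&l, 2000, 80);    /* another at t = 2 s */
  toy_update (&l, 10000, 80);   /* 8 s gap: average decays before update */
  printf ("running average: %f ms\n", l.runavg_delay);
  return 0;
}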