*/
#define SUPPORT_DELAYS GNUNET_NO
+/**
+ * Currently experimental code...
+ */
+#define ENABLE_LOAD_MGMT GNUNET_YES
+
/**
 * Size for the hash map for DHT requests from the FS
 * service. Should be about the number of concurrent
 */
#define FS_DHT_HT_SIZE 1024
+/**
+ * Rate at which the datastore load values auto-decline:
+ * a 0-load event is synthesized every 250 ms without real activity.
+ */
+#define DATASTORE_LOAD_AUTODECLINE GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_MILLISECONDS, 250)
+
/**
 * How often do we flush trust values to disk?
 */
  uint32_t trust;
  cp = GNUNET_malloc (sizeof (struct ConnectedPeer));
-  cp->transmission_delay = GNUNET_LOAD_value_init ();
+  /* auto-decline so an idle peer's transmission-delay load drops
+     back toward zero after the idle-connection timeout */
+  cp->transmission_delay = GNUNET_LOAD_value_init (GNUNET_CONSTANTS_IDLE_CONNECTION_TIMEOUT);
  cp->pid = GNUNET_PEER_intern (peer);
  fn = get_trust_filename (peer);
			      GNUNET_NO);
  /* FIXME: if this is activated, we might stall large downloads
     indefinitely since (presumably) the load can never go down again! */
-#if 0
+#if ENABLE_LOAD_MGMT
+  /* experimental: under high load, skip this result and keep
+     iterating the datastore instead */
  GNUNET_DATASTORE_get_next (dsh, GNUNET_NO);
  return;
#endif
			    gettext_noop ("# requests dropped due to high load"),
			    1,
			    GNUNET_NO);
-#if 0
+#if ENABLE_LOAD_MGMT
  /* FIXME: this causes problems... */
  return GNUNET_OK;
#endif
  /* don't have BW to send to peer, or would likely take longer than we have for it,
     so at best indirect the query */
  priority = 0;
+#if ENABLE_LOAD_MGMT
  /* FIXME: if this line is enabled, the 'perf' test for larger files simply "hangs";
     the cause seems to be that the load goes up (to the point where we do this)
     and then never goes down again... (outch) */
-	  // pr->forward_only = GNUNET_YES;
+  /* NOTE(review): the load values now auto-decline, so the load should
+     be able to go down again -- re-run the 'perf' test to confirm */
+  pr->forward_only = GNUNET_YES;
+#endif
	}
      pr->type = type;
      pr->mingle = ntohl (gm->filter_mutator);
  connected_peers = GNUNET_CONTAINER_multihashmap_create (enc);
  query_request_map = GNUNET_CONTAINER_multihashmap_create (max_pending_requests);
-  rt_entry_lifetime = GNUNET_LOAD_value_init ();
+  /* FOREVER_REL: routing-table entry lifetime load never auto-declines */
+  rt_entry_lifetime = GNUNET_LOAD_value_init (GNUNET_TIME_UNIT_FOREVER_REL);
  peer_request_map = GNUNET_CONTAINER_multihashmap_create (enc);
  requests_by_expiration_heap = GNUNET_CONTAINER_heap_create (GNUNET_CONTAINER_HEAP_ORDER_MIN);
  core = GNUNET_CORE_connect (sched,
      GNUNET_SCHEDULER_shutdown (sched);
      return;
    }
-  datastore_get_load = GNUNET_LOAD_value_init ();
-  datastore_put_load = GNUNET_LOAD_value_init ();
+  /* datastore load values decay every 250 ms without events */
+  datastore_get_load = GNUNET_LOAD_value_init (DATASTORE_LOAD_AUTODECLINE);
+  datastore_put_load = GNUNET_LOAD_value_init (DATASTORE_LOAD_AUTODECLINE);
  block_cfg = GNUNET_CONFIGURATION_create ();
  GNUNET_CONFIGURATION_set_value_string (block_cfg,
					 "block",
struct GNUNET_LOAD_Value
{
+  /**
+   * How fast should the load decline if no values are added?
+   * 0-load events are synthesized at this frequency;
+   * GNUNET_TIME_UNIT_FOREVER_REL disables the decline entirely.
+   */
+  struct GNUNET_TIME_Relative autodecline;
+
+  /**
+   * Last time this load value was updated by an event.
+   */
+  struct GNUNET_TIME_Absolute last_update;
+
  /**
   * Sum of all datastore delays ever observed (in ms). Note that
   * delays above 64k ms are excluded (to avoid overflow within
};
+/**
+ * Decay the load value for the time that has passed since the
+ * last recorded event, synthesizing one 0-delay sample per
+ * elapsed auto-decline interval.
+ *
+ * @param load load value to age
+ */
+static void
+internal_update (struct GNUNET_LOAD_Value *load)
+{
+  struct GNUNET_TIME_Relative delta;
+  unsigned int n;
+
+  if (load->autodecline.value == GNUNET_TIME_UNIT_FOREVER_REL.value)
+    return; /* auto-decline disabled */
+  delta = GNUNET_TIME_absolute_get_duration (load->last_update);
+  if (delta.value < load->autodecline.value)
+    return;
+  /* Advance 'last_update' now that we account for the elapsed time;
+     otherwise every subsequent read would re-apply the same decay
+     again and again, compounding incorrectly.  (The fractional
+     remainder of the interval is deliberately dropped.) */
+  load->last_update = GNUNET_TIME_absolute_get ();
+  n = delta.value / load->autodecline.value;
+  if (n > 16)
+    {
+      /* so many intervals have passed that the load is
+	 effectively zero; skip the per-interval loop */
+      load->runavg_delay = 0.0;
+      load->load = 0;
+      return;
+    }
+  while (n > 0)
+    {
+      n--;
+      /* fold in one 0-delay sample (same 7/8 EWMA as GNUNET_LOAD_update) */
+      load->runavg_delay = (load->runavg_delay * 7.0) / 8.0;
+    }
+}
+
+
/**
 * Create a new load value.
 *
+ * @param autodecline speed at which this value should automatically
+ *    decline in the absence of external events; at the given
+ *    frequency, 0-load values will be added to the load
+ *    (pass GNUNET_TIME_UNIT_FOREVER_REL to disable declining)
 * @return the new load value
 */
struct GNUNET_LOAD_Value *
-GNUNET_LOAD_value_init ()
+GNUNET_LOAD_value_init (struct GNUNET_TIME_Relative autodecline)
{
-  return GNUNET_malloc (sizeof (struct GNUNET_LOAD_Value));
+  struct GNUNET_LOAD_Value *ret;
+
+  GNUNET_assert (autodecline.value != 0); /* a zero interval would divide by zero in the decay logic */
+  ret = GNUNET_malloc (sizeof (struct GNUNET_LOAD_Value));
+  ret->autodecline = autodecline;
+  ret->last_update = GNUNET_TIME_absolute_get ();
+  return ret;
+}
+
+
+/**
+ * Recalculate our load value from the observed delay samples:
+ * load is how many standard deviations the running average
+ * delay lies above the overall mean delay (0 if at/below mean).
+ *
+ * @param load load to update
+ */
+static void
+calculate_load (struct GNUNET_LOAD_Value *load)
+{
+  double stddev;
+  double avgdel;
+  double sum_val_i;
+  double n;
+  double nm1;
+
+  if (load->cummulative_request_count < 2)
+    return; /* stddev divides by (n - 1); with one sample that is 0/0 = NaN */
+  /* calculate std dev of latency; we have for n values of "i" that:
+
+     avg = (sum val_i) / n
+     stddev = (sum (val_i - avg)^2) / (n-1)
+            = (sum (val_i^2 - 2 avg val_i + avg^2) / (n-1)
+            = (sum (val_i^2) - 2 avg sum (val_i) + n * avg^2) / (n-1)
+  */
+  sum_val_i = (double) load->cummulative_delay;
+  n = ((double) load->cummulative_request_count);
+  nm1 = n - 1.0;
+  avgdel = sum_val_i / n;
+  stddev = (((double) load->cummulative_squared_delay) - 2.0 * avgdel * sum_val_i + n * avgdel * avgdel) / nm1;
+  if (stddev <= 0)
+    stddev = 0.01;        /* must have been rounding error or zero; prevent division by zero */
+  /* now calculate load based on how far out we are from
+     std dev; or if we are below average, simply assume load zero */
+  if (load->runavg_delay < avgdel)
+    load->load = 0.0;
+  else
+    load->load = (load->runavg_delay - avgdel) / stddev;
}
 * that we could not do proper calculations
 */
double
-GNUNET_LOAD_get_load (const struct GNUNET_LOAD_Value *load)
+GNUNET_LOAD_get_load (struct GNUNET_LOAD_Value *load)
{
+  /* reading now mutates the value (decay + recompute), hence non-const */
+  internal_update (load);
+  calculate_load (load);
  return load->load;
}
 * @return zero if update was never called
 */
double
-GNUNET_LOAD_get_average (const struct GNUNET_LOAD_Value *load)
+GNUNET_LOAD_get_average (struct GNUNET_LOAD_Value *load)
{
  double n;
  double sum_val_i;
+  /* apply the time-based decay before computing the average */
+  internal_update (load);
  if (load->cummulative_request_count == 0)
    return 0.0;
  n = ((double) load->cummulative_request_count);
		    uint64_t data)
{
  uint32_t dv;
-  double stddev;
-  double avgdel;
-  double sum_val_i;
-  double n;
-  double nm1;
+  /* decay for the idle time elapsed, then record this event's timestamp */
+  internal_update (load);
+  load->last_update = GNUNET_TIME_absolute_get ();
  if (data > 64 * 1024)
    {
      /* very large */
  load->cummulative_squared_delay += dv * dv;
  load->cummulative_request_count++;
  load->runavg_delay = ((load->runavg_delay * 7.0) + dv) / 8.0;
-  if (load->cummulative_request_count > 1)
-    {
-      /* calcuate std dev of latency; we have for n values of "i" that:
-
-	 avg = (sum val_i) / n
-	 stddev = (sum (val_i - avg)^2) / (n-1)
-	        = (sum (val_i^2 - 2 avg val_i + avg^2) / (n-1)
-	        = (sum (val_i^2) - 2 avg sum (val_i) + n * avg^2) / (n-1)
-      */
-      sum_val_i = (double) load->cummulative_delay;
-      n = ((double) load->cummulative_request_count);
-      nm1 = n - 1.0;
-      avgdel = sum_val_i / n;
-      stddev = (((double) load->cummulative_squared_delay) - 2.0 * avgdel * sum_val_i + n * avgdel * avgdel) / nm1;
-      if (stddev <= 0)
-	stddev = 0.01; /* must have been rounding error or zero; prevent division by zero */
-      /* now calculate load based on how far out we are from
-	 std dev; or if we are below average, simply assume load zero */
-      if (load->runavg_delay < avgdel)
-	load->load = 0.0;
-      else
-	load->load = (load->runavg_delay - avgdel) / stddev;
-    }
}
+
/* end of load.c */