X-Git-Url: https://git.librecmc.org/?a=blobdiff_plain;f=src%2Ffs%2Fgnunet-service-fs.h;h=e4efbb8f534cad872b1b1430ec7af43191c598d2;hb=bf886ddf525b991c6a11c9bd0be10e9aa0dd1c80;hp=a66339b91a945b8c0e37ca993d7026110f2be62a;hpb=143cde208ce477883102bf23f67a1d31aaa4ed11;p=oweals%2Fgnunet.git

diff --git a/src/fs/gnunet-service-fs.h b/src/fs/gnunet-service-fs.h
index a66339b91..e4efbb8f5 100644
--- a/src/fs/gnunet-service-fs.h
+++ b/src/fs/gnunet-service-fs.h
@@ -26,28 +26,164 @@
 #ifndef GNUNET_SERVICE_FS_H
 #define GNUNET_SERVICE_FS_H
 
+#include "gnunet_util_lib.h"
+#include "gnunet_statistics_service.h"
+#include "gnunet_transport_service.h"
+#include "gnunet_core_service.h"
+#include "gnunet_block_lib.h"
+#include "fs.h"
+
+#define DEBUG_FS GNUNET_YES
+
+#define DEBUG_FS_CLIENT GNUNET_EXTRA_LOGGING
+
+
+/**
+ * Should we introduce random latency in processing?  Required for proper
+ * implementation of GAP, but can be disabled for performance evaluation of
+ * the basic routing algorithm.
+ *
+ * Note that with delays enabled, performance can be significantly lower
+ * (several orders of magnitude in 2-peer test runs); if you want to
+ * measure throughput of other components, set this to NO.  Also, you
+ * might want to consider changing 'RETRY_PROBABILITY_INV' to 1 for
+ * a rather wasteful mode of operation (that might still get the highest
+ * throughput overall).
+ *
+ * Performance measurements (for 50 MB file, 2 peers):
+ *
+ * - Without delays: 3300 kb/s
+ * - With delays: 101 kb/s
+ */
+#define SUPPORT_DELAYS GNUNET_NO
+
+
+/**
+ * At what frequency should our datastore load decrease
+ * automatically (since if we don't use it, clearly the
+ * load must be going down).
+ */
+#define DATASTORE_LOAD_AUTODECLINE GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_MILLISECONDS, 250)
+
+
 /**
  * A connected peer.
  */
 struct GSF_ConnectedPeer;
 
-
 /**
  * An active request.
  */
 struct GSF_PendingRequest;
 
-
 /**
  * A local client.
  */
 struct GSF_LocalClient;
 
+/**
+ * Information kept per plan per request ('pe' module).
+ */
+struct GSF_RequestPlan;
+
+/**
+ * DLL of request plans a particular pending request is
+ * involved with.
+ */
+struct GSF_RequestPlanReference;
+
+/**
+ * Our connection to the datastore.
+ */
+extern struct GNUNET_DATASTORE_Handle *GSF_dsh;
+
+/**
+ * Our configuration.
+ */
+extern const struct GNUNET_CONFIGURATION_Handle *GSF_cfg;
+
+/**
+ * Handle for reporting statistics.
+ */
+extern struct GNUNET_STATISTICS_Handle *GSF_stats;
+
+/**
+ * Pointer to handle to the core service (points to NULL until we've
+ * connected to it).
+ */
+extern struct GNUNET_CORE_Handle *GSF_core;
+
+/**
+ * Handle for DHT operations.
+ */
+extern struct GNUNET_DHT_Handle *GSF_dht;
+
+/**
+ * How long do requests typically stay in the routing table?
+ */
+extern struct GNUNET_LOAD_Value *GSF_rt_entry_lifetime;
+
+/**
+ * Running average of the observed latency to other peers (round trip).
+ */
+extern struct GNUNET_TIME_Relative GSF_avg_latency;
+
+/**
+ * Typical priorities we're seeing from other peers right now.  Since
+ * most priorities will be zero, this value is the weighted average of
+ * non-zero priorities seen "recently".  In order to ensure that new
+ * values do not dramatically change the ratio, values are first
+ * "capped" to a reasonable range (+N of the current value) and then
+ * averaged into the existing value by a ratio of 1:N.  Hence
+ * receiving the largest possible priority can still only raise our
+ * "current_priorities" by at most 1.
+ */
+extern double GSF_current_priorities;
+
+/**
+ * How many query messages have we received 'recently' that
+ * have not yet been claimed as cover traffic?
+ */
+extern unsigned int GSF_cover_query_count;
+
+/**
+ * How many content messages have we received 'recently' that
+ * have not yet been claimed as cover traffic?
+ */
+extern unsigned int GSF_cover_content_count;
+
+/**
+ * Our block context.
+ */
+extern struct GNUNET_BLOCK_Context *GSF_block_ctx;
+
+/**
+ * Are we introducing randomized delays for better anonymity?
+ */
+extern int GSF_enable_randomized_delays;
+
+/**
+ * Test if the DATABASE (GET) load on this peer is too high
+ * to even consider processing the query at
+ * all.
+ *
+ * @return GNUNET_YES if the load is too high to do anything (load high)
+ *         GNUNET_NO to process normally (load normal)
+ *         GNUNET_SYSERR to process for free (load low)
+ */
+int
+GSF_test_get_load_too_high_ (uint32_t priority);
+
 /**
- * Entry in the query transmission plan.
+ * We've just now completed a datastore request.  Update our
+ * datastore load calculations.
+ *
+ * @param start time when the datastore request was issued
  */
-struct GSF_PlanEntry;
+void
+GSF_update_datastore_delay_ (struct GNUNET_TIME_Absolute start);
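
Illustrative note on the GSF_current_priorities averaging described in the hunk above: the cap-then-average rule can be sketched in C as below. The smoothing factor N (16 here) and the helper name update_current_priorities are assumptions made for the example; the real update lives in the FS service's .c files, not in this header.

#include <stdint.h>

#define PRIORITY_SMOOTHING_N 16    /* assumed value for the "N" in the comment */

static double current_priorities;  /* stands in for GSF_current_priorities */

/* Fold a newly observed non-zero priority into the running average. */
static void
update_current_priorities (uint32_t observed_priority)
{
  double capped = current_priorities + PRIORITY_SMOOTHING_N;

  /* first cap the observed value to at most "current + N" ... */
  if ((double) observed_priority < capped)
    capped = (double) observed_priority;
  /* ... then average it into the existing value at a ratio of 1:N, so even
     the largest possible priority raises the running value by less than 1 */
  current_priorities =
      (current_priorities * PRIORITY_SMOOTHING_N + capped)
      / (PRIORITY_SMOOTHING_N + 1.0);
}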
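
Hypothetical usage sketch for the two declarations at the end of the hunk: a caller could consult GSF_test_get_load_too_high_ before issuing a datastore GET and report the observed delay through GSF_update_datastore_delay_ once the request is done. The helpers maybe_query_datastore and issue_datastore_get are placeholders invented for the example; in the real service the delay update is made from the datastore result continuation.

#include "gnunet_util_lib.h"
#include "gnunet-service-fs.h"

/* Placeholder for starting the actual datastore lookup. */
static void
issue_datastore_get (uint32_t priority, int for_free)
{
  /* a real implementation would start a GNUNET_DATASTORE 'get' here */
  (void) priority;
  (void) for_free;
}

static void
maybe_query_datastore (uint32_t priority)
{
  struct GNUNET_TIME_Absolute start;
  int load;

  load = GSF_test_get_load_too_high_ (priority);
  if (GNUNET_YES == load)
    return;                 /* load high: do not process the query at all */
  /* GNUNET_NO: process normally; GNUNET_SYSERR: load low, process "for free" */
  start = GNUNET_TIME_absolute_get ();
  issue_datastore_get (priority, GNUNET_SYSERR == load);
  /* in the real service this call is made once the datastore request
     has actually completed, not immediately after issuing it */
  GSF_update_datastore_delay_ (start);
}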