*/
#define MAX_RESULTS (100 * 1024)
+/**
+ * Collect an insane number of statistics? May cause excessive IPC.
+ */
+#define INSANE_STATISTICS GNUNET_NO
+
/**
* An active request.
*/
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
"Creating request handle for `%s' of type %d\n",
GNUNET_h2s (query), type);
+#if INSANE_STATISTICS
GNUNET_STATISTICS_update (GSF_stats,
gettext_noop ("# Pending requests created"), 1,
GNUNET_NO);
+#endif
extra = 0;
if (GNUNET_BLOCK_TYPE_FS_SBLOCK == type)
extra += sizeof (struct GNUNET_HashCode);
{
refresh_bloomfilter (pr);
}
- GNUNET_CONTAINER_multihashmap_put (pr_map, query, pr,
+ GNUNET_CONTAINER_multihashmap_put (pr_map,
+ &pr->public_data.query, pr,
GNUNET_CONTAINER_MULTIHASHMAPOPTION_MULTIPLE);
if (0 == (options & GSF_PRO_REQUEST_NEVER_EXPIRES))
{
last_transmission, prq->type, prq->data, prq->size);
return GNUNET_YES;
case GNUNET_BLOCK_EVALUATION_OK_DUPLICATE:
+#if INSANE_STATISTICS
GNUNET_STATISTICS_update (GSF_stats,
gettext_noop
("# duplicate replies discarded (bloomfilter)"),
1, GNUNET_NO);
+#endif
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
"Duplicate response, discarding.\n");
return GNUNET_YES; /* duplicate */
pr->qe = NULL;
if (NULL == key)
{
+#if INSANE_STATISTICS
GNUNET_STATISTICS_update (GSF_stats,
gettext_noop
("# Datastore lookups concluded (no results)"),
1, GNUNET_NO);
+#endif
}
if (GNUNET_NO == pr->have_first_uid)
{
{
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG | GNUNET_ERROR_TYPE_BULK,
"No further local responses available.\n");
+#if INSANE_STATISTICS
if ((pr->public_data.type == GNUNET_BLOCK_TYPE_FS_DBLOCK) ||
(pr->public_data.type == GNUNET_BLOCK_TYPE_FS_IBLOCK))
GNUNET_STATISTICS_update (GSF_stats,
gettext_noop
("# requested DBLOCK or IBLOCK not found"), 1,
GNUNET_NO);
+#endif
goto check_error_and_continue;
}
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
pr->warn_task =
GNUNET_SCHEDULER_add_delayed (GNUNET_TIME_UNIT_MINUTES, &warn_delay_task,
pr);
+#if INSANE_STATISTICS
GNUNET_STATISTICS_update (GSF_stats,
gettext_noop ("# Datastore lookups initiated"), 1,
GNUNET_NO);
+#endif
pr->qe =
GNUNET_DATASTORE_get_key (GSF_dsh, pr->local_result_offset++,
&pr->public_data.query,
void
GSF_pending_request_init_ ()
{
- unsigned long long bps;
+ unsigned long long dqs;
if (GNUNET_OK !=
GNUNET_CONFIGURATION_get_value_number (GSF_cfg, "fs",
"fs", "MAX_PENDING_REQUESTS");
}
if (GNUNET_OK !=
- GNUNET_CONFIGURATION_get_value_size (GSF_cfg, "ats", "WAN_QUOTA_OUT",
- &bps))
+ GNUNET_CONFIGURATION_get_value_size (GSF_cfg, "fs", "DATASTORE_QUEUE_SIZE",
+ &dqs))
{
GNUNET_log_config_missing (GNUNET_ERROR_TYPE_INFO,
- "ats", "WAN_QUOTA_OUT");
- bps = 65536;
+ "fs", "DATASTORE_QUEUE_SIZE");
+ dqs = 1024;
}
- /* queue size should be #queries we can have pending and satisfy within
- * a carry interval: */
- datastore_queue_size =
- bps * GNUNET_CONSTANTS_MAX_BANDWIDTH_CARRY_S / DBLOCK_SIZE;
+ datastore_queue_size = (unsigned int) dqs;
active_to_migration =
GNUNET_CONFIGURATION_get_value_yesno (GSF_cfg, "FS", "CONTENT_CACHING");
datastore_put_load = GNUNET_LOAD_value_init (DATASTORE_LOAD_AUTODECLINE);
- pr_map = GNUNET_CONTAINER_multihashmap_create (32 * 1024, GNUNET_NO);
+ pr_map = GNUNET_CONTAINER_multihashmap_create (32 * 1024, GNUNET_YES);
requests_by_expiration_heap =
GNUNET_CONTAINER_heap_create (GNUNET_CONTAINER_HEAP_ORDER_MIN);
}