#include "gnunet_transport_service.h"
#include "gnunet_hello_lib.h"
#include "gnunet_dht_service.h"
+#include "gnunet_statistics_service.h"
#include "dhtlog.h"
#include "dht.h"
+#define PRINT_TABLES GNUNET_NO
+
+#define EXTRA_CHECKS GNUNET_YES
/**
* How many buckets will we allow total.
*/
#define MAX_BUCKETS sizeof (GNUNET_HashCode) * 8
+/**
+ * Should the DHT issue FIND_PEER requests to get better routing tables?
+ */
+#define DO_FIND_PEER GNUNET_YES
+
/**
* What is the maximum number of peers in a given bucket.
*/
*/
#define MINIMUM_PEER_THRESHOLD 20
-#define DHT_DEFAULT_FIND_PEER_REPLICATION 20
+#define DHT_DEFAULT_FIND_PEER_REPLICATION 10
#define DHT_DEFAULT_FIND_PEER_OPTIONS GNUNET_DHT_RO_DEMULTIPLEX_EVERYWHERE
-#define DHT_DEFAULT_FIND_PEER_INTERVAL GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MINUTES, 5)
+#define DHT_MINIMUM_FIND_PEER_INTERVAL GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MINUTES, 1)
+#define DHT_MAXIMUM_FIND_PEER_INTERVAL GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MINUTES, 5)
+
+/**
+ * How many initial requests to send out (in true Kademlia fashion)
+ */
+#define DHT_KADEMLIA_REPLICATION 3
+
+/**
+ * Default frequency for sending malicious get messages
+ */
+#define DEFAULT_MALICIOUS_GET_FREQUENCY 1000 /* Number of milliseconds */
+
+/**
+ * Type for a malicious request, so we can ignore it during testing
+ */
+#define DHT_MALICIOUS_MESSAGE_TYPE 42
+/**
+ * Default frequency for sending malicious put messages
+ */
+#define DEFAULT_MALICIOUS_PUT_FREQUENCY 1000 /* Default is in milliseconds */
+
+#define DHT_DEFAULT_PING_DELAY GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MINUTES, 1)
/**
* Real maximum number of hops, at which point we refuse
};
+
/**
* Per-peer information.
*/
*/
unsigned int distance;
+ /**
+ * Task for scheduling periodic ping messages for this peer.
+ */
+ GNUNET_SCHEDULER_TaskIdentifier ping_task;
+
};
/**
};
+/**
+ * Don't use our routing algorithm, always route
+ * to closest peer; initially send requests to 3
+ * peers.
+ */
+static int strict_kademlia;
+
/**
* Routing option to end routing when closest peer found.
*/
*/
static struct GNUNET_DATACACHE_Handle *datacache;
+/**
+ * Handle for the statistics service.
+ */
+struct GNUNET_STATISTICS_Handle *stats;
+
/**
* The main scheduler to use for the DHT service
*/
*/
static struct PeerBucket k_buckets[MAX_BUCKETS]; /* From 0 to MAX_BUCKETS - 1 */
+/**
+ * Hash map of all known peers, for easy removal from k_buckets on disconnect.
+ */
+static struct GNUNET_CONTAINER_MultiHashMap *all_known_peers;
+
/**
* Maximum size for each bucket.
*/
*/
static unsigned int debug_routes_extended;
+/**
+ * GNUNET_YES or GNUNET_NO, whether or not to act as
+ * a malicious node which drops all messages
+ */
+static unsigned int malicious_dropper;
+
+/**
+ * GNUNET_YES or GNUNET_NO, whether or not to act as
+ * a malicious node which sends out lots of GETS
+ */
+static unsigned int malicious_getter;
+
+/**
+ * GNUNET_YES or GNUNET_NO, whether or not to act as
+ * a malicious node which sends out lots of PUTS
+ */
+static unsigned int malicious_putter;
+
+static unsigned long long malicious_get_frequency;
+
+static unsigned long long malicious_put_frequency;
+
/**
* Forward declaration.
*/
size_t core_transmit_notify (void *cls,
size_t size, void *buf);
+/**
+ * Increment the named statistics counter by one, if the
+ * statistics service handle has been initialized (no-op otherwise).
+ *
+ * @param value name of the statistic to increment
+ */
+static void
+increment_stats(const char *value)
+{
+ if (stats != NULL)
+ {
+ GNUNET_STATISTICS_update (stats, value, 1, GNUNET_NO);
+ }
+}
+
/**
* Try to send another message from our core send list
*/
size_t msize;
size_t psize;
+ increment_stats(STAT_RESULT_FORWARDS);
msize = sizeof (struct GNUNET_DHT_P2PRouteResultMessage) + ntohs(msg->size);
GNUNET_assert(msize <= GNUNET_SERVER_MAX_MESSAGE_SIZE);
psize = sizeof(struct P2PPendingMessage) + msize;
return actual_bucket;
}
+/**
+ * Find which bucket of the routing table currently holds
+ * the given routing table entry (by pointer identity).
+ *
+ * @param peer the peer to look up
+ *
+ * @return the bucket number holding the peer, GNUNET_SYSERR if not found
+ */
+static int
+find_bucket_by_peer(const struct PeerInfo *peer)
+{
+ int bucket;
+ struct PeerInfo *pos;
+
+ /* Fixed off-by-one: was 'bucket < MAX_BUCKETS - 1', which never
+ * searched the last bucket; all other routing table walks in this
+ * file iterate with 'bucket < MAX_BUCKETS'. */
+ for (bucket = lowest_bucket; bucket < MAX_BUCKETS; bucket++)
+ {
+ pos = k_buckets[bucket].head;
+ while (pos != NULL)
+ {
+ if (peer == pos) /* compare entry pointers, not peer identities */
+ return bucket;
+ pos = pos->next;
+ }
+ }
+
+ return GNUNET_SYSERR; /* No such peer. */
+}
+
+#if PRINT_TABLES
+/**
+ * Print the complete routing table for this peer to stderr,
+ * one section per bucket (debug aid, compiled in only when
+ * PRINT_TABLES is GNUNET_YES).
+ */
+static void
+print_routing_table ()
+{
+ int bucket;
+ struct PeerInfo *pos;
+ char char_buf[30000];
+ int char_pos;
+ /* NOTE(review): unbounded sprintf into a fixed 30000 byte buffer;
+ * with enough peers this overflows char_buf. Acceptable only
+ * because this is debug-only code — consider snprintf. */
+ memset(char_buf, 0, sizeof(char_buf));
+ char_pos = 0;
+ char_pos += sprintf(&char_buf[char_pos], "Printing routing table for peer %s\n", my_short_id);
+ //fprintf(stderr, "Printing routing table for peer %s\n", my_short_id);
+ for (bucket = lowest_bucket; bucket < MAX_BUCKETS; bucket++)
+ {
+ pos = k_buckets[bucket].head;
+ char_pos += sprintf(&char_buf[char_pos], "Bucket %d:\n", bucket);
+ //fprintf(stderr, "Bucket %d:\n", bucket);
+ while (pos != NULL)
+ {
+ //fprintf(stderr, "\tPeer %s, best bucket %d, %d bits match\n", GNUNET_i2s(&pos->id), find_bucket(&pos->id.hashPubKey), matching_bits(&pos->id.hashPubKey, &my_identity.hashPubKey));
+ char_pos += sprintf(&char_buf[char_pos], "\tPeer %s, best bucket %d, %d bits match\n", GNUNET_i2s(&pos->id), find_bucket(&pos->id.hashPubKey), matching_bits(&pos->id.hashPubKey, &my_identity.hashPubKey));
+ pos = pos->next;
+ }
+ }
+ fprintf(stderr, "%s", char_buf);
+ fflush(stderr);
+}
+#endif
+
/**
* Find a routing table entry from a peer identity
*
* the peer to
* @param latency the core reported latency of this peer
* @param distance the transport level distance to this peer
+ *
+ * @return the newly added PeerInfo
*/
-static void add_peer(const struct GNUNET_PeerIdentity *peer,
- unsigned int bucket,
- struct GNUNET_TIME_Relative latency,
- unsigned int distance)
+static struct PeerInfo *
+add_peer(const struct GNUNET_PeerIdentity *peer,
+ unsigned int bucket,
+ struct GNUNET_TIME_Relative latency,
+ unsigned int distance)
{
struct PeerInfo *new_peer;
GNUNET_assert(bucket < MAX_BUCKETS);
k_buckets[bucket].tail,
new_peer);
k_buckets[bucket].peers_size++;
+
+ return new_peer;
}
/**
k_buckets[bucket].tail,
peer);
k_buckets[bucket].peers_size--;
+ if ((bucket == lowest_bucket) && (k_buckets[lowest_bucket].peers_size == 0) && (lowest_bucket < MAX_BUCKETS - 1))
+ lowest_bucket++;
}
/**
{
struct P2PPendingMessage *pos;
struct P2PPendingMessage *next;
+#if EXTRA_CHECKS
+ struct PeerInfo *peer_pos;
+
+ peer_pos = k_buckets[bucket].head;
+ while ((peer_pos != NULL) && (peer_pos != peer))
+ peer_pos = peer_pos->next;
+ if (peer_pos == NULL)
+ {
+ GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s: Expected peer `%s' in bucket %d\n", my_short_id, "DHT", GNUNET_i2s(&peer->id), bucket);
+ GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s: Lowest bucket: %d, find_current_bucket: %d, peer resides in bucket: %d\n", my_short_id, "DHT", lowest_bucket, find_current_bucket(&peer->id.hashPubKey), find_bucket_by_peer(peer));
+ }
+ GNUNET_assert(peer_pos != NULL);
+#endif
remove_peer(peer, bucket); /* First remove the peer from its bucket */
if (peer->send_task != GNUNET_SCHEDULER_NO_TASK)
GNUNET_free(pos);
pos = next;
}
+
+ GNUNET_assert(GNUNET_CONTAINER_multihashmap_contains(all_known_peers, &peer->id.hashPubKey));
+ GNUNET_CONTAINER_multihashmap_remove (all_known_peers, &peer->id.hashPubKey, peer);
GNUNET_free(peer);
}
+
+/**
+ * Iterator over hash map entries; moves each peer from the
+ * current lowest bucket into the next lower bucket (called from
+ * enable_next_bucket via multihashmap_iterate).
+ *
+ * @param cls closure (unused)
+ * @param key current key code (unused; peer comes from value)
+ * @param value PeerInfo of the peer to move to new lowest bucket
+ * @return GNUNET_YES if we should continue to
+ * iterate,
+ * GNUNET_NO if not.
+ */
+static int move_lowest_bucket (void *cls,
+ const GNUNET_HashCode * key,
+ void *value)
+{
+ struct PeerInfo *peer = value;
+ int new_bucket;
+
+ /* NOTE(review): remove_peer now bumps lowest_bucket when the lowest
+ * bucket empties; if that fires during this iteration, the
+ * 'lowest_bucket = lowest_bucket - 1' done afterwards in
+ * enable_next_bucket lands on the wrong bucket — verify ordering. */
+ new_bucket = lowest_bucket - 1;
+ remove_peer(peer, lowest_bucket);
+ GNUNET_CONTAINER_DLL_insert_after(k_buckets[new_bucket].head,
+ k_buckets[new_bucket].tail,
+ k_buckets[new_bucket].tail,
+ peer);
+ k_buckets[new_bucket].peers_size++;
+ return GNUNET_YES;
+}
+
+
/**
* The current lowest bucket is full, so change the lowest
* bucket to the next lower down, and move any appropriate
*/
static void enable_next_bucket()
{
- unsigned int new_bucket;
- unsigned int to_remove;
- int i;
- struct PeerInfo *to_remove_list[bucket_size]; /* We either use CPU by making a list, or memory with array. Use memory. */
+ struct GNUNET_CONTAINER_MultiHashMap *to_remove;
struct PeerInfo *pos;
GNUNET_assert(lowest_bucket > 0);
-
+ to_remove = GNUNET_CONTAINER_multihashmap_create(bucket_size);
pos = k_buckets[lowest_bucket].head;
- memset(to_remove_list, 0, sizeof(to_remove_list));
- to_remove = 0;
+
+#if PRINT_TABLES
+ fprintf(stderr, "Printing RT before new bucket\n");
+ print_routing_table();
+#endif
/* Populate the array of peers which should be in the next lowest bucket */
- while (pos->next != NULL)
+ while (pos != NULL)
{
if (find_bucket(&pos->id.hashPubKey) < lowest_bucket)
- {
- to_remove_list[to_remove] = pos;
- to_remove++;
- }
+ GNUNET_CONTAINER_multihashmap_put(to_remove, &pos->id.hashPubKey, pos, GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_ONLY);
pos = pos->next;
}
- new_bucket = lowest_bucket - 1;
/* Remove peers from lowest bucket, insert into next lowest bucket */
- for (i = 0; i < bucket_size; i++)
+ GNUNET_CONTAINER_multihashmap_iterate(to_remove, &move_lowest_bucket, NULL);
+ GNUNET_CONTAINER_multihashmap_destroy(to_remove);
+ lowest_bucket = lowest_bucket - 1;
+#if PRINT_TABLES
+ fprintf(stderr, "Printing RT after new bucket\n");
+ print_routing_table();
+#endif
+}
+
+/**
+ * Function called to send a request out to another peer.
+ * Called both for locally initiated requests and those
+ * received from other peers. Builds a P2P route message
+ * wrapping msg and queues it on the peer's pending list.
+ *
+ * @param cls DHT service closure argument (unused)
+ * @param msg the encapsulated message
+ * @param peer the peer to forward the message to
+ * @param msg_ctx the context of the message (hop count, bloom, etc.);
+ *        bloom and key may be NULL (e.g. for PING messages sent
+ *        with a zeroed context from periodic_ping_task)
+ */
+static void forward_message (void *cls,
+ const struct GNUNET_MessageHeader *msg,
+ struct PeerInfo *peer,
+ struct DHT_MessageContext *msg_ctx)
+{
+ struct GNUNET_DHT_P2PRouteMessage *route_message;
+ struct P2PPendingMessage *pending;
+ size_t msize;
+ size_t psize;
+
+ increment_stats(STAT_ROUTE_FORWARDS);
+ msize = sizeof (struct GNUNET_DHT_P2PRouteMessage) + ntohs(msg->size);
+ GNUNET_assert(msize <= GNUNET_SERVER_MAX_MESSAGE_SIZE);
+ psize = sizeof(struct P2PPendingMessage) + msize;
+ /* Single allocation: pending header followed by the route message */
+ pending = GNUNET_malloc(psize);
+ pending->msg = (struct GNUNET_MessageHeader *)&pending[1];
+ pending->importance = DHT_SEND_PRIORITY;
+ pending->timeout = GNUNET_TIME_relative_get_forever();
+ route_message = (struct GNUNET_DHT_P2PRouteMessage *)pending->msg;
+ route_message->header.size = htons(msize);
+ route_message->header.type = htons(GNUNET_MESSAGE_TYPE_DHT_P2P_ROUTE);
+ route_message->options = htonl(msg_ctx->msg_options);
+ route_message->hop_count = htonl(msg_ctx->hop_count + 1);
+ route_message->network_size = htonl(msg_ctx->network_size);
+ route_message->desired_replication_level = htonl(msg_ctx->replication);
+ route_message->unique_id = GNUNET_htonll(msg_ctx->unique_id);
+ /* bloom/key are optional now; skip serialization when absent */
+ if (msg_ctx->bloom != NULL)
+ GNUNET_assert(GNUNET_OK == GNUNET_CONTAINER_bloomfilter_get_raw_data(msg_ctx->bloom, route_message->bloomfilter, DHT_BLOOM_SIZE));
+ if (msg_ctx->key != NULL)
+ memcpy(&route_message->key, msg_ctx->key, sizeof(GNUNET_HashCode));
+ memcpy(&route_message[1], msg, ntohs(msg->size));
+#if DEBUG_DHT > 1
+ /* NOTE(review): msize is size_t but logged with %d — format mismatch */
+ GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s Adding pending message size %d for peer %s\n", my_short_id, "DHT", msize, GNUNET_i2s(&peer->id));
+#endif
+ GNUNET_CONTAINER_DLL_insert_after(peer->head, peer->tail, peer->tail, pending);
+ /* Kick the send task if it is not already pending for this peer */
+ if (peer->send_task == GNUNET_SCHEDULER_NO_TASK)
+ peer->send_task = GNUNET_SCHEDULER_add_now(sched, &try_core_send, peer);
+}
+
+#if DO_PING
+/**
+ * Task used to send ping messages to peers so that
+ * they don't get disconnected. Re-schedules itself
+ * with DHT_DEFAULT_PING_DELAY unless shutting down.
+ *
+ * @param cls the peer to send a ping message to
+ * @param tc context, reason, etc.
+ */
+static void
+periodic_ping_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+ struct PeerInfo *peer = cls;
+ struct GNUNET_MessageHeader ping_message;
+ struct DHT_MessageContext message_context;
+
+ /* 'reason' is a bitmask; test the shutdown bit rather than exact
+ * equality, which would miss shutdown combined with other flags. */
+ if (0 != (tc->reason & GNUNET_SCHEDULER_REASON_SHUTDOWN))
+ return;
+
+ ping_message.size = htons(sizeof(struct GNUNET_MessageHeader));
+ ping_message.type = htons(GNUNET_MESSAGE_TYPE_DHT_P2P_PING);
+
+ /* Zeroed context: forward_message tolerates NULL bloom/key */
+ memset(&message_context, 0, sizeof(struct DHT_MessageContext));
+#if DEBUG_PING
+ GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Sending periodic ping to %s\n", my_short_id, "DHT", GNUNET_i2s(&peer->id));
+#endif
+ forward_message(NULL, &ping_message, peer, &message_context);
+ peer->ping_task = GNUNET_SCHEDULER_add_delayed(sched, DHT_DEFAULT_PING_DELAY, &periodic_ping_task, peer);
+}
+
+/**
+ * Schedule PING messages for the top X peers in each
+ * bucket of the routing table (so core won't disconnect them!)
+ * Cancels pings for peers beyond bucket_size in a bucket.
+ */
+void schedule_ping_messages()
+{
+ unsigned int bucket;
+ unsigned int count;
+ struct PeerInfo *pos;
+ for (bucket = lowest_bucket; bucket < MAX_BUCKETS; bucket++)
 {
- if (to_remove_list[i] != NULL)
+ pos = k_buckets[bucket].head;
+ count = 0;
+ while (pos != NULL)
 {
- remove_peer(to_remove_list[i], lowest_bucket);
- GNUNET_CONTAINER_DLL_insert_after(k_buckets[new_bucket].head,
- k_buckets[new_bucket].tail,
- k_buckets[new_bucket].tail,
- to_remove_list[i]);
- k_buckets[new_bucket].peers_size++;
+ /* Store the task handle; without the assignment ping_task stays
+ * NO_TASK, so every call re-schedules a duplicate ping and the
+ * cancel branch below can never fire for this peer. */
+ if ((count < bucket_size) && (pos->ping_task == GNUNET_SCHEDULER_NO_TASK))
+ pos->ping_task = GNUNET_SCHEDULER_add_now(sched, &periodic_ping_task, pos);
+ else if ((count >= bucket_size) && (pos->ping_task != GNUNET_SCHEDULER_NO_TASK))
+ {
+ GNUNET_SCHEDULER_cancel(sched, pos->ping_task);
+ pos->ping_task = GNUNET_SCHEDULER_NO_TASK;
+ }
+ pos = pos->next;
+ count++;
 }
- else
- break;
 }
- lowest_bucket = new_bucket;
 }
+#endif
+
/**
* Attempt to add a peer to our k-buckets.
*
* @param peer, the peer identity of the peer being added
*
- * @return GNUNET_YES if the peer was added,
- * GNUNET_NO if not,
- * GNUNET_SYSERR on err (peer is us!)
+ * @return NULL if the peer was not added,
+ * pointer to PeerInfo for new peer otherwise
*/
-static int try_add_peer(const struct GNUNET_PeerIdentity *peer,
- unsigned int bucket,
- struct GNUNET_TIME_Relative latency,
- unsigned int distance)
+static struct PeerInfo *
+try_add_peer(const struct GNUNET_PeerIdentity *peer,
+ unsigned int bucket,
+ struct GNUNET_TIME_Relative latency,
+ unsigned int distance)
{
int peer_bucket;
-
+ struct PeerInfo *new_peer;
peer_bucket = find_current_bucket(&peer->hashPubKey);
if (peer_bucket == GNUNET_SYSERR)
- return GNUNET_SYSERR;
+ return NULL;
GNUNET_assert(peer_bucket >= lowest_bucket);
- if ((k_buckets[peer_bucket].peers_size) < bucket_size)
- {
- add_peer(peer, peer_bucket, latency, distance);
- return GNUNET_YES;
- }
- else if ((peer_bucket == lowest_bucket) && (lowest_bucket > 0))
- {
- enable_next_bucket();
- return try_add_peer(peer, bucket, latency, distance); /* Recurse, if proper bucket still full ping peers */
- }
- else if ((k_buckets[peer_bucket].peers_size) == bucket_size)
- {
- /* TODO: implement ping_oldest_peer */
- //ping_oldest_peer(bucket, peer, latency, distance); /* Find oldest peer, ping it. If no response, remove and add new peer! */
- return GNUNET_NO;
- }
- GNUNET_break(0);
- return GNUNET_NO;
+ new_peer = add_peer(peer, peer_bucket, latency, distance);
+
+ if ((k_buckets[lowest_bucket].peers_size) >= bucket_size)
+ enable_next_bucket();
+#if DO_PING
+ schedule_ping_messages();
+#endif
+ return new_peer;
}
add_pending_message (client, pending_message);
}
+/**
+ * Consider whether or not we would like to have this peer added to
+ * our routing table. Check whether bucket for this peer is full,
+ * if so return negative; if not return positive. Since peers are
+ * only added on CORE level connect, this doesn't actually add the
+ * peer to the routing table.
+ *
+ * @param peer the peer we are considering adding
+ *
+ * @return GNUNET_YES if we want this peer, GNUNET_NO if not (bucket
+ * already full)
+ *
+ * FIXME: Think about making a context for this call so that we can
+ * ping the oldest peer in the current bucket and consider
+ * removing it in lieu of the new peer.
+ */
+static int consider_peer (struct GNUNET_PeerIdentity *peer)
+{
+ int bucket;
+
+ if (GNUNET_CONTAINER_multihashmap_contains(all_known_peers, &peer->hashPubKey))
+ return GNUNET_NO; /* We already know this peer (are connected even!) */
+ bucket = find_current_bucket(&peer->hashPubKey);
+ /* Accept if the bucket has room, or if it is a splittable lowest
+ * bucket (enable_next_bucket can make space by splitting it). */
+ if ((k_buckets[bucket].peers_size < bucket_size) || ((bucket == lowest_bucket) && (lowest_bucket > 0)))
+ return GNUNET_YES;
+
+ return GNUNET_NO;
+}
/**
* Main function that handles whether or not to route a result
struct GNUNET_MessageHeader *msg,
struct DHT_MessageContext *message_context)
{
+ struct GNUNET_PeerIdentity new_peer;
struct DHTQueryRecord *record;
struct DHTRouteSource *pos;
struct PeerInfo *peer_info;
- struct GNUNET_MessageHeader *hello_msg;
+ const struct GNUNET_MessageHeader *hello_msg;
+ increment_stats(STAT_RESULTS);
/**
* If a find peer result message is received and contains a valid
* HELLO for another peer, offer it to the transport service.
- *
- * FIXME: Check whether we need this peer (based on routing table
- * fullness) and only try to connect to it conditionally. This should
- * reduce trying to connect to say (500) peers when the bucket size will
- * discard most of them.
*/
if (ntohs(msg->type) == GNUNET_MESSAGE_TYPE_DHT_FIND_PEER_RESULT)
{
GNUNET_break_op(0);
hello_msg = &msg[1];
- if (ntohs(hello_msg->type) != GNUNET_MESSAGE_TYPE_HELLO)
+ if ((ntohs(hello_msg->type) != GNUNET_MESSAGE_TYPE_HELLO) || (GNUNET_SYSERR == GNUNET_HELLO_get_id((const struct GNUNET_HELLO_Message *)hello_msg, &new_peer)))
{
GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Received non-HELLO message type in find peer result message!\n", my_short_id, "DHT");
GNUNET_break_op(0);
}
- else
+ else /* We have a valid hello, and peer id stored in new_peer */
{
- GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Received HELLO message for another peer, offering to transport!\n", my_short_id, "DHT");
- GNUNET_TRANSPORT_offer_hello(transport_handle, hello_msg);
+ increment_stats(STAT_FIND_PEER_REPLY);
+ if (GNUNET_YES == consider_peer(&new_peer))
+ {
+ GNUNET_TRANSPORT_offer_hello(transport_handle, hello_msg);
+ /* GNUNET_CORE_peer_request_connect(sched, cfg, GNUNET_TIME_UNIT_FOREVER_REL, &new_peer, NULL, NULL); */
+ /* peer_request_connect call causes service to segfault */
+ /* FIXME: Do we need this (peer_request_connect call)??? */
+ }
}
-
}
- record = GNUNET_CONTAINER_multihashmap_get(forward_list.hashmap, message_context->key);
+
+ if (malicious_dropper == GNUNET_YES)
+ record = NULL;
+ else
+ record = GNUNET_CONTAINER_multihashmap_get(forward_list.hashmap, message_context->key);
+
if (record == NULL) /* No record of this message! */
{
#if DEBUG_DHT
message_context->peer, NULL);
}
#endif
+ increment_stats(STAT_RESULTS_TO_CLIENT);
+ if (ntohs(msg->type) == GNUNET_MESSAGE_TYPE_DHT_GET_RESULT)
+ increment_stats(STAT_GET_REPLY);
+
send_reply_to_client(pos->client, msg, message_context->unique_id);
}
else /* Send to peer */
new_msg_ctx->peer = &my_identity;
new_msg_ctx->bloom = GNUNET_CONTAINER_bloomfilter_init (NULL, DHT_BLOOM_SIZE, DHT_BLOOM_K);
new_msg_ctx->hop_count = 0;
+ increment_stats(STAT_GET_RESPONSE_START);
route_result_message(cls, &get_result->header, new_msg_ctx);
GNUNET_free(new_msg_ctx);
//send_reply_to_client (datacache_get_ctx->client, &get_result->header,
get_type = ntohs (get_msg->type);
#if DEBUG_DHT
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- "`%s:%s': Received `%s' request from client, message type %u, key %s, uid %llu\n", my_short_id,
+ "`%s:%s': Received `%s' request, message type %u, key %s, uid %llu\n", my_short_id,
"DHT", "GET", get_type, GNUNET_h2s (message_context->key),
message_context->unique_id);
#endif
-
+ increment_stats(STAT_GETS);
results = 0;
+ if (get_type == DHT_MALICIOUS_MESSAGE_TYPE)
+ return results;
+
if (datacache != NULL)
results =
GNUNET_DATACACHE_get (datacache, message_context->key, get_type,
ntohs (find_msg->size),
sizeof (struct GNUNET_MessageHeader));
#endif
- if ((my_hello == NULL) || (message_context->closest != GNUNET_YES))
+ if (my_hello == NULL)
{
#if DEBUG_DHT
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
new_msg_ctx->peer = &my_identity;
new_msg_ctx->bloom = GNUNET_CONTAINER_bloomfilter_init (NULL, DHT_BLOOM_SIZE, DHT_BLOOM_K);
new_msg_ctx->hop_count = 0;
+ increment_stats(STAT_FIND_PEER_ANSWER);
route_result_message(cls, find_peer_result, new_msg_ctx);
+ GNUNET_free(new_msg_ctx);
+#if DEBUG_DHT_ROUTING
+ if ((debug_routes) && (dhtlog_handle != NULL))
+ {
+ dhtlog_handle->insert_query (NULL, message_context->unique_id, DHTLOG_FIND_PEER,
+ message_context->hop_count, GNUNET_YES, &my_identity,
+ message_context->key);
+ }
+#endif
//send_reply_to_client(message_context->client, find_peer_result, message_context->unique_id);
GNUNET_free(find_peer_result);
}
GNUNET_assert (ntohs (msg->size) >=
sizeof (struct GNUNET_DHT_PutMessage));
+
+
put_msg = (struct GNUNET_DHT_PutMessage *)msg;
put_type = ntohs (put_msg->type);
+
+ if (put_type == DHT_MALICIOUS_MESSAGE_TYPE)
+ return;
+
data_size = ntohs (put_msg->header.size) - sizeof (struct GNUNET_DHT_PutMessage);
#if DEBUG_DHT
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
my_short_id, "DHT", "PUT", put_type, GNUNET_h2s (message_context->key), message_context->unique_id);
#endif
#if DEBUG_DHT_ROUTING
+ if (message_context->hop_count == 0) /* Locally initiated request */
+ {
+ if ((debug_routes) && (dhtlog_handle != NULL))
+ {
+ dhtlog_handle->insert_query (NULL, message_context->unique_id, DHTLOG_PUT,
+ message_context->hop_count, GNUNET_NO, &my_identity,
+ message_context->key);
+ }
+ }
+#endif
+
+ if (message_context->closest != GNUNET_YES)
+ return;
+
+#if DEBUG_DHT_ROUTING
+ if ((debug_routes_extended) && (dhtlog_handle != NULL))
+ {
+ dhtlog_handle->insert_route (NULL, message_context->unique_id, DHTLOG_ROUTE,
+ message_context->hop_count, GNUNET_YES,
+ &my_identity, message_context->key, message_context->peer,
+ NULL);
+ }
if ((debug_routes) && (dhtlog_handle != NULL))
{
}
#endif
+ increment_stats(STAT_PUTS_INSERTED);
if (datacache != NULL)
GNUNET_DATACACHE_put (datacache, message_context->key, data_size,
(char *) &put_msg[1], put_type,
unsigned int target_value;
unsigned int diameter;
+ /**
+ * If we are behaving in strict kademlia mode, send multiple initial requests,
+ * but then only send to 1 or 0 peers.
+ */
+ if (strict_kademlia == GNUNET_YES)
+ {
+ if (hop_count == 0)
+ return DHT_KADEMLIA_REPLICATION;
+ else if (hop_count < MAX_HOPS)
+ return 1;
+ else
+ return 0;
+ }
+
/* FIXME: the smaller we think the network is the more lenient we should be for
* routing right? The estimation below only works if we think we have reasonably
* full routing tables, which for our RR topologies may not be the case!
unsigned int lowest_distance;
unsigned int temp_distance;
int bucket;
+ int count;
lowest_distance = -1;
for (bucket = lowest_bucket; bucket < MAX_BUCKETS; bucket++)
{
pos = k_buckets[bucket].head;
- while (pos != NULL)
+ count = 0;
+ while ((pos != NULL) && (count < bucket_size))
{
temp_distance = distance(&pos->id.hashPubKey, hc);
if (temp_distance <= lowest_distance)
current_closest = pos;
}
pos = pos->next;
+ count++;
}
}
GNUNET_assert(current_closest != NULL);
int bits;
int other_bits;
int bucket_num;
+ int count;
struct PeerInfo *pos;
unsigned int my_distance;
my_distance = distance(&my_identity.hashPubKey, target);
pos = k_buckets[bucket_num].head;
- while (pos != NULL)
+ count = 0;
+ while ((pos != NULL) && (count < bucket_size))
{
other_bits = matching_bits(&pos->id.hashPubKey, target);
if (other_bits > bits)
return GNUNET_NO;
else if (other_bits == bits) /* We match the same number of bits, do distance comparison */
{
- if (distance(&pos->id.hashPubKey, target) < my_distance)
- return GNUNET_NO;
+ return GNUNET_YES;
+ /* FIXME: why not just return GNUNET_YES here? We are certainly close. */
+ /*if (distance(&pos->id.hashPubKey, target) < my_distance)
+ return GNUNET_NO;*/
}
pos = pos->next;
}
{
unsigned int distance;
unsigned int bc;
+ unsigned int count;
struct PeerInfo *pos;
-#if USE_KADEMLIA
- const struct PeerInfo *chosen;
+ struct PeerInfo *chosen;
unsigned long long largest_distance;
-#else
unsigned long long total_distance;
unsigned long long selected;
-#endif
-#if USE_KADEMLIA
- largest_distance = 0;
- chosen = NULL;
- for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
- {
- pos = k_buckets[bc].head;
- while (pos != NULL)
- {
- if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
- {
- distance = inverse_distance (target, &pos->id.hashPubKey);
- if (distance > largest_distance)
- {
- chosen = pos;
- largest_distance = distance;
- }
- }
- pos = pos->next;
- }
- }
+if (strict_kademlia == GNUNET_YES)
+ {
+ largest_distance = 0;
+ chosen = NULL;
+ for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
+ {
+ pos = k_buckets[bc].head;
+ count = 0;
+ while ((pos != NULL) && (count < bucket_size))
+ {
+ if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
+ {
+ distance = inverse_distance (target, &pos->id.hashPubKey);
+ if (distance > largest_distance)
+ {
+ chosen = pos;
+ largest_distance = distance;
+ }
+ }
+ count++;
+ pos = pos->next;
+ }
+ }
- if ((largest_distance > 0) && (chosen != NULL))
- {
- GNUNET_CONTAINER_bloomfilter_add(bloom, &chosen->id.hashPubKey);
- return chosen;
- }
+ if ((largest_distance > 0) && (chosen != NULL))
+ {
+ GNUNET_CONTAINER_bloomfilter_add(bloom, &chosen->id.hashPubKey);
+ return chosen;
+ }
+ else
+ {
+ return NULL;
+ }
+ }
else
- {
- return NULL;
- }
-#else
- /* GNUnet-style */
- total_distance = 0;
- for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
- {
- pos = k_buckets[bc].head;
- while (pos != NULL)
- {
- if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
- total_distance += (unsigned long long)inverse_distance (target, &pos->id.hashPubKey);
+ {
+ /* GNUnet-style */
+ total_distance = 0;
+ for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
+ {
+ pos = k_buckets[bc].head;
+ count = 0;
+ while ((pos != NULL) && (count < bucket_size))
+ {
+ if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
+ total_distance += (unsigned long long)inverse_distance (target, &pos->id.hashPubKey);
#if DEBUG_DHT > 1
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- "`%s:%s': Total distance is %llu, distance from %s to %s is %u\n",
- my_short_id, "DHT", total_distance, GNUNET_i2s(&pos->id), GNUNET_h2s(target) , inverse_distance(target, &pos->id.hashPubKey));
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "`%s:%s': Total distance is %llu, distance from %s to %s is %u\n",
+ my_short_id, "DHT", total_distance, GNUNET_i2s(&pos->id), GNUNET_h2s(target) , inverse_distance(target, &pos->id.hashPubKey));
#endif
- pos = pos->next;
- }
- }
- if (total_distance == 0)
- {
- return NULL;
- }
+ pos = pos->next;
+ count++;
+ }
+ }
+ if (total_distance == 0)
+ {
+ return NULL;
+ }
- selected = GNUNET_CRYPTO_random_u64 (GNUNET_CRYPTO_QUALITY_WEAK, total_distance);
- for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
- {
- pos = k_buckets[bc].head;
- while (pos != NULL)
- {
- if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
- {
- distance = inverse_distance (target, &pos->id.hashPubKey);
- if (distance > selected)
- return pos;
- selected -= distance;
- }
- else
- {
+ selected = GNUNET_CRYPTO_random_u64 (GNUNET_CRYPTO_QUALITY_WEAK, total_distance);
+ for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
+ {
+ pos = k_buckets[bc].head;
+ count = 0;
+ while ((pos != NULL) && (count < bucket_size))
+ {
+ if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
+ {
+ distance = inverse_distance (target, &pos->id.hashPubKey);
+ if (distance > selected)
+ return pos;
+ selected -= distance;
+ }
+ else
+ {
#if DEBUG_DHT
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- "`%s:%s': peer %s matches bloomfilter.\n",
- my_short_id, "DHT", GNUNET_i2s(&pos->id));
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "`%s:%s': peer %s matches bloomfilter.\n",
+ my_short_id, "DHT", GNUNET_i2s(&pos->id));
#endif
- }
- pos = pos->next;
- }
- }
+ }
+ pos = pos->next;
+ count++;
+ }
+ }
#if DEBUG_DHT
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- "`%s:%s': peer %s matches bloomfilter.\n",
- my_short_id, "DHT", GNUNET_i2s(&pos->id));
-#endif
- return NULL;
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "`%s:%s': peer %s matches bloomfilter.\n",
+ my_short_id, "DHT", GNUNET_i2s(&pos->id));
#endif
+ return NULL;
+ }
}
-/**
- * Function called to send a request out to another peer.
- * Called both for locally initiated requests and those
- * received from other peers.
- *
- * @param cls DHT service closure argument
- * @param msg the encapsulated message
- * @param peer the peer to forward the message to
- * @param msg_ctx the context of the message (hop count, bloom, etc.)
- */
-static void forward_message (void *cls,
- const struct GNUNET_MessageHeader *msg,
- struct PeerInfo *peer,
- struct DHT_MessageContext *msg_ctx)
-{
- struct GNUNET_DHT_P2PRouteMessage *route_message;
- struct P2PPendingMessage *pending;
- size_t msize;
- size_t psize;
-
- msize = sizeof (struct GNUNET_DHT_P2PRouteMessage) + ntohs(msg->size);
- GNUNET_assert(msize <= GNUNET_SERVER_MAX_MESSAGE_SIZE);
- psize = sizeof(struct P2PPendingMessage) + msize;
- pending = GNUNET_malloc(psize);
- pending->msg = (struct GNUNET_MessageHeader *)&pending[1];
- pending->importance = DHT_SEND_PRIORITY;
- pending->timeout = GNUNET_TIME_relative_get_forever();
- route_message = (struct GNUNET_DHT_P2PRouteMessage *)pending->msg;
- route_message->header.size = htons(msize);
- route_message->header.type = htons(GNUNET_MESSAGE_TYPE_DHT_P2P_ROUTE);
- route_message->options = htonl(msg_ctx->msg_options);
- route_message->hop_count = htonl(msg_ctx->hop_count + 1);
- route_message->network_size = htonl(msg_ctx->network_size);
- route_message->desired_replication_level = htonl(msg_ctx->replication);
- route_message->unique_id = GNUNET_htonll(msg_ctx->unique_id);
- GNUNET_assert(GNUNET_OK == GNUNET_CONTAINER_bloomfilter_get_raw_data(msg_ctx->bloom, route_message->bloomfilter, DHT_BLOOM_SIZE));
- memcpy(&route_message->key, msg_ctx->key, sizeof(GNUNET_HashCode));
- memcpy(&route_message[1], msg, ntohs(msg->size));
-#if DEBUG_DHT > 1
- GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s Adding pending message size %d for peer %s\n", my_short_id, "DHT", msize, GNUNET_i2s(&peer->id));
-#endif
- GNUNET_CONTAINER_DLL_insert_after(peer->head, peer->tail, peer->tail, pending);
- if (peer->send_task == GNUNET_SCHEDULER_NO_TASK)
- peer->send_task = GNUNET_SCHEDULER_add_now(sched, &try_core_send, peer);
-}
/**
* Task used to remove forwarding entries, either
int ret;
#endif
+ if (malicious_dropper == GNUNET_YES)
+ {
+#if DEBUG_DHT_ROUTING
+ if ((debug_routes_extended) && (dhtlog_handle != NULL))
+ {
+ dhtlog_handle->insert_route (NULL, message_context->unique_id, DHTLOG_ROUTE,
+ message_context->hop_count, GNUNET_SYSERR,
+ &my_identity, message_context->key, message_context->peer,
+ NULL);
+ }
+#endif
+ if (message_context->bloom != NULL)
+ GNUNET_CONTAINER_bloomfilter_free(message_context->bloom);
+ return 0;
+ }
+
+ increment_stats(STAT_ROUTES);
message_context->closest = am_closest_peer(message_context->key);
forward_count = get_forward_count(message_context->hop_count, message_context->replication);
nearest = find_closest_peer(message_context->key);
message_context->bloom = GNUNET_CONTAINER_bloomfilter_init (NULL, DHT_BLOOM_SIZE, DHT_BLOOM_K);
GNUNET_CONTAINER_bloomfilter_add (message_context->bloom, &my_identity.hashPubKey);
- if ((stop_on_closest == GNUNET_YES) && (message_context->closest == GNUNET_YES) && (ntohs(msg->type) == GNUNET_MESSAGE_TYPE_DHT_PUT))
+ if (((stop_on_closest == GNUNET_YES) && (message_context->closest == GNUNET_YES) && (ntohs(msg->type) == GNUNET_MESSAGE_TYPE_DHT_PUT))
+ || ((strict_kademlia == GNUNET_YES) && (message_context->closest == GNUNET_YES)))
forward_count = 0;
#if DEBUG_DHT_ROUTING
if ((handle_dht_get (cls, msg, message_context) > 0) && (stop_on_found == GNUNET_YES))
forward_count = 0;
break;
- case GNUNET_MESSAGE_TYPE_DHT_PUT: /* Check if closest, if so insert data. FIXME: thresholding?*/
- if (message_context->closest == GNUNET_YES)
- {
-#if DEBUG_DHT_ROUTING
- if ((debug_routes_extended) && (dhtlog_handle != NULL))
- {
- dhtlog_handle->insert_route (NULL, message_context->unique_id, DHTLOG_ROUTE,
- message_context->hop_count, GNUNET_YES,
- &my_identity, message_context->key, message_context->peer,
- NULL);
- }
-#endif
- handle_dht_put (cls, msg, message_context);
- }
-#if DEBUG_DHT_ROUTING
- if (message_context->hop_count == 0) /* Locally initiated request */
- {
- if ((debug_routes) && (dhtlog_handle != NULL))
- {
- dhtlog_handle->insert_query (NULL, message_context->unique_id, DHTLOG_PUT,
- message_context->hop_count, GNUNET_NO, &my_identity,
- message_context->key);
- }
- }
-#endif
+ case GNUNET_MESSAGE_TYPE_DHT_PUT: /* Check if closest, if so insert data. FIXME: thresholding to reduce complexity?*/
+ increment_stats(STAT_PUTS);
+ handle_dht_put (cls, msg, message_context);
break;
case GNUNET_MESSAGE_TYPE_DHT_FIND_PEER: /* Check if closest and not started by us, check options, add to requests seen */
- if (0 != memcmp(message_context->peer, &my_identity, sizeof(struct GNUNET_PeerIdentity)))
+ increment_stats(STAT_FIND_PEER);
+ if (((message_context->hop_count > 0) && (0 != memcmp(message_context->peer, &my_identity, sizeof(struct GNUNET_PeerIdentity)))) || (message_context->client != NULL))
{
cache_response (cls, message_context);
if ((message_context->closest == GNUNET_YES) || (message_context->msg_options == GNUNET_DHT_RO_DEMULTIPLEX_EVERYWHERE))
handle_dht_find_peer (cls, msg, message_context);
}
+#if DEBUG_DHT_ROUTING
+ if (message_context->hop_count == 0) /* Locally initiated request */
+ {
+ if ((debug_routes) && (dhtlog_handle != NULL))
+ {
+ dhtlog_handle->insert_dhtkey(NULL, message_context->key);
+ dhtlog_handle->insert_query (NULL, message_context->unique_id, DHTLOG_FIND_PEER,
+ message_context->hop_count, GNUNET_NO, &my_identity,
+ message_context->key);
+ }
+ }
+#endif
break;
default:
GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
"DHT", GNUNET_h2s (message_context->key), message_context->unique_id, GNUNET_i2s(&selected->id), nearest_buf, matching_bits(&nearest->id.hashPubKey, message_context->key), distance(&nearest->id.hashPubKey, message_context->key));
GNUNET_free(nearest_buf);
#endif
- /* FIXME: statistics */
if ((debug_routes_extended) && (dhtlog_handle != NULL))
{
dhtlog_handle->insert_route (NULL, message_context->unique_id, DHTLOG_ROUTE,
return ret;
}
+/**
+ * Task to send a malicious PUT message (random key, bogus type) across
+ * the network, then reschedule itself at malicious_put_frequency.
+ *
+ * @param cls closure for this task (unused)
+ * @param tc the context under which the task is running (unused)
+ */
+static void
+malicious_put_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+  static struct GNUNET_DHT_PutMessage put_message;
+  static struct DHT_MessageContext message_context;
+  static GNUNET_HashCode key;
+  uint32_t random_key;
+
+  /* BUG FIX: header size must be that of the PUT message we are sending,
+   * not of struct GNUNET_DHT_GetMessage (copy-paste from the GET task). */
+  put_message.header.size = htons(sizeof(struct GNUNET_DHT_PutMessage));
+  put_message.header.type = htons(GNUNET_MESSAGE_TYPE_DHT_PUT);
+  put_message.type = htons(DHT_MALICIOUS_MESSAGE_TYPE);
+  put_message.expiration = GNUNET_TIME_absolute_hton(GNUNET_TIME_absolute_get_forever());
+  memset(&message_context, 0, sizeof(struct DHT_MessageContext));
+  message_context.client = NULL;
+  /* Hash a random 32-bit value to obtain a pseudo-uniform key */
+  random_key = GNUNET_CRYPTO_random_u32(GNUNET_CRYPTO_QUALITY_WEAK, (uint32_t)-1);
+  GNUNET_CRYPTO_hash(&random_key, sizeof(uint32_t), &key);
+  message_context.key = &key;
+  message_context.unique_id = GNUNET_ntohll (GNUNET_CRYPTO_random_u64(GNUNET_CRYPTO_QUALITY_WEAK, (uint64_t)-1));
+  message_context.replication = ntohl (DHT_DEFAULT_FIND_PEER_REPLICATION);
+  message_context.msg_options = ntohl (0);
+  message_context.network_size = estimate_diameter();
+  message_context.peer = &my_identity;
+  increment_stats(STAT_PUT_START);
+  GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s Sending malicious PUT message with hash %s\n", my_short_id, "DHT", GNUNET_h2s(&key));
+  route_message(NULL, &put_message.header, &message_context);
+  /* Reschedule to keep flooding at the configured frequency */
+  GNUNET_SCHEDULER_add_delayed(sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, malicious_put_frequency), &malicious_put_task, NULL);
+}
+
+/**
+ * Task to send a malicious GET message (random key, bogus type) across
+ * the network, then reschedule itself at malicious_get_frequency.
+ * (Fixed copy-paste doc: this is the GET task, not the PUT task.)
+ *
+ * @param cls closure for this task (unused)
+ * @param tc the context under which the task is running (unused)
+ */
+static void
+malicious_get_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+  static struct GNUNET_DHT_GetMessage get_message;
+  static struct DHT_MessageContext message_context;
+  static GNUNET_HashCode key;
+  uint32_t random_key;
+
+  get_message.header.size = htons(sizeof(struct GNUNET_DHT_GetMessage));
+  get_message.header.type = htons(GNUNET_MESSAGE_TYPE_DHT_GET);
+  get_message.type = htons(DHT_MALICIOUS_MESSAGE_TYPE);
+  memset(&message_context, 0, sizeof(struct DHT_MessageContext));
+  message_context.client = NULL;
+  /* Hash a random 32-bit value to obtain a pseudo-uniform key */
+  random_key = GNUNET_CRYPTO_random_u32(GNUNET_CRYPTO_QUALITY_WEAK, (uint32_t)-1);
+  GNUNET_CRYPTO_hash(&random_key, sizeof(uint32_t), &key);
+  message_context.key = &key;
+  message_context.unique_id = GNUNET_ntohll (GNUNET_CRYPTO_random_u64(GNUNET_CRYPTO_QUALITY_WEAK, (uint64_t)-1));
+  message_context.replication = ntohl (DHT_DEFAULT_FIND_PEER_REPLICATION);
+  message_context.msg_options = ntohl (0);
+  message_context.network_size = estimate_diameter();
+  message_context.peer = &my_identity;
+  increment_stats(STAT_GET_START);
+  GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s Sending malicious GET message with hash %s\n", my_short_id, "DHT", GNUNET_h2s(&key));
+  route_message(NULL, &get_message.header, &message_context);
+  /* Reschedule to keep flooding at the configured frequency */
+  GNUNET_SCHEDULER_add_delayed(sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, malicious_get_frequency), &malicious_get_task, NULL);
+}
+
/**
* Task to send a find peer message for our own peer identifier
* so that we can find the closest peers in the network to ourselves
struct GNUNET_MessageHeader *find_peer_msg;
struct DHT_MessageContext message_context;
int ret;
+ struct GNUNET_TIME_Relative next_send_time;
if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
return;
+ increment_stats(STAT_FIND_PEER_START);
+
find_peer_msg = GNUNET_malloc(sizeof(struct GNUNET_MessageHeader));
find_peer_msg->size = htons(sizeof(struct GNUNET_MessageHeader));
find_peer_msg->type = htons(GNUNET_MESSAGE_TYPE_DHT_FIND_PEER);
memset(&message_context, 0, sizeof(struct DHT_MessageContext));
message_context.key = &my_identity.hashPubKey;
- message_context.unique_id = GNUNET_ntohll (GNUNET_CRYPTO_random_u64(GNUNET_CRYPTO_QUALITY_WEAK, (uint64_t)-1));
+ message_context.unique_id = GNUNET_ntohll (GNUNET_CRYPTO_random_u64(GNUNET_CRYPTO_QUALITY_STRONG, (uint64_t)-1));
message_context.replication = ntohl (DHT_DEFAULT_FIND_PEER_REPLICATION);
message_context.msg_options = ntohl (DHT_DEFAULT_FIND_PEER_OPTIONS);
message_context.network_size = estimate_diameter();
message_context.peer = &my_identity;
ret = route_message(NULL, find_peer_msg, &message_context);
-
+ GNUNET_free(find_peer_msg);
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
"`%s:%s': Sent `%s' request to %d peers\n", my_short_id, "DHT",
"FIND PEER", ret);
+ next_send_time.value = DHT_MINIMUM_FIND_PEER_INTERVAL.value +
+ GNUNET_CRYPTO_random_u64(GNUNET_CRYPTO_QUALITY_STRONG,
+ DHT_MAXIMUM_FIND_PEER_INTERVAL.value - DHT_MINIMUM_FIND_PEER_INTERVAL.value);
GNUNET_SCHEDULER_add_delayed (sched,
- DHT_DEFAULT_FIND_PEER_INTERVAL,
+ next_send_time,
&send_find_peer_message, NULL);
}
message_context.network_size = estimate_diameter();
message_context.peer = &my_identity;
+ if (ntohs(enc_msg->type) == GNUNET_MESSAGE_TYPE_DHT_GET)
+ increment_stats(STAT_GET_START);
+ else if (ntohs(enc_msg->type) == GNUNET_MESSAGE_TYPE_DHT_PUT)
+ increment_stats(STAT_PUT_START);
+ else if (ntohs(enc_msg->type) == GNUNET_MESSAGE_TYPE_DHT_FIND_PEER)
+ increment_stats(STAT_FIND_PEER_START);
+
route_message(cls, enc_msg, &message_context);
GNUNET_SERVER_receive_done (client, GNUNET_OK);
}
+/**
+ * Handler for any locally received DHT control messages;
+ * currently used to enable malicious behaviors (GET/PUT flooding,
+ * message dropping) for testing.
+ *
+ * @param cls closure for the service
+ * @param client the client we received this message from
+ * @param message the actual message received
+ */
+static void
+handle_dht_control_message (void *cls, struct GNUNET_SERVER_Client *client,
+                            const struct GNUNET_MessageHeader *message)
+{
+  const struct GNUNET_DHT_ControlMessage *dht_control_msg =
+      (const struct GNUNET_DHT_ControlMessage *) message;
+#if DEBUG_DHT
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+              "`%s:%s': Received `%s' request from client, command %d\n", my_short_id, "DHT",
+              "CONTROL", ntohs(dht_control_msg->command));
+#endif
+
+  switch (ntohs(dht_control_msg->command))
+  {
+  case GNUNET_MESSAGE_TYPE_DHT_MALICIOUS_GET:
+    if (ntohs(dht_control_msg->variable) > 0)
+      malicious_get_frequency = ntohs(dht_control_msg->variable);
+    /* Only schedule the task once, even on repeated control messages */
+    if (malicious_getter != GNUNET_YES)
+      GNUNET_SCHEDULER_add_now(sched, &malicious_get_task, NULL);
+    malicious_getter = GNUNET_YES;
+    GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Initiating malicious GET behavior, frequency %d\n", my_short_id, "DHT", malicious_get_frequency);
+    break;
+  case GNUNET_MESSAGE_TYPE_DHT_MALICIOUS_PUT:
+    if (ntohs(dht_control_msg->variable) > 0)
+      malicious_put_frequency = ntohs(dht_control_msg->variable);
+    /* Only schedule the task once, even on repeated control messages */
+    if (malicious_putter != GNUNET_YES)
+      GNUNET_SCHEDULER_add_now(sched, &malicious_put_task, NULL);
+    malicious_putter = GNUNET_YES;
+    GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Initiating malicious PUT behavior, frequency %d\n", my_short_id, "DHT", malicious_put_frequency);
+    break;
+  case GNUNET_MESSAGE_TYPE_DHT_MALICIOUS_DROP:
+    malicious_dropper = GNUNET_YES;
+    GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Initiating malicious DROP behavior\n", my_short_id, "DHT");
+    break;
+  default:
+    /* BUG FIX: format string has three conversion specifiers ("%s:%s ... %d")
+     * but only one argument was passed — undefined behavior. Supply all three. */
+    GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Unknown control command type `%d'!\n", my_short_id, "DHT", ntohs(dht_control_msg->command));
+    break;
+  }
+
+  GNUNET_SERVER_receive_done (client, GNUNET_OK);
+}
+
/**
* Handler for any generic DHT stop messages, calls the appropriate handler
* depending on message type (if processed locally)
struct GNUNET_MessageHeader *enc_msg = (struct GNUNET_MessageHeader *)&incoming[1];
struct DHT_MessageContext *message_context;
+ if (ntohs(enc_msg->type) == GNUNET_MESSAGE_TYPE_DHT_P2P_PING) /* Throw these away. FIXME: Don't throw these away? (reply)*/
+ {
+#if DEBUG_PING
+ GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s Received P2P Ping message.\n", my_short_id, "DHT");
+#endif
+ return GNUNET_YES;
+ }
+
if (ntohs(enc_msg->size) > GNUNET_SERVER_MAX_MESSAGE_SIZE)
{
GNUNET_break_op(0);
GNUNET_break_op(0);
return GNUNET_YES;
}
+
memset(&message_context, 0, sizeof(struct DHT_MessageContext));
message_context.bloom = GNUNET_CONTAINER_bloomfilter_init(incoming->bloomfilter, DHT_BLOOM_SIZE, DHT_BLOOM_K);
GNUNET_assert(message_context.bloom != NULL);
memcpy(my_hello, message, ntohs(message->size));
}
+
/**
* Task run during shutdown.
*
{
int bucket_count;
struct PeerInfo *pos;
-
if (transport_handle != NULL)
{
GNUNET_free_non_null(my_hello);
GNUNET_DATACACHE_destroy (datacache);
}
+ if (stats != NULL)
+ {
+ GNUNET_STATISTICS_destroy (stats, GNUNET_YES);
+ }
+
if (dhtlog_handle != NULL)
GNUNET_DHTLOG_disconnect(dhtlog_handle);
"%s: Core connection initialized, I am peer: %s\n", "dht",
GNUNET_i2s (identity));
#endif
+
/* Copy our identity so we can use it */
memcpy (&my_identity, identity, sizeof (struct GNUNET_PeerIdentity));
+ if (my_short_id != NULL)
+ GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s Receive CORE INIT message but have already been initialized! Did CORE fail?\n", "DHT SERVICE");
my_short_id = GNUNET_strdup(GNUNET_i2s(&my_identity));
/* Set the server to local variable */
coreAPI = server;
static struct GNUNET_SERVER_MessageHandler plugin_handlers[] = {
{&handle_dht_local_route_request, NULL, GNUNET_MESSAGE_TYPE_DHT_LOCAL_ROUTE, 0},
{&handle_dht_local_route_stop, NULL, GNUNET_MESSAGE_TYPE_DHT_LOCAL_ROUTE_STOP, 0},
+ {&handle_dht_control_message, NULL, GNUNET_MESSAGE_TYPE_DHT_CONTROL, 0},
{NULL, NULL, 0, 0}
};
struct GNUNET_TIME_Relative latency,
uint32_t distance)
{
- int ret;
+ struct PeerInfo *ret;
#if DEBUG_DHT
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
"%s:%s Receives core connect message for peer %s distance %d!\n", my_short_id, "dht", GNUNET_i2s(peer), distance);
#endif
+
+ if (GNUNET_YES == GNUNET_CONTAINER_multihashmap_contains(all_known_peers, &peer->hashPubKey))
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "%s:%s Received %s message for peer %s, but already have peer in RT!", my_short_id, "DHT", "CORE CONNECT", GNUNET_i2s(peer));
+ return;
+ }
+
if (datacache != NULL)
GNUNET_DATACACHE_put(datacache, &peer->hashPubKey, sizeof(struct GNUNET_PeerIdentity), (const char *)peer, 0, GNUNET_TIME_absolute_get_forever());
ret = try_add_peer(peer,
find_current_bucket(&peer->hashPubKey),
latency,
distance);
+ if (ret != NULL)
+ {
+ GNUNET_CONTAINER_multihashmap_put(all_known_peers, &peer->hashPubKey, ret, GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_ONLY);
+ }
#if DEBUG_DHT
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- "%s:%s Adding peer to routing list: %s\n", my_short_id, "DHT", ret == GNUNET_YES ? "PEER ADDED" : "NOT ADDED");
+ "%s:%s Adding peer to routing list: %s\n", my_short_id, "DHT", ret == NULL ? "NOT ADDED" : "PEER ADDED");
#endif
}
+/**
+ * Method called whenever a peer disconnects: remove the peer
+ * from our routing table (all_known_peers map and its k-bucket).
+ *
+ * @param cls closure (unused)
+ * @param peer peer identity this notification is about
+ */
+void handle_core_disconnect (void *cls,
+                             const struct
+                             GNUNET_PeerIdentity * peer)
+{
+  struct PeerInfo *to_remove;
+  int current_bucket;
+
+  GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s: Received peer disconnect message for peer `%s' from %s\n", my_short_id, "DHT", GNUNET_i2s(peer), "CORE");
+
+  /* Peers rejected by try_add_peer never entered the map; nothing to do */
+  if (GNUNET_YES != GNUNET_CONTAINER_multihashmap_contains(all_known_peers, &peer->hashPubKey))
+  {
+    GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s: do not have peer `%s' in RT, can't disconnect!\n", my_short_id, "DHT", GNUNET_i2s(peer));
+    return;
+  }
+  /* (removed redundant assert: the contains() check above already guarantees membership) */
+  to_remove = GNUNET_CONTAINER_multihashmap_get(all_known_peers, &peer->hashPubKey);
+  GNUNET_assert(to_remove != NULL);
+  /* Sanity check: the stored PeerInfo must describe the disconnecting peer */
+  GNUNET_assert(0 == memcmp(peer, &to_remove->id, sizeof(struct GNUNET_PeerIdentity)));
+  current_bucket = find_current_bucket(&to_remove->id.hashPubKey);
+  delete_peer(to_remove, current_bucket);
+}
+
/**
* Process dht requests.
*
struct GNUNET_SERVER_Handle *server,
const struct GNUNET_CONFIGURATION_Handle *c)
{
+ int random_seconds;
sched = scheduler;
cfg = c;
datacache = GNUNET_DATACACHE_create (sched, cfg, "dhtcache");
GNUNET_TIME_UNIT_FOREVER_REL,
NULL, /* Closure passed to DHT functionas around? */
&core_init, /* Call core_init once connected */
- &handle_core_connect, /* Don't care about connects */
- NULL, /* FIXME: remove peers on disconnects */
+ &handle_core_connect, /* Handle connects */
+ &handle_core_disconnect, /* remove peers on disconnects */
NULL, /* Do we care about "status" updates? */
NULL, /* Don't want notified about all incoming messages */
GNUNET_NO, /* For header only inbound notification */
lowest_bucket = MAX_BUCKETS - 1;
forward_list.hashmap = GNUNET_CONTAINER_multihashmap_create(MAX_OUTSTANDING_FORWARDS / 10);
forward_list.minHeap = GNUNET_CONTAINER_heap_create(GNUNET_CONTAINER_HEAP_ORDER_MIN);
- /* Scheduled the task to clean up when shutdown is called */
-
+ all_known_peers = GNUNET_CONTAINER_multihashmap_create(MAX_BUCKETS / 8);
+ GNUNET_assert(all_known_peers != NULL);
if (GNUNET_YES == GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht_testing", "mysql_logging"))
{
debug_routes = GNUNET_YES;
}
+ if (GNUNET_YES ==
+ GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
+ "strict_kademlia"))
+ {
+ strict_kademlia = GNUNET_YES;
+ }
+
if (GNUNET_YES ==
GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
"stop_on_closest"))
stop_on_found = GNUNET_YES;
}
+ if (GNUNET_YES ==
+ GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
+ "malicious_getter"))
+ {
+ malicious_getter = GNUNET_YES;
+ if (GNUNET_SYSERR == GNUNET_CONFIGURATION_get_value_number (cfg, "DHT",
+ "MALICIOUS_GET_FREQUENCY",
+ &malicious_get_frequency))
+ malicious_get_frequency = DEFAULT_MALICIOUS_GET_FREQUENCY;
+ }
+
+ if (GNUNET_YES ==
+ GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
+ "malicious_putter"))
+ {
+ malicious_putter = GNUNET_YES;
+ if (GNUNET_SYSERR == GNUNET_CONFIGURATION_get_value_number (cfg, "DHT",
+ "MALICIOUS_PUT_FREQUENCY",
+ &malicious_put_frequency))
+ malicious_put_frequency = DEFAULT_MALICIOUS_PUT_FREQUENCY;
+ }
+
+ if (GNUNET_YES ==
+ GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
+ "malicious_dropper"))
+ {
+ malicious_dropper = GNUNET_YES;
+ }
+
if (GNUNET_YES ==
GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht_testing",
"mysql_logging_extended"))
}
}
+ stats = GNUNET_STATISTICS_create(sched, "dht", cfg);
+
+ if (stats != NULL)
+ {
+ GNUNET_STATISTICS_set(stats, STAT_ROUTES, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_ROUTE_FORWARDS, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_RESULTS, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_RESULTS_TO_CLIENT, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_RESULT_FORWARDS, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_GETS, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_PUTS, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_PUTS_INSERTED, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_FIND_PEER, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_FIND_PEER_START, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_GET_START, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_PUT_START, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_FIND_PEER_REPLY, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_FIND_PEER_ANSWER, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_GET_REPLY, 0, GNUNET_NO);
+ GNUNET_STATISTICS_set(stats, STAT_GET_RESPONSE_START, 0, GNUNET_NO);
+ }
+#if DO_FIND_PEER
+ random_seconds = GNUNET_CRYPTO_random_u32(GNUNET_CRYPTO_QUALITY_WEAK, 180);
GNUNET_SCHEDULER_add_delayed (sched,
- GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_SECONDS, 30),
+ GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_SECONDS, random_seconds),
&send_find_peer_message, NULL);
+#endif
+ /* Scheduled the task to clean up when shutdown is called */
cleanup_task = GNUNET_SCHEDULER_add_delayed (sched,
GNUNET_TIME_UNIT_FOREVER_REL,
&shutdown_task, NULL);