hopefully stop strict kademlia from being a breadth-first search of the network
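In strict Kademlia mode a request should fan out to DHT_KADEMLIA_REPLICATION (3) peers at hop 0 and to a single peer on every hop after that, so a lookup costs on the order of 3 * MAX_HOPS routed messages instead of multiplying the number of forwards at every hop. A minimal sketch of the intended per-hop forwarding rule, using the constants defined in this patch:

    /* sketch of the fan-out rule implemented by get_forward_count() below */
    if (hop_count == 0)
      forward_count = DHT_KADEMLIA_REPLICATION;  /* a few parallel initial requests */
    else if (hop_count < MAX_HOPS)
      forward_count = 1;                         /* then a single next hop */
    else
      forward_count = 0;                         /* refuse to forward past the hop limit */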
[oweals/gnunet.git] / src / dht / gnunet-service-dht.c
index 6fe910ecdb661152362fae18c9641e631ead02f1..fd9694fee4119529fb968af014c769f0b42f51ce 100644 (file)
 #include "gnunet_transport_service.h"
 #include "gnunet_hello_lib.h"
 #include "gnunet_dht_service.h"
+#include "gnunet_statistics_service.h"
 #include "dhtlog.h"
 #include "dht.h"
 
+/**
+ * Should we print out the routing table (for debugging)?
+ */
+#define PRINT_TABLES GNUNET_NO
+
+/**
+ * Should we perform extra (expensive) sanity checks on the routing table?
+ */
+#define EXTRA_CHECKS GNUNET_YES
+
 /**
  * How many buckets will we allow total.
  */
 #define MAX_BUCKETS sizeof (GNUNET_HashCode) * 8
 
+/**
+ * Should the DHT issue FIND_PEER requests to get better routing tables?
+ */
+#define DO_FIND_PEER GNUNET_YES
+
 /**
  * What is the maximum number of peers in a given bucket.
  */
  */
 #define MINIMUM_PEER_THRESHOLD 20
 
+/**
+ * Default desired replication level for FIND_PEER requests.
+ */
+#define DHT_DEFAULT_FIND_PEER_REPLICATION 10
+
+/**
+ * Default routing options for FIND_PEER requests.
+ */
+#define DHT_DEFAULT_FIND_PEER_OPTIONS GNUNET_DHT_RO_DEMULTIPLEX_EVERYWHERE
+
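+/**
+ * Bounds for the randomized delay between periodic FIND_PEER requests;
+ * the actual delay is drawn uniformly from this range (see
+ * send_find_peer_message below).
+ */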
+#define DHT_MINIMUM_FIND_PEER_INTERVAL GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MINUTES, 1)
+#define DHT_MAXIMUM_FIND_PEER_INTERVAL GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MINUTES, 5)
+
+/**
+ * How many initial requests to send out (in true Kademlia fashion)
+ */
+#define DHT_KADEMLIA_REPLICATION 3
+
+/**
+ * Default frequency for sending malicious GET messages
+ */
+#define DEFAULT_MALICIOUS_GET_FREQUENCY 1000 /* In milliseconds */
+
+/**
+ * Type for a malicious request, so we can ignore it during testing
+ */
+#define DHT_MALICIOUS_MESSAGE_TYPE 42
+/**
+ * Default frequency for sending malicious PUT messages
+ */
+#define DEFAULT_MALICIOUS_PUT_FREQUENCY 1000 /* In milliseconds */
+
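+/**
+ * Delay between periodic PING messages to each neighbour in the
+ * routing table (so that CORE does not disconnect them).
+ */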
+#define DHT_DEFAULT_PING_DELAY GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MINUTES, 1)
+
 /**
  * Real maximum number of hops, at which point we refuse
  * to forward the message.
@@ -96,6 +133,7 @@ struct P2PPendingMessage
 
 };
 
+
 /**
  * Per-peer information.
  */
@@ -156,6 +194,11 @@ struct PeerInfo
    */
   unsigned int distance;
 
+  /**
+   * Task for scheduling periodic ping messages for this peer.
+   */
+  GNUNET_SCHEDULER_TaskIdentifier ping_task;
+
 };
 
 /**
@@ -395,6 +438,13 @@ struct DHTResults
 
 };
 
+/**
+ * Don't use our routing algorithm, always route
+ * to closest peer; initially send requests to 3
+ * peers.
+ */
+static int strict_kademlia;
+
 /**
  * Routing option to end routing when closest peer found.
  */
@@ -415,6 +465,11 @@ static struct DHTResults forward_list;
  */
 static struct GNUNET_DATACACHE_Handle *datacache;
 
+/**
+ * Handle for the statistics service.
+ */
+struct GNUNET_STATISTICS_Handle *stats;
+
 /**
  * The main scheduler to use for the DHT service
  */
@@ -466,6 +521,11 @@ static unsigned int lowest_bucket; /* Initially equal to MAX_BUCKETS - 1 */
  */
 static struct PeerBucket k_buckets[MAX_BUCKETS]; /* From 0 to MAX_BUCKETS - 1 */
 
+/**
+ * Hash map of all known peers, for easy removal from k_buckets on disconnect.
+ */
+static struct GNUNET_CONTAINER_MultiHashMap *all_known_peers;
+
 /**
  * Maximum size for each bucket.
  */
@@ -493,6 +553,28 @@ static unsigned int debug_routes;
  */
 static unsigned int debug_routes_extended;
 
+/**
+ * GNUNET_YES or GNUNET_NO, whether or not to act as
+ * a malicious node which drops all messages
+ */
+static unsigned int malicious_dropper;
+
+/**
+ * GNUNET_YES or GNUNET_NO, whether or not to act as
+ * a malicious node which sends out lots of GETS
+ */
+static unsigned int malicious_getter;
+
+/**
+ * GNUNET_YES or GNUNET_NO, whether or not to act as
+ * a malicious node which sends out lots of PUTS
+ */
+static unsigned int malicious_putter;
+
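+/**
+ * Frequency for sending malicious GET messages (in milliseconds).
+ */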
+static unsigned long long malicious_get_frequency;
+
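+/**
+ * Frequency for sending malicious PUT messages (in milliseconds).
+ */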
+static unsigned long long malicious_put_frequency;
+
 /**
  * Forward declaration.
  */
@@ -502,6 +584,15 @@ static size_t send_generic_reply (void *cls, size_t size, void *buf);
 size_t core_transmit_notify (void *cls,
                              size_t size, void *buf);
 
+static void
+increment_stats(const char *value)
+{
+  if (stats != NULL)
+    {
+      GNUNET_STATISTICS_update (stats, value, 1, GNUNET_NO);
+    }
+}
+
 /**
  *  Try to send another message from our core send list
  */
@@ -556,6 +647,7 @@ static void forward_result_message (void *cls,
   size_t msize;
   size_t psize;
 
+  increment_stats(STAT_RESULT_FORWARDS);
   msize = sizeof (struct GNUNET_DHT_P2PRouteResultMessage) + ntohs(msg->size);
   GNUNET_assert(msize <= GNUNET_SERVER_MAX_MESSAGE_SIZE);
   psize = sizeof(struct P2PPendingMessage) + msize;
@@ -565,7 +657,7 @@ static void forward_result_message (void *cls,
   pending->timeout = GNUNET_TIME_relative_get_forever();
   result_message = (struct GNUNET_DHT_P2PRouteResultMessage *)pending->msg;
   result_message->header.size = htons(msize);
-  result_message->header.type = htons(GNUNET_MESSAGE_TYPE_P2P_DHT_ROUTE_RESULT);
+  result_message->header.type = htons(GNUNET_MESSAGE_TYPE_DHT_P2P_ROUTE_RESULT);
   result_message->options = htonl(msg_ctx->msg_options);
   result_message->hop_count = htonl(msg_ctx->hop_count + 1);
   GNUNET_assert(GNUNET_OK == GNUNET_CONTAINER_bloomfilter_get_raw_data(msg_ctx->bloom, result_message->bloomfilter, DHT_BLOOM_SIZE));
@@ -789,6 +881,65 @@ static int find_current_bucket(const GNUNET_HashCode *hc)
     return actual_bucket;
 }
 
+/**
+ * Find the bucket in which the given peer currently resides
+ *
+ * @param peer the peer to look up
+ *
+ * @return the bucket number holding the peer, GNUNET_SYSERR if not found
+ */
+static int
+find_bucket_by_peer(const struct PeerInfo *peer)
+{
+  int bucket;
+  struct PeerInfo *pos;
+
+  for (bucket = lowest_bucket; bucket < MAX_BUCKETS; bucket++)
+    {
+      pos = k_buckets[bucket].head;
+      while (pos != NULL)
+        {
+          if (peer == pos)
+            return bucket;
+          pos = pos->next;
+        }
+    }
+
+  return GNUNET_SYSERR; /* No such peer. */
+}
+
+#if PRINT_TABLES
+/**
+ * Print the complete routing table for this peer.
+ */
+static void
+print_routing_table ()
+{
+  int bucket;
+  struct PeerInfo *pos;
+  char char_buf[30000];
+  int char_pos;
+  memset(char_buf, 0, sizeof(char_buf));
+  char_pos = 0;
+  char_pos += sprintf(&char_buf[char_pos], "Printing routing table for peer %s\n", my_short_id);
+  //fprintf(stderr, "Printing routing table for peer %s\n", my_short_id);
+  for (bucket = lowest_bucket; bucket < MAX_BUCKETS; bucket++)
+    {
+      pos = k_buckets[bucket].head;
+      char_pos += sprintf(&char_buf[char_pos], "Bucket %d:\n", bucket);
+      //fprintf(stderr, "Bucket %d:\n", bucket);
+      while (pos != NULL)
+        {
+          //fprintf(stderr, "\tPeer %s, best bucket %d, %d bits match\n", GNUNET_i2s(&pos->id), find_bucket(&pos->id.hashPubKey), matching_bits(&pos->id.hashPubKey, &my_identity.hashPubKey));
+          char_pos += sprintf(&char_buf[char_pos], "\tPeer %s, best bucket %d, %d bits match\n", GNUNET_i2s(&pos->id), find_bucket(&pos->id.hashPubKey), matching_bits(&pos->id.hashPubKey, &my_identity.hashPubKey));
+          pos = pos->next;
+        }
+    }
+  fprintf(stderr, "%s", char_buf);
+  fflush(stderr);
+}
+#endif
+
 /**
  * Find a routing table entry from a peer identity
  *
@@ -825,11 +976,14 @@ find_peer_by_id(const struct GNUNET_PeerIdentity *peer)
  *        the peer to
  * @param latency the core reported latency of this peer
  * @param distance the transport level distance to this peer
+ *
+ * @return the newly added PeerInfo
  */
-static void add_peer(const struct GNUNET_PeerIdentity *peer,
-                     unsigned int bucket,
-                     struct GNUNET_TIME_Relative latency,
-                     unsigned int distance)
+static struct PeerInfo *
+add_peer(const struct GNUNET_PeerIdentity *peer,
+         unsigned int bucket,
+         struct GNUNET_TIME_Relative latency,
+         unsigned int distance)
 {
   struct PeerInfo *new_peer;
   GNUNET_assert(bucket < MAX_BUCKETS);
@@ -844,6 +998,8 @@ static void add_peer(const struct GNUNET_PeerIdentity *peer,
                                     k_buckets[bucket].tail,
                                     new_peer);
   k_buckets[bucket].peers_size++;
+
+  return new_peer;
 }
 
 /**
@@ -864,6 +1020,8 @@ static void remove_peer (struct PeerInfo *peer,
                               k_buckets[bucket].tail,
                               peer);
   k_buckets[bucket].peers_size--;
+  if ((bucket == lowest_bucket) && (k_buckets[lowest_bucket].peers_size == 0) && (lowest_bucket < MAX_BUCKETS - 1))
+    lowest_bucket++;
 }
 
 /**
@@ -878,6 +1036,19 @@ static void delete_peer (struct PeerInfo *peer,
 {
   struct P2PPendingMessage *pos;
   struct P2PPendingMessage *next;
+#if EXTRA_CHECKS
+  struct PeerInfo *peer_pos;
+
+  peer_pos = k_buckets[bucket].head;
+  while ((peer_pos != NULL) && (peer_pos != peer))
+    peer_pos = peer_pos->next;
+  if (peer_pos == NULL)
+    {
+      GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s: Expected peer `%s' in bucket %d\n", my_short_id, "DHT", GNUNET_i2s(&peer->id), bucket);
+      GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s: Lowest bucket: %d, find_current_bucket: %d, peer resides in bucket: %d\n", my_short_id, "DHT", lowest_bucket, find_current_bucket(&peer->id.hashPubKey), find_bucket_by_peer(peer));
+    }
+  GNUNET_assert(peer_pos != NULL);
+#endif
   remove_peer(peer, bucket); /* First remove the peer from its bucket */
 
   if (peer->send_task != GNUNET_SCHEDULER_NO_TASK)
@@ -892,9 +1063,41 @@ static void delete_peer (struct PeerInfo *peer,
       GNUNET_free(pos);
       pos = next;
     }
+
+  GNUNET_assert(GNUNET_CONTAINER_multihashmap_contains(all_known_peers, &peer->id.hashPubKey));
+  GNUNET_CONTAINER_multihashmap_remove (all_known_peers, &peer->id.hashPubKey, peer);
   GNUNET_free(peer);
 }
 
+
+/**
+ * Iterator over hash map entries.
+ *
+ * @param cls closure
+ * @param key current key code
+ * @param value PeerInfo of the peer to move to new lowest bucket
+ * @return GNUNET_YES if we should continue to
+ *         iterate,
+ *         GNUNET_NO if not.
+ */
+static int move_lowest_bucket (void *cls,
+                               const GNUNET_HashCode * key,
+                               void *value)
+{
+  struct PeerInfo *peer = value;
+  int new_bucket;
+
+  new_bucket = lowest_bucket - 1;
+  remove_peer(peer, lowest_bucket);
+  GNUNET_CONTAINER_DLL_insert_after(k_buckets[new_bucket].head,
+                                    k_buckets[new_bucket].tail,
+                                    k_buckets[new_bucket].tail,
+                                    peer);
+  k_buckets[new_bucket].peers_size++;
+  return GNUNET_YES;
+}
+
+
 /**
  * The current lowest bucket is full, so change the lowest
  * bucket to the next lower down, and move any appropriate
@@ -902,84 +1105,170 @@ static void delete_peer (struct PeerInfo *peer,
  */
 static void enable_next_bucket()
 {
-  unsigned int new_bucket;
-  unsigned int to_remove;
-  int i;
-  struct PeerInfo *to_remove_list[bucket_size]; /* We either use CPU by making a list, or memory with array.  Use memory. */
+  struct GNUNET_CONTAINER_MultiHashMap *to_remove;
   struct PeerInfo *pos;
   GNUNET_assert(lowest_bucket > 0);
-
+  to_remove = GNUNET_CONTAINER_multihashmap_create(bucket_size);
   pos = k_buckets[lowest_bucket].head;
-  memset(to_remove_list, 0, sizeof(to_remove_list));
-  to_remove = 0;
+
+#if PRINT_TABLES
+  fprintf(stderr, "Printing RT before new bucket\n");
+  print_routing_table();
+#endif
   /* Populate the array of peers which should be in the next lowest bucket */
-  while (pos->next != NULL)
+  while (pos != NULL)
     {
       if (find_bucket(&pos->id.hashPubKey) < lowest_bucket)
-        {
-          to_remove_list[to_remove] = pos;
-          to_remove++;
-        }
+        GNUNET_CONTAINER_multihashmap_put(to_remove, &pos->id.hashPubKey, pos, GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_ONLY);
       pos = pos->next;
     }
-  new_bucket = lowest_bucket - 1;
 
   /* Remove peers from lowest bucket, insert into next lowest bucket */
-  for (i = 0; i < bucket_size; i++)
+  GNUNET_CONTAINER_multihashmap_iterate(to_remove, &move_lowest_bucket, NULL);
+  GNUNET_CONTAINER_multihashmap_destroy(to_remove);
+  lowest_bucket = lowest_bucket - 1;
+#if PRINT_TABLES
+  fprintf(stderr, "Printing RT after new bucket\n");
+  print_routing_table();
+#endif
+}
+
+/**
+ * Function called to send a request out to another peer.
+ * Called both for locally initiated requests and those
+ * received from other peers.
+ *
+ * @param cls DHT service closure argument (unused)
+ * @param msg the encapsulated message
+ * @param peer the peer to forward the message to
+ * @param msg_ctx the context of the message (hop count, bloom, etc.)
+ */
+static void forward_message (void *cls,
+                             const struct GNUNET_MessageHeader *msg,
+                             struct PeerInfo *peer,
+                             struct DHT_MessageContext *msg_ctx)
+{
+  struct GNUNET_DHT_P2PRouteMessage *route_message;
+  struct P2PPendingMessage *pending;
+  size_t msize;
+  size_t psize;
+
+  increment_stats(STAT_ROUTE_FORWARDS);
+  msize = sizeof (struct GNUNET_DHT_P2PRouteMessage) + ntohs(msg->size);
+  GNUNET_assert(msize <= GNUNET_SERVER_MAX_MESSAGE_SIZE);
+  psize = sizeof(struct P2PPendingMessage) + msize;
+  pending = GNUNET_malloc(psize);
+  pending->msg = (struct GNUNET_MessageHeader *)&pending[1];
+  pending->importance = DHT_SEND_PRIORITY;
+  pending->timeout = GNUNET_TIME_relative_get_forever();
+  route_message = (struct GNUNET_DHT_P2PRouteMessage *)pending->msg;
+  route_message->header.size = htons(msize);
+  route_message->header.type = htons(GNUNET_MESSAGE_TYPE_DHT_P2P_ROUTE);
+  route_message->options = htonl(msg_ctx->msg_options);
+  route_message->hop_count = htonl(msg_ctx->hop_count + 1);
+  route_message->network_size = htonl(msg_ctx->network_size);
+  route_message->desired_replication_level = htonl(msg_ctx->replication);
+  route_message->unique_id = GNUNET_htonll(msg_ctx->unique_id);
+  if (msg_ctx->bloom != NULL)
+    GNUNET_assert(GNUNET_OK == GNUNET_CONTAINER_bloomfilter_get_raw_data(msg_ctx->bloom, route_message->bloomfilter, DHT_BLOOM_SIZE));
+  if (msg_ctx->key != NULL)
+    memcpy(&route_message->key, msg_ctx->key, sizeof(GNUNET_HashCode));
+  memcpy(&route_message[1], msg, ntohs(msg->size));
+#if DEBUG_DHT > 1
+  GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s Adding pending message size %d for peer %s\n", my_short_id, "DHT", msize, GNUNET_i2s(&peer->id));
+#endif
+  GNUNET_CONTAINER_DLL_insert_after(peer->head, peer->tail, peer->tail, pending);
+  if (peer->send_task == GNUNET_SCHEDULER_NO_TASK)
+    peer->send_task = GNUNET_SCHEDULER_add_now(sched, &try_core_send, peer);
+}
+
+#if DO_PING
+/**
+ * Task used to send ping messages to peers so that
+ * they don't get disconnected.
+ *
+ * @param cls the peer to send a ping message to
+ * @param tc context, reason, etc.
+ */
+static void
+periodic_ping_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+  struct PeerInfo *peer = cls;
+  struct GNUNET_MessageHeader ping_message;
+  struct DHT_MessageContext message_context;
+
+  if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+    return;
+
+  ping_message.size = htons(sizeof(struct GNUNET_MessageHeader));
+  ping_message.type = htons(GNUNET_MESSAGE_TYPE_DHT_P2P_PING);
+
+  memset(&message_context, 0, sizeof(struct DHT_MessageContext));
+#if DEBUG_PING
+  GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Sending periodic ping to %s\n", my_short_id, "DHT", GNUNET_i2s(&peer->id));
+#endif
+  forward_message(NULL, &ping_message, peer, &message_context);
+  peer->ping_task = GNUNET_SCHEDULER_add_delayed(sched, DHT_DEFAULT_PING_DELAY, &periodic_ping_task, peer);
+}
+
+/**
+ * Schedule PING messages for the top X peers in each
+ * bucket of the routing table (so core won't disconnect them!)
+ */
+void schedule_ping_messages()
+{
+  unsigned int bucket;
+  unsigned int count;
+  struct PeerInfo *pos;
+  for (bucket = lowest_bucket; bucket < MAX_BUCKETS; bucket++)
     {
-      if (to_remove_list[i] != NULL)
+      pos = k_buckets[bucket].head;
+      count = 0;
+      while (pos != NULL)
         {
-          remove_peer(to_remove_list[i], lowest_bucket);
-          GNUNET_CONTAINER_DLL_insert_after(k_buckets[new_bucket].head,
-                                            k_buckets[new_bucket].tail,
-                                            k_buckets[new_bucket].tail,
-                                            to_remove_list[i]);
-          k_buckets[new_bucket].peers_size++;
+          if ((count < bucket_size) && (pos->ping_task == GNUNET_SCHEDULER_NO_TASK))
+            pos->ping_task = GNUNET_SCHEDULER_add_now(sched, &periodic_ping_task, pos);
+          else if ((count >= bucket_size) && (pos->ping_task != GNUNET_SCHEDULER_NO_TASK))
+            {
+              GNUNET_SCHEDULER_cancel(sched, pos->ping_task);
+              pos->ping_task = GNUNET_SCHEDULER_NO_TASK;
+            }
+          pos = pos->next;
+          count++;
         }
-      else
-        break;
     }
-  lowest_bucket = new_bucket;
 }
+#endif
+
 /**
  * Attempt to add a peer to our k-buckets.
  *
  * @param peer, the peer identity of the peer being added
  *
- * @return GNUNET_YES if the peer was added,
- *         GNUNET_NO if not,
- *         GNUNET_SYSERR on err (peer is us!)
+ * @return NULL if the peer was not added,
+ *         pointer to PeerInfo for new peer otherwise
  */
-static int try_add_peer(const struct GNUNET_PeerIdentity *peer,
-                        unsigned int bucket,
-                        struct GNUNET_TIME_Relative latency,
-                        unsigned int distance)
+static struct PeerInfo *
+try_add_peer(const struct GNUNET_PeerIdentity *peer,
+             unsigned int bucket,
+             struct GNUNET_TIME_Relative latency,
+             unsigned int distance)
 {
   int peer_bucket;
-
+  struct PeerInfo *new_peer;
   peer_bucket = find_current_bucket(&peer->hashPubKey);
   if (peer_bucket == GNUNET_SYSERR)
-    return GNUNET_SYSERR;
+    return NULL;
 
   GNUNET_assert(peer_bucket >= lowest_bucket);
-  if ((k_buckets[peer_bucket].peers_size) < bucket_size)
-    {
-      add_peer(peer, peer_bucket, latency, distance);
-      return GNUNET_YES;
-    }
-  else if ((peer_bucket == lowest_bucket) && (lowest_bucket > 0))
-    {
-      enable_next_bucket();
-      return try_add_peer(peer, bucket, latency, distance); /* Recurse, if proper bucket still full ping peers */
-    }
-  else if ((k_buckets[peer_bucket].peers_size) == bucket_size)
-    {
-      /* TODO: implement ping_oldest_peer */
-      //ping_oldest_peer(bucket, peer, latency, distance); /* Find oldest peer, ping it.  If no response, remove and add new peer! */
-      return GNUNET_NO;
-    }
-  GNUNET_break(0);
-  return GNUNET_NO;
+  new_peer = add_peer(peer, peer_bucket, latency, distance);
+
+  if ((k_buckets[lowest_bucket].peers_size) >= bucket_size)
+    enable_next_bucket();
+#if DO_PING
+  schedule_ping_messages();
+#endif
+  return new_peer;
 }
 
 
@@ -1064,6 +1353,8 @@ add_pending_message (struct ClientList *client,
 }
 
 
+
+
 /**
  * Called when a reply needs to be sent to a client, as
  * a result it found to a GET or FIND PEER request.
@@ -1096,7 +1387,7 @@ send_reply_to_client (struct ClientList *client,
   pending_message = GNUNET_malloc (sizeof (struct PendingMessage) + tsize);
   pending_message->msg = (struct GNUNET_MessageHeader *)&pending_message[1];
   reply = (struct GNUNET_DHT_RouteResultMessage *)&pending_message[1];
-  reply->header.type = htons (GNUNET_MESSAGE_TYPE_LOCAL_DHT_ROUTE_RESULT);
+  reply->header.type = htons (GNUNET_MESSAGE_TYPE_DHT_LOCAL_ROUTE_RESULT);
   reply->header.size = htons (tsize);
   reply->unique_id = GNUNET_htonll (uid);
   memcpy (&reply[1], message, msize);
@@ -1104,6 +1395,34 @@ send_reply_to_client (struct ClientList *client,
   add_pending_message (client, pending_message);
 }
 
+/**
+ * Consider whether or not we would like to have this peer added to
+ * our routing table.  Check whether the bucket for this peer is full;
+ * if so return GNUNET_NO, if not return GNUNET_YES.  Since peers are
+ * only added on CORE level connect, this doesn't actually add the
+ * peer to the routing table.
+ *
+ * @param peer the peer we are considering adding
+ *
+ * @return GNUNET_YES if we want this peer, GNUNET_NO if not (bucket
+ *         already full)
+ *
+ * FIXME: Think about making a context for this call so that we can
+ *        ping the oldest peer in the current bucket and consider
+ *        removing it in lieu of the new peer.
+ */
+static int consider_peer (struct GNUNET_PeerIdentity *peer)
+{
+  int bucket;
+
+  if (GNUNET_CONTAINER_multihashmap_contains(all_known_peers, &peer->hashPubKey))
+    return GNUNET_NO; /* We already know this peer (are connected even!) */
+  bucket = find_current_bucket(&peer->hashPubKey);
+  if ((k_buckets[bucket].peers_size < bucket_size) || ((bucket == lowest_bucket) && (lowest_bucket > 0)))
+    return GNUNET_YES;
+
+  return GNUNET_NO;
+}
 
 /**
  * Main function that handles whether or not to route a result
@@ -1117,11 +1436,46 @@ static int route_result_message(void *cls,
                                 struct GNUNET_MessageHeader *msg,
                                 struct DHT_MessageContext *message_context)
 {
+  struct GNUNET_PeerIdentity new_peer;
   struct DHTQueryRecord *record;
   struct DHTRouteSource *pos;
   struct PeerInfo *peer_info;
+  const struct GNUNET_MessageHeader *hello_msg;
+
+  increment_stats(STAT_RESULTS);
+  /**
+   * If a find peer result message is received and contains a valid
+   * HELLO for another peer, offer it to the transport service.
+   */
+  if (ntohs(msg->type) == GNUNET_MESSAGE_TYPE_DHT_FIND_PEER_RESULT)
+    {
+      if (ntohs(msg->size) <= sizeof(struct GNUNET_MessageHeader))
+        GNUNET_break_op(0);
+
+      hello_msg = &msg[1];
+      if ((ntohs(hello_msg->type) != GNUNET_MESSAGE_TYPE_HELLO) || (GNUNET_SYSERR == GNUNET_HELLO_get_id((const struct GNUNET_HELLO_Message *)hello_msg, &new_peer)))
+      {
+        GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Received non-HELLO message type in find peer result message!\n", my_short_id, "DHT");
+        GNUNET_break_op(0);
+      }
+      else /* We have a valid hello, and peer id stored in new_peer */
+      {
+        increment_stats(STAT_FIND_PEER_REPLY);
+        if (GNUNET_YES == consider_peer(&new_peer))
+        {
+          GNUNET_TRANSPORT_offer_hello(transport_handle, hello_msg);
+          /* GNUNET_CORE_peer_request_connect(sched, cfg, GNUNET_TIME_UNIT_FOREVER_REL, &new_peer, NULL, NULL); */
+          /* peer_request_connect call causes service to segfault */
+          /* FIXME: Do we need this (peer_request_connect call)??? */
+        }
+      }
+    }
+
+  if (malicious_dropper == GNUNET_YES)
+    record = NULL;
+  else
+    record = GNUNET_CONTAINER_multihashmap_get(forward_list.hashmap, message_context->key);
 
-  record = GNUNET_CONTAINER_multihashmap_get(forward_list.hashmap, message_context->key);
   if (record == NULL) /* No record of this message! */
     {
 #if DEBUG_DHT
@@ -1131,13 +1485,6 @@ static int route_result_message(void *cls,
 #endif
 #if DEBUG_DHT_ROUTING
 
-      /*if ((debug_routes) && (dhtlog_handle != NULL))
-        {
-          dhtlog_handle->insert_query (NULL, message_context->unique_id, DHTLOG_RESULT,
-                                       message_context->hop_count, GNUNET_SYSERR,
-                                       &my_identity, message_context->key);
-        }*/
-
       if ((debug_routes_extended) && (dhtlog_handle != NULL))
         {
           dhtlog_handle->insert_route (NULL,
@@ -1161,7 +1508,7 @@ static int route_result_message(void *cls,
   pos = record->head;
   while (pos != NULL)
     {
-      if (0 == memcmp(&pos->source, &my_identity, sizeof(struct GNUNET_PeerIdentity))) /* Local client initiated request! */
+      if (0 == memcmp(&pos->source, &my_identity, sizeof(struct GNUNET_PeerIdentity))) /* Local client (or DHT) initiated request! */
         {
 #if DEBUG_DHT
           GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
@@ -1169,14 +1516,6 @@ static int route_result_message(void *cls,
                       "DHT", GNUNET_h2s (message_context->key), message_context->unique_id);
 #endif
 #if DEBUG_DHT_ROUTING
-          /*
-          if ((debug_routes) && (dhtlog_handle != NULL))
-            {
-              dhtlog_handle->insert_query (NULL, message_context->unique_id, DHTLOG_RESULT,
-                                           message_context->hop_count, GNUNET_YES,
-                                           &my_identity, message_context->key);
-            }*/
-
           if ((debug_routes_extended) && (dhtlog_handle != NULL))
             {
               dhtlog_handle->insert_route (NULL, message_context->unique_id, DHTLOG_RESULT,
@@ -1185,6 +1524,10 @@ static int route_result_message(void *cls,
                                            message_context->peer, NULL);
             }
 #endif
+          increment_stats(STAT_RESULTS_TO_CLIENT);
+          if (ntohs(msg->type) == GNUNET_MESSAGE_TYPE_DHT_GET_RESULT)
+            increment_stats(STAT_GET_REPLY);
+
           send_reply_to_client(pos->client, msg, message_context->unique_id);
         }
       else /* Send to peer */
@@ -1273,6 +1616,7 @@ datacache_get_iterator (void *cls,
   new_msg_ctx->peer = &my_identity;
   new_msg_ctx->bloom = GNUNET_CONTAINER_bloomfilter_init (NULL, DHT_BLOOM_SIZE, DHT_BLOOM_K);
   new_msg_ctx->hop_count = 0;
+  increment_stats(STAT_GET_RESPONSE_START);
   route_result_message(cls, &get_result->header, new_msg_ctx);
   GNUNET_free(new_msg_ctx);
   //send_reply_to_client (datacache_get_ctx->client, &get_result->header,
@@ -1311,12 +1655,15 @@ handle_dht_get (void *cls,
   get_type = ntohs (get_msg->type);
 #if DEBUG_DHT
   GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
-              "`%s:%s': Received `%s' request from client, message type %u, key %s, uid %llu\n", my_short_id,
+              "`%s:%s': Received `%s' request, message type %u, key %s, uid %llu\n", my_short_id,
               "DHT", "GET", get_type, GNUNET_h2s (message_context->key),
               message_context->unique_id);
 #endif
-
+  increment_stats(STAT_GETS);
   results = 0;
+  if (get_type == DHT_MALICIOUS_MESSAGE_TYPE)
+    return results;
+
   if (datacache != NULL)
     results =
       GNUNET_DATACACHE_get (datacache, message_context->key, get_type,
@@ -1388,7 +1735,7 @@ handle_dht_find_peer (void *cls,
               ntohs (find_msg->size),
               sizeof (struct GNUNET_MessageHeader));
 #endif
-  if ((my_hello == NULL) || (message_context->closest != GNUNET_YES))
+  if (my_hello == NULL)
   {
 #if DEBUG_DHT
     GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
@@ -1406,22 +1753,32 @@ handle_dht_find_peer (void *cls,
       GNUNET_break_op (0);
       return;
     }
+
   find_peer_result = GNUNET_malloc (tsize);
   find_peer_result->type = htons (GNUNET_MESSAGE_TYPE_DHT_FIND_PEER_RESULT);
   find_peer_result->size = htons (tsize);
   memcpy (&find_peer_result[1], my_hello, hello_size);
-#if DEBUG_DHT_HELLO
-    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
-                "`%s': Sending hello size %d to client.\n",
-                "DHT", hello_size);
-#endif
+
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+              "`%s': Sending hello size %d to requesting peer.\n",
+              "DHT", hello_size);
 
   new_msg_ctx = GNUNET_malloc(sizeof(struct DHT_MessageContext));
   memcpy(new_msg_ctx, message_context, sizeof(struct DHT_MessageContext));
   new_msg_ctx->peer = &my_identity;
   new_msg_ctx->bloom = GNUNET_CONTAINER_bloomfilter_init (NULL, DHT_BLOOM_SIZE, DHT_BLOOM_K);
   new_msg_ctx->hop_count = 0;
+  increment_stats(STAT_FIND_PEER_ANSWER);
   route_result_message(cls, find_peer_result, new_msg_ctx);
+  GNUNET_free(new_msg_ctx);
+#if DEBUG_DHT_ROUTING
+  if ((debug_routes) && (dhtlog_handle != NULL))
+    {
+      dhtlog_handle->insert_query (NULL, message_context->unique_id, DHTLOG_FIND_PEER,
+                                   message_context->hop_count, GNUNET_YES, &my_identity,
+                                   message_context->key);
+    }
+#endif
   //send_reply_to_client(message_context->client, find_peer_result, message_context->unique_id);
   GNUNET_free(find_peer_result);
 }
@@ -1445,8 +1802,14 @@ handle_dht_put (void *cls,
 
   GNUNET_assert (ntohs (msg->size) >=
                  sizeof (struct GNUNET_DHT_PutMessage));
+
+
   put_msg = (struct GNUNET_DHT_PutMessage *)msg;
   put_type = ntohs (put_msg->type);
+
+  if (put_type == DHT_MALICIOUS_MESSAGE_TYPE)
+    return;
+
   data_size = ntohs (put_msg->header.size) - sizeof (struct GNUNET_DHT_PutMessage);
 #if DEBUG_DHT
   GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
@@ -1454,6 +1817,28 @@ handle_dht_put (void *cls,
               my_short_id, "DHT", "PUT", put_type, GNUNET_h2s (message_context->key), message_context->unique_id);
 #endif
 #if DEBUG_DHT_ROUTING
+  if (message_context->hop_count == 0) /* Locally initiated request */
+    {
+      if ((debug_routes) && (dhtlog_handle != NULL))
+        {
+          dhtlog_handle->insert_query (NULL, message_context->unique_id, DHTLOG_PUT,
+                                       message_context->hop_count, GNUNET_NO, &my_identity,
+                                       message_context->key);
+        }
+    }
+#endif
+
+  if (message_context->closest != GNUNET_YES)
+    return;
+
+#if DEBUG_DHT_ROUTING
+  if ((debug_routes_extended) && (dhtlog_handle != NULL))
+    {
+      dhtlog_handle->insert_route (NULL, message_context->unique_id, DHTLOG_ROUTE,
+                                   message_context->hop_count, GNUNET_YES,
+                                   &my_identity, message_context->key, message_context->peer,
+                                   NULL);
+    }
 
   if ((debug_routes) && (dhtlog_handle != NULL))
     {
@@ -1463,6 +1848,7 @@ handle_dht_put (void *cls,
     }
 #endif
 
+  increment_stats(STAT_PUTS_INSERTED);
   if (datacache != NULL)
     GNUNET_DATACACHE_put (datacache, message_context->key, data_size,
                           (char *) &put_msg[1], put_type,
@@ -1506,6 +1892,20 @@ get_forward_count (unsigned int hop_count, size_t target_replication)
   unsigned int target_value;
   unsigned int diameter;
 
+  /**
+   * If we are behaving in strict kademlia mode, send multiple initial requests,
+   * but then only send to 1 or 0 peers.
+   */
+  if (strict_kademlia == GNUNET_YES)
+    {
+      if (hop_count == 0)
+        return DHT_KADEMLIA_REPLICATION;
+      else if (hop_count < MAX_HOPS)
+        return 1;
+      else
+        return 0;
+    }
+
   /* FIXME: the smaller we think the network is the more lenient we should be for
    * routing right?  The estimation below only works if we think we have reasonably
    * full routing tables, which for our RR topologies may not be the case!
@@ -1561,16 +1961,19 @@ find_closest_peer (const GNUNET_HashCode *hc)
   unsigned int lowest_distance;
   unsigned int temp_distance;
   int bucket;
+  int count;
 
   lowest_distance = -1;
 
   if (k_buckets[lowest_bucket].peers_size == 0)
     return NULL;
 
+  current_closest = NULL;
   for (bucket = lowest_bucket; bucket < MAX_BUCKETS; bucket++)
     {
       pos = k_buckets[bucket].head;
-      while (pos != NULL)
+      count = 0;
+      while ((pos != NULL) && (count < bucket_size))
         {
           temp_distance = distance(&pos->id.hashPubKey, hc);
           if (temp_distance <= lowest_distance)
@@ -1579,6 +1982,7 @@ find_closest_peer (const GNUNET_HashCode *hc)
               current_closest = pos;
             }
           pos = pos->next;
+          count++;
         }
     }
   GNUNET_assert(current_closest != NULL);
@@ -1599,6 +2003,7 @@ am_closest_peer (const GNUNET_HashCode * target)
   int bits;
   int other_bits;
   int bucket_num;
+  int count;
   struct PeerInfo *pos;
   unsigned int my_distance;
 
@@ -1610,15 +2015,18 @@ am_closest_peer (const GNUNET_HashCode * target)
   my_distance = distance(&my_identity.hashPubKey, target);
 
   pos = k_buckets[bucket_num].head;
-  while (pos != NULL)
+  count = 0;
+  while ((pos != NULL) && (count < bucket_size))
     {
       other_bits = matching_bits(&pos->id.hashPubKey, target);
       if (other_bits > bits)
         return GNUNET_NO;
       else if (other_bits == bits) /* We match the same number of bits, do distance comparison */
         {
-          if (distance(&pos->id.hashPubKey, target) < my_distance)
-            return GNUNET_NO;
+          return GNUNET_YES;
+          /* FIXME: why not just return GNUNET_YES here?  We are certainly close. */
+          /*if (distance(&pos->id.hashPubKey, target) < my_distance)
+            return GNUNET_NO;*/
         }
       pos = pos->next;
     }
@@ -1671,146 +2079,108 @@ select_peer (const GNUNET_HashCode * target,
 {
   unsigned int distance;
   unsigned int bc;
+  unsigned int count;
   struct PeerInfo *pos;
-#if USE_KADEMLIA
-  const struct PeerInfo *chosen;
+  struct PeerInfo *chosen;
   unsigned long long largest_distance;
-#else
   unsigned long long total_distance;
   unsigned long long selected;
-#endif
 
-#if USE_KADEMLIA
-  largest_distance = 0;
-  chosen = NULL;
-  for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
-    {
-      pos = k_buckets[bc].head;
-      while (pos != NULL)
-        {
-          if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
-            {
-              distance = inverse_distance (target, &pos->id.hashPubKey);
-              if (distance > largest_distance)
-                {
-                  chosen = pos;
-                  largest_distance = distance;
-                }
-            }
-          pos = pos->next;
-        }
-    }
+  /* Strict Kademlia: deterministically pick the peer not yet in the
+   * bloomfilter with the largest inverse_distance() to the target. */
+  if (strict_kademlia == GNUNET_YES)
+  {
+    largest_distance = 0;
+    chosen = NULL;
+    for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
+      {
+        pos = k_buckets[bc].head;
+        count = 0;
+        while ((pos != NULL) && (count < bucket_size))
+          {
+            if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
+              {
+                distance = inverse_distance (target, &pos->id.hashPubKey);
+                if (distance > largest_distance)
+                  {
+                    chosen = pos;
+                    largest_distance = distance;
+                  }
+              }
+            count++;
+            pos = pos->next;
+          }
+      }
 
-  if ((largest_distance > 0) && (chosen != NULL))
-    {
-      GNUNET_CONTAINER_bloomfilter_add(bloom, &chosen->id.hashPubKey);
-      return chosen;
-    }
+    if ((largest_distance > 0) && (chosen != NULL))
+      {
+        GNUNET_CONTAINER_bloomfilter_add(bloom, &chosen->id.hashPubKey);
+        return chosen;
+      }
+    else
+      {
+        return NULL;
+      }
+  }
   else
-    {
-      return NULL;
-    }
-#else
-  /* GNUnet-style */
-  total_distance = 0;
-  for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
-    {
-      pos = k_buckets[bc].head;
-      while (pos != NULL)
-        {
-          if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
-            total_distance += (unsigned long long)inverse_distance (target, &pos->id.hashPubKey);
+  {
+    /* GNUnet-style: weighted random selection; each peer not yet in the
+     * bloomfilter is chosen with probability proportional to
+     * inverse_distance (target, peer). */
+    total_distance = 0;
+    for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
+      {
+        pos = k_buckets[bc].head;
+        count = 0;
+        while ((pos != NULL) && (count < bucket_size))
+          {
+            if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
+              total_distance += (unsigned long long)inverse_distance (target, &pos->id.hashPubKey);
 #if DEBUG_DHT > 1
-          GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
-                      "`%s:%s': Total distance is %llu, distance from %s to %s is %u\n",
-                      my_short_id, "DHT", total_distance, GNUNET_i2s(&pos->id), GNUNET_h2s(target) , inverse_distance(target, &pos->id.hashPubKey));
+            GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                        "`%s:%s': Total distance is %llu, distance from %s to %s is %u\n",
+                        my_short_id, "DHT", total_distance, GNUNET_i2s(&pos->id), GNUNET_h2s(target) , inverse_distance(target, &pos->id.hashPubKey));
 #endif
-          pos = pos->next;
-        }
-    }
-  if (total_distance == 0)
-    {
-      return NULL;
-    }
+            pos = pos->next;
+            count++;
+          }
+      }
+    if (total_distance == 0)
+      {
+        return NULL;
+      }
 
-  selected = GNUNET_CRYPTO_random_u64 (GNUNET_CRYPTO_QUALITY_WEAK, total_distance);
-  for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
-    {
-      pos = k_buckets[bc].head;
-      while (pos != NULL)
-        {
-          if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
-            {
-              distance = inverse_distance (target, &pos->id.hashPubKey);
-              if (distance > selected)
-                return pos;
-              selected -= distance;
-            }
-          else
-            {
+    selected = GNUNET_CRYPTO_random_u64 (GNUNET_CRYPTO_QUALITY_WEAK, total_distance);
+    for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
+      {
+        pos = k_buckets[bc].head;
+        count = 0;
+        while ((pos != NULL) && (count < bucket_size))
+          {
+            if (GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey))
+              {
+                distance = inverse_distance (target, &pos->id.hashPubKey);
+                if (distance > selected)
+                  return pos;
+                selected -= distance;
+              }
+            else
+              {
 #if DEBUG_DHT
-              GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
-                          "`%s:%s': peer %s matches bloomfilter.\n",
-                          my_short_id, "DHT", GNUNET_i2s(&pos->id));
+                GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                            "`%s:%s': peer %s matches bloomfilter.\n",
+                            my_short_id, "DHT", GNUNET_i2s(&pos->id));
 #endif
-            }
-          pos = pos->next;
-        }
-    }
+              }
+            pos = pos->next;
+            count++;
+          }
+      }
 #if DEBUG_DHT
-    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
-                "`%s:%s': peer %s matches bloomfilter.\n",
-                my_short_id, "DHT", GNUNET_i2s(&pos->id));
-#endif
-  return NULL;
+      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                  "`%s:%s': peer %s matches bloomfilter.\n",
+                  my_short_id, "DHT", GNUNET_i2s(&pos->id));
 #endif
+    return NULL;
+  }
 }
 
-/**
- * Function called to send a request out to another peer.
- * Called both for locally initiated requests and those
- * received from other peers.
- *
- * @param cls DHT service closure argument
- * @param msg the encapsulated message
- * @param peer the peer to forward the message to
- * @param msg_ctx the context of the message (hop count, bloom, etc.)
- */
-static void forward_message (void *cls,
-                             const struct GNUNET_MessageHeader *msg,
-                             struct PeerInfo *peer,
-                             struct DHT_MessageContext *msg_ctx)
-{
-  struct GNUNET_DHT_P2PRouteMessage *route_message;
-  struct P2PPendingMessage *pending;
-  size_t msize;
-  size_t psize;
-
-  msize = sizeof (struct GNUNET_DHT_P2PRouteMessage) + ntohs(msg->size);
-  GNUNET_assert(msize <= GNUNET_SERVER_MAX_MESSAGE_SIZE);
-  psize = sizeof(struct P2PPendingMessage) + msize;
-  pending = GNUNET_malloc(psize);
-  pending->msg = (struct GNUNET_MessageHeader *)&pending[1];
-  pending->importance = DHT_SEND_PRIORITY;
-  pending->timeout = GNUNET_TIME_relative_get_forever();
-  route_message = (struct GNUNET_DHT_P2PRouteMessage *)pending->msg;
-  route_message->header.size = htons(msize);
-  route_message->header.type = htons(GNUNET_MESSAGE_TYPE_P2P_DHT_ROUTE);
-  route_message->options = htonl(msg_ctx->msg_options);
-  route_message->hop_count = htonl(msg_ctx->hop_count + 1);
-  route_message->network_size = htonl(msg_ctx->network_size);
-  route_message->desired_replication_level = htonl(msg_ctx->replication);
-  route_message->unique_id = GNUNET_htonll(msg_ctx->unique_id);
-  GNUNET_assert(GNUNET_OK == GNUNET_CONTAINER_bloomfilter_get_raw_data(msg_ctx->bloom, route_message->bloomfilter, DHT_BLOOM_SIZE));
-  memcpy(&route_message->key, msg_ctx->key, sizeof(GNUNET_HashCode));
-  memcpy(&route_message[1], msg, ntohs(msg->size));
-#if DEBUG_DHT > 1
-  GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s Adding pending message size %d for peer %s\n", my_short_id, "DHT", msize, GNUNET_i2s(&peer->id));
-#endif
-  GNUNET_CONTAINER_DLL_insert_after(peer->head, peer->tail, peer->tail, pending);
-  if (peer->send_task == GNUNET_SCHEDULER_NO_TASK)
-    peer->send_task = GNUNET_SCHEDULER_add_now(sched, &try_core_send, peer);
-}
 
 /**
  * Task used to remove forwarding entries, either
@@ -1937,6 +2307,23 @@ static int route_message(void *cls,
   int ret;
 #endif
 
+  if (malicious_dropper == GNUNET_YES)
+    {
+#if DEBUG_DHT_ROUTING
+      if ((debug_routes_extended) && (dhtlog_handle != NULL))
+        {
+          dhtlog_handle->insert_route (NULL, message_context->unique_id, DHTLOG_ROUTE,
+                                       message_context->hop_count, GNUNET_SYSERR,
+                                       &my_identity, message_context->key, message_context->peer,
+                                       NULL);
+        }
+#endif
+      if (message_context->bloom != NULL)
+        GNUNET_CONTAINER_bloomfilter_free(message_context->bloom);
+      return 0;
+    }
+
+  increment_stats(STAT_ROUTES);
   message_context->closest = am_closest_peer(message_context->key);
   forward_count = get_forward_count(message_context->hop_count, message_context->replication);
   nearest = find_closest_peer(message_context->key);
@@ -1945,7 +2332,8 @@ static int route_message(void *cls,
     message_context->bloom = GNUNET_CONTAINER_bloomfilter_init (NULL, DHT_BLOOM_SIZE, DHT_BLOOM_K);
   GNUNET_CONTAINER_bloomfilter_add (message_context->bloom, &my_identity.hashPubKey);
 
-  if ((stop_on_closest == GNUNET_YES) && (message_context->closest == GNUNET_YES) && (ntohs(msg->type) == GNUNET_MESSAGE_TYPE_DHT_PUT))
+  if (((stop_on_closest == GNUNET_YES) && (message_context->closest == GNUNET_YES) && (ntohs(msg->type) == GNUNET_MESSAGE_TYPE_DHT_PUT))
+      || ((strict_kademlia == GNUNET_YES) && (message_context->closest == GNUNET_YES)))
     forward_count = 0;
 
 #if DEBUG_DHT_ROUTING
@@ -1970,37 +2358,31 @@ static int route_message(void *cls,
       if ((handle_dht_get (cls, msg, message_context) > 0) && (stop_on_found == GNUNET_YES))
         forward_count = 0;
       break;
-    case GNUNET_MESSAGE_TYPE_DHT_PUT: /* Check if closest, if so insert data. FIXME: thresholding?*/
-      if (message_context->closest == GNUNET_YES)
-        {
+    case GNUNET_MESSAGE_TYPE_DHT_PUT: /* Check if closest, if so insert data. FIXME: thresholding to reduce complexity?*/
+      increment_stats(STAT_PUTS);
+      handle_dht_put (cls, msg, message_context);
+      break;
+    case GNUNET_MESSAGE_TYPE_DHT_FIND_PEER: /* Check if closest and not started by us, check options, add to requests seen */
+      increment_stats(STAT_FIND_PEER);
+      if (((message_context->hop_count > 0) && (0 != memcmp(message_context->peer, &my_identity, sizeof(struct GNUNET_PeerIdentity)))) || (message_context->client != NULL))
+      {
+        cache_response (cls, message_context);
+        if ((message_context->closest == GNUNET_YES) || (message_context->msg_options == GNUNET_DHT_RO_DEMULTIPLEX_EVERYWHERE))
+          handle_dht_find_peer (cls, msg, message_context);
+      }
 #if DEBUG_DHT_ROUTING
-          if ((debug_routes_extended) && (dhtlog_handle != NULL))
+      if (message_context->hop_count == 0) /* Locally initiated request */
+        {
+          if ((debug_routes) && (dhtlog_handle != NULL))
             {
-              dhtlog_handle->insert_route (NULL, message_context->unique_id, DHTLOG_ROUTE,
-                                           message_context->hop_count, GNUNET_YES,
-                                           &my_identity, message_context->key, message_context->peer,
-                                           NULL);
+              dhtlog_handle->insert_dhtkey(NULL, message_context->key);
+              dhtlog_handle->insert_query (NULL, message_context->unique_id, DHTLOG_FIND_PEER,
+                                           message_context->hop_count, GNUNET_NO, &my_identity,
+                                           message_context->key);
             }
-#endif
-          handle_dht_put (cls, msg, message_context);
         }
-#if DEBUG_DHT_ROUTING
-        if (message_context->hop_count == 0) /* Locally initiated request */
-          {
-            if ((debug_routes) && (dhtlog_handle != NULL))
-              {
-                dhtlog_handle->insert_query (NULL, message_context->unique_id, DHTLOG_PUT,
-                                             message_context->hop_count, GNUNET_NO, &my_identity,
-                                             message_context->key);
-              }
-          }
 #endif
       break;
-    case GNUNET_MESSAGE_TYPE_DHT_FIND_PEER: /* Check if closest, check options, add to requests seen */
-      cache_response (cls, message_context);
-      if ((message_context->closest == GNUNET_YES) || (message_context->msg_options == GNUNET_DHT_RO_DEMULTIPLEX_EVERYWHERE))
-        handle_dht_find_peer (cls, msg, message_context);
-      break;
     default:
       GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
                   "`%s': Message type (%d) not handled\n", "DHT", ntohs(msg->type));
@@ -2020,7 +2402,6 @@ static int route_message(void *cls,
                       "DHT", GNUNET_h2s (message_context->key), message_context->unique_id, GNUNET_i2s(&selected->id), nearest_buf, matching_bits(&nearest->id.hashPubKey, message_context->key), distance(&nearest->id.hashPubKey, message_context->key));
           GNUNET_free(nearest_buf);
 #endif
-          /* FIXME: statistics */
           if ((debug_routes_extended) && (dhtlog_handle != NULL))
             {
               dhtlog_handle->insert_route (NULL, message_context->unique_id, DHTLOG_ROUTE,
@@ -2081,6 +2462,121 @@ find_active_client (struct GNUNET_SERVER_Client *client)
   return ret;
 }
 
+/**
+ * Task to send a malicious put message across the network.
+ *
+ * @param cls closure for this task
+ * @param tc the context under which the task is running
+ */
+static void
+malicious_put_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+  static struct GNUNET_DHT_PutMessage put_message;
+  static struct DHT_MessageContext message_context;
+  static GNUNET_HashCode key;
+  unsigned int mcsize;
+  uint32_t random_key;
+  put_message.header.size = htons(sizeof(struct GNUNET_DHT_PutMessage));
+  put_message.header.type = htons(GNUNET_MESSAGE_TYPE_DHT_PUT);
+  put_message.type = htons(DHT_MALICIOUS_MESSAGE_TYPE);
+  put_message.expiration = GNUNET_TIME_absolute_hton(GNUNET_TIME_absolute_get_forever());
+  mcsize = sizeof(struct DHT_MessageContext) + sizeof(GNUNET_HashCode);
+  memset(&message_context, 0, sizeof(struct DHT_MessageContext));
+  message_context.client = NULL;
+  random_key = GNUNET_CRYPTO_random_u32(GNUNET_CRYPTO_QUALITY_WEAK, (uint32_t)-1);
+  GNUNET_CRYPTO_hash(&random_key, sizeof(uint32_t), &key);
+  message_context.key = &key;
+  message_context.unique_id = GNUNET_ntohll (GNUNET_CRYPTO_random_u64(GNUNET_CRYPTO_QUALITY_WEAK, (uint64_t)-1));
+  message_context.replication = ntohl (DHT_DEFAULT_FIND_PEER_REPLICATION);
+  message_context.msg_options = ntohl (0);
+  message_context.network_size = estimate_diameter();
+  message_context.peer = &my_identity;
+  increment_stats(STAT_PUT_START);
+  GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s Sending malicious PUT message with hash %s", my_short_id, "DHT", GNUNET_h2s(&key));
+  route_message(NULL, &put_message.header, &message_context);
+  GNUNET_SCHEDULER_add_delayed(sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, malicious_put_frequency), &malicious_put_task, NULL);
+
+}
+
+/**
+ * Task to send a malicious get message across the network.
+ *
+ * @param cls closure for this task
+ * @param tc the context under which the task is running
+ */
+static void
+malicious_get_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+  static struct GNUNET_DHT_GetMessage get_message;
+  static struct DHT_MessageContext message_context;
+  static GNUNET_HashCode key;
+  unsigned int mcsize;
+  uint32_t random_key;
+  get_message.header.size = htons(sizeof(struct GNUNET_DHT_GetMessage));
+  get_message.header.type = htons(GNUNET_MESSAGE_TYPE_DHT_GET);
+  get_message.type = htons(DHT_MALICIOUS_MESSAGE_TYPE);
+  mcsize = sizeof(struct DHT_MessageContext) + sizeof(GNUNET_HashCode);
+  memset(&message_context, 0, sizeof(struct DHT_MessageContext));
+  message_context.client = NULL;
+  random_key = GNUNET_CRYPTO_random_u32(GNUNET_CRYPTO_QUALITY_WEAK, (uint32_t)-1);
+  GNUNET_CRYPTO_hash(&random_key, sizeof(uint32_t), &key);
+  message_context.key = &key;
+  message_context.unique_id = GNUNET_ntohll (GNUNET_CRYPTO_random_u64(GNUNET_CRYPTO_QUALITY_WEAK, (uint64_t)-1));
+  message_context.replication = ntohl (DHT_DEFAULT_FIND_PEER_REPLICATION);
+  message_context.msg_options = ntohl (0);
+  message_context.network_size = estimate_diameter();
+  message_context.peer = &my_identity;
+  increment_stats(STAT_GET_START);
+  GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s Sending malicious GET message with hash %s", my_short_id, "DHT", GNUNET_h2s(&key));
+  route_message(NULL, &get_message.header, &message_context);
+  GNUNET_SCHEDULER_add_delayed(sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, malicious_get_frequency), &malicious_get_task, NULL);
+}
+
+/**
+ * Task to send a find peer message for our own peer identifier
+ * so that we can find the closest peers in the network to ourselves
+ * and attempt to connect to them.
+ *
+ * @param cls closure for this task
+ * @param tc the context under which the task is running
+ */
+static void
+send_find_peer_message (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+  struct GNUNET_MessageHeader *find_peer_msg;
+  struct DHT_MessageContext message_context;
+  int ret;
+  struct GNUNET_TIME_Relative next_send_time;
+
+  if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+    return;
+
+  increment_stats(STAT_FIND_PEER_START);
+
+  find_peer_msg = GNUNET_malloc(sizeof(struct GNUNET_MessageHeader));
+  find_peer_msg->size = htons(sizeof(struct GNUNET_MessageHeader));
+  find_peer_msg->type = htons(GNUNET_MESSAGE_TYPE_DHT_FIND_PEER);
+  memset(&message_context, 0, sizeof(struct DHT_MessageContext));
+  message_context.key = &my_identity.hashPubKey;
+  message_context.unique_id = GNUNET_ntohll (GNUNET_CRYPTO_random_u64(GNUNET_CRYPTO_QUALITY_STRONG, (uint64_t)-1));
+  message_context.replication = ntohl (DHT_DEFAULT_FIND_PEER_REPLICATION);
+  message_context.msg_options = ntohl (DHT_DEFAULT_FIND_PEER_OPTIONS);
+  message_context.network_size = estimate_diameter();
+  message_context.peer = &my_identity;
+
+  ret = route_message(NULL, find_peer_msg, &message_context);
+  GNUNET_free(find_peer_msg);
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+              "`%s:%s': Sent `%s' request to %d peers\n", my_short_id, "DHT",
+              "FIND PEER", ret);
+  next_send_time.value = DHT_MINIMUM_FIND_PEER_INTERVAL.value +
+                         GNUNET_CRYPTO_random_u64(GNUNET_CRYPTO_QUALITY_STRONG,
+                                                  DHT_MAXIMUM_FIND_PEER_INTERVAL.value - DHT_MINIMUM_FIND_PEER_INTERVAL.value);
+  GNUNET_SCHEDULER_add_delayed (sched,
+                                next_send_time,
+                                &send_find_peer_message, NULL);
+}
+
 /**
  * Handler for any generic DHT messages, calls the appropriate handler
  * depending on message type, sends confirmation if responses aren't otherwise
@@ -2121,12 +2617,69 @@ handle_dht_local_route_request (void *cls, struct GNUNET_SERVER_Client *client,
   message_context.network_size = estimate_diameter();
   message_context.peer = &my_identity;
 
+  if (ntohs(enc_msg->type) == GNUNET_MESSAGE_TYPE_DHT_GET)
+    increment_stats(STAT_GET_START);
+  else if (ntohs(enc_msg->type) == GNUNET_MESSAGE_TYPE_DHT_PUT)
+    increment_stats(STAT_PUT_START);
+  else if (ntohs(enc_msg->type) == GNUNET_MESSAGE_TYPE_DHT_FIND_PEER)
+    increment_stats(STAT_FIND_PEER_START);
+
   route_message(cls, enc_msg, &message_context);
 
   GNUNET_SERVER_receive_done (client, GNUNET_OK);
 
 }
 
+/**
+ * Handler for any locally received DHT control messages,
+ * sets malicious flags mostly for now.
+ *
+ * @param cls closure for the service
+ * @param client the client we received this message from
+ * @param message the actual message received
+ *
+ */
+static void
+handle_dht_control_message (void *cls, struct GNUNET_SERVER_Client *client,
+                            const struct GNUNET_MessageHeader *message)
+{
+  const struct GNUNET_DHT_ControlMessage *dht_control_msg =
+      (const struct GNUNET_DHT_ControlMessage *) message;
+#if DEBUG_DHT
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+              "`%s:%s': Received `%s' request from client, command %d\n", my_short_id, "DHT",
+              "CONTROL", ntohs(dht_control_msg->command));
+#endif
+
+  switch (ntohs(dht_control_msg->command))
+  {
+  case GNUNET_MESSAGE_TYPE_DHT_MALICIOUS_GET:
+    if (ntohs(dht_control_msg->variable) > 0)
+      malicious_get_frequency = ntohs(dht_control_msg->variable);
+    if (malicious_getter != GNUNET_YES)
+      GNUNET_SCHEDULER_add_now(sched, &malicious_get_task, NULL);
+    malicious_getter = GNUNET_YES;
+    GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Initiating malicious GET behavior, frequency %d\n", my_short_id, "DHT", malicious_get_frequency);
+    break;
+  case GNUNET_MESSAGE_TYPE_DHT_MALICIOUS_PUT:
+    if (ntohs(dht_control_msg->variable) > 0)
+      malicious_put_frequency = ntohs(dht_control_msg->variable);
+    if (malicious_putter != GNUNET_YES)
+      GNUNET_SCHEDULER_add_now(sched, &malicious_put_task, NULL);
+    malicious_putter = GNUNET_YES;
+    GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Initiating malicious PUT behavior, frequency %d\n", my_short_id, "DHT", malicious_put_frequency);
+    break;
+  case GNUNET_MESSAGE_TYPE_DHT_MALICIOUS_DROP:
+    malicious_dropper = GNUNET_YES;
+    GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Initiating malicious DROP behavior\n", my_short_id, "DHT");
+    break;
+  default:
+    GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s:%s Unknown control command type `%d'!\n", ntohs(dht_control_msg->command));
+  }
+
+  GNUNET_SERVER_receive_done (client, GNUNET_OK);
+}
+
 /**
  * Handler for any generic DHT stop messages, calls the appropriate handler
  * depending on message type (if processed locally)
@@ -2191,6 +2744,14 @@ handle_dht_p2p_route_request (void *cls,
   struct GNUNET_MessageHeader *enc_msg = (struct GNUNET_MessageHeader *)&incoming[1];
   struct DHT_MessageContext *message_context;
 
+  if (ntohs(enc_msg->type) == GNUNET_MESSAGE_TYPE_DHT_P2P_PING) /* Throw these away. FIXME: Don't throw these away? (reply)*/
+    {
+#if DEBUG_PING
+      GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s Received P2P Ping message.\n", my_short_id, "DHT");
+#endif
+      return GNUNET_YES;
+    }
+
   if (ntohs(enc_msg->size) > GNUNET_SERVER_MAX_MESSAGE_SIZE)
     {
       GNUNET_break_op(0);
@@ -2235,6 +2796,7 @@ handle_dht_p2p_route_result (void *cls,
       GNUNET_break_op(0);
       return GNUNET_YES;
     }
+
   memset(&message_context, 0, sizeof(struct DHT_MessageContext));
   message_context.bloom = GNUNET_CONTAINER_bloomfilter_init(incoming->bloomfilter, DHT_BLOOM_SIZE, DHT_BLOOM_K);
   GNUNET_assert(message_context.bloom != NULL);
@@ -2270,6 +2832,7 @@ process_hello (void *cls, const struct GNUNET_MessageHeader *message)
   memcpy(my_hello, message, ntohs(message->size));
 }
 
+
 /**
  * Task run during shutdown.
  *
@@ -2281,7 +2844,6 @@ shutdown_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
 {
   int bucket_count;
   struct PeerInfo *pos;
-
   if (transport_handle != NULL)
   {
     GNUNET_free_non_null(my_hello);
@@ -2318,6 +2880,11 @@ shutdown_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
       GNUNET_DATACACHE_destroy (datacache);
     }
 
+  if (stats != NULL)
+    {
+      GNUNET_STATISTICS_destroy (stats, GNUNET_YES);
+    }
+
   if (dhtlog_handle != NULL)
     GNUNET_DHTLOG_disconnect(dhtlog_handle);
 
@@ -2356,8 +2923,11 @@ core_init (void *cls,
               "%s: Core connection initialized, I am peer: %s\n", "dht",
               GNUNET_i2s (identity));
 #endif
+
   /* Copy our identity so we can use it */
   memcpy (&my_identity, identity, sizeof (struct GNUNET_PeerIdentity));
+  if (my_short_id != NULL)
+    GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "%s Received CORE INIT message, but have already been initialized! Did CORE fail?\n", "DHT SERVICE");
+  GNUNET_free_non_null (my_short_id);
   my_short_id = GNUNET_strdup(GNUNET_i2s(&my_identity));
   /* Set the server to local variable */
   coreAPI = server;
@@ -2368,15 +2938,16 @@ core_init (void *cls,
 
 
 static struct GNUNET_SERVER_MessageHandler plugin_handlers[] = {
-  {&handle_dht_local_route_request, NULL, GNUNET_MESSAGE_TYPE_LOCAL_DHT_ROUTE, 0},
-  {&handle_dht_local_route_stop, NULL, GNUNET_MESSAGE_TYPE_DHT_ROUTE_STOP, 0},
+  {&handle_dht_local_route_request, NULL, GNUNET_MESSAGE_TYPE_DHT_LOCAL_ROUTE, 0},
+  {&handle_dht_local_route_stop, NULL, GNUNET_MESSAGE_TYPE_DHT_LOCAL_ROUTE_STOP, 0},
+  {&handle_dht_control_message, NULL, GNUNET_MESSAGE_TYPE_DHT_CONTROL, 0},
   {NULL, NULL, 0, 0}
 };
 
 
 static struct GNUNET_CORE_MessageHandler core_handlers[] = {
-  {&handle_dht_p2p_route_request, GNUNET_MESSAGE_TYPE_P2P_DHT_ROUTE, 0},
-  {&handle_dht_p2p_route_result, GNUNET_MESSAGE_TYPE_P2P_DHT_ROUTE_RESULT, 0},
+  {&handle_dht_p2p_route_request, GNUNET_MESSAGE_TYPE_DHT_P2P_ROUTE, 0},
+  {&handle_dht_p2p_route_result, GNUNET_MESSAGE_TYPE_DHT_P2P_ROUTE_RESULT, 0},
   {NULL, 0, 0}
 };
 
@@ -2393,24 +2964,62 @@ void handle_core_connect (void *cls,
                           struct GNUNET_TIME_Relative latency,
                           uint32_t distance)
 {
-  int ret;
+  struct PeerInfo *ret;
 
 #if DEBUG_DHT
   GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
               "%s:%s Receives core connect message for peer %s distance %d!\n", my_short_id, "dht", GNUNET_i2s(peer), distance);
 #endif
+
+  if (GNUNET_YES == GNUNET_CONTAINER_multihashmap_contains(all_known_peers, &peer->hashPubKey))
+    {
+      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "%s:%s Received %s message for peer %s, but already have peer in RT!", my_short_id, "DHT", "CORE CONNECT", GNUNET_i2s(peer));
+      return;
+    }
+
   if (datacache != NULL)
     GNUNET_DATACACHE_put(datacache, &peer->hashPubKey, sizeof(struct GNUNET_PeerIdentity), (const char *)peer, 0, GNUNET_TIME_absolute_get_forever());
   ret = try_add_peer(peer,
                      find_current_bucket(&peer->hashPubKey),
                      latency,
                      distance);
+  if (ret != NULL)
+    {
+      GNUNET_CONTAINER_multihashmap_put(all_known_peers, &peer->hashPubKey, ret, GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_ONLY);
+    }
 #if DEBUG_DHT
     GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
-                "%s:%s Adding peer to routing list: %s\n", my_short_id, "DHT", ret == GNUNET_YES ? "PEER ADDED" : "NOT ADDED");
+                "%s:%s Adding peer to routing list: %s\n", my_short_id, "DHT", ret == NULL ? "NOT ADDED" : "PEER ADDED");
 #endif
 }
 
+/**
+ * Method called whenever a peer disconnects.
+ *
+ * @param cls closure
+ * @param peer peer identity this notification is about
+ */
+void handle_core_disconnect (void *cls,
+                             const struct
+                             GNUNET_PeerIdentity * peer)
+{
+  struct PeerInfo *to_remove;
+  int current_bucket;
+
+  GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s: Received peer disconnect message for peer `%s' from %s\n", my_short_id, "DHT", GNUNET_i2s(peer), "CORE");
+
+  if (GNUNET_YES != GNUNET_CONTAINER_multihashmap_contains(all_known_peers, &peer->hashPubKey))
+    {
+      GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "%s:%s: do not have peer `%s' in RT, can't disconnect!\n", my_short_id, "DHT", GNUNET_i2s(peer));
+      return;
+    }
+  GNUNET_assert(GNUNET_CONTAINER_multihashmap_contains(all_known_peers, &peer->hashPubKey));
+  to_remove = GNUNET_CONTAINER_multihashmap_get(all_known_peers, &peer->hashPubKey);
+  GNUNET_assert(0 == memcmp(peer, &to_remove->id, sizeof(struct GNUNET_PeerIdentity)));
+  current_bucket = find_current_bucket(&to_remove->id.hashPubKey);
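+  /* delete_peer() is assumed to also remove this entry from all_known_peers;
+   * otherwise handle_core_connect() above would refuse to re-add the peer
+   * if it later reconnects. */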
+  delete_peer(to_remove, current_bucket);
+}
+
 /**
  * Process dht requests.
  *
@@ -2425,6 +3034,7 @@ run (void *cls,
      struct GNUNET_SERVER_Handle *server,
      const struct GNUNET_CONFIGURATION_Handle *c)
 {
+  int random_seconds;
   sched = scheduler;
   cfg = c;
   datacache = GNUNET_DATACACHE_create (sched, cfg, "dhtcache");
@@ -2434,8 +3044,8 @@ run (void *cls,
                                  GNUNET_TIME_UNIT_FOREVER_REL,
                                  NULL,  /* Closure passed to DHT functionas around? */
                                  &core_init,    /* Call core_init once connected */
-                                 &handle_core_connect,  /* Don't care about connects */
-                                 NULL,  /* FIXME: remove peers on disconnects */
+                                 &handle_core_connect,  /* Handle connects */
+                                 &handle_core_disconnect,  /* remove peers on disconnects */
                                  NULL,  /* Do we care about "status" updates? */
                                  NULL,  /* Don't want notified about all incoming messages */
                                  GNUNET_NO,     /* For header only inbound notification */
@@ -2445,7 +3055,8 @@ run (void *cls,
 
   if (coreAPI == NULL)
     return;
-  transport_handle = GNUNET_TRANSPORT_connect(sched, cfg, NULL, NULL, NULL, NULL);
+  transport_handle = GNUNET_TRANSPORT_connect(sched, cfg, 
+                                             NULL, NULL, NULL, NULL, NULL);
   if (transport_handle != NULL)
     GNUNET_TRANSPORT_get_hello (transport_handle, &process_hello, NULL);
   else
@@ -2454,13 +3065,20 @@ run (void *cls,
   lowest_bucket = MAX_BUCKETS - 1;
   forward_list.hashmap = GNUNET_CONTAINER_multihashmap_create(MAX_OUTSTANDING_FORWARDS / 10);
   forward_list.minHeap = GNUNET_CONTAINER_heap_create(GNUNET_CONTAINER_HEAP_ORDER_MIN);
-  /* Scheduled the task to clean up when shutdown is called */
-
+  all_known_peers = GNUNET_CONTAINER_multihashmap_create(MAX_BUCKETS / 8);
+  GNUNET_assert(all_known_peers != NULL);
   if (GNUNET_YES == GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht_testing", "mysql_logging"))
     {
       debug_routes = GNUNET_YES;
     }
 
+  if (GNUNET_YES ==
+      GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
+                                           "strict_kademlia"))
+    {
+      strict_kademlia = GNUNET_YES;
+    }
+
   if (GNUNET_YES ==
       GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
                                            "stop_on_closest"))
@@ -2475,6 +3093,35 @@ run (void *cls,
       stop_on_found = GNUNET_YES;
     }
 
+  if (GNUNET_YES ==
+      GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
+                                           "malicious_getter"))
+    {
+      malicious_getter = GNUNET_YES;
+      if (GNUNET_NO == GNUNET_CONFIGURATION_get_value_number (cfg, "DHT",
+                                            "MALICIOUS_GET_FREQUENCY",
+                                            &malicious_get_frequency))
+        malicious_get_frequency = DEFAULT_MALICIOUS_GET_FREQUENCY;
+    }
+
+  if (GNUNET_YES ==
+      GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
+                                           "malicious_putter"))
+    {
+      malicious_putter = GNUNET_YES;
+      if (GNUNET_NO == GNUNET_CONFIGURATION_get_value_number (cfg, "DHT",
+                                            "MALICIOUS_PUT_FREQUENCY",
+                                            &malicious_put_frequency))
+        malicious_put_frequency = DEFAULT_MALICIOUS_PUT_FREQUENCY;
+    }
+
+  if (GNUNET_YES ==
+          GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
+                                               "malicious_dropper"))
+    {
+      malicious_dropper = GNUNET_YES;
+    }
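+
+  /*
+   * Illustrative configuration sketch for the testing options read in this
+   * function (option names as used above; values are examples only):
+   *   [dht]
+   *   strict_kademlia = YES
+   *   stop_on_closest = NO
+   *   malicious_getter = YES
+   *   MALICIOUS_GET_FREQUENCY = 500
+   *   malicious_putter = NO
+   *   malicious_dropper = NO
+   */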
+
   if (GNUNET_YES ==
       GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht_testing",
                                            "mysql_logging_extended"))
@@ -2490,10 +3137,38 @@ run (void *cls,
         {
           GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
                       "Could not connect to mysql logging server, logging will not happen!");
-          return;
         }
     }
 
+  stats = GNUNET_STATISTICS_create(sched, "dht", cfg);
+
+  if (stats != NULL)
+    {
+      GNUNET_STATISTICS_set(stats, STAT_ROUTES, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_ROUTE_FORWARDS, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_RESULTS, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_RESULTS_TO_CLIENT, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_RESULT_FORWARDS, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_GETS, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_PUTS, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_PUTS_INSERTED, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_FIND_PEER, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_FIND_PEER_START, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_GET_START, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_PUT_START, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_FIND_PEER_REPLY, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_FIND_PEER_ANSWER, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_GET_REPLY, 0, GNUNET_NO);
+      GNUNET_STATISTICS_set(stats, STAT_GET_RESPONSE_START, 0, GNUNET_NO);
+    }
+#if DO_FIND_PEER
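+  /* Delay the first FIND_PEER by a random amount (0-179 seconds),
+   * presumably so that peers started at the same time do not all probe
+   * the network simultaneously. */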
+  random_seconds = GNUNET_CRYPTO_random_u32(GNUNET_CRYPTO_QUALITY_WEAK, 180);
+  GNUNET_SCHEDULER_add_delayed (sched,
+                                GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_SECONDS, random_seconds),
+                                &send_find_peer_message, NULL);
+#endif
+
+  /* Scheduled the task to clean up when shutdown is called */
   cleanup_task = GNUNET_SCHEDULER_add_delayed (sched,
                                                GNUNET_TIME_UNIT_FOREVER_REL,
                                                &shutdown_task, NULL);