authentication of ciphertexts (+ seed)
[oweals/gnunet.git] / src / testing / testing_group.c
index 6f100cbdd923bf59d4eb1d7a553b8668f94785a5..72b56ae9998984b68d423e707ac729770f5a5111 100644 (file)
@@ -32,6 +32,8 @@
 
 #define VERBOSE_TESTING GNUNET_NO
 
+#define VERBOSE_TOPOLOGY GNUNET_YES
+
 #define DEBUG_CHURN GNUNET_NO
 
 /**
@@ -49,7 +51,7 @@
  */
 #define HIGH_PORT 56000
 
-#define MAX_OUTSTANDING_CONNECTIONS 10
+#define MAX_OUTSTANDING_CONNECTIONS 40
 
 #define MAX_CONCURRENT_HOSTKEYS 10
 
@@ -365,7 +367,7 @@ struct PeerData
 
 
 /**
- * Data we keep per host.
+ * Data we keep per host (one entry in the peer group's hosts array).
  */
 struct HostData
 {
@@ -509,11 +511,15 @@ struct GNUNET_TESTING_PeerGroup
   void *notify_connection_cls;
 
   /**
-   * NULL-terminated array of information about
-   * hosts.
+   * Array of information about hosts.
    */
   struct HostData *hosts;
 
+  /**
+   * Number of hosts (number of entries in the hosts array).
+   */
+  unsigned int num_hosts;
+
   /**
    * Array of "total" peers.
    */
@@ -721,6 +727,13 @@ GNUNET_TESTING_topology_option_get (enum GNUNET_TESTING_TopologyOption *topology
        */
       "CONNECT_DFS",
       
+      /**
+       * Find the N closest peers to each allowed peer in the
+       * topology and make sure a connection to those peers
+       * exists in the connect topology.
+       */
+      "CONNECT_CLOSEST",
+
       /**
        * No options specified.
        */
@@ -890,7 +903,6 @@ make_config (const struct GNUNET_CONFIGURATION_Handle *cfg,
       GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport-udp", "BINDTO", "127.0.0.1");
     }
 
-
   *port = (uint16_t) uc.nport;
   *upnum = uc.upnum;
   uc.fdnum++;
@@ -1605,7 +1617,9 @@ create_erdos_renyi (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_Connecti
 
 /**
  * Create a topology given a peer group (set of running peers)
- * and a connection processor.
+ * and a connection processor.  This particular function creates
+ * the connections for a 2d-torus, plus additional "closest"
+ * connections per peer.
  *
  * @param pg the peergroup to create the topology on
  * @param proc the connection processor to call to actually set
@@ -2665,8 +2679,8 @@ struct DFSContext
  */
 static int
 random_connect_iterator (void *cls,
-                  const GNUNET_HashCode * key,
-                  void *value)
+                         const GNUNET_HashCode * key,
+                         void *value)
 {
   struct RandomContext *random_ctx = cls;
   double random_number;
@@ -2866,6 +2880,96 @@ static unsigned int count_allowed_connections(struct GNUNET_TESTING_PeerGroup *p
   return count;
 }
 
+
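+/**
+ * Closure for find_closest_peers; tracks the closest, not yet
+ * connected peer found so far for a given peer.
+ */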
+struct FindClosestContext
+{
+  /**
+   * The currently known closest peer.
+   */
+  struct GNUNET_TESTING_Daemon *closest;
+
+  /**
+   * The info for the peer we are adding connections for.
+   */
+  struct PeerData *curr_peer;
+
+  /**
+   * The distance (bits) between the current
+   * peer and the currently known closest.
+   */
+  unsigned int closest_dist;
+
+  /**
+   * The offset of the closest known peer in
+   * the peer group.
+   */
+  unsigned int closest_num;
+};
+
+/**
+ * Iterator over hash map entries of the allowed
+ * peer connections.  Find the closest, not already
+ * connected peer and return it.
+ *
+ * @param cls closure (struct FindClosestContext)
+ * @param key current key code (hash of offset in pg)
+ * @param value value in the hash map - a GNUNET_TESTING_Daemon
+ * @return GNUNET_YES if we should continue to
+ *         iterate,
+ *         GNUNET_NO if not.
+ */
+static int
+find_closest_peers (void *cls, const GNUNET_HashCode * key, void *value)
+{
+  struct FindClosestContext *closest_ctx = cls;
+  struct GNUNET_TESTING_Daemon *daemon = value;
+
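+  /* A peer is "closer" if its hash shares more leading bits with the
+     current peer's hash; peers already in the connect set are skipped. */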
+  if (((closest_ctx->closest == NULL) ||
+       (GNUNET_CRYPTO_hash_matching_bits(&daemon->id.hashPubKey, &closest_ctx->curr_peer->daemon->id.hashPubKey) > closest_ctx->closest_dist))
+      && (GNUNET_YES != GNUNET_CONTAINER_multihashmap_contains(closest_ctx->curr_peer->connect_peers, key)))
+    {
+      closest_ctx->closest_dist = GNUNET_CRYPTO_hash_matching_bits(&daemon->id.hashPubKey, &closest_ctx->curr_peer->daemon->id.hashPubKey);
+      closest_ctx->closest = daemon;
+      uid_from_hash(key, &closest_ctx->closest_num);
+    }
+  return GNUNET_YES;
+}
+
+/**
+ * From the set of connections possible, choose num connections per
+ * peer which are the closest (by matching hash prefix bits) out of
+ * those allowed.  Guaranteed to add num peers to connect to, provided
+ * there are that many peers in the underlay topology to connect to.
+ *
+ * @param pg the peergroup we are dealing with
+ * @param num how many connections each peer should get, at minimum (if possible)
+ * @param proc processor to actually add the connections
+ */
+void
+add_closest (struct GNUNET_TESTING_PeerGroup *pg, unsigned int num, GNUNET_TESTING_ConnectionProcessor proc)
+{
+  struct FindClosestContext closest_ctx;
+  uint32_t pg_iter;
+  uint32_t i;
+
+  for (i = 0; i < num; i++) /* Each time find a closest peer (from those available) */
+    {
+      for (pg_iter = 0; pg_iter < pg->total; pg_iter++)
+        {
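+          /* Reset the search context for this peer, then scan its allowed
+             peers for the closest one not yet in its connect set. */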
+          closest_ctx.curr_peer = &pg->peers[pg_iter];
+          closest_ctx.closest = NULL;
+          closest_ctx.closest_dist = 0;
+          closest_ctx.closest_num = 0;
+          GNUNET_CONTAINER_multihashmap_iterate(pg->peers[pg_iter].allowed_peers, &find_closest_peers, &closest_ctx);
+          if (closest_ctx.closest != NULL)
+            {
+              GNUNET_assert (closest_ctx.closest_num < pg->total);
+              proc(pg, pg_iter, closest_ctx.closest_num);
+            }
+        }
+    }
+}
+
 /**
  * From the set of connections possible, choose at least num connections per
  * peer based on depth first traversal of peer connections.  If DFS leaves
@@ -3129,6 +3233,66 @@ schedule_get_statistics(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc
     }
 }
 
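+/**
+ * Linked list entry remembering a statistics instance (identified by
+ * host, unix domain socket path and port) that has already been
+ * scheduled for retrieval.
+ */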
+struct DuplicateStats
+{
+  /**
+   * Next item in the list
+   */
+  struct DuplicateStats *next;
+
+  /**
+   * Nasty string, concatenation of relevant information.
+   */
+  char *unique_string;
+};
+
+/**
+ * Check whether the combination of port/host/unix domain socket
+ * already exists in the list of peers being checked for statistics.
+ *
+ * @param pg the peergroup in question
+ * @param specific_peer the peer we're concerned with
+ * @param stats_list list of statistics instances seen so far (updated in place)
+ *
+ * @return GNUNET_YES if the statistics instance has been seen already,
+ *         GNUNET_NO if not (and we may have added it to the list)
+ */
+static int
+stats_check_existing(struct GNUNET_TESTING_PeerGroup *pg, struct PeerData *specific_peer, struct DuplicateStats **stats_list)
+{
+  struct DuplicateStats *pos;
+  char *unix_domain_socket;
+  unsigned long long port;
+  char *to_match;
+  if (GNUNET_YES != GNUNET_CONFIGURATION_get_value_yesno(pg->cfg, "testing", "single_statistics_per_host"))
+    return GNUNET_NO; /* Each peer has its own statistics instance, do nothing! */
+
+  pos = *stats_list;
+  if (GNUNET_OK != GNUNET_CONFIGURATION_get_value_string(specific_peer->cfg, "statistics", "unixpath", &unix_domain_socket))
+    return GNUNET_NO;
+
+  if (GNUNET_OK != GNUNET_CONFIGURATION_get_value_number(specific_peer->cfg, "statistics", "port", &port))
+    port = 0;
+
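+  /* Build a key identifying this peer's statistics instance: hostname
+     (if any), unix domain socket path and port.  Peers with the same key
+     share a statistics service. */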
+  if (specific_peer->daemon->hostname != NULL)
+    GNUNET_asprintf(&to_match, "%s%s%llu", specific_peer->daemon->hostname, unix_domain_socket, port);
+  else
+    GNUNET_asprintf(&to_match, "%s%llu", unix_domain_socket, port);
+
+  while (pos != NULL)
+    {
+      if (0 == strcmp(to_match, pos->unique_string))
+        {
+          GNUNET_free(to_match);
+          return GNUNET_YES;
+        }
+      pos = pos->next;
+    }
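+  /* Not seen before: remember this statistics instance for later checks. */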
+  pos = GNUNET_malloc(sizeof(struct DuplicateStats));
+  pos->unique_string = to_match;
+  pos->next = *stats_list;
+  *stats_list = pos;
+  return GNUNET_NO;
+}
 
 /**
  * Iterate over all (running) peers in the peer group, retrieve
@@ -3143,6 +3307,9 @@ GNUNET_TESTING_get_statistics (struct GNUNET_TESTING_PeerGroup *pg,
   struct StatsCoreContext *core_ctx;
   unsigned int i;
   unsigned int total_count;
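+  /* List of statistics instances already scheduled for retrieval; used to
+     avoid contacting a shared (per-host) statistics service twice. */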
+  struct DuplicateStats *stats_list;
+  struct DuplicateStats *pos;
+  stats_list = NULL;
 
   /* Allocate a single stats iteration context */
   stats_context = GNUNET_malloc(sizeof(struct StatsIterateContext));
@@ -3150,9 +3317,10 @@ GNUNET_TESTING_get_statistics (struct GNUNET_TESTING_PeerGroup *pg,
   stats_context->proc = proc;
   stats_context->cls = cls;
   total_count = 0;
+
   for (i = 0; i < pg->total; i++)
     {
-      if (pg->peers[i].daemon->running == GNUNET_YES)
+      if ((pg->peers[i].daemon->running == GNUNET_YES) && (GNUNET_NO == stats_check_existing(pg, &pg->peers[i], &stats_list)))
         {
           /* Allocate one core context per core we need to connect to */
           core_ctx = GNUNET_malloc(sizeof(struct StatsCoreContext));
@@ -3163,7 +3331,20 @@ GNUNET_TESTING_get_statistics (struct GNUNET_TESTING_PeerGroup *pg,
           total_count++;
         }
     }
+
+  GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "Retrieving stats from %u total instances.\n", total_count);
   stats_context->total = total_count;
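+  /* Free the duplicate-detection list; the unique strings were only needed
+     while selecting which statistics instances to contact. */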
+  if (stats_list != NULL)
+    {
+      pos = stats_list;
+      while(pos != NULL)
+        {
+          GNUNET_free(pos->unique_string);
+          stats_list = pos->next;
+          GNUNET_free(pos);
+          pos = stats_list;
+        }
+    }
   return;
 }
 
@@ -3289,6 +3470,13 @@ GNUNET_TESTING_connect_topology (struct GNUNET_TESTING_PeerGroup *pg,
 #endif
       perform_dfs(pg, (int)option_modifier);
       break;
+    case GNUNET_TESTING_TOPOLOGY_OPTION_ADD_CLOSEST:
+#if VERBOSE_TOPOLOGY
+      GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+                  _("Finding additional %u closest peers each (if possible)\n"), (unsigned int)option_modifier);
+#endif
+      add_closest(pg, (unsigned int)option_modifier, &add_actual_connections);
+      break;
     case GNUNET_TESTING_TOPOLOGY_OPTION_NONE:
       break;
     case GNUNET_TESTING_TOPOLOGY_OPTION_ALL:
@@ -3571,7 +3759,7 @@ GNUNET_TESTING_daemons_start (struct GNUNET_SCHEDULER_Handle *sched,
   pg->peers = GNUNET_malloc (total * sizeof (struct PeerData));
   if (NULL != hostnames)
     {
-      off = 2;
+      off = 0;
       hostpos = hostnames;
       while (hostpos != NULL)
         {
@@ -3585,21 +3773,21 @@ GNUNET_TESTING_daemons_start (struct GNUNET_SCHEDULER_Handle *sched,
       while (hostpos != NULL)
         {
           pg->hosts[off].minport = LOW_PORT;
-          off++;
           pg->hosts[off].hostname = GNUNET_strdup(hostpos->hostname);
           if (hostpos->username != NULL)
             pg->hosts[off].username = GNUNET_strdup(hostpos->username);
           pg->hosts[off].sshport = hostpos->port;
           hostpos = hostpos->next;
+          off++;
         }
 
       if (off == 0)
         {
-          GNUNET_free (pg->hosts);
           pg->hosts = NULL;
         }
       hostcnt = off;
       minport = 0;
+      pg->num_hosts = off;
 
 #if NO_LL
       off = 2;
@@ -4135,19 +4323,23 @@ GNUNET_TESTING_daemons_vary (struct GNUNET_TESTING_PeerGroup *pg,
                             GNUNET_TESTING_NotifyCompletion cb,
                             void *cb_cls)
 {
+  struct ShutdownContext *shutdown_ctx;
+  struct ChurnRestartContext *startup_ctx;
   struct ChurnContext *churn_ctx;
 
   if (GNUNET_NO == desired_status)
     {
       if (NULL != pg->peers[offset].daemon)
        {
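+          /* churn_stop_callback expects a struct ShutdownContext; the churn
+             context is handed to it via the cb_cls field. */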
+          shutdown_ctx = GNUNET_malloc(sizeof(struct ShutdownContext));
          churn_ctx = GNUNET_malloc(sizeof(struct ChurnContext));
          churn_ctx->num_to_start = 0;
          churn_ctx->num_to_stop = 1;
          churn_ctx->cb = cb;
-         churn_ctx->cb_cls = cb_cls;  
+         churn_ctx->cb_cls = cb_cls;
+         shutdown_ctx->cb_cls = churn_ctx;
          GNUNET_TESTING_daemon_stop(pg->peers[offset].daemon, 
-                                    timeout, &churn_stop_callback, churn_ctx, 
+                                    timeout, &churn_stop_callback, shutdown_ctx,
                                     GNUNET_NO, GNUNET_YES);     
        }
     }
@@ -4155,13 +4347,15 @@ GNUNET_TESTING_daemons_vary (struct GNUNET_TESTING_PeerGroup *pg,
     {
       if (NULL == pg->peers[offset].daemon)
        {
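+          /* churn_start_callback expects a struct ChurnRestartContext,
+             which wraps the churn context. */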
+          startup_ctx = GNUNET_malloc(sizeof(struct ChurnRestartContext));
          churn_ctx = GNUNET_malloc(sizeof(struct ChurnContext));
          churn_ctx->num_to_start = 1;
          churn_ctx->num_to_stop = 0;
          churn_ctx->cb = cb;
          churn_ctx->cb_cls = cb_cls;  
+         startup_ctx->churn_ctx = churn_ctx;
          GNUNET_TESTING_daemon_start_stopped(pg->peers[offset].daemon, 
-                                             timeout, &churn_start_callback, churn_ctx);
+                                             timeout, &churn_start_callback, startup_ctx);
        }
     }
   else
@@ -4273,11 +4467,12 @@ GNUNET_TESTING_daemons_stop (struct GNUNET_TESTING_PeerGroup *pg,
         GNUNET_CONTAINER_multihashmap_destroy(pg->peers[off].blacklisted_peers);
     }
   GNUNET_free (pg->peers);
-  if (NULL != pg->hosts)
+  for (off = 0; off < pg->num_hosts; off++)
     {
-      GNUNET_free (pg->hosts[0].hostname);
-      GNUNET_free (pg->hosts);
+      GNUNET_free (pg->hosts[off].hostname);
+      GNUNET_free_non_null (pg->hosts[off].username);
     }
+  GNUNET_free_non_null (pg->hosts);
   GNUNET_free (pg);
 }