add bindto for udp transport as well, in case it's used
[oweals/gnunet.git] / src / testing / testing_group.c
index 36734ba48d5528647ddb0da6d68ae00cbe15ad7a..734383b20a1cb77e60aba90e98e9be911f8f02db 100644 (file)
  */
 #define HIGH_PORT 56000
 
-#define MAX_OUTSTANDING_CONNECTIONS 50
+#define MAX_OUTSTANDING_CONNECTIONS 10
 
-#define MAX_CONCURRENT_HOSTKEYS 16
+#define MAX_CONCURRENT_HOSTKEYS 10
 
-#define MAX_CONCURRENT_STARTING 50
+#define MAX_CONCURRENT_STARTING 10
+
+#define MAX_CONCURRENT_SHUTDOWN 10
 
 #define CONNECT_TIMEOUT GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 300)
 
@@ -139,17 +141,28 @@ struct ShutdownContext
   /**
    * Total peers to wait for
    */
-  int total_peers;
+  unsigned int total_peers;
 
   /**
    * Number of peers successfully shut down
    */
-  int peers_down;
+  unsigned int peers_down;
 
   /**
    * Number of peers failed to shut down
    */
-  int peers_failed;
+  unsigned int peers_failed;
+
+  /**
+   * Number of peers we have started shutting
+   * down.  If too many, wait on them.
+   */
+  unsigned int outstanding;
+
+  /**
+   * Timeout for shutdown.
+   */
+  struct GNUNET_TIME_Relative timeout;
 
   /**
    * Callback to call when all peers either
@@ -333,9 +346,52 @@ struct TopologyIterateContext
   unsigned int total;
 };
 
-struct TopologyCoreContext
+struct StatsIterateContext
 {
-  struct TopologyIterateContext *topology_context;
+  /**
+   * Handle to the statistics service.
+   */
+  struct GNUNET_STATISTICS_Handle *stats_handle;
+
+  /**
+   * Handle for getting statistics.
+   */
+  struct GNUNET_STATISTICS_GetHandle *stats_get_handle;
+
+  /**
+   * Continuation to call once all stats information has been retrieved.
+   */
+  GNUNET_STATISTICS_Callback cont;
+
+  /**
+   * Proc function to call on each value received.
+   */
+  GNUNET_TESTING_STATISTICS_Iterator proc;
+
+  /**
+   * Closure for the stats callbacks (cont and proc).
+   */
+  void *cls;
+
+  /**
+   * Number of peers currently connected to.
+   */
+  unsigned int connected;
+
+  /**
+   * Number of peers we have finished iterating.
+   */
+  unsigned int completed;
+
+  /**
+   * Number of peers total.
+   */
+  unsigned int total;
+};
+
+struct CoreContext
+{
+  void *iter_context;
   struct GNUNET_TESTING_Daemon *daemon;
 };
 
@@ -697,9 +753,15 @@ make_config (const struct GNUNET_CONFIGURATION_Handle *cfg,
 
   if (GNUNET_CONFIGURATION_get_value_string(cfg, "testing", "control_host", &control_host) == GNUNET_OK)
     {
-      GNUNET_asprintf(&allowed_hosts, "%s; 127.0.0.1;", control_host);
+      if (hostname != NULL)
+        GNUNET_asprintf(&allowed_hosts, "%s; 127.0.0.1; %s;", control_host, hostname);
+      else
+        GNUNET_asprintf(&allowed_hosts, "%s; 127.0.0.1;", control_host);
+
       GNUNET_CONFIGURATION_set_value_string(uc.ret, "core", "ACCEPT_FROM", allowed_hosts);
       GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport", "ACCEPT_FROM", allowed_hosts);
+      GNUNET_CONFIGURATION_set_value_string(uc.ret, "dht", "ACCEPT_FROM", allowed_hosts);
+      GNUNET_CONFIGURATION_set_value_string(uc.ret, "statistics", "ACCEPT_FROM", allowed_hosts);
       GNUNET_free_non_null(control_host);
       GNUNET_free(allowed_hosts);
     }
@@ -710,9 +772,16 @@ make_config (const struct GNUNET_CONFIGURATION_Handle *cfg,
   if (hostname != NULL)
     {
       GNUNET_asprintf(&allowed_hosts, "%s; 127.0.0.1;", hostname);
+      GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport-udp", "BINDTO", hostname);
+      GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport-tcp", "BINDTO", hostname);
       GNUNET_CONFIGURATION_set_value_string(uc.ret, "arm", "ACCEPT_FROM", allowed_hosts);
       GNUNET_free(allowed_hosts);
     }
+  else
+    {
+      GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport-tcp", "BINDTO", "127.0.0.1");
+      GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport-udp", "BINDTO", "127.0.0.1");
+    }
 
   *port = (uint16_t) uc.nport;
   *upnum = uc.upnum;
@@ -2755,13 +2824,15 @@ internal_topology_callback(void *cls,
                            const struct GNUNET_PeerIdentity *peer,
                            struct GNUNET_TIME_Relative latency, uint32_t distance)
 {
-  struct TopologyCoreContext *core_ctx = cls;
-  struct TopologyIterateContext *iter_ctx = core_ctx->topology_context;
+  struct CoreContext *core_ctx = cls;
+  struct TopologyIterateContext *iter_ctx = core_ctx->iter_context;
 
-  if (peer == NULL) /* Either finished, or something went wrongo */
+  if (peer == NULL) /* Either finished, or something went wrong */
     {
       iter_ctx->completed++;
       iter_ctx->connected--;
+      /* One core context allocated per iteration, must free! */
+      GNUNET_free(core_ctx);
     }
   else
     {
@@ -2771,8 +2842,8 @@ internal_topology_callback(void *cls,
   if (iter_ctx->completed == iter_ctx->total)
     {
       iter_ctx->topology_cb(iter_ctx->cls, NULL, NULL, GNUNET_TIME_relative_get_zero(), 0, NULL);
+      /* Once all are done, free the iteration context */
       GNUNET_free(iter_ctx);
-      GNUNET_free(core_ctx);
     }
 }
 
@@ -2784,18 +2855,18 @@ internal_topology_callback(void *cls,
 static void
 schedule_get_topology(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
 {
-  struct TopologyCoreContext *core_context = cls;
-
+  struct CoreContext *core_context = cls;
+  struct TopologyIterateContext *topology_context = (struct TopologyIterateContext *)core_context->iter_context;
   if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
     return;
 
-  if (core_context->topology_context->connected > MAX_OUTSTANDING_CONNECTIONS)
+  if (topology_context->connected > MAX_OUTSTANDING_CONNECTIONS)
     {
 #if VERBOSE_TESTING > 2
           GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                       _("Delaying connect, we have too many outstanding connections!\n"));
 #endif
-      GNUNET_SCHEDULER_add_delayed(core_context->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_connect, core_context);
+      GNUNET_SCHEDULER_add_delayed(core_context->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_get_topology, core_context);
     }
   else
     {
@@ -2803,7 +2874,7 @@ schedule_get_topology(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
           GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                       _("Creating connection, outstanding_connections is %d\n"), outstanding_connects);
 #endif
-      core_context->topology_context->connected++;
+      topology_context->connected++;
       if (GNUNET_OK != GNUNET_CORE_iterate_peers (core_context->daemon->sched, core_context->daemon->cfg, &internal_topology_callback, core_context))
         internal_topology_callback(core_context, NULL, GNUNET_TIME_relative_get_zero(), 0);
 
@@ -2818,10 +2889,11 @@ void
 GNUNET_TESTING_get_topology (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_NotifyTopology cb, void *cls)
 {
   struct TopologyIterateContext *topology_context;
-  struct TopologyCoreContext *core_ctx;
+  struct CoreContext *core_ctx;
   unsigned int i;
   unsigned int total_count;
 
+  /* Allocate a single topology iteration context */
   topology_context = GNUNET_malloc(sizeof(struct TopologyIterateContext));
   topology_context->topology_cb = cb;
   topology_context->cls = cls;
@@ -2830,9 +2902,11 @@ GNUNET_TESTING_get_topology (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING
     {
       if (pg->peers[i].daemon->running == GNUNET_YES)
         {
-          core_ctx = GNUNET_malloc(sizeof(struct TopologyCoreContext));
+          /* Allocate one core context per core we need to connect to */
+          core_ctx = GNUNET_malloc(sizeof(struct CoreContext));
           core_ctx->daemon = pg->peers[i].daemon;
-          core_ctx->topology_context = topology_context;
+          /* Set back pointer to topology iteration context */
+          core_ctx->iter_context = topology_context;
           GNUNET_SCHEDULER_add_now(pg->sched, &schedule_get_topology, core_ctx);
           total_count++;
         }
@@ -2841,6 +2915,137 @@ GNUNET_TESTING_get_topology (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING
   return;
 }
 
+/**
+ * Callback function to process statistic values.
+ * This handler is here only really to insert a peer
+ * identity (or daemon) so the statistics can be uniquely
+ * tied to a single running peer.
+ *
+ * @param cls closure
+ * @param subsystem name of subsystem that created the statistic
+ * @param name the name of the datum
+ * @param value the current value
+ * @param is_persistent GNUNET_YES if the value is persistent, GNUNET_NO if not
+ * @return GNUNET_OK to continue, GNUNET_SYSERR to abort iteration
+ */
+static int internal_stats_callback (void *cls,
+                                    const char *subsystem,
+                                    const char *name,
+                                    uint64_t value,
+                                    int is_persistent)
+{
+  struct CoreContext *core_context = cls;
+  struct StatsIterateContext *stats_context = (struct StatsIterateContext *)core_context->iter_context;
+
+  return stats_context->proc(stats_context->cls, &core_context->daemon->id, subsystem, name, value, is_persistent);
+}
+
+/**
+ * Internal continuation call for statistics iteration.
+ *
+ * @param cls closure, the CoreContext for this iteration
+ * @param success whether or not the statistics iteration
+ *        was canceled (we don't care)
+ */
+static void internal_stats_cont (void *cls, int success)
+{
+  struct CoreContext *core_context = cls;
+  struct StatsIterateContext *stats_context = (struct StatsIterateContext *)core_context->iter_context;
+
+  stats_context->connected--;
+  stats_context->completed++;
+
+  if (stats_context->completed == stats_context->total)
+    {
+      stats_context->cont(stats_context->cls, GNUNET_YES);
+      if (stats_context->stats_handle != NULL)
+        GNUNET_STATISTICS_destroy(stats_context->stats_handle, GNUNET_NO);
+      GNUNET_free(stats_context);
+    }
+  GNUNET_free(core_context);
+}
+
+/**
+ * Check running statistics iteration tasks, if below max start a new one, otherwise
+ * schedule for some time in the future.
+ */
+static void
+schedule_get_statistics(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+  struct CoreContext *core_context = cls;
+  struct StatsIterateContext *stats_context = (struct StatsIterateContext *)core_context->iter_context;
+
+  if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+    return;
+
+  if (stats_context->connected > MAX_OUTSTANDING_CONNECTIONS)
+    {
+#if VERBOSE_TESTING > 2
+          GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                      _("Delaying connect, we have too many outstanding connections!\n"));
+#endif
+      GNUNET_SCHEDULER_add_delayed(core_context->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_get_statistics, core_context);
+    }
+  else
+    {
+#if VERBOSE_TESTING > 2
+          GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                      _("Creating connection, outstanding_connections is %d\n"), outstanding_connects);
+#endif
+
+      stats_context->connected++;
+      stats_context->stats_handle = GNUNET_STATISTICS_create(core_context->daemon->sched, "testing", core_context->daemon->cfg);
+      if (stats_context->stats_handle == NULL)
+        {
+          internal_stats_cont (core_context, GNUNET_NO);
+          return;
+        }
+
+      stats_context->stats_get_handle = GNUNET_STATISTICS_get(stats_context->stats_handle, NULL, NULL, GNUNET_TIME_relative_get_forever(), &internal_stats_cont, &internal_stats_callback, core_context);
+      if (stats_context->stats_get_handle == NULL)
+         internal_stats_cont (core_context, GNUNET_NO);
+
+    }
+}
+
+
+/**
+ * Iterate over all (running) peers in the peer group, retrieve
+ * all statistics from each.
+ */
+void
+GNUNET_TESTING_get_statistics (struct GNUNET_TESTING_PeerGroup *pg,
+                               GNUNET_STATISTICS_Callback cont,
+                               GNUNET_TESTING_STATISTICS_Iterator proc, void *cls)
+{
+  struct StatsIterateContext *stats_context;
+  struct CoreContext *core_ctx;
+  unsigned int i;
+  unsigned int total_count;
+
+  /* Allocate a single stats iteration context */
+  stats_context = GNUNET_malloc(sizeof(struct StatsIterateContext));
+  stats_context->cont = cont;
+  stats_context->proc = proc;
+  stats_context->cls = cls;
+  total_count = 0;
+  for (i = 0; i < pg->total; i++)
+    {
+      if (pg->peers[i].daemon->running == GNUNET_YES)
+        {
+          /* Allocate one core context per core we need to connect to */
+          core_ctx = GNUNET_malloc(sizeof(struct CoreContext));
+          core_ctx->daemon = pg->peers[i].daemon;
+          /* Set back pointer to stats iteration context */
+          core_ctx->iter_context = stats_context;
+          GNUNET_SCHEDULER_add_now(pg->sched, &schedule_get_statistics, core_ctx);
+          total_count++;
+        }
+    }
+  stats_context->total = total_count;
+  return;
+}
+
 /**
  * There are many ways to connect peers that are supported by this function.
  * To connect peers in the same topology that was created via the
@@ -3620,7 +3825,9 @@ GNUNET_TESTING_daemons_churn (struct GNUNET_TESTING_PeerGroup *pg,
  * @param callback_cls closure for the callback function
  */
 void
-GNUNET_TESTING_daemons_restart (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_NotifyCompletion callback, void *callback_cls)
+GNUNET_TESTING_daemons_restart (struct GNUNET_TESTING_PeerGroup *pg,
+                                GNUNET_TESTING_NotifyCompletion callback,
+                                void *callback_cls)
 {
   struct RestartContext *restart_context;
   unsigned int off;
@@ -3699,10 +3906,11 @@ GNUNET_TESTING_daemons_vary (struct GNUNET_TESTING_PeerGroup *pg,
  * @param emsg NULL on success
  */
 void internal_shutdown_callback (void *cls,
-                        const char *emsg)
+                                 const char *emsg)
 {
   struct ShutdownContext *shutdown_ctx = cls;
 
+  shutdown_ctx->outstanding--;
   if (emsg == NULL)
     {
       shutdown_ctx->peers_down++;
@@ -3722,6 +3930,47 @@ void internal_shutdown_callback (void *cls,
     }
 }
 
+/**
+ * Individual shutdown context for a particular peer.
+ */
+struct PeerShutdownContext
+{
+  /**
+   * Pointer to the high level shutdown context.
+   */
+  struct ShutdownContext *shutdown_ctx;
+
+  /**
+   * The daemon handle for the peer to shut down.
+   */
+  struct GNUNET_TESTING_Daemon *daemon;
+};
+
+/**
+ * Task to rate limit the number of outstanding peer shutdown
+ * requests.  This is necessary for making sure we don't do
+ * too many ssh connections at once, but is generally nicer
+ * to any system as well (graduated task starts, as opposed
+ * to calling gnunet-arm N times all at once).
+ */
+static void
+schedule_shutdown_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext * tc)
+{
+  struct PeerShutdownContext *peer_shutdown_ctx = cls;
+  struct ShutdownContext *shutdown_ctx = peer_shutdown_ctx->shutdown_ctx;
+
+  GNUNET_assert(peer_shutdown_ctx != NULL);
+  GNUNET_assert(shutdown_ctx != NULL);
+
+  if (shutdown_ctx->outstanding > MAX_CONCURRENT_SHUTDOWN)
+    GNUNET_SCHEDULER_add_delayed(peer_shutdown_ctx->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_shutdown_task, peer_shutdown_ctx);
+  else
+    {
+      shutdown_ctx->outstanding++;
+      GNUNET_TESTING_daemon_stop (peer_shutdown_ctx->daemon, shutdown_ctx->timeout, &internal_shutdown_callback, shutdown_ctx, GNUNET_YES, GNUNET_NO);
+      GNUNET_free(peer_shutdown_ctx);
+    }
+}
 /**
  * Shutdown all peers started in the given group.
  *
@@ -3738,28 +3987,25 @@ GNUNET_TESTING_daemons_stop (struct GNUNET_TESTING_PeerGroup *pg,
 {
   unsigned int off;
   struct ShutdownContext *shutdown_ctx;
-  GNUNET_TESTING_NotifyCompletion shutdown_cb;
-  void *shutdown_cb_cls;
+  struct PeerShutdownContext *peer_shutdown_ctx;
 
   GNUNET_assert(pg->total > 0);
 
-  shutdown_cb = NULL;
-  shutdown_ctx = NULL;
-
-  if (cb != NULL)
-    {
-      shutdown_ctx = GNUNET_malloc(sizeof(struct ShutdownContext));
-      shutdown_ctx->cb = cb;
-      shutdown_ctx->cb_cls = cb_cls;
-      shutdown_ctx->total_peers = pg->total;
-      shutdown_cb = &internal_shutdown_callback;
-      shutdown_cb_cls = cb_cls;
-    }
+  shutdown_ctx = GNUNET_malloc(sizeof(struct ShutdownContext));
+  shutdown_ctx->cb = cb;
+  shutdown_ctx->cb_cls = cb_cls;
+  shutdown_ctx->total_peers = pg->total;
+  shutdown_ctx->timeout = timeout;
+  /* shutdown_ctx->outstanding = 0; */
 
   for (off = 0; off < pg->total; off++)
     {
       GNUNET_assert(NULL != pg->peers[off].daemon);
-      GNUNET_TESTING_daemon_stop (pg->peers[off].daemon, timeout, shutdown_cb, shutdown_ctx, GNUNET_YES, GNUNET_NO);
+      peer_shutdown_ctx = GNUNET_malloc(sizeof(struct PeerShutdownContext));
+      peer_shutdown_ctx->daemon = pg->peers[off].daemon;
+      peer_shutdown_ctx->shutdown_ctx = shutdown_ctx;
+      GNUNET_SCHEDULER_add_now(pg->peers[off].daemon->sched, &schedule_shutdown_task, peer_shutdown_ctx);
+      //GNUNET_TESTING_daemon_stop (pg->peers[off].daemon, timeout, shutdown_cb, shutdown_ctx, GNUNET_YES, GNUNET_NO);
       if (NULL != pg->peers[off].cfg)
         GNUNET_CONFIGURATION_destroy (pg->peers[off].cfg);
       if (pg->peers[off].allowed_peers != NULL)