authentication of ciphertexts (+ seed)
[oweals/gnunet.git] / src / testing / testing_group.c
index 1c9f1c4bbb1f0db412a8e9291772e2df0d83a376..72b56ae9998984b68d423e707ac729770f5a5111 100644 (file)
@@ -32,6 +32,8 @@
 
 #define VERBOSE_TESTING GNUNET_NO
 
+#define VERBOSE_TOPOLOGY GNUNET_YES
+
 #define DEBUG_CHURN GNUNET_NO
 
 /**
@@ -49,7 +51,7 @@
  */
 #define HIGH_PORT 56000
 
-#define MAX_OUTSTANDING_CONNECTIONS 10
+#define MAX_OUTSTANDING_CONNECTIONS 40
 
 #define MAX_CONCURRENT_HOSTKEYS 10
 
@@ -176,6 +178,39 @@ struct ShutdownContext
   void *cb_cls;
 };
 
+/**
+ * Individual shutdown context for a particular peer.
+ */
+struct PeerShutdownContext
+{
+  /**
+   * Pointer to the high level shutdown context.
+   */
+  struct ShutdownContext *shutdown_ctx;
+
+  /**
+   * The daemon handle for the peer to shut down.
+   */
+  struct GNUNET_TESTING_Daemon *daemon;
+};
+
+/**
+ * Individual restart context for a particular peer.
+ */
+struct PeerRestartContext
+{
+  /**
+   * Pointer to the high level restart context.
+   */
+  struct ChurnRestartContext *churn_restart_ctx;
+
+  /**
+   * The daemon handle for the peer to restart.
+   */
+  struct GNUNET_TESTING_Daemon *daemon;
+};
+
+
 struct CreateTopologyContext
 {
 
@@ -256,6 +291,24 @@ struct InternalStartContext
 
 };
 
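+/**
+ * Context for restarting peers as part of churn.
+ */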
+struct ChurnRestartContext
+{
+  /**
+   * Number of restarts currently in flight.
+   */
+  unsigned int outstanding;
+
+  /**
+   * Handle to the underlying churn context.
+   */
+  struct ChurnContext *churn_ctx;
+
+  /**
+   * How long to allow the operation to take.
+   */
+  struct GNUNET_TIME_Relative timeout;
+};
+
 /**
  * Data we keep per peer.
  */
@@ -314,7 +367,7 @@ struct PeerData
 
 
 /**
- * Data we keep per host.
+ * Array of data we keep per host.
  */
 struct HostData
 {
@@ -458,11 +511,15 @@ struct GNUNET_TESTING_PeerGroup
   void *notify_connection_cls;
 
   /**
-   * NULL-terminated array of information about
-   * hosts.
+   * Array of information about hosts.
    */
   struct HostData *hosts;
 
+  /**
+   * Number of hosts (number of entries in the hosts array).
+   */
+  unsigned int num_hosts;
+
   /**
    * Array of "total" peers.
    */
@@ -670,6 +727,13 @@ GNUNET_TESTING_topology_option_get (enum GNUNET_TESTING_TopologyOption *topology
        */
       "CONNECT_DFS",
       
+      /**
+       * Find the N closest peers to each allowed peer in the
+       * topology and make sure a connection to those peers
+       * exists in the connect topology.
+       */
+      "CONNECT_CLOSEST",
+
       /**
        * No options specified.
        */
@@ -839,7 +903,6 @@ make_config (const struct GNUNET_CONFIGURATION_Handle *cfg,
       GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport-udp", "BINDTO", "127.0.0.1");
     }
 
-
   *port = (uint16_t) uc.nport;
   *upnum = uc.upnum;
   uc.fdnum++;
@@ -1554,7 +1617,9 @@ create_erdos_renyi (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_Connecti
 
 /**
  * Create a topology given a peer group (set of running peers)
- * and a connection processor.
+ * and a connection processor.  This particular function creates
+ * the connections for a 2d-torus, plus additional "closest"
+ * connections per peer.
  *
  * @param pg the peergroup to create the topology on
  * @param proc the connection processor to call to actually set
@@ -2614,8 +2679,8 @@ struct DFSContext
  */
 static int
 random_connect_iterator (void *cls,
-                  const GNUNET_HashCode * key,
-                  void *value)
+                         const GNUNET_HashCode * key,
+                         void *value)
 {
   struct RandomContext *random_ctx = cls;
   double random_number;
@@ -2815,6 +2880,96 @@ static unsigned int count_allowed_connections(struct GNUNET_TESTING_PeerGroup *p
   return count;
 }
 
+
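+/**
+ * Closure for find_closest_peers: tracks the closest candidate
+ * (most matching hash bits) found so far for the current peer.
+ */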
+struct FindClosestContext
+{
+  /**
+   * The currently known closest peer.
+   */
+  struct GNUNET_TESTING_Daemon *closest;
+
+  /**
+   * The info for the peer we are adding connections for.
+   */
+  struct PeerData *curr_peer;
+
+  /**
+   * The distance (bits) between the current
+   * peer and the currently known closest.
+   */
+  unsigned int closest_dist;
+
+  /**
+   * The offset of the closest known peer in
+   * the peer group.
+   */
+  unsigned int closest_num;
+};
+
+/**
+ * Iterator over hash map entries of the allowed
+ * peer connections.  Find the closest, not already
+ * connected peer and return it.
+ *
+ * @param cls closure (struct FindClosestContext)
+ * @param key current key code (hash of offset in pg)
+ * @param value value in the hash map - a GNUNET_TESTING_Daemon
+ * @return GNUNET_YES if we should continue to
+ *         iterate,
+ *         GNUNET_NO if not.
+ */
+static int
+find_closest_peers (void *cls, const GNUNET_HashCode * key, void *value)
+{
+  struct FindClosestContext *closest_ctx = cls;
+  struct GNUNET_TESTING_Daemon *daemon = value;
+
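+  /* A candidate is better if it matches more leading hash bits of the
+     current peer than the best match so far, and we are not already
+     scheduled to connect to it. */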
+  if (((closest_ctx->closest == NULL) ||
+       (GNUNET_CRYPTO_hash_matching_bits(&daemon->id.hashPubKey, &closest_ctx->curr_peer->daemon->id.hashPubKey) > closest_ctx->closest_dist))
+      && (GNUNET_YES != GNUNET_CONTAINER_multihashmap_contains(closest_ctx->curr_peer->connect_peers, key)))
+    {
+      closest_ctx->closest_dist = GNUNET_CRYPTO_hash_matching_bits(&daemon->id.hashPubKey, &closest_ctx->curr_peer->daemon->id.hashPubKey);
+      closest_ctx->closest = daemon;
+      uid_from_hash(key, &closest_ctx->closest_num);
+    }
+  return GNUNET_YES;
+}
+
+/**
+ * From the set of possible connections, choose num connections per
+ * peer that are closest (by matching hash bits) out of those allowed.
+ * Guaranteed to add num peers to connect to, provided there are that
+ * many peers in the underlay topology to connect to.
+ *
+ * @param pg the peergroup we are dealing with
+ * @param num how many connections (at least) each peer should have, if possible
+ * @param proc processor to actually add the connections
+ */
+void
+add_closest (struct GNUNET_TESTING_PeerGroup *pg, unsigned int num, GNUNET_TESTING_ConnectionProcessor proc)
+{
+  struct FindClosestContext closest_ctx;
+  uint32_t pg_iter;
+  uint32_t i;
+
+  for (i = 0; i < num; i++) /* Each time find a closest peer (from those available) */
+    {
+      for (pg_iter = 0; pg_iter < pg->total; pg_iter++)
+        {
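+          /* Reset the search context, then scan this peer's allowed
+             peers for the closest one not already connected. */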
+          closest_ctx.curr_peer = &pg->peers[pg_iter];
+          closest_ctx.closest = NULL;
+          closest_ctx.closest_dist = 0;
+          closest_ctx.closest_num = 0;
+          GNUNET_CONTAINER_multihashmap_iterate(pg->peers[pg_iter].allowed_peers, &find_closest_peers, &closest_ctx);
+          if (closest_ctx.closest != NULL)
+            {
+              GNUNET_assert (closest_ctx.closest_num < pg->total);
+              proc(pg, pg_iter, closest_ctx.closest_num);
+            }
+        }
+    }
+}
+
 /**
  * From the set of connections possible, choose at least num connections per
  * peer based on depth first traversal of peer connections.  If DFS leaves
@@ -3078,6 +3233,66 @@ schedule_get_statistics(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc
     }
 }
 
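+/**
+ * Linked list entry used to detect statistics instances that are
+ * shared by multiple peers (single statistics service per host).
+ */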
+struct DuplicateStats
+{
+  /**
+   * Next item in the list
+   */
+  struct DuplicateStats *next;
+
+  /**
+   * Unique key: hostname (if any), unix domain socket path and port
+   * of the statistics instance, concatenated.
+   */
+  char *unique_string;
+};
+
+/**
+ * Check whether the combination of port/host/unix domain socket
+ * already exists in the list of peers being checked for statistics.
+ *
+ * @param pg the peergroup in question
+ * @param specific_peer the peer we're concerned with
+ * @param stats_list the list to return to the caller
+ *
+ * @return GNUNET_YES if the statistics instance has been seen already,
+ *         GNUNET_NO if not (and we may have added it to the list)
+ */
+static int
+stats_check_existing(struct GNUNET_TESTING_PeerGroup *pg, struct PeerData *specific_peer, struct DuplicateStats **stats_list)
+{
+  struct DuplicateStats *pos;
+  char *unix_domain_socket;
+  unsigned long long port;
+  char *to_match;
+  if (GNUNET_YES != GNUNET_CONFIGURATION_get_value_yesno(pg->cfg, "testing", "single_statistics_per_host"))
+    return GNUNET_NO; /* Each peer has its own statistics instance, do nothing! */
+
+  pos = *stats_list;
+  if (GNUNET_OK != GNUNET_CONFIGURATION_get_value_string(specific_peer->cfg, "statistics", "unixpath", &unix_domain_socket))
+    return GNUNET_NO;
+
+  if (GNUNET_OK != GNUNET_CONFIGURATION_get_value_number(specific_peer->cfg, "statistics", "port", &port))
+    port = 0;
+
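+  /* Build the key identifying this statistics instance:
+     hostname (if any) + unix domain socket path + port. */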
+  if (specific_peer->daemon->hostname != NULL)
+    GNUNET_asprintf(&to_match, "%s%s%llu", specific_peer->daemon->hostname, unix_domain_socket, port);
+  else
+    GNUNET_asprintf(&to_match, "%s%llu", unix_domain_socket, port);
+  GNUNET_free (unix_domain_socket);
+
+  while (pos != NULL)
+    {
+      if (0 == strcmp(to_match, pos->unique_string))
+        {
+          GNUNET_free(to_match);
+          return GNUNET_YES;
+        }
+      pos = pos->next;
+    }
+  pos = GNUNET_malloc(sizeof(struct DuplicateStats));
+  pos->unique_string = to_match;
+  pos->next = *stats_list;
+  *stats_list = pos;
+  return GNUNET_NO;
+}
 
 /**
  * Iterate over all (running) peers in the peer group, retrieve
@@ -3092,6 +3307,9 @@ GNUNET_TESTING_get_statistics (struct GNUNET_TESTING_PeerGroup *pg,
   struct StatsCoreContext *core_ctx;
   unsigned int i;
   unsigned int total_count;
+  struct DuplicateStats *stats_list;
+  struct DuplicateStats *pos;
+  stats_list = NULL;
 
   /* Allocate a single stats iteration context */
   stats_context = GNUNET_malloc(sizeof(struct StatsIterateContext));
@@ -3099,9 +3317,10 @@ GNUNET_TESTING_get_statistics (struct GNUNET_TESTING_PeerGroup *pg,
   stats_context->proc = proc;
   stats_context->cls = cls;
   total_count = 0;
+
   for (i = 0; i < pg->total; i++)
     {
-      if (pg->peers[i].daemon->running == GNUNET_YES)
+      if ((pg->peers[i].daemon->running == GNUNET_YES) && (GNUNET_NO == stats_check_existing(pg, &pg->peers[i], &stats_list)))
         {
           /* Allocate one core context per core we need to connect to */
           core_ctx = GNUNET_malloc(sizeof(struct StatsCoreContext));
@@ -3112,7 +3331,20 @@ GNUNET_TESTING_get_statistics (struct GNUNET_TESTING_PeerGroup *pg,
           total_count++;
         }
     }
+
+  GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "Retrieving stats from %u total instances.\n", total_count);
   stats_context->total = total_count;
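+  /* Free the temporary list of statistics instances seen. */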
+  pos = stats_list;
+  while (pos != NULL)
+    {
+      stats_list = pos->next;
+      GNUNET_free (pos->unique_string);
+      GNUNET_free (pos);
+      pos = stats_list;
+    }
   return;
 }
 
@@ -3238,6 +3470,13 @@ GNUNET_TESTING_connect_topology (struct GNUNET_TESTING_PeerGroup *pg,
 #endif
       perform_dfs(pg, (int)option_modifier);
       break;
+    case GNUNET_TESTING_TOPOLOGY_OPTION_ADD_CLOSEST:
+#if VERBOSE_TOPOLOGY
+      GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+                  _("Finding additional %u closest peers each (if possible)\n"), (unsigned int)option_modifier);
+#endif
+      add_closest(pg, (unsigned int)option_modifier, &add_actual_connections);
+      break;
     case GNUNET_TESTING_TOPOLOGY_OPTION_NONE:
       break;
     case GNUNET_TESTING_TOPOLOGY_OPTION_ALL:
@@ -3319,6 +3558,84 @@ internal_continue_startup (void *cls, const struct GNUNET_SCHEDULER_TaskContext
     }
 }
 
+
+/**
+ * Callback for informing us about a successful
+ * or unsuccessful churn start call.
+ *
+ * @param cls a ChurnContext
+ * @param id the peer identity of the started peer
+ * @param cfg the handle to the configuration of the peer
+ * @param d handle to the daemon for the peer
+ * @param emsg NULL on success, non-NULL on failure
+ *
+ */
+void
+churn_start_callback (void *cls,
+                      const struct GNUNET_PeerIdentity *id,
+                      const struct GNUNET_CONFIGURATION_Handle *cfg,
+                      struct GNUNET_TESTING_Daemon *d,
+                      const char *emsg)
+{
+  struct ChurnRestartContext *startup_ctx = cls;
+  struct ChurnContext *churn_ctx = startup_ctx->churn_ctx;
+
+  unsigned int total_left;
+  char *error_message;
+
+  error_message = NULL;
+  startup_ctx->outstanding--;
+  if (emsg != NULL)
+    {
+      GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+                  "Churn stop callback failed with error `%s'\n",
+                  emsg);
+      churn_ctx->num_failed_start++;
+    }
+  else
+    {
+      churn_ctx->num_to_start--;
+    }
+
+#if DEBUG_CHURN
+  GNUNET_log(GNUNET_ERROR_TYPE_WARNING,
+             "Started peer, %d left.\n",
+             churn_ctx->num_to_start);
+#endif
+
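+  /* The churn operation is complete once every requested start and
+     stop has either succeeded or failed. */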
+  total_left = (churn_ctx->num_to_stop - churn_ctx->num_failed_stop) + (churn_ctx->num_to_start - churn_ctx->num_failed_start);
+
+  if (total_left == 0)
+  {
+    if ((churn_ctx->num_failed_stop > 0) || (churn_ctx->num_failed_start > 0))
+      GNUNET_asprintf(&error_message,
+                      "Churn didn't complete successfully, %u peers failed to start %u peers failed to be stopped!",
+                      churn_ctx->num_failed_start,
+                      churn_ctx->num_failed_stop);
+    churn_ctx->cb(churn_ctx->cb_cls, error_message);
+    GNUNET_free_non_null(error_message);
+    GNUNET_free(churn_ctx);
+    GNUNET_free(startup_ctx);
+  }
+}
+
+
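+/**
+ * Task to rate limit the number of outstanding peer restart
+ * requests during churn.  Re-schedules itself while too many
+ * restarts are already in flight.
+ *
+ * @param cls a PeerRestartContext
+ * @param tc scheduler context
+ */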
+static void
+schedule_churn_restart (void *cls, const struct GNUNET_SCHEDULER_TaskContext * tc)
+{
+  struct PeerRestartContext *peer_restart_ctx = cls;
+  struct ChurnRestartContext *startup_ctx = peer_restart_ctx->churn_restart_ctx;
+
+  if (startup_ctx->outstanding > MAX_CONCURRENT_STARTING)
+    GNUNET_SCHEDULER_add_delayed(peer_restart_ctx->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_churn_restart, peer_restart_ctx);
+  else
+    {
+      startup_ctx->outstanding++;
+      GNUNET_TESTING_daemon_start_stopped(peer_restart_ctx->daemon,
+                                          startup_ctx->timeout,
+                                          &churn_start_callback,
+                                          startup_ctx);
+      GNUNET_free(peer_restart_ctx);
+    }
+}
+
 static void
 internal_start (void *cls, const struct GNUNET_SCHEDULER_TaskContext * tc)
 {
@@ -3442,7 +3759,7 @@ GNUNET_TESTING_daemons_start (struct GNUNET_SCHEDULER_Handle *sched,
   pg->peers = GNUNET_malloc (total * sizeof (struct PeerData));
   if (NULL != hostnames)
     {
-      off = 2;
+      off = 0;
       hostpos = hostnames;
       while (hostpos != NULL)
         {
@@ -3456,21 +3773,21 @@ GNUNET_TESTING_daemons_start (struct GNUNET_SCHEDULER_Handle *sched,
       while (hostpos != NULL)
         {
           pg->hosts[off].minport = LOW_PORT;
-          off++;
           pg->hosts[off].hostname = GNUNET_strdup(hostpos->hostname);
           if (hostpos->username != NULL)
             pg->hosts[off].username = GNUNET_strdup(hostpos->username);
           pg->hosts[off].sshport = hostpos->port;
           hostpos = hostpos->next;
+          off++;
         }
 
       if (off == 0)
         {
-          GNUNET_free (pg->hosts);
           pg->hosts = NULL;
         }
       hostcnt = off;
       minport = 0;
+      pg->num_hosts = off;
 
 #if NO_LL
       off = 2;
@@ -3684,11 +4001,14 @@ void restart_callback (void *cls,
 void
 churn_stop_callback (void *cls, const char *emsg)
 {
-  struct ChurnContext *churn_ctx = cls;
+  struct ShutdownContext *shutdown_ctx = cls;
+  struct ChurnContext *churn_ctx = shutdown_ctx->cb_cls;
   unsigned int total_left;
   char *error_message;
 
   error_message = NULL;
+  shutdown_ctx->outstanding--;
+
   if (emsg != NULL)
     {
       GNUNET_log(GNUNET_ERROR_TYPE_WARNING, 
@@ -3719,62 +4039,7 @@ churn_stop_callback (void *cls, const char *emsg)
     churn_ctx->cb(churn_ctx->cb_cls, error_message);
     GNUNET_free_non_null(error_message);
     GNUNET_free(churn_ctx);
-  }
-}
-
-/**
- * Callback for informing us about a successful
- * or unsuccessful churn start call.
- *
- * @param cls a ChurnContext
- * @param id the peer identity of the started peer
- * @param cfg the handle to the configuration of the peer
- * @param d handle to the daemon for the peer
- * @param emsg NULL on success, non-NULL on failure
- *
- */
-void
-churn_start_callback (void *cls,
-                      const struct GNUNET_PeerIdentity *id,
-                      const struct GNUNET_CONFIGURATION_Handle *cfg,
-                      struct GNUNET_TESTING_Daemon *d,
-                      const char *emsg)
-{
-  struct ChurnContext *churn_ctx = cls;
-  unsigned int total_left;
-  char *error_message;
-
-  error_message = NULL;
-  if (emsg != NULL)
-    {
-      GNUNET_log (GNUNET_ERROR_TYPE_WARNING, 
-                 "Churn stop callback failed with error `%s'\n",
-                 emsg);
-      churn_ctx->num_failed_start++;
-    }
-  else
-    {
-      churn_ctx->num_to_start--;
-    }
-  
-#if DEBUG_CHURN
-  GNUNET_log(GNUNET_ERROR_TYPE_WARNING,
-            "Started peer, %d left.\n", 
-            churn_ctx->num_to_start);
-#endif
-
-  total_left = (churn_ctx->num_to_stop - churn_ctx->num_failed_stop) + (churn_ctx->num_to_start - churn_ctx->num_failed_start);
-
-  if (total_left == 0)
-  {
-    if ((churn_ctx->num_failed_stop > 0) || (churn_ctx->num_failed_start > 0))
-      GNUNET_asprintf(&error_message, 
-                     "Churn didn't complete successfully, %u peers failed to start %u peers failed to be stopped!", 
-                     churn_ctx->num_failed_start,
-                     churn_ctx->num_failed_stop);
-    churn_ctx->cb(churn_ctx->cb_cls, error_message);
-    GNUNET_free_non_null(error_message);
-    GNUNET_free(churn_ctx);
+    GNUNET_free(shutdown_ctx);
   }
 }
 
@@ -3801,6 +4066,33 @@ GNUNET_TESTING_daemons_running (struct GNUNET_TESTING_PeerGroup *pg)
   return running;
 }
 
+/**
+ * Task to rate limit the number of outstanding peer shutdown
+ * requests.  This is necessary for making sure we don't do
+ * too many ssh connections at once, but is generally nicer
+ * to any system as well (graduated task starts, as opposed
+ * to calling gnunet-arm N times all at once).
+ */
+static void
+schedule_churn_shutdown_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext * tc)
+{
+  struct PeerShutdownContext *peer_shutdown_ctx = cls;
+  struct ShutdownContext *shutdown_ctx;
+
+  GNUNET_assert(peer_shutdown_ctx != NULL);
+  shutdown_ctx = peer_shutdown_ctx->shutdown_ctx;
+  GNUNET_assert(shutdown_ctx != NULL);
+
+  if (shutdown_ctx->outstanding > MAX_CONCURRENT_SHUTDOWN)
+    GNUNET_SCHEDULER_add_delayed(peer_shutdown_ctx->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_churn_shutdown_task, peer_shutdown_ctx);
+  else
+    {
+      shutdown_ctx->outstanding++;
+      GNUNET_TESTING_daemon_stop (peer_shutdown_ctx->daemon, shutdown_ctx->timeout, shutdown_ctx->cb, shutdown_ctx, GNUNET_NO, GNUNET_YES);
+      GNUNET_free(peer_shutdown_ctx);
+    }
+}
+
 /**
  * Simulate churn by stopping some peers (and possibly
  * re-starting others if churn is called multiple times).  This
@@ -3830,6 +4122,11 @@ GNUNET_TESTING_daemons_churn (struct GNUNET_TESTING_PeerGroup *pg,
                               void *cb_cls)
 {
   struct ChurnContext *churn_ctx;
+  struct ShutdownContext *shutdown_ctx;
+  struct PeerShutdownContext *peer_shutdown_ctx;
+  struct PeerRestartContext *peer_restart_ctx;
+  struct ChurnRestartContext *churn_startup_ctx;
+
   unsigned int running;
   unsigned int stopped;
   unsigned int total_running;
@@ -3922,27 +4219,53 @@ GNUNET_TESTING_daemons_churn (struct GNUNET_TESTING_PeerGroup *pg,
   }
 
   GNUNET_assert(running >= voff);
+  if (voff > 0)
+    {
+      shutdown_ctx = GNUNET_malloc(sizeof(struct ShutdownContext));
+      shutdown_ctx->cb = &churn_stop_callback;
+      shutdown_ctx->cb_cls = churn_ctx;
+      shutdown_ctx->total_peers = voff;
+      shutdown_ctx->timeout = timeout;
+    }
+
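+  /* Schedule each stop as a task so that schedule_churn_shutdown_task
+     can rate limit the number of peers shutting down concurrently. */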
   for (i = 0; i < voff; i++)
   {
 #if DEBUG_CHURN
     GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "Stopping peer %d!\n", running_permute[i]);
 #endif
     GNUNET_assert(running_arr != NULL);
+    peer_shutdown_ctx = GNUNET_malloc(sizeof(struct PeerShutdownContext));
+    peer_shutdown_ctx->daemon = pg->peers[running_arr[running_permute[i]]].daemon;
+    peer_shutdown_ctx->shutdown_ctx = shutdown_ctx;
+    GNUNET_SCHEDULER_add_now(peer_shutdown_ctx->daemon->sched, &schedule_churn_shutdown_task, peer_shutdown_ctx);
+
+    /*
     GNUNET_TESTING_daemon_stop (pg->peers[running_arr[running_permute[i]]].daemon,
                                timeout, 
                                &churn_stop_callback, churn_ctx, 
-                               GNUNET_NO, GNUNET_YES);
+                               GNUNET_NO, GNUNET_YES); */
   }
 
   GNUNET_assert(stopped >= von);
+  if (von > 0)
+    {
+      churn_startup_ctx = GNUNET_malloc(sizeof(struct ChurnRestartContext));
+      churn_startup_ctx->churn_ctx = churn_ctx;
+      churn_startup_ctx->timeout = timeout;
+    }
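+  /* Likewise, schedule each restart as a task so that
+     schedule_churn_restart can rate limit concurrent starts. */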
   for (i = 0; i < von; i++)
     {
 #if DEBUG_CHURN
       GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "Starting up peer %d!\n", stopped_permute[i]);
 #endif
       GNUNET_assert(stopped_arr != NULL);
+      peer_restart_ctx = GNUNET_malloc(sizeof(struct PeerRestartContext));
+      peer_restart_ctx->churn_restart_ctx = churn_startup_ctx;
+      peer_restart_ctx->daemon = pg->peers[stopped_arr[stopped_permute[i]]].daemon;
+      GNUNET_SCHEDULER_add_now(peer_restart_ctx->daemon->sched, &schedule_churn_restart, peer_restart_ctx);
+      /*
       GNUNET_TESTING_daemon_start_stopped(pg->peers[stopped_arr[stopped_permute[i]]].daemon, 
-                                         timeout, &churn_start_callback, churn_ctx);
+                                         timeout, &churn_start_callback, churn_ctx);*/
   }
 
   GNUNET_free_non_null(running_arr);
@@ -4000,19 +4323,23 @@ GNUNET_TESTING_daemons_vary (struct GNUNET_TESTING_PeerGroup *pg,
                             GNUNET_TESTING_NotifyCompletion cb,
                             void *cb_cls)
 {
+  struct ShutdownContext *shutdown_ctx;
+  struct ChurnRestartContext *startup_ctx;
   struct ChurnContext *churn_ctx;
 
   if (GNUNET_NO == desired_status)
     {
       if (NULL != pg->peers[offset].daemon)
        {
+          shutdown_ctx = GNUNET_malloc(sizeof(struct ShutdownContext));
          churn_ctx = GNUNET_malloc(sizeof(struct ChurnContext));
          churn_ctx->num_to_start = 0;
          churn_ctx->num_to_stop = 1;
          churn_ctx->cb = cb;
-         churn_ctx->cb_cls = cb_cls;  
+         churn_ctx->cb_cls = cb_cls;
+         shutdown_ctx->cb_cls = churn_ctx;
          GNUNET_TESTING_daemon_stop(pg->peers[offset].daemon, 
-                                    timeout, &churn_stop_callback, churn_ctx, 
+                                    timeout, &churn_stop_callback, shutdown_ctx,
                                     GNUNET_NO, GNUNET_YES);     
        }
     }
@@ -4020,13 +4347,15 @@ GNUNET_TESTING_daemons_vary (struct GNUNET_TESTING_PeerGroup *pg,
     {
       if (NULL == pg->peers[offset].daemon)
        {
+          startup_ctx = GNUNET_malloc(sizeof(struct ChurnRestartContext));
          churn_ctx = GNUNET_malloc(sizeof(struct ChurnContext));
          churn_ctx->num_to_start = 1;
          churn_ctx->num_to_stop = 0;
          churn_ctx->cb = cb;
          churn_ctx->cb_cls = cb_cls;  
+         startup_ctx->churn_ctx = churn_ctx;
          GNUNET_TESTING_daemon_start_stopped(pg->peers[offset].daemon, 
-                                             timeout, &churn_start_callback, churn_ctx);
+                                             timeout, &churn_start_callback, startup_ctx);
        }
     }
   else
@@ -4065,21 +4394,6 @@ void internal_shutdown_callback (void *cls,
     }
 }
 
-/**
- * Individual shutdown context for a particular peer.
- */
-struct PeerShutdownContext
-{
-  /**
-   * Pointer to the high level shutdown context.
-   */
-  struct ShutdownContext *shutdown_ctx;
-
-  /**
-   * The daemon handle for the peer to shut down.
-   */
-  struct GNUNET_TESTING_Daemon *daemon;
-};
 
 /**
  * Task to rate limit the number of outstanding peer shutdown
@@ -4107,6 +4421,7 @@ schedule_shutdown_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext * t
       GNUNET_free(peer_shutdown_ctx);
     }
 }
+
 /**
  * Shutdown all peers started in the given group.
  *
@@ -4152,11 +4467,12 @@ GNUNET_TESTING_daemons_stop (struct GNUNET_TESTING_PeerGroup *pg,
         GNUNET_CONTAINER_multihashmap_destroy(pg->peers[off].blacklisted_peers);
     }
   GNUNET_free (pg->peers);
-  if (NULL != pg->hosts)
+  for (off = 0; off < pg->num_hosts; off++)
     {
-      GNUNET_free (pg->hosts[0].hostname);
-      GNUNET_free (pg->hosts);
+      GNUNET_free (pg->hosts[off].hostname);
+      GNUNET_free_non_null (pg->hosts[off].username);
     }
+  GNUNET_free_non_null (pg->hosts);
   GNUNET_free (pg);
 }