+ pg->peers[pg_iter].connect_peers_working_set =
+ GNUNET_CONTAINER_multihashmap_create (num);
+ }
+
+ starting_peer = 0;
+ dfs_count = 0;
+ while ((count_workingset_connections (pg) < num * pg->total)
+ && (count_allowed_connections (pg) > 0))
+ {
+ if (dfs_count % pg->total == 0) /* Restart the DFS at some weakly connected peer */
+ {
+ least_connections = -1; /* Set to very high number */
+ for (pg_iter = 0; pg_iter < pg->total; pg_iter++)
+ {
+ if (GNUNET_CONTAINER_multihashmap_size
+ (pg->peers[pg_iter].connect_peers_working_set) <
+ least_connections)
+ {
+ starting_peer = pg_iter;
+ least_connections =
+ GNUNET_CONTAINER_multihashmap_size (pg->
+ peers
+ [pg_iter].connect_peers_working_set);
+ }
+ }
+ }
+
+ if (GNUNET_CONTAINER_multihashmap_size (pg->peers[starting_peer].connect_peers) == 0) /* Ensure there is at least one peer left to connect! */
+ {
+ dfs_count = 0;
+ continue;
+ }
+
+ /* Choose a random peer from the chosen peers set of connections to add */
+ dfs_ctx.chosen =
+ GNUNET_CRYPTO_random_u32 (GNUNET_CRYPTO_QUALITY_WEAK,
+ GNUNET_CONTAINER_multihashmap_size
+ (pg->peers[starting_peer].connect_peers));
+ dfs_ctx.first_uid = starting_peer;
+ dfs_ctx.first = &pg->peers[starting_peer];
+ dfs_ctx.pg = pg;
+ dfs_ctx.current = 0;
+
+ GNUNET_CONTAINER_multihashmap_iterate (pg->
+ peers
+ [starting_peer].connect_peers,
+ &dfs_connect_iterator, &dfs_ctx);
+ /* Remove the second from the first, since we will be continuing the search and may encounter the first peer again! */
+ hash_from_uid (dfs_ctx.second_uid, &second_hash);
+ GNUNET_assert (GNUNET_YES ==
+ GNUNET_CONTAINER_multihashmap_remove (pg->peers
+ [starting_peer].connect_peers,
+ &second_hash,
+ pg->
+ peers
+ [dfs_ctx.second_uid].daemon));
+ starting_peer = dfs_ctx.second_uid;
+ }
+
+ for (pg_iter = 0; pg_iter < pg->total; pg_iter++)
+ {
+ /* Remove the "old" connections */
+ GNUNET_CONTAINER_multihashmap_destroy (pg->
+ peers[pg_iter].connect_peers);
+ /* And replace with the working set */
+ pg->peers[pg_iter].connect_peers =
+ pg->peers[pg_iter].connect_peers_working_set;
+ }
+}
+
/**
 * Internal callback for topology information for a particular peer.
 *
 * Invoked once per connected peer of the daemon being queried, and a
 * final time with peer == NULL when the iteration for that daemon is
 * done (or failed — the two cases are not distinguished here).
 *
 * @param cls the CoreContext allocated for this daemon's iteration
 * @param peer identity of a connected peer, NULL when this daemon's
 *        iteration has ended
 * @param atsi performance information (not forwarded)
 */
static void
internal_topology_callback (void *cls,
                            const struct GNUNET_PeerIdentity *peer,
                            const struct GNUNET_TRANSPORT_ATS_Information
                            *atsi)
{
  struct CoreContext *core_ctx = cls;
  struct TopologyIterateContext *iter_ctx = core_ctx->iter_context;

  if (peer == NULL) /* Either finished, or something went wrong */
    {
      iter_ctx->completed++;
      iter_ctx->connected--;
      /* One core context allocated per iteration, must free! */
      GNUNET_free (core_ctx);
    }
  else
    {
      /* Report one edge of the topology; ATS info is not passed on. */
      iter_ctx->topology_cb (iter_ctx->cls, &core_ctx->daemon->id,
                             peer, NULL);
    }

  if (iter_ctx->completed == iter_ctx->total)
    {
      /* All daemons reported: signal overall completion with
         all-NULL arguments. */
      iter_ctx->topology_cb (iter_ctx->cls, NULL, NULL, NULL);
      /* Once all are done, free the iteration context */
      GNUNET_free (iter_ctx);
    }
}
+
+
+/**
+ * Check running topology iteration tasks, if below max start a new one, otherwise
+ * schedule for some time in the future.
+ */
+static void
+schedule_get_topology (void *cls,
+ const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+ struct CoreContext *core_context = cls;
+ struct TopologyIterateContext *topology_context =
+ (struct TopologyIterateContext *) core_context->iter_context;
+ if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+ return;
+
+ if (topology_context->connected > MAX_OUTSTANDING_CONNECTIONS)
+ {
+#if VERBOSE_TESTING > 2
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _
+ ("Delaying connect, we have too many outstanding connections!\n"));
+#endif
+ GNUNET_SCHEDULER_add_delayed (GNUNET_TIME_relative_multiply
+ (GNUNET_TIME_UNIT_MILLISECONDS, 100),
+ &schedule_get_topology, core_context);
+ }
+ else
+ {
+#if VERBOSE_TESTING > 2
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating connection, outstanding_connections is %d\n"),
+ outstanding_connects);
+#endif
+ topology_context->connected++;
+
+ if (GNUNET_OK !=
+ GNUNET_CORE_iterate_peers (core_context->daemon->cfg,
+ &internal_topology_callback,
+ core_context))
+ {
+ GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "Topology iteration failed.\n");
+ internal_topology_callback (core_context, NULL, NULL);
+ }
+ }
+}
+
/**
 * Iterate over all (running) peers in the peer group, retrieve
 * all connections that each currently has.
 *
 * @param pg the peer group to query
 * @param cb callback invoked once per discovered connection, then once
 *        with all-NULL arguments to signal completion; invoked with an
 *        error message immediately if no peers are running
 * @param cls closure for cb
 */
void
GNUNET_TESTING_get_topology (struct GNUNET_TESTING_PeerGroup *pg,
                             GNUNET_TESTING_NotifyTopology cb, void *cls)
{
  struct TopologyIterateContext *topology_context;
  struct CoreContext *core_ctx;
  unsigned int i;
  unsigned int total_count;

  /* Allocate a single topology iteration context */
  topology_context = GNUNET_malloc (sizeof (struct TopologyIterateContext));
  topology_context->topology_cb = cb;
  topology_context->cls = cls;
  total_count = 0;
  for (i = 0; i < pg->total; i++)
    {
      if (pg->peers[i].daemon->running == GNUNET_YES)
        {
          /* Allocate one core context per core we need to connect to */
          core_ctx = GNUNET_malloc (sizeof (struct CoreContext));
          core_ctx->daemon = pg->peers[i].daemon;
          /* Set back pointer to topology iteration context */
          core_ctx->iter_context = topology_context;
          GNUNET_SCHEDULER_add_now (&schedule_get_topology, core_ctx);
          total_count++;
        }
    }
  if (total_count == 0)
    {
      /* No tasks were scheduled, so no callback would ever free the
         context; report the error and clean up right away. */
      cb (cls, NULL, NULL, "Cannot iterate over topology, no running peers!");
      GNUNET_free (topology_context);
    }
  else
    topology_context->total = total_count;
  return;
}
+
+/**
+ * Callback function to process statistic values.
+ * This handler is here only really to insert a peer
+ * identity (or daemon) so the statistics can be uniquely
+ * tied to a single running peer.
+ *
+ * @param cls closure
+ * @param subsystem name of subsystem that created the statistic
+ * @param name the name of the datum
+ * @param value the current value
+ * @param is_persistent GNUNET_YES if the value is persistent, GNUNET_NO if not
+ * @return GNUNET_OK to continue, GNUNET_SYSERR to abort iteration
+ */
+static int
+internal_stats_callback (void *cls,
+ const char *subsystem,
+ const char *name, uint64_t value, int is_persistent)
+{
+ struct StatsCoreContext *core_context = cls;
+ struct StatsIterateContext *stats_context =
+ (struct StatsIterateContext *) core_context->iter_context;
+
+ return stats_context->proc (stats_context->cls, &core_context->daemon->id,
+ subsystem, name, value, is_persistent);
+}
+
/**
 * Internal continuation call for statistics iteration.
 *
 * Accounts for one finished (or failed) statistics request; once all
 * requests have completed, invokes the user continuation and frees the
 * shared iteration context.  The per-daemon core context and its
 * statistics handle are always released here.
 *
 * @param cls closure, the StatsCoreContext for this iteration
 * @param success whether or not the statistics iterations
 *        was canceled or not (we don't care)
 */
static void
internal_stats_cont (void *cls, int success)
{
  struct StatsCoreContext *core_context = cls;
  struct StatsIterateContext *stats_context =
    (struct StatsIterateContext *) core_context->iter_context;

  stats_context->connected--;
  stats_context->completed++;

  if (stats_context->completed == stats_context->total)
    {
      /* Last outstanding request: notify the caller, then free the
         shared context (no further callback will reference it). */
      stats_context->cont (stats_context->cls, GNUNET_YES);
      GNUNET_free (stats_context);
    }

  /* stats_handle may be NULL if GNUNET_STATISTICS_create failed. */
  if (core_context->stats_handle != NULL)
    GNUNET_STATISTICS_destroy (core_context->stats_handle, GNUNET_NO);

  GNUNET_free (core_context);
}
+
+/**
+ * Check running topology iteration tasks, if below max start a new one, otherwise
+ * schedule for some time in the future.
+ */
+static void
+schedule_get_statistics (void *cls,
+ const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+ struct StatsCoreContext *core_context = cls;
+ struct StatsIterateContext *stats_context =
+ (struct StatsIterateContext *) core_context->iter_context;
+
+ if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+ return;
+
+ if (stats_context->connected > MAX_OUTSTANDING_CONNECTIONS)
+ {
+#if VERBOSE_TESTING > 2
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _
+ ("Delaying connect, we have too many outstanding connections!\n"));
+#endif
+ GNUNET_SCHEDULER_add_delayed (GNUNET_TIME_relative_multiply
+ (GNUNET_TIME_UNIT_MILLISECONDS, 100),
+ &schedule_get_statistics, core_context);
+ }
+ else
+ {
+#if VERBOSE_TESTING > 2
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating connection, outstanding_connections is %d\n"),
+ outstanding_connects);
+#endif
+
+ stats_context->connected++;
+ core_context->stats_handle =
+ GNUNET_STATISTICS_create ("testing", core_context->daemon->cfg);
+ if (core_context->stats_handle == NULL)
+ {
+ internal_stats_cont (core_context, GNUNET_NO);
+ return;
+ }
+
+ core_context->stats_get_handle =
+ GNUNET_STATISTICS_get (core_context->stats_handle, NULL, NULL,
+ GNUNET_TIME_relative_get_forever (),
+ &internal_stats_cont, &internal_stats_callback,
+ core_context);
+ if (core_context->stats_get_handle == NULL)
+ internal_stats_cont (core_context, GNUNET_NO);
+
+ }
+}
+
/**
 * Linked-list entry used to detect peers that share a single
 * statistics service instance (see stats_check_existing).
 */
struct DuplicateStats
{
  /**
   * Next item in the list
   */
  struct DuplicateStats *next;

  /**
   * Nasty string, concatenation of relevant information:
   * "<hostname><unixpath><port>" (hostname may be absent).
   */
  char *unique_string;
};
+
/**
 * Check whether the combination of port/host/unix domain socket
 * already exists in the list of peers being checked for statistics.
 *
 * @param pg the peergroup in question
 * @param specific_peer the peer we're concerned with
 * @param stats_list the list to return to the caller (entries may be
 *        appended; the list takes ownership of the identifying string)
 *
 * @return GNUNET_YES if the statistics instance has been seen already,
 *         GNUNET_NO if not (and we may have added it to the list)
 */
static int
stats_check_existing (struct GNUNET_TESTING_PeerGroup *pg,
                      struct PeerData *specific_peer,
                      struct DuplicateStats **stats_list)
{
  struct DuplicateStats *pos;
  char *unix_domain_socket;
  unsigned long long port;
  char *to_match;

  if (GNUNET_YES !=
      GNUNET_CONFIGURATION_get_value_yesno (pg->cfg, "testing",
                                            "single_statistics_per_host"))
    return GNUNET_NO; /* Each peer has its own statistics instance, do nothing! */

  pos = *stats_list;
  /* Without a unixpath or port we cannot build the identifying
     string; treat this peer's instance as unique. */
  if (GNUNET_OK !=
      GNUNET_CONFIGURATION_get_value_string (specific_peer->cfg, "statistics",
                                             "unixpath", &unix_domain_socket))
    return GNUNET_NO;

  if (GNUNET_OK !=
      GNUNET_CONFIGURATION_get_value_number (specific_peer->cfg, "statistics",
                                             "port", &port))
    {
      GNUNET_free (unix_domain_socket);
      return GNUNET_NO;
    }

  /* Build the host/unixpath/port fingerprint of the instance. */
  if (specific_peer->daemon->hostname != NULL)
    GNUNET_asprintf (&to_match, "%s%s%llu", specific_peer->daemon->hostname,
                     unix_domain_socket, port);
  else
    GNUNET_asprintf (&to_match, "%s%llu", unix_domain_socket, port);

  while (pos != NULL)
    {
      if (0 == strcmp (to_match, pos->unique_string))
        {
          /* Already known: this peer shares a statistics instance. */
          GNUNET_free (unix_domain_socket);
          GNUNET_free (to_match);
          return GNUNET_YES;
        }
      pos = pos->next;
    }
  /* First sighting: prepend to the list; to_match is now owned by
     the list and freed by the caller when tearing it down. */
  pos = GNUNET_malloc (sizeof (struct DuplicateStats));
  pos->unique_string = to_match;
  pos->next = *stats_list;
  *stats_list = pos;
  GNUNET_free (unix_domain_socket);
  return GNUNET_NO;
}
+
+/**
+ * Iterate over all (running) peers in the peer group, retrieve
+ * all statistics from each.
+ */
+void
+GNUNET_TESTING_get_statistics (struct GNUNET_TESTING_PeerGroup *pg,
+ GNUNET_STATISTICS_Callback cont,
+ GNUNET_TESTING_STATISTICS_Iterator proc,
+ void *cls)
+{
+ struct StatsIterateContext *stats_context;
+ struct StatsCoreContext *core_ctx;
+ unsigned int i;
+ unsigned int total_count;
+ struct DuplicateStats *stats_list;
+ struct DuplicateStats *pos;
+ stats_list = NULL;
+
+ /* Allocate a single stats iteration context */
+ stats_context = GNUNET_malloc (sizeof (struct StatsIterateContext));
+ stats_context->cont = cont;
+ stats_context->proc = proc;
+ stats_context->cls = cls;
+ total_count = 0;
+
+ for (i = 0; i < pg->total; i++)
+ {
+ if ((pg->peers[i].daemon->running == GNUNET_YES)
+ && (GNUNET_NO ==
+ stats_check_existing (pg, &pg->peers[i], &stats_list)))
+ {
+ /* Allocate one core context per core we need to connect to */
+ core_ctx = GNUNET_malloc (sizeof (struct StatsCoreContext));
+ core_ctx->daemon = pg->peers[i].daemon;
+ /* Set back pointer to topology iteration context */
+ core_ctx->iter_context = stats_context;
+ GNUNET_SCHEDULER_add_now (&schedule_get_statistics, core_ctx);
+ total_count++;
+ }
+ }
+
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Retrieving stats from %u total instances.\n", total_count);
+ stats_context->total = total_count;
+ if (stats_list != NULL)
+ {
+ pos = stats_list;
+ while (pos != NULL)
+ {
+ GNUNET_free (pos->unique_string);
+ stats_list = pos->next;
+ GNUNET_free (pos);
+ pos = stats_list->next;
+ }
+ }
+ return;
+}
+
+/**
+ * There are many ways to connect peers that are supported by this function.
+ * To connect peers in the same topology that was created via the
+ * GNUNET_TESTING_create_topology, the topology variable must be set to
+ * GNUNET_TESTING_TOPOLOGY_NONE. If the topology variable is specified,
+ * a new instance of that topology will be generated and attempted to be
+ * connected. This could result in some connections being impossible,
+ * because some topologies are non-deterministic.
+ *
+ * @param pg the peer group struct representing the running peers
+ * @param topology which topology to connect the peers in
+ * @param options options for connecting the topology
+ * @param option_modifier modifier for options that take a parameter
+ * @param notify_callback notification to be called once all connections completed
+ * @param notify_cls closure for notification callback
+ *
+ * @return the number of connections that will be attempted, GNUNET_SYSERR on error
+ */
+int
+GNUNET_TESTING_connect_topology (struct GNUNET_TESTING_PeerGroup *pg,
+ enum GNUNET_TESTING_Topology topology,
+ enum GNUNET_TESTING_TopologyOption options,
+ double option_modifier,
+ GNUNET_TESTING_NotifyCompletion
+ notify_callback, void *notify_cls)
+{
+ switch (topology)
+ {
+ case GNUNET_TESTING_TOPOLOGY_CLIQUE:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating clique CONNECT topology\n"));
+#endif
+ create_clique (pg, &add_actual_connections);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_SMALL_WORLD_RING:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating small world (ring) CONNECT topology\n"));
+#endif
+ create_small_world_ring (pg, &add_actual_connections);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_SMALL_WORLD:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating small world (2d-torus) CONNECT topology\n"));
+#endif
+ create_small_world (pg, &add_actual_connections);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_RING:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating ring CONNECT topology\n"));
+#endif
+ create_ring (pg, &add_actual_connections);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_2D_TORUS:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating 2d torus CONNECT topology\n"));
+#endif
+ create_2d_torus (pg, &add_actual_connections);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_ERDOS_RENYI:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating Erdos-Renyi CONNECT topology\n"));
+#endif
+ create_erdos_renyi (pg, &add_actual_connections);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_INTERNAT:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating InterNAT CONNECT topology\n"));
+#endif
+ create_nated_internet (pg, &add_actual_connections);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_SCALE_FREE:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating Scale Free CONNECT topology\n"));
+#endif
+ create_scale_free (pg, &add_actual_connections);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_LINE:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating straight line CONNECT topology\n"));
+#endif
+ create_line (pg, &add_actual_connections);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_NONE:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating no CONNECT topology\n"));
+#endif
+ copy_allowed_topology (pg);
+ break;
+ default:
+ GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+ _
+ ("Unknown topology specification, can't connect peers!\n"));
+ return GNUNET_SYSERR;
+ }
+
+ switch (options)
+ {
+ case GNUNET_TESTING_TOPOLOGY_OPTION_RANDOM:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _
+ ("Connecting random subset (%'.2f percent) of possible peers\n"),
+ 100 * option_modifier);
+#endif
+ choose_random_connections (pg, option_modifier);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_OPTION_MINIMUM:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Connecting a minimum of %u peers each (if possible)\n"),
+ (unsigned int) option_modifier);
+#endif
+ choose_minimum (pg, (unsigned int) option_modifier);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_OPTION_DFS:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _
+ ("Using DFS to connect a minimum of %u peers each (if possible)\n"),
+ (unsigned int) option_modifier);
+#endif
+ perform_dfs (pg, (int) option_modifier);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_OPTION_ADD_CLOSEST:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+ _
+ ("Finding additional %u closest peers each (if possible)\n"),
+ (unsigned int) option_modifier);
+#endif
+ add_closest (pg, (unsigned int) option_modifier,
+ &add_actual_connections);
+ break;
+ case GNUNET_TESTING_TOPOLOGY_OPTION_NONE:
+ break;
+ case GNUNET_TESTING_TOPOLOGY_OPTION_ALL:
+ break;
+ default:
+ break;
+ }
+
+ return connect_topology (pg, notify_callback, notify_cls);
+}
+
/**
 * Callback that is called whenever a hostkey is generated
 * for a peer.  Call the real callback and decrement the
 * starting counter for the peergroup.
 *
 * If the caller did not supply its own hostkey callback, this
 * function also continues the peer group startup automatically
 * once hostkeys for all peers have been generated.
 *
 * @param cls closure (the InternalStartContext of the peer)
 * @param id identifier for the daemon, NULL on error
 * @param d handle for the daemon
 * @param emsg error message (NULL on success)
 */
static void
internal_hostkey_callback (void *cls,
                           const struct GNUNET_PeerIdentity *id,
                           struct GNUNET_TESTING_Daemon *d, const char *emsg)
{
  struct InternalStartContext *internal_context = cls;

  internal_context->peer->pg->starting--;
  internal_context->peer->pg->started++;
  if (internal_context->hostkey_callback != NULL)
    /* Caller handles continuation itself. */
    internal_context->hostkey_callback (internal_context->hostkey_cls, id, d,
                                        emsg);
  else if (internal_context->peer->pg->started ==
           internal_context->peer->pg->total)
    {
      internal_context->peer->pg->started = 0; /* Internal startup may use this counter! */
      GNUNET_TESTING_daemons_continue_startup (internal_context->peer->pg);
    }
}
+
+/**
+ * Callback that is called whenever a peer has finished starting.
+ * Call the real callback and decrement the starting counter
+ * for the peergroup.
+ *
+ * @param cls closure
+ * @param id identifier for the daemon, NULL on error
+ * @param cfg config
+ * @param d handle for the daemon
+ * @param emsg error message (NULL on success)
+ */
+static void
+internal_startup_callback (void *cls,
+ const struct GNUNET_PeerIdentity *id,
+ const struct GNUNET_CONFIGURATION_Handle *cfg,
+ struct GNUNET_TESTING_Daemon *d, const char *emsg)
+{
+ struct InternalStartContext *internal_context = cls;
+ internal_context->peer->pg->starting--;
+ if (internal_context->start_cb != NULL)
+ internal_context->start_cb (internal_context->start_cb_cls, id, cfg, d,
+ emsg);
+}
+
+static void
+internal_continue_startup (void *cls,
+ const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+ struct InternalStartContext *internal_context = cls;
+
+ if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+ {
+ return;
+ }
+
+ if (internal_context->peer->pg->starting < MAX_CONCURRENT_STARTING)
+ {
+ internal_context->peer->pg->starting++;
+ GNUNET_TESTING_daemon_continue_startup (internal_context->peer->daemon);
+ }
+ else
+ {
+ GNUNET_SCHEDULER_add_delayed (GNUNET_TIME_relative_multiply
+ (GNUNET_TIME_UNIT_MILLISECONDS, 100),
+ &internal_continue_startup,
+ internal_context);
+ }
+}
+
+
+/**
+ * Callback for informing us about a successful
+ * or unsuccessful churn start call.
+ *
+ * @param cls a ChurnContext
+ * @param id the peer identity of the started peer
+ * @param cfg the handle to the configuration of the peer
+ * @param d handle to the daemon for the peer
+ * @param emsg NULL on success, non-NULL on failure
+ *
+ */
+void
+churn_start_callback (void *cls,
+ const struct GNUNET_PeerIdentity *id,
+ const struct GNUNET_CONFIGURATION_Handle *cfg,
+ struct GNUNET_TESTING_Daemon *d, const char *emsg)
+{
+ struct ChurnRestartContext *startup_ctx = cls;
+ struct ChurnContext *churn_ctx = startup_ctx->churn_ctx;
+
+ unsigned int total_left;
+ char *error_message;
+
+ error_message = NULL;
+ if (emsg != NULL)
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+ "Churn stop callback failed with error `%s'\n", emsg);
+ churn_ctx->num_failed_start++;