GNUnet is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 2, or (at your
+ by the Free Software Foundation; either version 3, or (at your
option) any later version.
GNUnet is distributed in the hope that it will be useful, but
/**
* @file testing/testing_group.c
* @brief convenience API for writing testcases for GNUnet
+ * @author Nathan Evans
* @author Christian Grothoff
+ *
*/
#include "platform.h"
#include "gnunet_arm_service.h"
#include "gnunet_testing_lib.h"
+#include "gnunet_core_service.h"
#define VERBOSE_TESTING GNUNET_NO
* conflict with the port range for "local" ports (client apps; see
* /proc/sys/net/ipv4/ip_local_port_range on Linux for example).
*/
-#define HIGH_PORT 32000
+#define HIGH_PORT 56000
+
+#define MAX_OUTSTANDING_CONNECTIONS 10
+
+#define MAX_CONCURRENT_HOSTKEYS 10
+
+#define MAX_CONCURRENT_STARTING 10
-#define MAX_OUTSTANDING_CONNECTIONS 50
+#define MAX_CONCURRENT_SHUTDOWN 10
#define CONNECT_TIMEOUT GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 300)
* Prototype of a function called whenever two peers would be connected
* in a certain topology.
*/
-typedef int (*GNUNET_TESTING_ConnectionProcessor)
-(struct GNUNET_TESTING_PeerGroup *pg, unsigned int first, unsigned int second);
+typedef int (*GNUNET_TESTING_ConnectionProcessor)(struct GNUNET_TESTING_PeerGroup *pg,
+ unsigned int first,
+ unsigned int second);
+
/**
* Context for handling churning a peer group
};
+
+/**
+ * Context for shutting down an entire peer group: tracks
+ * per-peer success/failure counts and rate-limits how many
+ * shutdowns run concurrently.
+ */
+struct ShutdownContext
+{
+ /**
+ * Total peers to wait for
+ */
+ unsigned int total_peers;
+
+ /**
+ * Number of peers successfully shut down
+ */
+ unsigned int peers_down;
+
+ /**
+ * Number of peers failed to shut down
+ */
+ unsigned int peers_failed;
+
+ /**
+ * Number of peers we have started shutting
+ * down. If too many, wait on them.
+ */
+ unsigned int outstanding;
+
+ /**
+ * Timeout for shutdown.
+ */
+ struct GNUNET_TIME_Relative timeout;
+
+ /**
+ * Callback to call when all peers either
+ * shutdown or failed to shutdown
+ */
+ GNUNET_TESTING_NotifyCompletion cb;
+
+ /**
+ * Closure for cb
+ */
+ void *cb_cls;
+};
+
struct CreateTopologyContext
{
};
#endif
+/**
+ * Context kept per peer while it is being started; used to
+ * stagger hostkey generation and startup and to forward the
+ * client's notification callbacks.
+ */
+struct InternalStartContext
+{
+ /**
+ * Pointer to peerdata
+ */
+ struct PeerData *peer;
+
+ /**
+ * Timeout for peer startup
+ */
+ struct GNUNET_TIME_Relative timeout;
+
+ /**
+ * Client callback for hostkey notification
+ */
+ GNUNET_TESTING_NotifyHostkeyCreated hostkey_callback;
+
+ /**
+ * Closure for hostkey_callback
+ */
+ void *hostkey_cls;
+
+ /**
+ * Client callback for peer start notification
+ */
+ GNUNET_TESTING_NotifyDaemonRunning start_cb;
+
+ /**
+ * Closure for start_cb
+ */
+ void *start_cb_cls;
+
+ /**
+ * Hostname, where to start the peer
+ */
+ const char *hostname;
+};
+
/**
* Data we keep per peer.
*/
*/
struct GNUNET_TESTING_PeerGroup *pg;
- /**
- * Linked list of peer connections (pointers)
- */
- //struct PeerConnection *connected_peers;
/**
* Hash map of allowed peer connections (F2F created topology)
*/
* creating any topology so the count is valid once finished.
*/
int num_connections;
+
+ /**
+ * Context to keep track of peers being started, to
+ * stagger hostkey generation and peer startup.
+ */
+ struct InternalStartContext internal_context;
};
uint16_t minport;
};
+/**
+ * Context for iterating the connection topology of an entire
+ * peer group; shared by all per-peer CoreContext instances.
+ */
+struct TopologyIterateContext
+{
+ /**
+ * Callback for notifying of two connected peers.
+ */
+ GNUNET_TESTING_NotifyTopology topology_cb;
+
+ /**
+ * Closure for topology_cb
+ */
+ void *cls;
+
+ /**
+ * Number of peers currently connected to.
+ */
+ unsigned int connected;
+
+ /**
+ * Number of peers we have finished iterating.
+ */
+ unsigned int completed;
+
+ /**
+ * Number of peers total.
+ */
+ unsigned int total;
+};
+
+/**
+ * Context for retrieving statistics from an entire peer group;
+ * shared by all per-peer StatsCoreContext instances.
+ */
+struct StatsIterateContext
+{
+ /**
+ * Continuation to call once all stats information has been retrieved.
+ */
+ GNUNET_STATISTICS_Callback cont;
+
+ /**
+ * Proc function to call on each value received.
+ */
+ GNUNET_TESTING_STATISTICS_Iterator proc;
+
+ /**
+ * Closure for cont and proc
+ */
+ void *cls;
+
+ /**
+ * Number of peers with a statistics request currently
+ * outstanding (used for rate limiting).
+ */
+ unsigned int connected;
+
+ /**
+ * Number of peers we have finished iterating.
+ */
+ unsigned int completed;
+
+ /**
+ * Number of peers total.
+ */
+ unsigned int total;
+};
+
+/**
+ * Per-peer context for a topology iteration.
+ */
+struct CoreContext
+{
+ /**
+ * Back pointer to the group-wide iteration context
+ * (a struct TopologyIterateContext).
+ */
+ void *iter_context;
+
+ /**
+ * The daemon (peer) this context belongs to.
+ */
+ struct GNUNET_TESTING_Daemon *daemon;
+};
+
+/**
+ * Per-peer context for a statistics iteration.
+ *
+ * NOTE(review): the first two members intentionally mirror
+ * struct CoreContext; the scheduling code casts between the
+ * two, so keep the layouts in sync.
+ */
+struct StatsCoreContext
+{
+ /**
+ * Back pointer to the group-wide iteration context
+ * (a struct StatsIterateContext).
+ */
+ void *iter_context;
+
+ /**
+ * The daemon (peer) this context belongs to.
+ */
+ struct GNUNET_TESTING_Daemon *daemon;
+ /**
+ * Handle to the statistics service.
+ */
+ struct GNUNET_STATISTICS_Handle *stats_handle;
+
+ /**
+ * Handle for getting statistics.
+ */
+ struct GNUNET_STATISTICS_GetHandle *stats_get_handle;
+};
/**
* Handle to a group of GNUnet peers.
/**
* Function to call on each started daemon.
*/
- GNUNET_TESTING_NotifyDaemonRunning cb;
+ //GNUNET_TESTING_NotifyDaemonRunning cb;
/**
* Closure for cb.
*/
- void *cb_cls;
+ //void *cb_cls;
/*
* Function to call on each topology connection created
* At what time should we fail the peer startup process?
*/
struct GNUNET_TIME_Absolute max_timeout;
+
+ /**
+ * How many peers are being started right now?
+ */
+ unsigned int starting;
+
+ /**
+ * How many peers have already been started?
+ */
+ unsigned int started;
+};
+
+/**
+ * Context used while rewriting a peer's configuration
+ * (port assignment etc.) before startup.
+ */
+struct UpdateContext
+{
+ /**
+ * The configuration being built up for the peer.
+ */
+ struct GNUNET_CONFIGURATION_Handle *ret;
+
+ /**
+ * Hostname the peer will run on (NULL for localhost).
+ */
+ const char *hostname;
+
+ /**
+ * Next port number to hand out.
+ */
+ unsigned int nport;
+
+ /**
+ * Running counter copied back to the caller; presumably a
+ * UNIX domain socket path counter -- TODO confirm.
+ */
+ unsigned int upnum;
+};
+
+
+/**
+ * Context for a single scheduled connection attempt
+ * between two daemons of a peer group.
+ */
+struct ConnectContext
+{
+ /**
+ * First daemon of the pair to connect.
+ */
+ struct GNUNET_TESTING_Daemon *first;
+
+ /**
+ * Second daemon of the pair to connect.
+ */
+ struct GNUNET_TESTING_Daemon *second;
+
+ /**
+ * Peer group both daemons belong to.
+ */
+ struct GNUNET_TESTING_PeerGroup *pg;
+};
/**
memcpy (uid, hash, sizeof(uint32_t));
}
-struct UpdateContext
-{
- struct GNUNET_CONFIGURATION_Handle *ret;
- const char *hostname;
- unsigned int nport;
- unsigned int upnum;
-};
-
-
-struct ConnectContext
-{
- struct GNUNET_TESTING_Daemon *first;
-
- struct GNUNET_TESTING_Daemon *second;
-
- struct GNUNET_TESTING_PeerGroup *pg;
-};
-
/**
* Number of connects we are waiting on, allows us to rate limit
* connect attempts.
*/
static int outstanding_connects;
+/**
+ * Get a topology from a string input.  Matching is
+ * case-insensitive.
+ *
+ * @param topology where to write the retrieved topology
+ * @param topology_string The string to attempt to
+ * get a configuration value from
+ * @return GNUNET_YES if topology string matched a
+ * known topology, GNUNET_NO if not
+ */
+int
+GNUNET_TESTING_topology_get(enum GNUNET_TESTING_Topology *topology, char * topology_string)
+{
+ /**
+ * Strings representing topologies in enum.
+ * NOTE: the order of this array must match the order of
+ * enum GNUNET_TESTING_Topology, since the array index is
+ * used directly as the enum value below.
+ */
+ static const char * topology_strings[] =
+ {
+ /**
+ * A clique (everyone connected to everyone else).
+ */
+ "CLIQUE",
+
+ /**
+ * Small-world network (2d torus plus random links).
+ */
+ "SMALL_WORLD",
+
+ /**
+ * Small-world network (ring plus random links).
+ */
+ "SMALL_WORLD_RING",
+
+ /**
+ * Ring topology.
+ */
+ "RING",
+
+ /**
+ * 2-d torus.
+ */
+ "2D_TORUS",
+
+ /**
+ * Random graph.
+ */
+ "ERDOS_RENYI",
+
+ /**
+ * Certain percentage of peers are unable to communicate directly
+ * replicating NAT conditions
+ */
+ "INTERNAT",
+
+ /**
+ * Scale free topology.
+ */
+ "SCALE_FREE",
+
+ /**
+ * Straight line topology.
+ */
+ "LINE",
+
+ /**
+ * All peers are disconnected.
+ */
+ "NONE",
+
+ NULL
+ };
+
+ int curr = 0;
+ if (topology_string == NULL)
+ return GNUNET_NO;
+ while (topology_strings[curr] != NULL)
+ {
+ if (strcasecmp(topology_strings[curr], topology_string) == 0)
+ {
+ /* Array index doubles as the enum value */
+ *topology = curr;
+ return GNUNET_YES;
+ }
+ curr++;
+ }
+ *topology = GNUNET_TESTING_TOPOLOGY_NONE;
+ return GNUNET_NO;
+}
+
+
+/**
+ * Get connect topology option from string input.  Matching is
+ * case-insensitive.
+ *
+ * @param topology_option where to write the retrieved topology
+ * @param topology_string The string to attempt to
+ * get a configuration value from
+ * @return GNUNET_YES if string matched a known
+ * topology option, GNUNET_NO if not
+ */
+int
+GNUNET_TESTING_topology_option_get (enum GNUNET_TESTING_TopologyOption *topology_option,
+ char * topology_string)
+{
+ /**
+ * Options for connecting a topology as strings.
+ * NOTE: the order of this array must match the order of
+ * enum GNUNET_TESTING_TopologyOption, since the array index
+ * is used directly as the enum value below.
+ */
+ static const char * topology_option_strings[] =
+ {
+ /**
+ * Try to connect all peers specified in the topology.
+ */
+ "CONNECT_ALL",
+
+ /**
+ * Choose a random subset of connections to create.
+ */
+ "CONNECT_RANDOM_SUBSET",
+
+ /**
+ * Create at least X connections for each peer.
+ */
+ "CONNECT_MINIMUM",
+
+ /**
+ * Using a depth first search, create one connection
+ * per peer. If any are missed (graph disconnected)
+ * start over at those peers until all have at least one
+ * connection.
+ */
+ "CONNECT_DFS",
+
+ /**
+ * No options specified.
+ */
+ "CONNECT_NONE",
+
+ NULL
+ };
+ int curr = 0;
+
+ if (topology_string == NULL)
+ return GNUNET_NO;
+ while (NULL != topology_option_strings[curr])
+ {
+ if (strcasecmp(topology_option_strings[curr], topology_string) == 0)
+ {
+ /* Array index doubles as the enum value */
+ *topology_option = curr;
+ return GNUNET_YES;
+ }
+ curr++;
+ }
+ *topology_option = GNUNET_TESTING_TOPOLOGY_OPTION_NONE;
+ return GNUNET_NO;
+}
/**
* Function to iterate over options. Copies
if (GNUNET_CONFIGURATION_get_value_string(cfg, "testing", "control_host", &control_host) == GNUNET_OK)
{
- GNUNET_asprintf(&allowed_hosts, "%s; 127.0.0.1;", control_host);
+ if (hostname != NULL)
+ GNUNET_asprintf(&allowed_hosts, "%s; 127.0.0.1; %s;", control_host, hostname);
+ else
+ GNUNET_asprintf(&allowed_hosts, "%s; 127.0.0.1;", control_host);
+
GNUNET_CONFIGURATION_set_value_string(uc.ret, "core", "ACCEPT_FROM", allowed_hosts);
+ GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport", "ACCEPT_FROM", allowed_hosts);
+ GNUNET_CONFIGURATION_set_value_string(uc.ret, "dht", "ACCEPT_FROM", allowed_hosts);
+ GNUNET_CONFIGURATION_set_value_string(uc.ret, "statistics", "ACCEPT_FROM", allowed_hosts);
GNUNET_free_non_null(control_host);
GNUNET_free(allowed_hosts);
}
if (hostname != NULL)
{
GNUNET_asprintf(&allowed_hosts, "%s; 127.0.0.1;", hostname);
+ GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport-udp", "BINDTO", hostname);
+ GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport-tcp", "BINDTO", hostname);
GNUNET_CONFIGURATION_set_value_string(uc.ret, "arm", "ACCEPT_FROM", allowed_hosts);
GNUNET_free(allowed_hosts);
}
+ else
+ {
+ GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport-tcp", "BINDTO", "127.0.0.1");
+ GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport-udp", "BINDTO", "127.0.0.1");
+ }
*port = (uint16_t) uc.nport;
*upnum = uc.upnum;
* @param first index of the first peer
* @param second index of the second peer
*
- * @return the number of connections added (can be 0, 1 or 2)
- * technically should only be 0 or 2, but the small price
- * of iterating over the lists (hashmaps in the future)
- * for being sure doesn't bother me!
+ * @return the number of connections added
+ * technically should only be 0 or 2
*
*/
static int
return total_connections;
}
+/**
+ * Create a topology given a peer group (set of running peers)
+ * and a connection processor.
+ *
+ * @param pg the peergroup to create the topology on
+ * @param proc the connection processor to call to actually set
+ * up connections between two peers
+ *
+ * @return the number of connections that were set up
+ *
+ */
int
create_small_world_ring(struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_ConnectionProcessor proc)
{
return connect_attempts;
}
-
+/**
+ * Create a topology given a peer group (set of running peers)
+ * and a connection processor.
+ *
+ * @param pg the peergroup to create the topology on
+ * @param proc the connection processor to call to actually set
+ * up connections between two peers
+ *
+ * @return the number of connections that were set up
+ *
+ */
static int
create_nated_internet (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_ConnectionProcessor proc)
{
}
-
-
+/**
+ * Create a topology given a peer group (set of running peers)
+ * and a connection processor.
+ *
+ * @param pg the peergroup to create the topology on
+ * @param proc the connection processor to call to actually set
+ * up connections between two peers
+ *
+ * @return the number of connections that were set up
+ *
+ */
static int
create_small_world (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_ConnectionProcessor proc)
{
return connect_attempts;
}
-
-
+/**
+ * Create a topology given a peer group (set of running peers)
+ * and a connection processor.
+ *
+ * @param pg the peergroup to create the topology on
+ * @param proc the connection processor to call to actually set
+ * up connections between two peers
+ *
+ * @return the number of connections that were set up
+ *
+ */
static int
create_erdos_renyi (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_ConnectionProcessor proc)
{
return connect_attempts;
}
+/**
+ * Create a topology given a peer group (set of running peers)
+ * and a connection processor.
+ *
+ * @param pg the peergroup to create the topology on
+ * @param proc the connection processor to call to actually set
+ * up connections between two peers
+ *
+ * @return the number of connections that were set up
+ *
+ */
static int
create_2d_torus (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_ConnectionProcessor proc)
{
}
-
+/**
+ * Create a topology given a peer group (set of running peers)
+ * and a connection processor.
+ *
+ * @param pg the peergroup to create the topology on
+ * @param proc the connection processor to call to actually set
+ * up connections between two peers
+ *
+ * @return the number of connections that were set up
+ *
+ */
static int
create_clique (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_ConnectionProcessor proc)
{
return connect_attempts;
}
+/**
+ * Create a topology given a peer group (set of running peers)
+ * and a connection processor.
+ *
+ * @param pg the peergroup to create the topology on
+ * @param proc the connection processor to call to actually set
+ * up connections between two peers
+ *
+ * @return the number of connections that were set up
+ *
+ */
+static int
+create_line (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_ConnectionProcessor proc)
+{
+ unsigned int count;
+ int connect_attempts;
+
+ connect_attempts = 0;
+
+ /* Guard against unsigned underflow: with pg->total == 0 the
+ * expression pg->total - 1 below would wrap to UINT_MAX and
+ * the loop would index far past the peer array. */
+ if (pg->total == 0)
+ return 0;
+
+ /* Connect each peer to the next highest numbered peer */
+ for (count = 0; count < pg->total - 1; count++)
+ {
+#if VERBOSE_TESTING
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Connecting peer %d to peer %d\n",
+ count, count + 1);
+#endif
+ connect_attempts += proc(pg, count, count + 1);
+ }
+
+ return connect_attempts;
+}
+/**
+ * Create a topology given a peer group (set of running peers)
+ * and a connection processor.
+ *
+ * @param pg the peergroup to create the topology on
+ * @param proc the connection processor to call to actually set
+ * up connections between two peers
+ *
+ * @return the number of connections that were set up
+ *
+ */
static int
create_ring (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_ConnectionProcessor proc)
{
static void internal_connect_notify (void *cls,
const struct GNUNET_PeerIdentity *first,
const struct GNUNET_PeerIdentity *second,
+ uint32_t distance,
const struct GNUNET_CONFIGURATION_Handle *first_cfg,
const struct GNUNET_CONFIGURATION_Handle *second_cfg,
struct GNUNET_TESTING_Daemon *first_daemon,
struct GNUNET_TESTING_PeerGroup *pg = cls;
outstanding_connects--;
- pg->notify_connection(pg->notify_connection_cls, first, second, first_cfg, second_cfg, first_daemon, second_daemon, emsg);
-
+ pg->notify_connection(pg->notify_connection_cls, first, second, distance, first_cfg, second_cfg, first_daemon, second_daemon, emsg);
}
-static void schedule_connect(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+
+/**
+ * Either delay a connection (because there are too many outstanding)
+ * or schedule it for right now.
+ *
+ * @param cls a connection context
+ * @param tc the task runtime context
+ */
+static void schedule_connect(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
{
struct ConnectContext *connect_context = cls;
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
_("Delaying connect, we have too many outstanding connections!\n"));
#endif
- GNUNET_SCHEDULER_add_delayed(connect_context->pg->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_SECONDS, 3), &schedule_connect, connect_context);
+ GNUNET_SCHEDULER_add_delayed(connect_context->pg->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_connect, connect_context);
}
else
{
}
}
+
/**
* Iterator for actually scheduling connections to be created
* between two peers.
return GNUNET_YES;
}
+
/**
* Iterator for copying all entries in the allowed hashmap to the
* connect hashmap.
* by the topology. This will only have an effect once peers
* are started if the FRIENDS_ONLY option is set in the base
* config. Also takes an optional restrict topology which
- * disallows direct TCP connections UNLESS they are specified in
- * the restricted topology.
+ * disallows connections based on a particular transport
+ * UNLESS they are specified in the restricted topology.
*
* @param pg the peer group struct representing the running peers
* @param topology which topology to connect the peers in
#endif
num_connections = create_scale_free (pg, &add_allowed_connections);
break;
+ case GNUNET_TESTING_TOPOLOGY_LINE:
+#if VERBOSE_TESTING
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating straight line topology\n"));
+#endif
+ num_connections = create_line (pg, &add_allowed_connections);
+ break;
case GNUNET_TESTING_TOPOLOGY_NONE:
+#if VERBOSE_TESTING
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating no allowed topology (all peers can connect at core level)\n"));
+#endif
num_connections = 0;
break;
default:
num_connections = 0;
break;
}
- if (num_connections < 1)
+
+ if (num_connections < 0)
return GNUNET_SYSERR;
if (GNUNET_YES == GNUNET_CONFIGURATION_get_value_yesno (pg->cfg, "TESTING", "F2F"))
{
ret = create_and_copy_friend_files(pg);
- }
-
- if (ret != GNUNET_OK)
- {
+ if (ret != GNUNET_OK)
+ {
#if VERBOSE_TESTING
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- _("Failed during friend file copying!\n"));
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Failed during friend file copying!\n"));
#endif
- return GNUNET_SYSERR;
- }
- else
- {
+ return GNUNET_SYSERR;
+ }
+ else
+ {
#if VERBOSE_TESTING
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- _("Friend files created/copied successfully!\n"));
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Friend files created/copied successfully!\n"));
#endif
+ }
}
/* Use the create clique method to initially set all connections as blacklisted. */
- create_clique (pg, &blacklist_connections);
+ if (restrict_topology != GNUNET_TESTING_TOPOLOGY_NONE)
+ create_clique (pg, &blacklist_connections);
unblacklisted_connections = 0;
/* Un-blacklist connections as per the topology specified */
switch (restrict_topology)
#endif
unblacklisted_connections = create_scale_free (pg, &unblacklist_connections);
break;
+ case GNUNET_TESTING_TOPOLOGY_LINE:
+#if VERBOSE_TESTING
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Blacklisting all but straight line topology\n"));
+#endif
+ unblacklisted_connections = create_line (pg, &unblacklist_connections);
+ break;
case GNUNET_TESTING_TOPOLOGY_NONE:
- /* Fall through */
+#if VERBOSE_TESTING
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating no blacklist topology (all peers can connect at transport level)\n"));
+#endif
default:
break;
}
* @param pg the peergroup we are dealing with
* @param num how many connections at least should each peer have (if possible)?
*/
-void
+static void
choose_minimum(struct GNUNET_TESTING_PeerGroup *pg, unsigned int num)
{
struct MinimumContext minimum_ctx;
uint32_t pg_iter;
for (pg_iter = 0; pg_iter < pg->total; pg_iter++)
- {
- pg->peers[pg_iter].connect_peers_working_set = GNUNET_CONTAINER_multihashmap_create(num);
- }
+ {
+ pg->peers[pg_iter].connect_peers_working_set = GNUNET_CONTAINER_multihashmap_create(num);
+ }
for (pg_iter = 0; pg_iter < pg->total; pg_iter++)
{
minimum_ctx.first_uid = pg_iter;
- minimum_ctx.pg_array = GNUNET_CRYPTO_random_permute(GNUNET_CRYPTO_QUALITY_WEAK, GNUNET_CONTAINER_multihashmap_size(pg->peers[pg_iter].connect_peers));
+ minimum_ctx.pg_array = GNUNET_CRYPTO_random_permute(GNUNET_CRYPTO_QUALITY_WEAK,
+ GNUNET_CONTAINER_multihashmap_size(pg->peers[pg_iter].connect_peers));
minimum_ctx.first = &pg->peers[pg_iter];
minimum_ctx.pg = pg;
minimum_ctx.num_to_add = num;
minimum_ctx.current = 0;
- pg->peers[pg_iter].connect_peers_working_set = GNUNET_CONTAINER_multihashmap_create(pg->total);
- GNUNET_CONTAINER_multihashmap_iterate(pg->peers[pg_iter].connect_peers, &minimum_connect_iterator, &minimum_ctx);
+ GNUNET_CONTAINER_multihashmap_iterate(pg->peers[pg_iter].connect_peers,
+ &minimum_connect_iterator,
+ &minimum_ctx);
}
for (pg_iter = 0; pg_iter < pg->total; pg_iter++)
GNUNET_CONTAINER_multihashmap_destroy(pg->peers[pg_iter].connect_peers);
/* And replace with the working set */
pg->peers[pg_iter].connect_peers = pg->peers[pg_iter].connect_peers_working_set;
- fprintf(stderr, "Finished! Hashmap size %u\n", GNUNET_CONTAINER_multihashmap_size(pg->peers[pg_iter].connect_peers));
}
}
-static unsigned int count_workingset_connections(struct GNUNET_TESTING_PeerGroup *pg)
+static unsigned int
+count_workingset_connections(struct GNUNET_TESTING_PeerGroup *pg)
{
unsigned int count;
unsigned int pg_iter;
starting_peer = dfs_ctx.second_uid;
}
- for (pg_iter = 0; pg_iter < pg->total; pg_iter++)
- {
-
- }
-
for (pg_iter = 0; pg_iter < pg->total; pg_iter++)
{
/* Remove the "old" connections */
GNUNET_CONTAINER_multihashmap_destroy(pg->peers[pg_iter].connect_peers);
/* And replace with the working set */
pg->peers[pg_iter].connect_peers = pg->peers[pg_iter].connect_peers_working_set;
- fprintf(stderr, "Finished! Hashmap size %u\n", GNUNET_CONTAINER_multihashmap_size(pg->peers[pg_iter].connect_peers));
+ }
+}
+
+/**
+ * Internal callback for topology information for a particular peer.
+ *
+ * @param cls the struct CoreContext for the daemon being iterated
+ * @param peer identity of a connected peer, or NULL once the
+ * iteration for this daemon has finished (or failed)
+ * @param latency reported latency to the peer
+ * @param distance reported distance to the peer
+ */
+static void
+internal_topology_callback(void *cls,
+ const struct GNUNET_PeerIdentity *peer,
+ struct GNUNET_TIME_Relative latency, uint32_t distance)
+{
+ struct CoreContext *core_ctx = cls;
+ struct TopologyIterateContext *iter_ctx = core_ctx->iter_context;
+
+ if (peer == NULL) /* Either finished, or something went wrong */
+ {
+ iter_ctx->completed++;
+ iter_ctx->connected--;
+ /* One core context allocated per iteration, must free! */
+ GNUNET_free(core_ctx);
+ }
+ else
+ {
+ /* Report this connection (origin daemon -> peer) to the client */
+ iter_ctx->topology_cb(iter_ctx->cls, &core_ctx->daemon->id, peer, latency, distance, NULL);
+ }
+ if (iter_ctx->completed == iter_ctx->total)
+ {
+ /* All daemons done: signal the end with a NULL/NULL pair */
+ iter_ctx->topology_cb(iter_ctx->cls, NULL, NULL, GNUNET_TIME_relative_get_zero(), 0, NULL);
+ /* Once all are done, free the iteration context */
+ GNUNET_free(iter_ctx);
+ }
+}
+
+
+/**
+ * Check running topology iteration tasks, if below max start a new one, otherwise
+ * schedule for some time in the future.
+ *
+ * @param cls the struct CoreContext for the daemon to iterate
+ * @param tc scheduler task context
+ */
+static void
+schedule_get_topology(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+ struct CoreContext *core_context = cls;
+ struct TopologyIterateContext *topology_context = (struct TopologyIterateContext *)core_context->iter_context;
+ if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+ return;
+
+ if (topology_context->connected > MAX_OUTSTANDING_CONNECTIONS)
+ {
+#if VERBOSE_TESTING > 2
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Delaying connect, we have too many outstanding connections!\n"));
+#endif
+ /* Too many iterations in flight; retry in 100ms */
+ GNUNET_SCHEDULER_add_delayed(core_context->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_get_topology, core_context);
+ }
+ else
+ {
+#if VERBOSE_TESTING > 2
+ /* NOTE(review): logs outstanding_connects but the throttle above
+ * uses topology_context->connected -- likely copy-paste */
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating connection, outstanding_connections is %d\n"), outstanding_connects);
+#endif
+ topology_context->connected++;
+ /* On failure to start, synthesize the "finished" callback so counters stay consistent */
+ if (GNUNET_OK != GNUNET_CORE_iterate_peers (core_context->daemon->sched, core_context->daemon->cfg, &internal_topology_callback, core_context))
+ internal_topology_callback(core_context, NULL, GNUNET_TIME_relative_get_zero(), 0);
+
+ }
+}
+
+/**
+ * Iterate over all (running) peers in the peer group, retrieve
+ * all connections that each currently has.
+ *
+ * @param pg the peer group to iterate over
+ * @param cb callback to call for each connection found (and once
+ * with NULL peers to signal completion)
+ * @param cls closure for cb
+ */
+void
+GNUNET_TESTING_get_topology (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_NotifyTopology cb, void *cls)
+{
+ struct TopologyIterateContext *topology_context;
+ struct CoreContext *core_ctx;
+ unsigned int i;
+ unsigned int total_count;
+
+ /* Allocate a single topology iteration context */
+ topology_context = GNUNET_malloc(sizeof(struct TopologyIterateContext));
+ topology_context->topology_cb = cb;
+ topology_context->cls = cls;
+ total_count = 0;
+ for (i = 0; i < pg->total; i++)
+ {
+ if (pg->peers[i].daemon->running == GNUNET_YES)
+ {
+ /* Allocate one core context per core we need to connect to */
+ core_ctx = GNUNET_malloc(sizeof(struct CoreContext));
+ core_ctx->daemon = pg->peers[i].daemon;
+ /* Set back pointer to topology iteration context */
+ core_ctx->iter_context = topology_context;
+ GNUNET_SCHEDULER_add_now(pg->sched, &schedule_get_topology, core_ctx);
+ total_count++;
+ }
+ }
+ /* Setting total after scheduling is safe -- presumably the
+ * scheduler does not run the tasks until we return; verify. */
+ topology_context->total = total_count;
+ return;
+}
+
+/**
+ * Callback function to process statistic values.
+ * This handler is here only really to insert a peer
+ * identity (or daemon) so the statistics can be uniquely
+ * tied to a single running peer.
+ *
+ * @param cls closure
+ * @param subsystem name of subsystem that created the statistic
+ * @param name the name of the datum
+ * @param value the current value
+ * @param is_persistent GNUNET_YES if the value is persistent, GNUNET_NO if not
+ * @return GNUNET_OK to continue, GNUNET_SYSERR to abort iteration
+ */
+static int internal_stats_callback (void *cls,
+ const char *subsystem,
+ const char *name,
+ uint64_t value,
+ int is_persistent)
+{
+ struct StatsCoreContext *core_context = cls;
+ struct StatsIterateContext *stats_context = (struct StatsIterateContext *)core_context->iter_context;
+
+ /* Forward to the client's iterator, prepending this daemon's identity */
+ return stats_context->proc(stats_context->cls, &core_context->daemon->id, subsystem, name, value, is_persistent);
+}
+
+/**
+ * Internal continuation call for statistics iteration.
+ *
+ * @param cls closure, the CoreContext for this iteration
+ * @param success whether or not the statistics iterations
+ * was canceled or not (we don't care)
+ */
+static void internal_stats_cont (void *cls, int success)
+{
+ struct StatsCoreContext *core_context = cls;
+ struct StatsIterateContext *stats_context = (struct StatsIterateContext *)core_context->iter_context;
+
+ stats_context->connected--;
+ stats_context->completed++;
+
+ if (stats_context->completed == stats_context->total)
+ {
+ /* Last peer done: notify the client and release the shared context */
+ stats_context->cont(stats_context->cls, GNUNET_YES);
+ GNUNET_free(stats_context);
+ }
+
+ /* Per-peer cleanup: close the stats connection and free this context */
+ if (core_context->stats_handle != NULL)
+ GNUNET_STATISTICS_destroy(core_context->stats_handle, GNUNET_NO);
+
+ GNUNET_free(core_context);
+}
+
+/**
+ * Check running statistics iteration tasks, if below max start a new one,
+ * otherwise schedule for some time in the future.
+ *
+ * @param cls the struct StatsCoreContext for the daemon to query
+ * @param tc scheduler task context
+ */
+static void
+schedule_get_statistics(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+ struct StatsCoreContext *core_context = cls;
+ struct StatsIterateContext *stats_context = (struct StatsIterateContext *)core_context->iter_context;
+
+ if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+ return;
+
+ if (stats_context->connected > MAX_OUTSTANDING_CONNECTIONS)
+ {
+#if VERBOSE_TESTING > 2
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Delaying connect, we have too many outstanding connections!\n"));
+#endif
+ /* Too many requests in flight; retry in 100ms */
+ GNUNET_SCHEDULER_add_delayed(core_context->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_get_statistics, core_context);
+ }
+ else
+ {
+#if VERBOSE_TESTING > 2
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating connection, outstanding_connections is %d\n"), outstanding_connects);
+#endif
+
+ stats_context->connected++;
+ core_context->stats_handle = GNUNET_STATISTICS_create(core_context->daemon->sched, "testing", core_context->daemon->cfg);
+ if (core_context->stats_handle == NULL)
+ {
+ /* Could not connect; count this peer as done so the group completes */
+ internal_stats_cont (core_context, GNUNET_NO);
+ return;
+ }
+
+ /* NULL subsystem/name: fetch all statistics from this peer; never times out */
+ core_context->stats_get_handle = GNUNET_STATISTICS_get(core_context->stats_handle, NULL, NULL, GNUNET_TIME_relative_get_forever(), &internal_stats_cont, &internal_stats_callback, core_context);
+ if (core_context->stats_get_handle == NULL)
+ internal_stats_cont (core_context, GNUNET_NO);
+
+ }
+}
+
+
+/**
+ * Iterate over all (running) peers in the peer group, retrieve
+ * all statistics from each.
+ *
+ * @param pg the peer group to query
+ * @param cont continuation to call once all statistics have been retrieved
+ * @param proc function to call on each statistic value received
+ * @param cls closure for cont and proc
+ */
+void
+GNUNET_TESTING_get_statistics (struct GNUNET_TESTING_PeerGroup *pg,
+ GNUNET_STATISTICS_Callback cont,
+ GNUNET_TESTING_STATISTICS_Iterator proc, void *cls)
+{
+ struct StatsIterateContext *stats_context;
+ struct StatsCoreContext *core_ctx;
+ unsigned int i;
+ unsigned int total_count;
+
+ /* Allocate a single stats iteration context */
+ stats_context = GNUNET_malloc(sizeof(struct StatsIterateContext));
+ stats_context->cont = cont;
+ stats_context->proc = proc;
+ stats_context->cls = cls;
+ total_count = 0;
+ for (i = 0; i < pg->total; i++)
+ {
+ if (pg->peers[i].daemon->running == GNUNET_YES)
+ {
+ /* Allocate one StatsCoreContext per running peer.  It must be
+ * the larger StatsCoreContext (not CoreContext): the scheduled
+ * task writes stats_handle/stats_get_handle, which would
+ * overflow a CoreContext-sized allocation. */
+ core_ctx = GNUNET_malloc(sizeof(struct StatsCoreContext));
+ core_ctx->daemon = pg->peers[i].daemon;
+ /* Set back pointer to stats iteration context */
+ core_ctx->iter_context = stats_context;
+ GNUNET_SCHEDULER_add_now(pg->sched, &schedule_get_statistics, core_ctx);
+ total_count++;
+ }
+ }
+ stats_context->total = total_count;
+ return;
+}
/**
switch (topology)
{
case GNUNET_TESTING_TOPOLOGY_CLIQUE:
- #if VERBOSE_TESTING
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- _("Creating clique topology\n"));
- #endif
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating clique CONNECT topology\n"));
+#endif
create_clique (pg, &add_actual_connections);
break;
case GNUNET_TESTING_TOPOLOGY_SMALL_WORLD_RING:
- #if VERBOSE_TESTING
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- _("Creating small world (ring) topology\n"));
- #endif
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating small world (ring) CONNECT topology\n"));
+#endif
create_small_world_ring (pg, &add_actual_connections);
break;
case GNUNET_TESTING_TOPOLOGY_SMALL_WORLD:
- #if VERBOSE_TESTING
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- _("Creating small world (2d-torus) topology\n"));
- #endif
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating small world (2d-torus) CONNECT topology\n"));
+#endif
create_small_world (pg, &add_actual_connections);
break;
case GNUNET_TESTING_TOPOLOGY_RING:
- #if VERBOSE_TESTING
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- _("Creating ring topology\n"));
- #endif
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating ring CONNECT topology\n"));
+#endif
create_ring (pg, &add_actual_connections);
break;
case GNUNET_TESTING_TOPOLOGY_2D_TORUS:
- #if VERBOSE_TESTING
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- _("Creating 2d torus topology\n"));
- #endif
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating 2d torus CONNECT topology\n"));
+#endif
create_2d_torus (pg, &add_actual_connections);
break;
case GNUNET_TESTING_TOPOLOGY_ERDOS_RENYI:
- #if VERBOSE_TESTING
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- _("Creating Erdos-Renyi topology\n"));
- #endif
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating Erdos-Renyi CONNECT topology\n"));
+#endif
create_erdos_renyi (pg, &add_actual_connections);
break;
case GNUNET_TESTING_TOPOLOGY_INTERNAT:
- #if VERBOSE_TESTING
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- _("Creating InterNAT topology\n"));
- #endif
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating InterNAT CONNECT topology\n"));
+#endif
create_nated_internet (pg, &add_actual_connections);
break;
case GNUNET_TESTING_TOPOLOGY_SCALE_FREE:
- #if VERBOSE_TESTING
- GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
- _("Creating Scale Free topology\n"));
- #endif
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating Scale Free CONNECT topology\n"));
+#endif
create_scale_free (pg, &add_actual_connections);
break;
+ case GNUNET_TESTING_TOPOLOGY_LINE:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating straight line CONNECT topology\n"));
+#endif
+ create_line (pg, &add_actual_connections);
+ break;
case GNUNET_TESTING_TOPOLOGY_NONE:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating no CONNECT topology\n"));
+#endif
copy_allowed_topology(pg);
break;
default:
switch (options)
{
- case GNUNET_TESTING_TOPOLOGY_OPTION_RANDOM:
+ case GNUNET_TESTING_TOPOLOGY_OPTION_RANDOM:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Connecting random subset (%'.2f percent) of possible peers\n"), 100 * option_modifier);
+#endif
choose_random_connections(pg, option_modifier);
break;
- case GNUNET_TESTING_TOPOLOGY_OPTION_MINIMUM:
+ case GNUNET_TESTING_TOPOLOGY_OPTION_MINIMUM:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Connecting a minimum of %u peers each (if possible)\n"), (unsigned int)option_modifier);
+#endif
choose_minimum(pg, (unsigned int)option_modifier);
break;
- case GNUNET_TESTING_TOPOLOGY_OPTION_DFS:
+ case GNUNET_TESTING_TOPOLOGY_OPTION_DFS:
+#if VERBOSE_TOPOLOGY
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Using DFS to connect a minimum of %u peers each (if possible)\n"), (unsigned int)option_modifier);
+#endif
perform_dfs(pg, (int)option_modifier);
break;
case GNUNET_TESTING_TOPOLOGY_OPTION_NONE:
return connect_topology(pg);
}
+/**
+ * Callback that is called whenever a hostkey is generated
+ * for a peer.  Call the real callback and decrement the
+ * starting counter for the peergroup.
+ *
+ * When the caller supplied no hostkey callback, this function
+ * also drives the group forward itself: once every peer's
+ * hostkey exists it resets the 'started' counter and kicks off
+ * the next startup phase for the whole group.
+ *
+ * @param cls closure (struct InternalStartContext for this peer)
+ * @param id identifier for the daemon, NULL on error
+ * @param d handle for the daemon
+ * @param emsg error message (NULL on success)
+ */
+static void internal_hostkey_callback (void *cls,
+                                       const struct GNUNET_PeerIdentity *id,
+                                       struct GNUNET_TESTING_Daemon *d,
+                                       const char *emsg)
+{
+  struct InternalStartContext *internal_context = cls;
+  /* One fewer hostkey operation in flight (frees a slot for
+     internal_start's MAX_CONCURRENT_HOSTKEYS throttle); one more
+     peer accounted for. */
+  internal_context->peer->pg->starting--;
+  internal_context->peer->pg->started++;
+  if (internal_context->hostkey_callback != NULL)
+    internal_context->hostkey_callback(internal_context->hostkey_cls, id, d, emsg);
+  else if (internal_context->peer->pg->started == internal_context->peer->pg->total)
+    {
+      internal_context->peer->pg->started = 0; /* Internal startup may use this counter! */
+      GNUNET_TESTING_daemons_continue_startup(internal_context->peer->pg);
+    }
+}
+
+/**
+ * Callback that is called whenever a peer has finished starting.
+ * Call the real callback and decrement the starting counter
+ * for the peergroup (freeing a slot for the
+ * MAX_CONCURRENT_STARTING throttle in internal_continue_startup).
+ *
+ * @param cls closure (struct InternalStartContext for this peer)
+ * @param id identifier for the daemon, NULL on error
+ * @param cfg configuration used by this daemon
+ * @param d handle for the daemon
+ * @param emsg error message (NULL on success)
+ */
+static void internal_startup_callback (void *cls,
+                                       const struct GNUNET_PeerIdentity *id,
+                                       const struct GNUNET_CONFIGURATION_Handle *cfg,
+                                       struct GNUNET_TESTING_Daemon *d,
+                                       const char *emsg)
+{
+  struct InternalStartContext *internal_context = cls;
+  internal_context->peer->pg->starting--;
+  if (internal_context->start_cb != NULL)
+    internal_context->start_cb(internal_context->start_cb_cls, id, cfg, d, emsg);
+}
+
+/**
+ * Task that continues the startup of a single peer, rate limited
+ * so that at most MAX_CONCURRENT_STARTING peers are in this phase
+ * at once.  If the limit is currently reached, the task re-schedules
+ * itself 100ms later instead of proceeding.
+ *
+ * @param cls closure (struct InternalStartContext for this peer)
+ * @param tc scheduler task context
+ */
+static void
+internal_continue_startup (void *cls, const struct GNUNET_SCHEDULER_TaskContext * tc)
+{
+  struct InternalStartContext *internal_context = cls;
+
+  if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+    {
+      return;
+    }
+
+  if (internal_context->peer->pg->starting < MAX_CONCURRENT_STARTING)
+    {
+      /* Claim a slot; released again in internal_startup_callback. */
+      internal_context->peer->pg->starting++;
+      GNUNET_TESTING_daemon_continue_startup (internal_context->peer->daemon);
+    }
+  else
+    {
+      /* Too many peers starting already; retry shortly. */
+      GNUNET_SCHEDULER_add_delayed(internal_context->peer->pg->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &internal_continue_startup, internal_context);
+    }
+}
+
+/**
+ * Task that begins starting a single peer (hostkey generation
+ * phase), rate limited so that at most MAX_CONCURRENT_HOSTKEYS
+ * peers are generating hostkeys at once.  If the limit is
+ * currently reached, the task re-schedules itself 100ms later.
+ *
+ * @param cls closure (struct InternalStartContext for this peer)
+ * @param tc scheduler task context
+ */
+static void
+internal_start (void *cls, const struct GNUNET_SCHEDULER_TaskContext * tc)
+{
+  struct InternalStartContext *internal_context = cls;
+
+  if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+    {
+      return;
+    }
+
+  if (internal_context->peer->pg->starting < MAX_CONCURRENT_HOSTKEYS)
+    {
+      /* Claim a slot; released again in internal_hostkey_callback. */
+      internal_context->peer->pg->starting++;
+      internal_context->peer->daemon = GNUNET_TESTING_daemon_start (internal_context->peer->pg->sched,
+                                                                   internal_context->peer->cfg,
+                                                                   internal_context->timeout,
+                                                                   internal_context->hostname,
+                                                                   &internal_hostkey_callback,
+                                                                   internal_context,
+                                                                   &internal_startup_callback,
+                                                                   internal_context);
+    }
+  else
+    {
+      /* Too many hostkeys being generated already; retry shortly. */
+      GNUNET_SCHEDULER_add_delayed(internal_context->peer->pg->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &internal_start, internal_context);
+    }
+}
+
/**
* Function which continues a peer group starting up
* after successfully generating hostkeys for each peer.
{
unsigned int i;
+ pg->starting = 0;
for (i = 0; i < pg->total; i++)
{
- GNUNET_TESTING_daemon_continue_startup(pg->peers[i].daemon);
+ GNUNET_SCHEDULER_add_now (pg->sched, &internal_continue_startup, &pg->peers[i].internal_context);
+ //GNUNET_TESTING_daemon_continue_startup(pg->peers[i].daemon);
}
}
/**
- * Start count gnunetd processes with the same set of transports and
+ * Start count gnunet instances with the same set of transports and
* applications. The port numbers (any option called "PORT") will be
* adjusted to ensure that no two peers running on the same system
* have the same port(s) in their respective configurations.
* @param cb_cls closure for cb
* @param connect_callback function to call each time two hosts are connected
* @param connect_callback_cls closure for connect_callback
- * @param hostnames space-separated list of hostnames to use; can be NULL (to run
- * everything on localhost).
+ * @param hostnames linked list of hosts to use to start peers on (NULL to run on localhost only)
+ *
* @return NULL on error, otherwise handle to control peer group
*/
struct GNUNET_TESTING_PeerGroup *
void *cb_cls,
GNUNET_TESTING_NotifyConnection
connect_callback, void *connect_callback_cls,
- const char *hostnames)
+ const struct GNUNET_TESTING_Host *hostnames)
{
struct GNUNET_TESTING_PeerGroup *pg;
- const char *rpos;
+ const struct GNUNET_TESTING_Host *hostpos;
+#if 0
char *pos;
+ const char *rpos;
char *start;
+#endif
const char *hostname;
char *baseservicehome;
char *newservicehome;
pg = GNUNET_malloc (sizeof (struct GNUNET_TESTING_PeerGroup));
pg->sched = sched;
pg->cfg = cfg;
- pg->cb = cb;
- pg->cb_cls = cb_cls;
pg->notify_connection = connect_callback;
pg->notify_connection_cls = connect_callback_cls;
pg->total = total;
pg->peers = GNUNET_malloc (total * sizeof (struct PeerData));
if (NULL != hostnames)
{
+ off = 2;
+ hostpos = hostnames;
+ while (hostpos != NULL)
+ {
+ hostpos = hostpos->next;
+ off++;
+ }
+ pg->hosts = GNUNET_malloc (off * sizeof (struct HostData));
+ off = 0;
+
+ hostpos = hostnames;
+ while (hostpos != NULL)
+ {
+ pg->hosts[off].minport = LOW_PORT;
+ pg->hosts[off++].hostname = GNUNET_strdup(hostpos->hostname);
+ hostpos = hostpos->next;
+ }
+
+ if (off == 0)
+ {
+ GNUNET_free (pg->hosts);
+ pg->hosts = NULL;
+ }
+ hostcnt = off;
+ minport = 0;
+
+#if NO_LL
off = 2;
/* skip leading spaces */
while ((0 != *hostnames) && (isspace ( (unsigned char) *hostnames)))
}
hostcnt = off;
minport = 0; /* make gcc happy */
+#endif
}
else
{
pg->peers[off].connect_peers = GNUNET_CONTAINER_multihashmap_create(total);
pg->peers[off].blacklisted_peers = GNUNET_CONTAINER_multihashmap_create(total);
pg->peers[off].pg = pg;
- pg->peers[off].daemon = GNUNET_TESTING_daemon_start (sched,
- pcfg,
- timeout,
- hostname,
- hostkey_callback,
- hostkey_cls,
- cb, cb_cls);
- if (NULL == pg->peers[off].daemon)
- GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
- _("Could not start peer number %u!\n"), off);
+
+ pg->peers[off].internal_context.peer = &pg->peers[off];
+ pg->peers[off].internal_context.timeout = timeout;
+ pg->peers[off].internal_context.hostname = hostname;
+ pg->peers[off].internal_context.hostkey_callback = hostkey_callback;
+ pg->peers[off].internal_context.hostkey_cls = hostkey_cls;
+ pg->peers[off].internal_context.start_cb = cb;
+ pg->peers[off].internal_context.start_cb_cls = cb_cls;
+
+ GNUNET_SCHEDULER_add_now (sched, &internal_start, &pg->peers[off].internal_context);
}
return pg;
struct ChurnContext *churn_ctx;
unsigned int running;
unsigned int stopped;
+ unsigned int total_running;
+ unsigned int total_stopped;
unsigned int i;
unsigned int *running_arr;
unsigned int *stopped_arr;
{
if (pg->peers[i].daemon->running == GNUNET_YES)
{
+ GNUNET_assert(running != -1);
running++;
}
else
{
+ GNUNET_assert(stopped != -1);
stopped++;
}
}
}
churn_ctx = GNUNET_malloc(sizeof(struct ChurnContext));
- running_arr = GNUNET_malloc(running * sizeof(unsigned int));
- stopped_arr = GNUNET_malloc(stopped * sizeof(unsigned int));
+
+ running_arr = NULL;
+ if (running > 0)
+ running_arr = GNUNET_malloc(running * sizeof(unsigned int));
+
+ stopped_arr = NULL;
+ if (stopped > 0)
+ stopped_arr = GNUNET_malloc(stopped * sizeof(unsigned int));
running_permute = NULL;
stopped_permute = NULL;
if (stopped > 0)
stopped_permute = GNUNET_CRYPTO_random_permute(GNUNET_CRYPTO_QUALITY_WEAK, stopped);
+ total_running = running;
+ total_stopped = stopped;
running = 0;
stopped = 0;
{
if (pg->peers[i].daemon->running == GNUNET_YES)
{
+ GNUNET_assert((running_arr != NULL) && (total_running > running));
running_arr[running] = i;
running++;
}
else
{
+ GNUNET_assert((stopped_arr != NULL) && (total_stopped > stopped));
stopped_arr[stopped] = i;
stopped++;
}
timeout, &churn_start_callback, churn_ctx);
}
- GNUNET_free(running_arr);
- GNUNET_free(stopped_arr);
+ GNUNET_free_non_null(running_arr);
+ GNUNET_free_non_null(stopped_arr);
GNUNET_free_non_null(running_permute);
GNUNET_free_non_null(stopped_permute);
}
* @param callback_cls closure for the callback function
*/
void
-GNUNET_TESTING_daemons_restart (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_NotifyCompletion callback, void *callback_cls)
+GNUNET_TESTING_daemons_restart (struct GNUNET_TESTING_PeerGroup *pg,
+ GNUNET_TESTING_NotifyCompletion callback,
+ void *callback_cls)
{
struct RestartContext *restart_context;
unsigned int off;
}
+/**
+ * Callback invoked each time a single peer in the group has
+ * finished (or failed) shutting down.  Tallies the result and,
+ * once every peer has been accounted for, notifies the user's
+ * completion callback (if any) and releases the shared
+ * shutdown context.
+ *
+ * @param cls closure (struct ShutdownContext)
+ * @param emsg NULL on success, error message otherwise
+ */
+void internal_shutdown_callback (void *cls,
+                                 const char *emsg)
+{
+  struct ShutdownContext *shutdown_ctx = cls;
+
+  /* Free a slot for schedule_shutdown_task's concurrency throttle. */
+  shutdown_ctx->outstanding--;
+  if (emsg == NULL)
+    {
+      shutdown_ctx->peers_down++;
+    }
+  else
+    {
+      shutdown_ctx->peers_failed++;
+    }
+
+  if (shutdown_ctx->peers_down + shutdown_ctx->peers_failed == shutdown_ctx->total_peers)
+    {
+      if (shutdown_ctx->cb != NULL)
+        {
+          if (shutdown_ctx->peers_failed > 0)
+            shutdown_ctx->cb(shutdown_ctx->cb_cls, "Not all peers successfully shut down!");
+          else
+            shutdown_ctx->cb(shutdown_ctx->cb_cls, NULL);
+        }
+      /* Free the context once all peers are done regardless of
+         whether a completion callback was given; previously the
+         context leaked whenever cb was NULL. */
+      GNUNET_free(shutdown_ctx);
+    }
+}
+
+/**
+ * Individual shutdown context for a particular peer.
+ * Allocated per peer by GNUNET_TESTING_daemons_stop and freed
+ * by schedule_shutdown_task once the peer's stop request has
+ * actually been issued.
+ */
+struct PeerShutdownContext
+{
+  /**
+   * Pointer to the high level shutdown context
+   * (shared by all peers in the group; freed separately
+   * once every peer has reported back).
+   */
+  struct ShutdownContext *shutdown_ctx;
+
+  /**
+   * The daemon handle for the peer to shut down.
+   */
+  struct GNUNET_TESTING_Daemon *daemon;
+};
+
+/**
+ * Task to rate limit the number of outstanding peer shutdown
+ * requests.  This is necessary for making sure we don't do
+ * too many ssh connections at once, but is generally nicer
+ * to any system as well (graduated task starts, as opposed
+ * to calling gnunet-arm N times all at once).
+ *
+ * @param cls closure (struct PeerShutdownContext for this peer;
+ *        freed here once the stop request has been issued)
+ * @param tc scheduler task context
+ */
+static void
+schedule_shutdown_task (void *cls, const struct GNUNET_SCHEDULER_TaskContext * tc)
+{
+  struct PeerShutdownContext *peer_shutdown_ctx = cls;
+  struct ShutdownContext *shutdown_ctx;
+
+  GNUNET_assert(peer_shutdown_ctx != NULL);
+  shutdown_ctx = peer_shutdown_ctx->shutdown_ctx;
+  GNUNET_assert(shutdown_ctx != NULL);
+
+  /* Use >= so that at most MAX_CONCURRENT_SHUTDOWN requests are
+     in flight (the old '>' allowed one extra), matching the
+     '< MAX' pattern used by internal_continue_startup. */
+  if (shutdown_ctx->outstanding >= MAX_CONCURRENT_SHUTDOWN)
+    GNUNET_SCHEDULER_add_delayed(peer_shutdown_ctx->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_shutdown_task, peer_shutdown_ctx);
+  else
+    {
+      shutdown_ctx->outstanding++;
+      GNUNET_TESTING_daemon_stop (peer_shutdown_ctx->daemon, shutdown_ctx->timeout, &internal_shutdown_callback, shutdown_ctx, GNUNET_YES, GNUNET_NO);
+      GNUNET_free(peer_shutdown_ctx);
+    }
+}
/**
* Shutdown all peers started in the given group.
*
* @param pg handle to the peer group
* @param timeout how long to wait for shutdown
+ * @param cb callback to notify upon success or failure
+ * @param cb_cls closure for cb
*/
void
GNUNET_TESTING_daemons_stop (struct GNUNET_TESTING_PeerGroup *pg,
- struct GNUNET_TIME_Relative timeout)
+ struct GNUNET_TIME_Relative timeout,
+ GNUNET_TESTING_NotifyCompletion cb,
+ void *cb_cls)
{
unsigned int off;
+ struct ShutdownContext *shutdown_ctx;
+ struct PeerShutdownContext *peer_shutdown_ctx;
+
+ GNUNET_assert(pg->total > 0);
+
+ shutdown_ctx = GNUNET_malloc(sizeof(struct ShutdownContext));
+ shutdown_ctx->cb = cb;
+ shutdown_ctx->cb_cls = cb_cls;
+ shutdown_ctx->total_peers = pg->total;
+ shutdown_ctx->timeout = timeout;
+  /* shutdown_ctx->outstanding = 0; */
for (off = 0; off < pg->total; off++)
{
- /* FIXME: should we wait for our continuations to be called
- here? This would require us to take a continuation as
- well... */
-
- if (NULL != pg->peers[off].daemon)
- GNUNET_TESTING_daemon_stop (pg->peers[off].daemon, timeout, NULL, NULL, GNUNET_YES, GNUNET_NO);
+ GNUNET_assert(NULL != pg->peers[off].daemon);
+ peer_shutdown_ctx = GNUNET_malloc(sizeof(struct PeerShutdownContext));
+ peer_shutdown_ctx->daemon = pg->peers[off].daemon;
+ peer_shutdown_ctx->shutdown_ctx = shutdown_ctx;
+ GNUNET_SCHEDULER_add_now(pg->peers[off].daemon->sched, &schedule_shutdown_task, peer_shutdown_ctx);
+ //GNUNET_TESTING_daemon_stop (pg->peers[off].daemon, timeout, shutdown_cb, shutdown_ctx, GNUNET_YES, GNUNET_NO);
if (NULL != pg->peers[off].cfg)
GNUNET_CONFIGURATION_destroy (pg->peers[off].cfg);
if (pg->peers[off].allowed_peers != NULL)