GNUnet is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 2, or (at your
+ by the Free Software Foundation; either version 3, or (at your
option) any later version.
GNUnet is distributed in the hope that it will be useful, but
#include "platform.h"
#include "gnunet_arm_service.h"
#include "gnunet_testing_lib.h"
+#include "gnunet_core_service.h"
#define VERBOSE_TESTING GNUNET_NO
*/
#define HIGH_PORT 56000
-#define MAX_OUTSTANDING_CONNECTIONS 50
+#define MAX_OUTSTANDING_CONNECTIONS 10
#define MAX_CONCURRENT_HOSTKEYS 16
+#define MAX_CONCURRENT_STARTING 50
+
#define CONNECT_TIMEOUT GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 300)
#define CONNECT_ATTEMPTS 8
};
#endif
/**
 * Context kept per peer to stagger hostkey generation and
 * peer startup within a peer group (see internal_start and
 * internal_continue_startup).
 */
struct InternalStartContext
{
  /**
   * Pointer to peerdata
   */
  struct PeerData *peer;

  /**
   * Timeout for peer startup
   */
  struct GNUNET_TIME_Relative timeout;

  /**
   * Client callback for hostkey notification
   */
  GNUNET_TESTING_NotifyHostkeyCreated hostkey_callback;

  /**
   * Closure for hostkey_callback
   */
  void *hostkey_cls;

  /**
   * Client callback for peer start notification
   */
  GNUNET_TESTING_NotifyDaemonRunning start_cb;

  /**
   * Closure for start_cb
   */
  void *start_cb_cls;

  /**
   * Hostname, where to start the peer
   */
  const char *hostname;
};
+
/**
* Data we keep per peer.
*/
*/
struct GNUNET_TESTING_PeerGroup *pg;
- /**
- * Linked list of peer connections (pointers)
- */
- //struct PeerConnection *connected_peers;
/**
* Hash map of allowed peer connections (F2F created topology)
*/
* creating any topology so the count is valid once finished.
*/
int num_connections;
+
+ /**
+ * Context to keep track of peers being started, to
+ * stagger hostkey generation and peer startup.
+ */
+ struct InternalStartContext internal_context;
};
uint16_t minport;
};
/**
 * Context for iterating the connection topology of an entire
 * peer group; shared by all per-peer CoreContext instances of
 * one GNUNET_TESTING_get_topology call.
 */
struct TopologyIterateContext
{
  /**
   * Callback for notifying of two connected peers.
   */
  GNUNET_TESTING_NotifyTopology topology_cb;

  /**
   * Closure for topology_cb
   */
  void *cls;

  /**
   * Number of peers with a topology iteration currently outstanding
   * (used to throttle against MAX_OUTSTANDING_CONNECTIONS).
   */
  unsigned int connected;

  /**
   * Number of peers we have finished iterating.
   */
  unsigned int completed;

  /**
   * Number of peers total.
   */
  unsigned int total;
};
+
/**
 * Context for retrieving statistics from every running peer in a
 * peer group; shared by all per-peer CoreContext instances of one
 * GNUNET_TESTING_get_statistics call.
 */
struct StatsIterateContext
{
  /**
   * Handle to the statistics service.
   * NOTE(review): this single field is overwritten once per peer by
   * schedule_get_statistics -- confirm whether it should live in the
   * per-peer CoreContext instead.
   */
  struct GNUNET_STATISTICS_Handle *stats_handle;

  /**
   * Handle for getting statistics.
   */
  struct GNUNET_STATISTICS_GetHandle *stats_get_handle;

  /**
   * Continuation to call once all stats information has been retrieved.
   */
  GNUNET_STATISTICS_Callback cont;

  /**
   * Proc function to call on each value received.
   */
  GNUNET_TESTING_STATISTICS_Iterator proc;

  /**
   * Closure passed to both proc and cont.
   */
  void *cls;

  /**
   * Number of peers with a statistics request currently outstanding
   * (used to throttle against MAX_OUTSTANDING_CONNECTIONS).
   */
  unsigned int connected;

  /**
   * Number of peers we have finished iterating.
   */
  unsigned int completed;

  /**
   * Number of peers total.
   */
  unsigned int total;
};
+
/**
 * Per-peer context used while iterating over a peer group
 * (topology or statistics retrieval); one is allocated per
 * running peer and freed when that peer's iteration ends.
 */
struct CoreContext
{
  /**
   * Back-pointer to the shared iteration context; either a
   * struct TopologyIterateContext or a struct StatsIterateContext,
   * depending on which iteration scheduled this context.
   */
  void *iter_context;

  /**
   * The daemon this context queries.
   */
  struct GNUNET_TESTING_Daemon *daemon;
};
/**
* Handle to a group of GNUnet peers.
/**
* Function to call on each started daemon.
*/
- GNUNET_TESTING_NotifyDaemonRunning cb;
+ //GNUNET_TESTING_NotifyDaemonRunning cb;
/**
* Closure for cb.
*/
- void *cb_cls;
+ //void *cb_cls;
/*
* Function to call on each topology connection created
* How many peers are being started right now?
*/
unsigned int starting;
+
+ /**
+ * How many peers have already been started?
+ */
+ unsigned int started;
+};
+
/**
 * State carried while rewriting a peer's configuration so that
 * multiple peers on the same host do not collide.
 */
struct UpdateContext
{
  /**
   * The configuration handle being updated.
   */
  struct GNUNET_CONFIGURATION_Handle *ret;

  /**
   * Hostname the peer will run on (NULL for localhost).
   */
  const char *hostname;

  /**
   * NOTE(review): presumably the next port number to assign to
   * "PORT" options -- confirm against the update function using this.
   */
  unsigned int nport;

  /**
   * NOTE(review): presumably a counter used to generate unique
   * UNIXPATH values -- confirm against the update function using this.
   */
  unsigned int upnum;
};
+
+
/**
 * Context for scheduling an attempt to connect two daemons
 * (see schedule_connect).
 */
struct ConnectContext
{
  /**
   * First daemon of the pair to connect.
   */
  struct GNUNET_TESTING_Daemon *first;

  /**
   * Second daemon of the pair to connect.
   */
  struct GNUNET_TESTING_Daemon *second;

  /**
   * Peer group both daemons belong to.
   */
  struct GNUNET_TESTING_PeerGroup *pg;
};
/**
memcpy (uid, hash, sizeof(uint32_t));
}
-struct UpdateContext
-{
- struct GNUNET_CONFIGURATION_Handle *ret;
- const char *hostname;
- unsigned int nport;
- unsigned int upnum;
-};
-
-
-struct ConnectContext
-{
- struct GNUNET_TESTING_Daemon *first;
-
- struct GNUNET_TESTING_Daemon *second;
-
- struct GNUNET_TESTING_PeerGroup *pg;
-};
-
/**
* Number of connects we are waiting on, allows us to rate limit
* connect attempts.
return GNUNET_NO;
while (topology_strings[curr] != NULL)
{
- if (strcmp(topology_strings[curr], topology_string) == 0)
+ if (strcasecmp(topology_strings[curr], topology_string) == 0)
{
*topology = curr;
return GNUNET_YES;
* topology option, GNUNET_NO if not
*/
int
-GNUNET_TESTING_topology_option_get(enum GNUNET_TESTING_TopologyOption *topology_option,
- char * topology_string)
+GNUNET_TESTING_topology_option_get (enum GNUNET_TESTING_TopologyOption *topology_option,
+ char * topology_string)
{
/**
* Options for connecting a topology as strings.
return GNUNET_NO;
while (NULL != topology_option_strings[curr])
{
- if (strcmp(topology_option_strings[curr], topology_string) == 0)
+ if (strcasecmp(topology_option_strings[curr], topology_string) == 0)
{
*topology_option = curr;
return GNUNET_YES;
{
GNUNET_asprintf(&allowed_hosts, "%s; 127.0.0.1;", control_host);
GNUNET_CONFIGURATION_set_value_string(uc.ret, "core", "ACCEPT_FROM", allowed_hosts);
+ GNUNET_CONFIGURATION_set_value_string(uc.ret, "transport", "ACCEPT_FROM", allowed_hosts);
GNUNET_free_non_null(control_host);
GNUNET_free(allowed_hosts);
}
}
+/**
+ * Either delay a connection (because there are too many outstanding)
+ * or schedule it for right now.
+ *
+ * @param cls a connection context
+ * @param tc the task runtime context
+ */
static void schedule_connect(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
{
struct ConnectContext *connect_context = cls;
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
_("Delaying connect, we have too many outstanding connections!\n"));
#endif
- GNUNET_SCHEDULER_add_delayed(connect_context->pg->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_SECONDS, 3), &schedule_connect, connect_context);
+ GNUNET_SCHEDULER_add_delayed(connect_context->pg->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_connect, connect_context);
}
else
{
}
}
/**
 * Internal callback for topology information for a particular peer.
 * Called once for each connected peer with a non-NULL identity, and
 * once with peer == NULL when this peer's iteration finishes (or
 * fails to start; see schedule_get_topology).
 *
 * @param cls the CoreContext of the peer being iterated
 * @param peer identity of a connected peer, NULL when done
 * @param latency reported latency to that peer
 * @param distance reported distance to that peer
 */
static void
internal_topology_callback(void *cls,
                           const struct GNUNET_PeerIdentity *peer,
                           struct GNUNET_TIME_Relative latency, uint32_t distance)
{
  struct CoreContext *core_ctx = cls;
  struct TopologyIterateContext *iter_ctx = core_ctx->iter_context;

  if (peer == NULL) /* Either finished, or something went wrong */
    {
      iter_ctx->completed++;
      iter_ctx->connected--;
      /* One core context allocated per iteration, must free! */
      GNUNET_free(core_ctx);
    }
  else
    {
      /* Forward one topology edge (this daemon <-> peer) to the client. */
      iter_ctx->topology_cb(iter_ctx->cls, &core_ctx->daemon->id, peer, latency, distance, NULL);
    }

  if (iter_ctx->completed == iter_ctx->total)
    {
      /* All peers done: signal overall completion with an all-NULL call,
       * then release the shared iteration context. */
      iter_ctx->topology_cb(iter_ctx->cls, NULL, NULL, GNUNET_TIME_relative_get_zero(), 0, NULL);
      /* Once all are done, free the iteration context */
      GNUNET_free(iter_ctx);
    }
}
+
+
+/**
+ * Check running topology iteration tasks, if below max start a new one, otherwise
+ * schedule for some time in the future.
+ */
+static void
+schedule_get_topology(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+ struct CoreContext *core_context = cls;
+ struct TopologyIterateContext *topology_context = (struct TopologyIterateContext *)core_context->iter_context;
+ if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+ return;
+
+ if (topology_context->connected > MAX_OUTSTANDING_CONNECTIONS)
+ {
+#if VERBOSE_TESTING > 2
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Delaying connect, we have too many outstanding connections!\n"));
+#endif
+ GNUNET_SCHEDULER_add_delayed(core_context->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_get_topology, core_context);
+ }
+ else
+ {
+#if VERBOSE_TESTING > 2
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ _("Creating connection, outstanding_connections is %d\n"), outstanding_connects);
+#endif
+ topology_context->connected++;
+ if (GNUNET_OK != GNUNET_CORE_iterate_peers (core_context->daemon->sched, core_context->daemon->cfg, &internal_topology_callback, core_context))
+ internal_topology_callback(core_context, NULL, GNUNET_TIME_relative_get_zero(), 0);
+
+ }
+}
+
+/**
+ * Iterate over all (running) peers in the peer group, retrieve
+ * all connections that each currently has.
+ */
+void
+GNUNET_TESTING_get_topology (struct GNUNET_TESTING_PeerGroup *pg, GNUNET_TESTING_NotifyTopology cb, void *cls)
+{
+ struct TopologyIterateContext *topology_context;
+ struct CoreContext *core_ctx;
+ unsigned int i;
+ unsigned int total_count;
+
+ /* Allocate a single topology iteration context */
+ topology_context = GNUNET_malloc(sizeof(struct TopologyIterateContext));
+ topology_context->topology_cb = cb;
+ topology_context->cls = cls;
+ total_count = 0;
+ for (i = 0; i < pg->total; i++)
+ {
+ if (pg->peers[i].daemon->running == GNUNET_YES)
+ {
+ /* Allocate one core context per core we need to connect to */
+ core_ctx = GNUNET_malloc(sizeof(struct CoreContext));
+ core_ctx->daemon = pg->peers[i].daemon;
+ /* Set back pointer to topology iteration context */
+ core_ctx->iter_context = topology_context;
+ GNUNET_SCHEDULER_add_now(pg->sched, &schedule_get_topology, core_ctx);
+ total_count++;
+ }
+ }
+ topology_context->total = total_count;
+ return;
+}
+
+/**
+ * Callback function to process statistic values.
+ * This handler is here only really to insert a peer
+ * identity (or daemon) so the statistics can be uniquely
+ * tied to a single running peer.
+ *
+ * @param cls closure
+ * @param subsystem name of subsystem that created the statistic
+ * @param name the name of the datum
+ * @param value the current value
+ * @param is_persistent GNUNET_YES if the value is persistent, GNUNET_NO if not
+ * @return GNUNET_OK to continue, GNUNET_SYSERR to abort iteration
+ */
+static int internal_stats_callback (void *cls,
+ const char *subsystem,
+ const char *name,
+ uint64_t value,
+ int is_persistent)
+{
+ struct CoreContext *core_context = cls;
+ struct StatsIterateContext *stats_context = (struct StatsIterateContext *)core_context->iter_context;
+
+ return stats_context->proc(stats_context->cls, &core_context->daemon->id, subsystem, name, value, is_persistent);
+}
+
/**
 * Internal continuation call for statistics iteration; called when a
 * single peer's statistics retrieval completes (or failed to start).
 *
 * @param cls closure, the CoreContext for this iteration
 * @param success whether or not the statistics iterations
 *        was canceled or not (we don't care)
 */
static void internal_stats_cont (void *cls, int success)
{
  struct CoreContext *core_context = cls;
  struct StatsIterateContext *stats_context = (struct StatsIterateContext *)core_context->iter_context;

  stats_context->connected--;
  stats_context->completed++;

  if (stats_context->completed == stats_context->total)
    {
      /* All peers done: notify the client, then clean up.
       * NOTE(review): stats_handle is one shared field but
       * schedule_get_statistics creates a handle per peer, overwriting
       * it each time -- only the last handle gets destroyed here, so
       * earlier handles appear to leak.  Confirm and consider moving
       * the handle into CoreContext. */
      stats_context->cont(stats_context->cls, GNUNET_YES);
      if (stats_context->stats_handle != NULL)
        GNUNET_STATISTICS_destroy(stats_context->stats_handle, GNUNET_NO);
      GNUNET_free(stats_context);
    }
  /* The per-peer context is always freed, shared context only at the end. */
  GNUNET_free(core_context);
}
+
/**
 * Check running statistics retrieval tasks: if below the limit,
 * connect to this peer's statistics service and start retrieving
 * values; otherwise re-schedule this task for a short while in the
 * future.
 *
 * @param cls the CoreContext of the peer to query
 * @param tc scheduler task context
 */
static void
schedule_get_statistics(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
{
  struct CoreContext *core_context = cls;
  struct StatsIterateContext *stats_context = (struct StatsIterateContext *)core_context->iter_context;

  if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
    return;

  if (stats_context->connected > MAX_OUTSTANDING_CONNECTIONS)
    {
#if VERBOSE_TESTING > 2
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                  _("Delaying connect, we have too many outstanding connections!\n"));
#endif
      GNUNET_SCHEDULER_add_delayed(core_context->daemon->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &schedule_get_statistics, core_context);
    }
  else
    {
#if VERBOSE_TESTING > 2
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                  _("Creating connection, outstanding_connections is %d\n"), outstanding_connects);
#endif

      stats_context->connected++;
      /* NOTE(review): stats_handle lives on the shared
       * StatsIterateContext but is (re)assigned here once per peer;
       * every assignment but the last appears to leak a handle since
       * internal_stats_cont destroys only the final one.  Confirm and
       * consider storing the handle in CoreContext instead. */
      stats_context->stats_handle = GNUNET_STATISTICS_create(core_context->daemon->sched, "testing", core_context->daemon->cfg);
      if (stats_context->stats_handle == NULL)
        {
          /* Could not connect to the statistics service: count as done. */
          internal_stats_cont (core_context, GNUNET_NO);
          return;
        }

      stats_context->stats_get_handle = GNUNET_STATISTICS_get(stats_context->stats_handle, NULL, NULL, GNUNET_TIME_relative_get_forever(), &internal_stats_cont, &internal_stats_callback, core_context);
      if (stats_context->stats_get_handle == NULL)
        internal_stats_cont (core_context, GNUNET_NO);

    }
}
+
+
+/**
+ * Iterate over all (running) peers in the peer group, retrieve
+ * all statistics from each.
+ */
+void
+GNUNET_TESTING_get_statistics (struct GNUNET_TESTING_PeerGroup *pg,
+ GNUNET_STATISTICS_Callback cont,
+ GNUNET_TESTING_STATISTICS_Iterator proc, void *cls)
+{
+ struct StatsIterateContext *stats_context;
+ struct CoreContext *core_ctx;
+ unsigned int i;
+ unsigned int total_count;
+
+ /* Allocate a single stats iteration context */
+ stats_context = GNUNET_malloc(sizeof(struct StatsIterateContext));
+ stats_context->cont = cont;
+ stats_context->proc = proc;
+ stats_context->cls = cls;
+ total_count = 0;
+ for (i = 0; i < pg->total; i++)
+ {
+ if (pg->peers[i].daemon->running == GNUNET_YES)
+ {
+ /* Allocate one core context per core we need to connect to */
+ core_ctx = GNUNET_malloc(sizeof(struct CoreContext));
+ core_ctx->daemon = pg->peers[i].daemon;
+ /* Set back pointer to topology iteration context */
+ core_ctx->iter_context = stats_context;
+ GNUNET_SCHEDULER_add_now(pg->sched, &schedule_get_statistics, core_ctx);
+ total_count++;
+ }
+ }
+ stats_context->total = total_count;
+ return;
+}
+
/**
* There are many ways to connect peers that are supported by this function.
* To connect peers in the same topology that was created via the
}
/**
- * Function which continues a peer group starting up
- * after successfully generating hostkeys for each peer.
- *
- * @param pg the peer group to continue starting
+ * Callback that is called whenever a hostkey is generated
+ * for a peer. Call the real callback and decrement the
+ * starting counter for the peergroup.
*
+ * @param cls closure
+ * @param id identifier for the daemon, NULL on error
+ * @param d handle for the daemon
+ * @param emsg error message (NULL on success)
*/
-void
-GNUNET_TESTING_daemons_continue_startup(struct GNUNET_TESTING_PeerGroup *pg)
+static void internal_hostkey_callback (void *cls,
+ const struct GNUNET_PeerIdentity *id,
+ struct GNUNET_TESTING_Daemon *d,
+ const char *emsg)
{
- unsigned int i;
-
- for (i = 0; i < pg->total; i++)
+ struct InternalStartContext *internal_context = cls;
+ internal_context->peer->pg->starting--;
+ internal_context->peer->pg->started++;
+ if (internal_context->hostkey_callback != NULL)
+ internal_context->hostkey_callback(internal_context->hostkey_cls, id, d, emsg);
+ else if (internal_context->peer->pg->started == internal_context->peer->pg->total)
{
- GNUNET_TESTING_daemon_continue_startup(pg->peers[i].daemon);
+ internal_context->peer->pg->started = 0; /* Internal startup may use this counter! */
+ GNUNET_TESTING_daemons_continue_startup(internal_context->peer->pg);
}
}
-struct InternalStartContext
-{
- struct PeerData *peer;
- struct GNUNET_SCHEDULER_Handle *sched;
- const struct GNUNET_CONFIGURATION_Handle *pcfg;
- struct GNUNET_TIME_Relative timeout;
- GNUNET_TESTING_NotifyHostkeyCreated hostkey_callback;
- void *hostkey_cls;
- GNUNET_TESTING_NotifyDaemonRunning cb;
- void *cb_cls;
- const char *hostname;
-};
-
-
/**
- * Prototype of a function that will be called whenever
- * a daemon was started by the testing library.
+ * Callback that is called whenever a peer has finished starting.
+ * Call the real callback and decrement the starting counter
+ * for the peergroup.
*
* @param cls closure
* @param id identifier for the daemon, NULL on error
* @param d handle for the daemon
* @param emsg error message (NULL on success)
*/
-static void internal_hostkey_callback (void *cls,
+static void internal_startup_callback (void *cls,
const struct GNUNET_PeerIdentity *id,
+ const struct GNUNET_CONFIGURATION_Handle *cfg,
struct GNUNET_TESTING_Daemon *d,
const char *emsg)
{
struct InternalStartContext *internal_context = cls;
internal_context->peer->pg->starting--;
- internal_context->hostkey_callback(internal_context->hostkey_cls, id, d, emsg);
- GNUNET_free(internal_context);
+ if (internal_context->start_cb != NULL)
+ internal_context->start_cb(internal_context->start_cb_cls, id, cfg, d, emsg);
+}
+
+static void
+internal_continue_startup (void *cls, const struct GNUNET_SCHEDULER_TaskContext * tc)
+{
+ struct InternalStartContext *internal_context = cls;
+
+ if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
+ {
+ return;
+ }
+
+ if (internal_context->peer->pg->starting < MAX_CONCURRENT_STARTING)
+ {
+ internal_context->peer->pg->starting++;
+ GNUNET_TESTING_daemon_continue_startup (internal_context->peer->daemon);
+ }
+ else
+ {
+ GNUNET_SCHEDULER_add_delayed(internal_context->peer->pg->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &internal_continue_startup, internal_context);
+ }
}
static void
if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
{
- GNUNET_free(internal_context);
return;
}
if (internal_context->peer->pg->starting < MAX_CONCURRENT_HOSTKEYS)
{
internal_context->peer->pg->starting++;
- internal_context->peer->daemon = GNUNET_TESTING_daemon_start (internal_context->sched,
- internal_context->pcfg,
+ internal_context->peer->daemon = GNUNET_TESTING_daemon_start (internal_context->peer->pg->sched,
+ internal_context->peer->cfg,
internal_context->timeout,
internal_context->hostname,
&internal_hostkey_callback,
internal_context,
- internal_context->cb,
- internal_context->cb_cls);
+ &internal_startup_callback,
+ internal_context);
}
else
{
- GNUNET_SCHEDULER_add_delayed(internal_context->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &internal_start, internal_context);
+ GNUNET_SCHEDULER_add_delayed(internal_context->peer->pg->sched, GNUNET_TIME_relative_multiply(GNUNET_TIME_UNIT_MILLISECONDS, 100), &internal_start, internal_context);
}
}
/**
- * Start count gnunetd processes with the same set of transports and
+ * Function which continues a peer group starting up
+ * after successfully generating hostkeys for each peer.
+ *
+ * @param pg the peer group to continue starting
+ *
+ */
+void
+GNUNET_TESTING_daemons_continue_startup(struct GNUNET_TESTING_PeerGroup *pg)
+{
+ unsigned int i;
+
+ pg->starting = 0;
+ for (i = 0; i < pg->total; i++)
+ {
+ GNUNET_SCHEDULER_add_now (pg->sched, &internal_continue_startup, &pg->peers[i].internal_context);
+ //GNUNET_TESTING_daemon_continue_startup(pg->peers[i].daemon);
+ }
+}
+
+/**
+ * Start count gnunet instances with the same set of transports and
* applications. The port numbers (any option called "PORT") will be
* adjusted to ensure that no two peers running on the same system
* have the same port(s) in their respective configurations.
* @param cb_cls closure for cb
* @param connect_callback function to call each time two hosts are connected
* @param connect_callback_cls closure for connect_callback
- * @param hostnames space-separated list of hostnames to use; can be NULL (to run
- * everything on localhost).
+ * @param hostnames linked list of hosts to use to start peers on (NULL to run on localhost only)
+ *
* @return NULL on error, otherwise handle to control peer group
*/
struct GNUNET_TESTING_PeerGroup *
void *cb_cls,
GNUNET_TESTING_NotifyConnection
connect_callback, void *connect_callback_cls,
- const char *hostnames)
+ const struct GNUNET_TESTING_Host *hostnames)
{
struct GNUNET_TESTING_PeerGroup *pg;
- const char *rpos;
+ const struct GNUNET_TESTING_Host *hostpos;
+#if 0
char *pos;
+ const char *rpos;
char *start;
+#endif
const char *hostname;
char *baseservicehome;
char *newservicehome;
char *tmpdir;
- struct InternalStartContext *internal_context;
struct GNUNET_CONFIGURATION_Handle *pcfg;
unsigned int off;
unsigned int hostcnt;
pg = GNUNET_malloc (sizeof (struct GNUNET_TESTING_PeerGroup));
pg->sched = sched;
pg->cfg = cfg;
- pg->cb = cb;
- pg->cb_cls = cb_cls;
pg->notify_connection = connect_callback;
pg->notify_connection_cls = connect_callback_cls;
pg->total = total;
pg->peers = GNUNET_malloc (total * sizeof (struct PeerData));
if (NULL != hostnames)
{
+ off = 2;
+ hostpos = hostnames;
+ while (hostpos != NULL)
+ {
+ hostpos = hostpos->next;
+ off++;
+ }
+ pg->hosts = GNUNET_malloc (off * sizeof (struct HostData));
+ off = 0;
+
+ hostpos = hostnames;
+ while (hostpos != NULL)
+ {
+ pg->hosts[off].minport = LOW_PORT;
+ pg->hosts[off++].hostname = GNUNET_strdup(hostpos->hostname);
+ hostpos = hostpos->next;
+ }
+
+ if (off == 0)
+ {
+ GNUNET_free (pg->hosts);
+ pg->hosts = NULL;
+ }
+ hostcnt = off;
+ minport = 0;
+
+#if NO_LL
off = 2;
/* skip leading spaces */
while ((0 != *hostnames) && (isspace ( (unsigned char) *hostnames)))
}
hostcnt = off;
minport = 0; /* make gcc happy */
+#endif
}
else
{
pg->peers[off].connect_peers = GNUNET_CONTAINER_multihashmap_create(total);
pg->peers[off].blacklisted_peers = GNUNET_CONTAINER_multihashmap_create(total);
pg->peers[off].pg = pg;
- internal_context = GNUNET_malloc(sizeof(struct InternalStartContext));
- internal_context->sched = sched;
- internal_context->peer = &pg->peers[off];
- internal_context->pcfg = pcfg;
- internal_context->timeout = timeout;
- internal_context->hostname = hostname;
- internal_context->hostkey_callback = hostkey_callback;
- internal_context->hostkey_cls = hostkey_cls;
- internal_context->cb = cb;
- internal_context->cb_cls = cb_cls;
-
- GNUNET_SCHEDULER_add_now (sched, &internal_start, internal_context);
-
- /*
- pg->peers[off].daemon = GNUNET_TESTING_daemon_start (sched,
- pcfg,
- timeout,
- hostname,
- hostkey_callback,
- hostkey_cls,
- cb, cb_cls);
- if (NULL == pg->peers[off].daemon)
- GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
- _("Could not start peer number %u!\n"), off);
- */
+
+ pg->peers[off].internal_context.peer = &pg->peers[off];
+ pg->peers[off].internal_context.timeout = timeout;
+ pg->peers[off].internal_context.hostname = hostname;
+ pg->peers[off].internal_context.hostkey_callback = hostkey_callback;
+ pg->peers[off].internal_context.hostkey_cls = hostkey_cls;
+ pg->peers[off].internal_context.start_cb = cb;
+ pg->peers[off].internal_context.start_cb_cls = cb_cls;
+
+ GNUNET_SCHEDULER_add_now (sched, &internal_start, &pg->peers[off].internal_context);
}
return pg;
for (off = 0; off < pg->total; off++)
{
- if (NULL != pg->peers[off].daemon)
- GNUNET_TESTING_daemon_stop (pg->peers[off].daemon, timeout, shutdown_cb, shutdown_ctx, GNUNET_YES, GNUNET_NO);
+ GNUNET_assert(NULL != pg->peers[off].daemon);
+ GNUNET_TESTING_daemon_stop (pg->peers[off].daemon, timeout, shutdown_cb, shutdown_ctx, GNUNET_YES, GNUNET_NO);
if (NULL != pg->peers[off].cfg)
GNUNET_CONFIGURATION_destroy (pg->peers[off].cfg);
if (pg->peers[off].allowed_peers != NULL)