Boston, MA 02110-1301, USA.
*/
/**
- * @file rps/test_rps_multipeer.c
+ * @file rps/test_rps.c
* @brief Testcase for the random peer sampling service. Starts
* a peergroup with a given number of peers, then waits to
* receive size pushes/pulls from each peer. Expects to wait
*/
static struct GNUNET_TESTBED_Peer **testbed_peers;
+/**
+ * @brief Indicates whether peer should go off- or online
+ */
+enum PEER_ONLINE_DELTA {
+ /**
+ * @brief Indicates peer going online
+ */
+ PEER_GO_ONLINE = 1,
+ /**
+ * @brief Indicates peer going offline
+ */
+ PEER_GO_OFFLINE = -1,
+};
/**
* Operation map entry
struct GNUNET_TESTBED_Operation *op;
/**
- * Depending on whether we start or stop NSE service at the peer set this to 1
- * or -1
+ * Depending on whether we start or stop RPS service at the peer, set this to
+ * #PEER_GO_ONLINE (1) or #PEER_GO_OFFLINE (-1)
*/
- int delta;
+ enum PEER_ONLINE_DELTA delta;
/**
* Index of the regarding peer
int online;
/**
- * Number of Peer IDs to request
+ * Number of Peer IDs to request during the whole test
*/
unsigned int num_ids_to_request;
* Number of received PeerIDs
*/
unsigned int num_recv_ids;
+
+ /**
+ * Pending operation on that peer
+ */
+ const struct OpListEntry *entry_op_manage;
+
+ /**
+ * Testbed operation to connect to statistics service
+ */
+ struct GNUNET_TESTBED_Operation *stat_op;
+
+ /**
+ * Handle to the statistics service
+ */
+ struct GNUNET_STATISTICS_Handle *stats_h;
+
+ /**
+ * @brief flags to indicate which statistics values have been already
+ * collected from the statistics service.
+ * Used to check whether we are able to shutdown.
+ */
+ uint32_t stat_collected_flags;
+};
+
+/**
+ * @brief Single-bit flags identifying the statistics values that can be
+ * collected from the statistics service at the end of a run.
+ * The trailing comment on each constant gives its 1-based bit position.
+ * Values are combined into the masks stat_collect_flags /
+ * stat_collected_flags.
+ */
+enum STAT_TYPE
+{
+  STAT_TYPE_ROUNDS = 0x1, /* bit 1 */
+  STAT_TYPE_BLOCKS = 0x2, /* bit 2 */
+  STAT_TYPE_BLOCKS_MANY_PUSH = 0x4, /* bit 3 */
+  STAT_TYPE_BLOCKS_FEW_PUSH = 0x8, /* bit 4 */
+  STAT_TYPE_BLOCKS_FEW_PULL = 0x10, /* bit 5 */
+  STAT_TYPE_ISSUED_PUSH_SEND = 0x20, /* bit 6 */
+  STAT_TYPE_ISSUED_PULL_REQ = 0x40, /* bit 7 */
+  STAT_TYPE_ISSUED_PULL_REP = 0x80, /* bit 8 */
+  STAT_TYPE_SENT_PUSH_SEND = 0x100, /* bit 9 */
+  STAT_TYPE_SENT_PULL_REQ = 0x200, /* bit 10 */
+  STAT_TYPE_SENT_PULL_REP = 0x400, /* bit 11 */
+  STAT_TYPE_RECV_PUSH_SEND = 0x800, /* bit 12 */
+  STAT_TYPE_RECV_PULL_REQ = 0x1000, /* bit 13 */
+  STAT_TYPE_RECV_PULL_REP = 0x2000, /* bit 14 */
+  STAT_TYPE_MAX = 0x80000000, /* bit 32; upper bound for iteration */
+};
+
+/**
+ * @brief Closure for a single statistics request; identifies the peer and
+ * the statistics value the request is for.
+ */
+struct STATcls
+{
+  /* Peer the statistics value belongs to */
+  struct RPSPeer *rps_peer;
+  /* Which statistics value (a single #STAT_TYPE bit) was requested */
+  enum STAT_TYPE stat_type;
};
*/
static int ok;
+/**
+ * Identifier for the task that shuts the test down (scheduled on timeout,
+ * possibly rescheduled to run earlier on success)
+ */
+static struct GNUNET_SCHEDULER_Task *shutdown_task;
+
/**
* Identifier for the churn task that runs periodically
*/
typedef void (*InitPeer) (struct RPSPeer *rps_peer);
/**
- * Called directly after connecting to the service
+ * @brief Called directly after connecting to the service
+ *
+ * @param rps_peer Specific peer the function is called on
+ * @param h the handle to the rps service
*/
-typedef void (*PreTest) (void *cls, struct GNUNET_RPS_Handle *h);
+typedef void (*PreTest) (struct RPSPeer *rps_peer, struct GNUNET_RPS_Handle *h);
/**
+ * @brief Executes functions to test the api/service for a given peer
+ *
* Called from within #rps_connect_complete_cb ()
- * Executes functions to test the api/service
+ * Implemented by #churn_test_cb, #profiler_cb, #mal_cb, #single_req_cb,
+ * #delay_req_cb, #seed_big_cb, #single_peer_seed_cb, #seed_cb, #req_cancel_cb
+ *
+ * @param rps_peer the peer the task runs on
*/
typedef void (*MainTest) (struct RPSPeer *rps_peer);
/**
* Called directly before disconnecting from the service
*/
-typedef void (*PostTest) (void *cls, struct GNUNET_RPS_Handle *h);
+typedef void (*PostTest) (struct RPSPeer *peer);
/**
* Function called after disconnect to evaluate test success
*/
typedef int (*EvaluationCallback) (void);
+/**
+ * @brief Do we have Churn?
+ */
+enum OPTION_CHURN {
+ /**
+ * @brief If we have churn this is set
+ */
+ HAVE_CHURN,
+ /**
+ * @brief If we have no churn this is set
+ */
+ HAVE_NO_CHURN,
+};
+
+/**
+ * @brief Is it ok to quit the test before the timeout?
+ */
+enum OPTION_QUICK_QUIT {
+ /**
+ * @brief It is ok for the test to quit before the timeout triggers
+ */
+ HAVE_QUICK_QUIT,
+
+ /**
+ * @brief It is NOT ok for the test to quit before the timeout triggers
+ */
+ HAVE_NO_QUICK_QUIT,
+};
+
+/**
+ * @brief Do we collect statistics at the end?
+ */
+enum OPTION_COLLECT_STATISTICS {
+ /**
+ * @brief We collect statistics at the end
+ */
+ COLLECT_STATISTICS,
+
+ /**
+ * @brief We do not collect statistics at the end
+ */
+ NO_COLLECT_STATISTICS,
+};
/**
* Structure to define a single test
char *name;
/**
- * Called to initialise peer
+ * Called with a single peer in order to initialise that peer
*/
InitPeer init_peer;
PreTest pre_test;
/**
- * Function to execute the functions to be tested
+ * Main function for each peer
*/
MainTest main_test;
* Number of Requests to make.
*/
uint32_t num_requests;
+
+ /**
+ * Run with (-out) churn
+ */
+ enum OPTION_CHURN have_churn;
+
+ /**
+ * Quit test before timeout?
+ */
+ enum OPTION_QUICK_QUIT have_quick_quit;
+
+ /**
+ * Collect statistics at the end?
+ */
+ enum OPTION_COLLECT_STATISTICS have_collect_statistics;
+
+ /**
+ * @brief Mark which values from the statistics service to collect at the end
+ * of the run
+ */
+ uint32_t stat_collect_flags;
} cur_test_run;
/**
}
+/**
+ * @brief Checks if given peer already received its statistics value from the
+ * statistics service.
+ *
+ * @param rps_peer the peer to check for
+ *
+ * @return #GNUNET_YES if so
+ *         #GNUNET_NO otherwise
+ */
+static int
+check_statistics_collect_completed_single_peer (
+    const struct RPSPeer *rps_peer)
+{
+  /* Complete iff every flag bit requested for this run is also set in the
+   * peer's collected set. */
+  if ((cur_test_run.stat_collect_flags & rps_peer->stat_collected_flags) ==
+      cur_test_run.stat_collect_flags)
+    return GNUNET_YES;
+  return GNUNET_NO;
+}
+/**
+ * @brief Checks if all peers already received their statistics value from the
+ * statistics service.
+ *
+ * @return #GNUNET_YES if so
+ *         #GNUNET_NO otherwise
+ */
+static int
+check_statistics_collect_completed (void)
+{
+  uint32_t i;
+
+  for (i = 0; i < num_peers; i++)
+  {
+    if (GNUNET_NO ==
+        check_statistics_collect_completed_single_peer (&rps_peers[i]))
+    {
+      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                  "At least Peer %" PRIu32 " did not yet receive all statistics values\n",
+                  i);
+      return GNUNET_NO;
+    }
+  }
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+              "All peers received their statistics values\n");
+  return GNUNET_YES;
+}
+
/**
* Task run on timeout to shut everything down.
*/
{
unsigned int i;
+ GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+ "Shutdown task scheduled, going down.\n");
in_shutdown = GNUNET_YES;
if (NULL != churn_task)
{
churn_task = NULL;
}
for (i = 0; i < num_peers; i++)
+ {
if (NULL != rps_peers[i].op)
GNUNET_TESTBED_operation_done (rps_peers[i].op);
- GNUNET_SCHEDULER_shutdown ();
+ if (NULL != cur_test_run.post_test)
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Executing post_test for peer %u\n", i);
+ cur_test_run.post_test (&rps_peers[i]);
+ }
+ }
+ /* If we do not collect statistics, shut down directly */
+ if (NO_COLLECT_STATISTICS == cur_test_run.have_collect_statistics ||
+ GNUNET_YES == check_statistics_collect_completed())
+ {
+ GNUNET_SCHEDULER_shutdown ();
+ }
}
{
struct OpListEntry *entry = (struct OpListEntry *) cb_cls;
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
if (NULL == pinfo || NULL != emsg)
{
GNUNET_log (GNUNET_ERROR_TYPE_ERROR, "Got Error: %s\n", emsg);
struct RPSPeer *rps_peer = cls;
struct GNUNET_RPS_Handle *rps = ca_result;
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
rps_peer->rps_handle = rps;
rps_peer->online = GNUNET_YES;
num_peers_online++;
return h;
}
+/**
+ * Called to open a connection to the peer's statistics service
+ *
+ * @param cls peer context (struct RPSPeer *)
+ * @param cfg configuration of the peer to connect to; will be available until
+ *          GNUNET_TESTBED_operation_done() is called on the operation returned
+ *          from GNUNET_TESTBED_service_connect()
+ * @return service handle to return in 'op_result', NULL on error
+ */
+static void *
+stat_connect_adapter (void *cls,
+                      const struct GNUNET_CONFIGURATION_Handle *cfg)
+{
+  struct RPSPeer *peer = cls;
+
+  /* The handle is also cached on the peer so later code (e.g. the
+   * post-test statistics requests) can use it directly. */
+  peer->stats_h = GNUNET_STATISTICS_create ("rps-profiler", cfg);
+  return peer->stats_h;
+}
+
+/**
+ * Called to disconnect from peer's statistics service
+ *
+ * Destroys the statistics handle (without forcing pending operations to
+ * complete) and clears the cached pointer on the peer.
+ *
+ * @param cls peer context (struct RPSPeer *)
+ * @param op_result service handle returned from the connect adapter
+ */
+static void
+stat_disconnect_adapter (void *cls, void *op_result)
+{
+  struct RPSPeer *peer = cls;
+
+  GNUNET_STATISTICS_destroy (op_result, GNUNET_NO);
+  peer->stats_h = NULL;
+}
+
+/**
+ * Called after successfully opening a connection to a peer's statistics
+ * service.
+ *
+ * Currently only checks for errors; no statistics watches are registered
+ * here.
+ *
+ * @param cls the callback closure from functions generating an operation
+ * @param op the operation that has been finished
+ * @param ca_result the service handle returned from GNUNET_TESTBED_ConnectAdapter()
+ * @param emsg error message in case the operation has failed; will be NULL if
+ *          operation has executed successfully.
+ */
+static void
+stat_complete_cb (void *cls, struct GNUNET_TESTBED_Operation *op,
+                  void *ca_result, const char *emsg )
+{
+  if (NULL != emsg)
+  {
+    GNUNET_break (0);
+    return;
+  }
+}
+
/**
* Adapter function called to destroy connection to
rps_peer->num_recv_ids++;
}
- if (0 == evaluate ())
+ if (0 == evaluate () && HAVE_QUICK_QUIT == cur_test_run.have_quick_quit)
{
- GNUNET_SCHEDULER_shutdown ();
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test succeeded before timeout\n");
+ GNUNET_assert (NULL != shutdown_task);
+ GNUNET_SCHEDULER_cancel (shutdown_task);
+ shutdown_task = GNUNET_SCHEDULER_add_now (&shutdown_op, NULL);
+ GNUNET_assert (NULL!= shutdown_task);
}
}
rps_peer->num_ids_to_request = 1;
}
+
+/**
+ * @brief Set peers to (non-)malicious before execution
+ *
+ * Of signature #PreTest
+ *
+ * @param rps_peer the peer to set (non-) malicious
+ * @param h the handle to the service
+ */
static void
-mal_pre (void *cls, struct GNUNET_RPS_Handle *h)
+mal_pre (struct RPSPeer *rps_peer, struct GNUNET_RPS_Handle *h)
{
#ifdef ENABLE_MALICIOUS
uint32_t num_mal_peers;
- struct RPSPeer *rps_peer = (struct RPSPeer *) cls;
GNUNET_assert ( (1 >= portion) &&
(0 < portion) );
{
uint32_t num_mal_peers;
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
#ifdef ENABLE_MALICIOUS
GNUNET_assert ( (1 >= portion) &&
(0 < portion) );
static void
single_req_cb (struct RPSPeer *rps_peer)
{
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
schedule_missing_requests (rps_peer);
}
static void
delay_req_cb (struct RPSPeer *rps_peer)
{
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
schedule_missing_requests (rps_peer);
}
static void
seed_cb (struct RPSPeer *rps_peer)
{
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
GNUNET_SCHEDULER_add_delayed (
GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10),
seed_peers, rps_peer);
static void
seed_big_cb (struct RPSPeer *rps_peer)
{
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
// TODO test seeding > GNUNET_MAX_MESSAGE_SIZE peers
GNUNET_SCHEDULER_add_delayed (
GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 2),
static void
seed_req_cb (struct RPSPeer *rps_peer)
{
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
GNUNET_SCHEDULER_add_delayed (
GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 2),
seed_peers, rps_peer);
static void
req_cancel_cb (struct RPSPeer *rps_peer)
{
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
schedule_missing_requests (rps_peer);
GNUNET_SCHEDULER_add_delayed (
GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS,
cancel_request_cb, rps_peer);
}
+/***********************************
+ * CHURN
+***********************************/
+
+static void
+churn (void *cls);
+
+/**
+ * @brief Starts churn
+ *
+ * Has signature of #MainTest
+ *
+ * This is not implemented too nicely as this is called for each peer, but we
+ * only need to call it once. (Yes we check that we only schedule the task
+ * once.)
+ *
+ * @param rps_peer The peer it's called for
+ */
+static void
+churn_test_cb (struct RPSPeer *rps_peer)
+{
+  if (GNUNET_YES == in_shutdown)
+    return;
+
+  /* Kick off the churn task exactly once, and only if this test run wants
+   * churn at all. */
+  if ((HAVE_CHURN == cur_test_run.have_churn) &&
+      (NULL == churn_task))
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                "Starting churn task\n");
+    churn_task = GNUNET_SCHEDULER_add_delayed (
+        GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 5),
+        &churn,
+        NULL);
+  }
+  else
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                "Not starting churn task\n");
+  }
+  schedule_missing_requests (rps_peer);
+}
+
/***********************************
* PROFILER
***********************************/
// FIXME
struct OpListEntry *entry = cls;
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
GNUNET_TESTBED_operation_done (entry->op);
if (NULL != emsg)
{
num_peers_online += entry->delta;
- if (0 > entry->delta)
+ if (PEER_GO_OFFLINE == entry->delta)
{ /* Peer hopefully just went offline */
if (GNUNET_YES != rps_peers[entry->index].online)
{
rps_peers[entry->index].online = GNUNET_NO;
}
- else if (0 < entry->delta)
+ /* Must compare for equality: PEER_GO_ONLINE is +1, so the original
+  * 'PEER_GO_ONLINE < entry->delta' was never true and every peer coming
+  * online fell through to the error branch below. */
+ else if (PEER_GO_ONLINE == entry->delta)
{ /* Peer hopefully just went online */
if (GNUNET_NO != rps_peers[entry->index].online)
{
}
rps_peers[entry->index].online = GNUNET_YES;
}
+ else
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
+ "Invalid value for delta: %i\n", entry->delta);
+ GNUNET_break (0);
+ }
GNUNET_CONTAINER_DLL_remove (oplist_head, oplist_tail, entry);
+ rps_peers[entry->index].entry_op_manage = NULL;
GNUNET_free (entry);
//if (num_peers_in_round[current_round] == peers_running)
// run_round ();
}
+/**
+ * @brief Set the rps-service up or down for a specific peer
+ *
+ * @param i index of action
+ * @param j index of peer
+ * @param delta (#PEER_ONLINE_DELTA) down (-1) or up (1)
+ * @param prob_go_on_off the probability of the action
+ */
static void
-manage_service_wrapper (unsigned int i, unsigned int j, int delta,
- double prob_go_on_off)
+manage_service_wrapper (unsigned int i, unsigned int j,
+ enum PEER_ONLINE_DELTA delta,
+ double prob_go_on_off)
{
- struct OpListEntry *entry;
+ struct OpListEntry *entry = NULL;
uint32_t prob;
+ /* make sure that management operation is not already scheduled */
+ if (NULL != rps_peers[j].entry_op_manage)
+ {
+ return;
+ }
+
prob = GNUNET_CRYPTO_random_u32 (GNUNET_CRYPTO_QUALITY_WEAK,
UINT32_MAX);
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
i,
j,
GNUNET_i2s (rps_peers[j].peer_id),
- (0 > delta) ? "online" : "offline");
+ (PEER_GO_ONLINE == delta) ? "online" : "offline");
if (prob < prob_go_on_off * UINT32_MAX)
{
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
"%s goes %s\n",
GNUNET_i2s (rps_peers[j].peer_id),
- (0 > delta) ? "offline" : "online");
+ (PEER_GO_OFFLINE == delta) ? "offline" : "online");
- if (0 > delta)
+ if (PEER_GO_OFFLINE == delta)
cancel_pending_req_rep (&rps_peers[j]);
entry = make_oplist_entry ();
entry->delta = delta;
"rps",
&churn_cb,
entry,
- (0 > delta) ? 0 : 1);
+ (PEER_GO_OFFLINE == delta) ? 0 : 1);
}
+ rps_peers[j].entry_op_manage = entry;
}
double portion_go_online;
double portion_go_offline;
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Churn function executing\n");
+
+ churn_task = NULL; /* Should be invalid by now */
+
/* Compute the probability for an online peer to go offline
* this round */
portion_online = num_peers_online * 1.0 / num_peers;
static void
profiler_cb (struct RPSPeer *rps_peer)
{
+ if (GNUNET_YES == in_shutdown)
+ {
+ return;
+ }
+
/* Start churn */
- if (NULL == churn_task)
+ if (HAVE_CHURN == cur_test_run.have_churn && NULL == churn_task)
{
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Starting churn task\n");
churn_task = GNUNET_SCHEDULER_add_delayed (
GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 5),
churn,
NULL);
+ } else {
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Not starting churn task\n");
}
/* Only request peer ids at one peer.
return evaluate ();
}
+/**
+ * Continuation called by #GNUNET_STATISTICS_get() functions.
+ *
+ * Remembers that this specific statistics value was received for this peer.
+ * Checks whether all peers received their statistics yet.
+ * Issues the shutdown.
+ *
+ * @param cls closure (struct STATcls *); freed here in all cases
+ * @param success #GNUNET_OK if statistics were
+ *        successfully obtained, #GNUNET_SYSERR if not.
+ */
+void
+post_test_shutdown_ready_cb (void *cls,
+                             int success)
+{
+  struct STATcls *stat_cls = (struct STATcls *) cls;
+  struct RPSPeer *rps_peer = stat_cls->rps_peer;
+
+  if (GNUNET_OK == success)
+  {
+    /* Remember that we got this particular value for this peer */
+    rps_peer->stat_collected_flags |= stat_cls->stat_type;
+  }
+  else
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
+                "Peer %u did not receive statistics value\n",
+                rps_peer->index);
+    GNUNET_break (0);
+  }
+  /* Free the closure exactly once; the previous version freed it a second
+   * time further down on the failure path (double free). */
+  GNUNET_free (stat_cls);
+
+  if (NULL != rps_peer->stat_op &&
+      GNUNET_YES == check_statistics_collect_completed_single_peer (rps_peer))
+  {
+    GNUNET_TESTBED_operation_done (rps_peer->stat_op);
+  }
+
+  if (GNUNET_YES == check_statistics_collect_completed())
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                "Shutting down\n");
+    GNUNET_SCHEDULER_shutdown ();
+  }
+}
+
+/**
+ * @brief Converts #STAT_TYPE enum to the equivalent string representation that
+ * is stored with the statistics service.
+ *
+ * @note The returned strings must match byte-for-byte the names under which
+ *       the rps service publishes its values (including the "replys"
+ *       spelling) - do not "fix" them here.
+ *
+ * @param stat_type #STAT_TYPE enum
+ *
+ * @return string representation that matches statistics value
+ */
+char *
+stat_type_2_str (enum STAT_TYPE stat_type)
+{
+  switch (stat_type)
+  {
+    case STAT_TYPE_ROUNDS:
+      return "# rounds";
+    case STAT_TYPE_BLOCKS:
+      return "# rounds blocked";
+    case STAT_TYPE_BLOCKS_MANY_PUSH:
+      return "# rounds blocked - too many pushes";
+    case STAT_TYPE_BLOCKS_FEW_PUSH:
+      return "# rounds blocked - no pushes";
+    case STAT_TYPE_BLOCKS_FEW_PULL:
+      return "# rounds blocked - no pull replies";
+    case STAT_TYPE_ISSUED_PUSH_SEND:
+      return "# push send issued";
+    case STAT_TYPE_ISSUED_PULL_REQ:
+      return "# pull request send issued";
+    case STAT_TYPE_ISSUED_PULL_REP:
+      return "# pull reply send issued";
+    case STAT_TYPE_SENT_PUSH_SEND:
+      return "# pushes sent";
+    case STAT_TYPE_SENT_PULL_REQ:
+      return "# pull requests sent";
+    case STAT_TYPE_SENT_PULL_REP:
+      return "# pull replys sent";
+    case STAT_TYPE_RECV_PUSH_SEND:
+      return "# push message received";
+    case STAT_TYPE_RECV_PULL_REQ:
+      return "# pull request message received";
+    case STAT_TYPE_RECV_PULL_REP:
+      return "# pull reply messages received";
+    case STAT_TYPE_MAX:
+    default:
+      return "ERROR";
+  }
+}
+
+/**
+ * Callback function to process statistic values.
+ *
+ * Currently only logs the received value; the closure identifies which
+ * statistic the value belongs to.
+ *
+ * @param cls closure (const struct STATcls * identifying peer and statistic)
+ * @param subsystem name of subsystem that created the statistic (unused here)
+ * @param name the name of the datum (unused here; type comes from @a cls)
+ * @param value the current value
+ * @param is_persistent #GNUNET_YES if the value is persistent, #GNUNET_NO if not
+ * @return #GNUNET_OK to continue, #GNUNET_SYSERR to abort iteration
+ */
+int
+stat_iterator (void *cls,
+    const char *subsystem,
+    const char *name,
+    uint64_t value,
+    int is_persistent)
+{
+  const struct STATcls *stat_cls = (const struct STATcls *) cls;
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Got stat value: %s - %" PRIu64 "\n",
+      stat_type_2_str (stat_cls->stat_type),
+      value);
+  return GNUNET_OK;
+}
+
+/**
+ * @brief Post-test task of the profiler: request the selected statistics
+ * values from every peer's statistics service.
+ *
+ * Of signature #PostTest. Replies are processed by #stat_iterator; completion
+ * is tracked by #post_test_shutdown_ready_cb, which eventually triggers the
+ * shutdown.
+ *
+ * @param rps_peer the peer whose statistics to request
+ */
+void post_profiler (struct RPSPeer *rps_peer)
+{
+  struct STATcls *stat_cls;
+  uint32_t stat_type;
+
+  if (COLLECT_STATISTICS != cur_test_run.have_collect_statistics)
+  {
+    return;
+  }
+
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+      "Going to request statistic values with mask 0x%" PRIx32 "\n",
+      cur_test_run.stat_collect_flags);
+
+  /* Walk all single-bit statistic flags and request each selected one */
+  for (stat_type = STAT_TYPE_ROUNDS;
+       stat_type < STAT_TYPE_MAX;
+       stat_type = stat_type << 1)
+  {
+    if (0 != (stat_type & cur_test_run.stat_collect_flags))
+    {
+      /* Freed by post_test_shutdown_ready_cb() */
+      stat_cls = GNUNET_malloc (sizeof (struct STATcls));
+      stat_cls->rps_peer = rps_peer;
+      stat_cls->stat_type = stat_type;
+      GNUNET_STATISTICS_get (rps_peer->stats_h,
+                             "rps",
+                             stat_type_2_str (stat_type),
+                             &post_test_shutdown_ready_cb,
+                             &stat_iterator,
+                             stat_cls);
+      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                  "Requested statistics for %s (peer %" PRIu32 ")\n",
+                  stat_type_2_str (stat_type),
+                  rps_peer->index);
+    }
+  }
+}
+
/***********************************************************************
* /Definition of tests
struct OpListEntry *entry;
uint32_t num_mal_peers;
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "RUN was called\n");
+
+ /* Check whether we timed out */
+ if (n_peers != num_peers ||
+ NULL == peers ||
+ 0 == links_succeeded)
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Going down due to args (eg. timeout)\n");
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "\tn_peers: %u\n", n_peers);
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "\tnum_peers: %" PRIu32 "\n", num_peers);
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "\tpeers: %p\n", peers);
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "\tlinks_succeeded: %u\n", links_succeeded);
+ GNUNET_SCHEDULER_shutdown ();
+ return;
+ }
+
+
+ /* Initialize peers */
testbed_peers = peers;
num_peers_online = 0;
for (i = 0; i < num_peers; i++)
entry);
}
+ /* Bring peers up */
num_mal_peers = round (portion * num_peers);
GNUNET_assert (num_peers == n_peers);
for (i = 0; i < n_peers; i++)
&rps_disconnect_adapter,
&rps_peers[i]);
}
+ /* Connect all peers to statistics service */
+ if (COLLECT_STATISTICS == cur_test_run.have_collect_statistics)
+ {
+ rps_peers[i].stat_op =
+ GNUNET_TESTBED_service_connect (NULL,
+ peers[i],
+ "statistics",
+ stat_complete_cb,
+ &rps_peers[i],
+ &stat_connect_adapter,
+ &stat_disconnect_adapter,
+ &rps_peers[i]);
+ }
}
if (NULL != churn_task)
GNUNET_SCHEDULER_cancel (churn_task);
- GNUNET_SCHEDULER_add_delayed (timeout, &shutdown_op, NULL);
+ shutdown_task = GNUNET_SCHEDULER_add_delayed (timeout, &shutdown_op, NULL);
}
cur_test_run.pre_test = NULL;
cur_test_run.reply_handle = default_reply_handle;
cur_test_run.eval_cb = default_eval_cb;
+ cur_test_run.post_test = NULL;
+ cur_test_run.have_churn = HAVE_CHURN;
+ cur_test_run.have_collect_statistics = NO_COLLECT_STATISTICS;
+ cur_test_run.stat_collect_flags = 0;
churn_task = NULL;
timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 30);
GNUNET_log (GNUNET_ERROR_TYPE_ERROR, "Test single request\n");
cur_test_run.name = "test-rps-single-req";
cur_test_run.main_test = single_req_cb;
+ cur_test_run.have_churn = HAVE_NO_CHURN;
}
else if (strstr (argv[0], "_delayed_reqs") != NULL)
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test delayed requests\n");
cur_test_run.name = "test-rps-delayed-reqs";
cur_test_run.main_test = delay_req_cb;
+ cur_test_run.have_churn = HAVE_NO_CHURN;
}
else if (strstr (argv[0], "_seed_big") != NULL)
cur_test_run.name = "test-rps-seed-big";
cur_test_run.main_test = seed_big_cb;
cur_test_run.eval_cb = no_eval;
+ cur_test_run.have_churn = HAVE_NO_CHURN;
timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10);
}
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test seeding and requesting on a single peer\n");
cur_test_run.name = "test-rps-single-peer-seed";
cur_test_run.main_test = single_peer_seed_cb;
+ cur_test_run.have_churn = HAVE_NO_CHURN;
}
else if (strstr (argv[0], "_seed_request") != NULL)
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test seeding and requesting on multiple peers\n");
cur_test_run.name = "test-rps-seed-request";
cur_test_run.main_test = seed_req_cb;
+ cur_test_run.have_churn = HAVE_NO_CHURN;
}
else if (strstr (argv[0], "_seed") != NULL)
cur_test_run.name = "test-rps-seed";
cur_test_run.main_test = seed_cb;
cur_test_run.eval_cb = no_eval;
+ cur_test_run.have_churn = HAVE_NO_CHURN;
}
else if (strstr (argv[0], "_req_cancel") != NULL)
num_peers = 1;
cur_test_run.main_test = req_cancel_cb;
cur_test_run.eval_cb = no_eval;
+ cur_test_run.have_churn = HAVE_NO_CHURN;
+ timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10);
+ }
+
+ else if (strstr (argv[0], "_churn") != NULL)
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test churn\n");
+ cur_test_run.name = "test-rps-churn";
+ num_peers = 5;
+ cur_test_run.init_peer = default_init_peer;
+ cur_test_run.main_test = churn_test_cb;
+ cur_test_run.reply_handle = default_reply_handle;
+ cur_test_run.eval_cb = default_eval_cb;
+ cur_test_run.have_churn = HAVE_CHURN;
+ cur_test_run.have_quick_quit = HAVE_NO_QUICK_QUIT;
timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10);
}
cur_test_run.main_test = profiler_cb;
cur_test_run.reply_handle = profiler_reply_handle;
cur_test_run.eval_cb = profiler_eval;
+ cur_test_run.post_test = post_profiler;
cur_test_run.request_interval = 2;
cur_test_run.num_requests = 5;
- timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 90);
+ cur_test_run.have_churn = HAVE_CHURN;
+ cur_test_run.have_quick_quit = HAVE_NO_QUICK_QUIT;
+ cur_test_run.have_collect_statistics = COLLECT_STATISTICS;
+ cur_test_run.stat_collect_flags = STAT_TYPE_ROUNDS |
+ STAT_TYPE_BLOCKS |
+ STAT_TYPE_BLOCKS_MANY_PUSH |
+ STAT_TYPE_BLOCKS_FEW_PUSH |
+ STAT_TYPE_BLOCKS_FEW_PULL |
+ STAT_TYPE_ISSUED_PUSH_SEND |
+ STAT_TYPE_ISSUED_PULL_REQ |
+ STAT_TYPE_ISSUED_PULL_REP |
+ STAT_TYPE_SENT_PUSH_SEND |
+ STAT_TYPE_SENT_PULL_REQ |
+ STAT_TYPE_SENT_PULL_REP |
+ STAT_TYPE_RECV_PUSH_SEND |
+ STAT_TYPE_RECV_PULL_REQ |
+ STAT_TYPE_RECV_PULL_REP;
+ timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 300);
/* 'Clean' directory */
(void) GNUNET_DISK_directory_remove ("/tmp/rps/");
with the malicious portion */
ok = 1;
- (void) GNUNET_TESTBED_test_run (cur_test_run.name,
- "test_rps.conf",
- num_peers,
- 0, NULL, NULL,
- &run, NULL);
+ ret_value = GNUNET_TESTBED_test_run (cur_test_run.name,
+ "test_rps.conf",
+ num_peers,
+ 0, NULL, NULL,
+ &run, NULL);
+ GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+ "_test_run returned.\n");
+ if (GNUNET_OK != ret_value)
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+ "Test did not run successfully!\n");
+ }
ret_value = cur_test_run.eval_cb();
- GNUNET_free (rps_peers );
+ GNUNET_free (rps_peers);
GNUNET_free (rps_peer_ids);
GNUNET_CONTAINER_multipeermap_destroy (peer_map);
return ret_value;
}
-/* end of test_rps_multipeer.c */
+/* end of test_rps.c */