This file is part of GNUnet.
Copyright (C) 2009, 2012 GNUnet e.V.
- GNUnet is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3, or (at your
- option) any later version.
+ GNUnet is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Affero General Public License as published
+ by the Free Software Foundation, either version 3 of the License,
+ or (at your option) any later version.
GNUnet is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GNUnet; see the file COPYING. If not, write to the
- Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- Boston, MA 02110-1301, USA.
+ Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file rps/test_rps.c
*/
static uint32_t num_peers;
+/**
+ * How long do we run the test?
+ * In seconds.
+ */
+static uint32_t timeout_s;
+
/**
* How long do we run the test?
*/
* Pending operation on that peer
*/
const struct OpListEntry *entry_op_manage;
+
+ /**
+ * Testbed operation to connect to statistics service
+ */
+ struct GNUNET_TESTBED_Operation *stat_op;
+
+ /**
+ * Handle to the statistics service
+ */
+ struct GNUNET_STATISTICS_Handle *stats_h;
+
+ /**
+ * @brief flags to indicate which statistics values have been already
+ * collected from the statistics service.
+ * Used to check whether we are able to shutdown.
+ */
+ uint32_t stat_collected_flags;
+
+ /**
+ * @brief File name of the file the stats are finally written to
+ */
+ const char *file_name_stats;
+
+ /**
+ * @brief File name of the file the stats are finally written to
+ */
+ const char *file_name_probs;
+
+ /**
+ * @brief The current view
+ */
+ struct GNUNET_PeerIdentity *cur_view;
+
+ /**
+ * @brief Number of peers in the #cur_view.
+ */
+ uint32_t cur_view_count;
+
+ /**
+ * @brief Number of occurrences in other peer's view
+ */
+ uint32_t count_in_views;
+
+ /**
+ * @brief statistics values
+ */
+ uint64_t num_rounds;
+ uint64_t num_blocks;
+ uint64_t num_blocks_many_push;
+ uint64_t num_blocks_no_push;
+ uint64_t num_blocks_no_pull;
+ uint64_t num_blocks_many_push_no_pull;
+ uint64_t num_blocks_no_push_no_pull;
+ uint64_t num_issued_push;
+ uint64_t num_issued_pull_req;
+ uint64_t num_issued_pull_rep;
+ uint64_t num_sent_push;
+ uint64_t num_sent_pull_req;
+ uint64_t num_sent_pull_rep;
+ uint64_t num_recv_push;
+ uint64_t num_recv_pull_req;
+ uint64_t num_recv_pull_rep;
+};
+
/**
 * @brief Statistics values collected from the statistics service at the end
 * of a run.
 *
 * The constants are single-bit flags: they are ORed into
 * cur_test_run.stat_collect_flags (what to collect) and into a peer's
 * stat_collected_flags (what has arrived).  The trailing comment on each
 * entry gives its 1-based bit position.  #STAT_TYPE_MAX is a sentinel used
 * as loop bound, not a real statistics value.
 */
enum STAT_TYPE
{
  STAT_TYPE_ROUNDS = 0x1, /* 1 */
  STAT_TYPE_BLOCKS = 0x2, /* 2 */
  STAT_TYPE_BLOCKS_MANY_PUSH = 0x4, /* 3 */
  STAT_TYPE_BLOCKS_NO_PUSH = 0x8, /* 4 */
  STAT_TYPE_BLOCKS_NO_PULL = 0x10, /* 5 */
  STAT_TYPE_BLOCKS_MANY_PUSH_NO_PULL = 0x20, /* 6 */
  STAT_TYPE_BLOCKS_NO_PUSH_NO_PULL = 0x40, /* 7 */
  STAT_TYPE_ISSUED_PUSH_SEND = 0x80, /* 8 */
  STAT_TYPE_ISSUED_PULL_REQ = 0x100, /* 9 */
  STAT_TYPE_ISSUED_PULL_REP = 0x200, /* 10 */
  STAT_TYPE_SENT_PUSH_SEND = 0x400, /* 11 */
  STAT_TYPE_SENT_PULL_REQ = 0x800, /* 12 */
  STAT_TYPE_SENT_PULL_REP = 0x1000, /* 13 */
  STAT_TYPE_RECV_PUSH_SEND = 0x2000, /* 14 */
  STAT_TYPE_RECV_PULL_REQ = 0x4000, /* 15 */
  STAT_TYPE_RECV_PULL_REP = 0x8000, /* 16 */
  STAT_TYPE_MAX = 0x80000000, /* 32 */
};
+
/**
 * @brief Closure for a single pending statistics request.
 *
 * Ties one GNUNET_STATISTICS_get() call to the peer and the statistics
 * value it asks for; freed in the request's continuation.
 */
struct STATcls
{
  /** Peer the requested statistics value belongs to */
  struct RPSPeer *rps_peer;

  /** Which statistics value (single #STAT_TYPE bit) is requested */
  enum STAT_TYPE stat_type;
};
*/
static unsigned int num_peers_online;
+/**
+ * @brief The added sizes of the peer's views
+ */
+static unsigned int view_sizes;
+
/**
* Return value from 'main'.
*/
static int ok;
+/**
+ * Identifier for the churn task that runs periodically
+ */
+static struct GNUNET_SCHEDULER_Task *post_test_task;
+
/**
* Identifier for the churn task that runs periodically
*/
/**
* Called directly before disconnecting from the service
*/
-typedef void (*PostTest) (void *cls, struct GNUNET_RPS_Handle *h);
+typedef void (*PostTest) (struct RPSPeer *peer);
/**
* Function called after disconnect to evaluate test success
HAVE_NO_QUICK_QUIT,
};
/**
 * @brief Do we collect statistics at the end?
 *
 * Stored in cur_test_run.have_collect_statistics.
 */
enum OPTION_COLLECT_STATISTICS {
  /**
   * @brief We collect statistics at the end
   */
  COLLECT_STATISTICS,

  /**
   * @brief We do not collect statistics at the end
   */
  NO_COLLECT_STATISTICS,
};
+
/**
 * @brief Do we collect views during run?
 *
 * Stored in cur_test_run.have_collect_view.
 */
enum OPTION_COLLECT_VIEW {
  /**
   * @brief We collect the view during the run
   */
  COLLECT_VIEW,

  /**
   * @brief We do not collect the view during the run
   */
  NO_COLLECT_VIEW,
};
+
/**
* Structure to define a single test
*/
* Quit test before timeout?
*/
enum OPTION_QUICK_QUIT have_quick_quit;
+
+ /**
+ * Collect statistics at the end?
+ */
+ enum OPTION_COLLECT_STATISTICS have_collect_statistics;
+
+ /**
+ * Collect view during run?
+ */
+ enum OPTION_COLLECT_VIEW have_collect_view;
+
+ /**
+ * @brief Mark which values from the statistics service to collect at the end
+ * of the run
+ */
+ uint32_t stat_collect_flags;
} cur_test_run;
+/**
+ * Did we finish the test?
+ */
+static int post_test;
+
/**
* Are we shutting down?
*/
}
+/**
+ * @brief Checks if given peer already received its statistics value from the
+ * statistics service.
+ *
+ * @param rps_peer the peer to check for
+ *
+ * @return #GNUNET_YES if so
+ * #GNUNET_NO otherwise
+ */
+static int check_statistics_collect_completed_single_peer (
+ const struct RPSPeer *rps_peer)
+{
+ if (cur_test_run.stat_collect_flags !=
+ (cur_test_run.stat_collect_flags &
+ rps_peer->stat_collected_flags))
+ {
+ return GNUNET_NO;
+ }
+ return GNUNET_YES;
+}
/**
 * @brief Checks if all peers already received their statistics value from the
 * statistics service.
 *
 * Bails out at the first peer that is still missing at least one of the
 * requested values.
 *
 * @return #GNUNET_YES if so
 *         #GNUNET_NO otherwise
 */
static int check_statistics_collect_completed ()
{
  uint32_t i;

  for (i = 0; i < num_peers; i++)
  {
    if (GNUNET_NO == check_statistics_collect_completed_single_peer (&rps_peers[i]))
    {
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                  "At least Peer %" PRIu32 " did not yet receive all statistics values\n",
                  i);
      return GNUNET_NO;
    }
  }
  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
              "All peers received their statistics values\n");
  return GNUNET_YES;
}
+
/**
* Task run on timeout to shut everything down.
*/
GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
"Shutdown task scheduled, going down.\n");
in_shutdown = GNUNET_YES;
+ if (NULL != post_test_task)
+ {
+ GNUNET_SCHEDULER_cancel (post_test_task);
+ }
+ if (NULL != churn_task)
+ {
+ GNUNET_SCHEDULER_cancel (churn_task);
+ churn_task = NULL;
+ }
+ for (i = 0; i < num_peers; i++)
+ {
+ if (NULL != rps_peers[i].rps_handle)
+ {
+ GNUNET_RPS_disconnect (rps_peers[i].rps_handle);
+ }
+ if (NULL != rps_peers[i].op)
+ {
+ GNUNET_TESTBED_operation_done (rps_peers[i].op);
+ }
+ }
+}
+
+
/**
 * Task run on timeout to collect statistics and potentially shut down.
 *
 * Cancels churn, finishes each peer's testbed operation, runs the
 * test-specific post_test hook per peer, and shuts down immediately only
 * when no statistics need to be collected (otherwise shutdown is deferred
 * until all statistics values arrived).
 *
 * @param cls unused
 */
static void
post_test_op (void *cls)
{
  unsigned int i;

  post_test_task = NULL;
  /* From here on callbacks check #post_test and return early. */
  post_test = GNUNET_YES;
  GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
              "Post test task scheduled, going down.\n");
  if (NULL != churn_task)
  {
    GNUNET_SCHEDULER_cancel (churn_task);
    churn_task = NULL;
  }
  for (i = 0; i < num_peers; i++)
  {
    if (NULL != rps_peers[i].op)
    {
      GNUNET_TESTBED_operation_done (rps_peers[i].op);
      rps_peers[i].op = NULL;
    }
    if (NULL != cur_test_run.post_test)
    {
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Executing post_test for peer %u\n", i);
      cur_test_run.post_test (&rps_peers[i]);
    }
  }
  /* If we do not collect statistics, shut down directly */
  if (NO_COLLECT_STATISTICS == cur_test_run.have_collect_statistics ||
      GNUNET_YES == check_statistics_collect_completed())
  {
    GNUNET_SCHEDULER_shutdown ();
  }
}
unsigned int amount;
unsigned int i;
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
+ {
+ return;
+ }
+
+ GNUNET_assert (NULL != peer->rps_handle);
+
// TODO if malicious don't seed mal peers
amount = round (.5 * num_peers);
{
struct OpListEntry *entry = (struct OpListEntry *) cb_cls;
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
struct RPSPeer *rps_peer = cls;
struct GNUNET_RPS_Handle *rps = ca_result;
- if (GNUNET_YES == in_shutdown)
+ GNUNET_assert (NULL != ca_result);
+
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
struct GNUNET_RPS_Handle *h;
h = GNUNET_RPS_connect (cfg);
+ GNUNET_assert (NULL != h);
if (NULL != cur_test_run.pre_test)
cur_test_run.pre_test (cls, h);
+ GNUNET_assert (NULL != h);
return h;
}
/**
 * Called to open a connection to the peer's statistics
 *
 * @param cls peer context
 * @param cfg configuration of the peer to connect to; will be available until
 *          GNUNET_TESTBED_operation_done() is called on the operation returned
 *          from GNUNET_TESTBED_service_connect()
 * @return service handle to return in 'op_result', NULL on error
 */
static void *
stat_connect_adapter (void *cls,
                      const struct GNUNET_CONFIGURATION_Handle *cfg)
{
  struct RPSPeer *peer = cls;

  /* Handle is stored on the peer so post_profiler() can issue
   * GNUNET_STATISTICS_get() requests against it later. */
  peer->stats_h = GNUNET_STATISTICS_create ("rps-profiler", cfg);
  return peer->stats_h;
}
+
/**
 * Called to disconnect from peer's statistics service
 *
 * Destroys the statistics handle without flushing pending values
 * (#GNUNET_NO) and clears the peer's reference to it.
 *
 * @param cls peer context
 * @param op_result service handle returned from the connect adapter
 */
static void
stat_disconnect_adapter (void *cls, void *op_result)
{
  struct RPSPeer *peer = cls;

  /* Watch registration is currently disabled (see stat_complete_cb),
   * so there is nothing to cancel here. */
  //GNUNET_break (GNUNET_OK == GNUNET_STATISTICS_watch_cancel
  //              (peer->stats_h, "core", "# peers connected",
  //               stat_iterator, peer));
  //GNUNET_break (GNUNET_OK == GNUNET_STATISTICS_watch_cancel
  //              (peer->stats_h, "nse", "# peers connected",
  //               stat_iterator, peer));
  GNUNET_STATISTICS_destroy (op_result, GNUNET_NO);
  peer->stats_h = NULL;
}
+
/**
 * Called after successfully opening a connection to a peer's statistics
 * service; we register statistics monitoring for CORE and NSE here.
 *
 * NOTE(review): the watch registrations are commented out, so currently
 * this callback only verifies that the connect succeeded.
 *
 * @param cls the callback closure from functions generating an operation
 * @param op the operation that has been finished
 * @param ca_result the service handle returned from GNUNET_TESTBED_ConnectAdapter()
 * @param emsg error message in case the operation has failed; will be NULL if
 *          operation has executed successfully.
 */
static void
stat_complete_cb (void *cls, struct GNUNET_TESTBED_Operation *op,
                  void *ca_result, const char *emsg )
{
  //struct GNUNET_STATISTICS_Handle *sh = ca_result;
  //struct RPSPeer *peer = (struct RPSPeer *) cls;

  if (NULL != emsg)
  {
    GNUNET_break (0);
    return;
  }
  //GNUNET_break (GNUNET_OK == GNUNET_STATISTICS_watch
  //              (sh, "core", "# peers connected",
  //               stat_iterator, peer));
  //GNUNET_break (GNUNET_OK == GNUNET_STATISTICS_watch
  //              (sh, "nse", "# peers connected",
  //               stat_iterator, peer));
}
+
/**
* Adapter function called to destroy connection to
if (0 == evaluate () && HAVE_QUICK_QUIT == cur_test_run.have_quick_quit)
{
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test succeeded before timeout\n");
- GNUNET_assert (NULL != shutdown_task);
- GNUNET_SCHEDULER_cancel (shutdown_task);
- shutdown_task = GNUNET_SCHEDULER_add_now (&shutdown_op, NULL);
- GNUNET_assert (NULL!= shutdown_task);
+ GNUNET_assert (NULL != post_test_task);
+ GNUNET_SCHEDULER_cancel (post_test_task);
+ post_test_task = GNUNET_SCHEDULER_add_now (&post_test_op, NULL);
+ GNUNET_assert (NULL!= post_test_task);
}
}
struct RPSPeer *rps_peer;
struct PendingReply *pending_rep;
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
return;
rps_peer = pending_req->rps_peer;
GNUNET_assert (1 <= rps_peer->num_pending_reqs);
struct RPSPeer *rps_peer = cls;
struct PendingReply *pending_rep;
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
return;
pending_rep = rps_peer->pending_rep_head;
GNUNET_assert (1 <= rps_peer->num_pending_reps);
{
uint32_t num_mal_peers;
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
static void
single_req_cb (struct RPSPeer *rps_peer)
{
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
static void
delay_req_cb (struct RPSPeer *rps_peer)
{
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
static void
seed_cb (struct RPSPeer *rps_peer)
{
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
static void
seed_big_cb (struct RPSPeer *rps_peer)
{
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
static void
seed_req_cb (struct RPSPeer *rps_peer)
{
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
static void
req_cancel_cb (struct RPSPeer *rps_peer)
{
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
static void
churn_test_cb (struct RPSPeer *rps_peer)
{
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
// FIXME
struct OpListEntry *entry = cls;
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
enum PEER_ONLINE_DELTA delta,
double prob_go_on_off)
{
- struct OpListEntry *entry;
+ struct OpListEntry *entry = NULL;
uint32_t prob;
/* make sure that management operation is not already scheduled */
&churn_cb,
entry,
(PEER_GO_OFFLINE == delta) ? 0 : 1);
+ rps_peers[j].entry_op_manage = entry;
}
- rps_peers[j].entry_op_manage = entry;
}
double portion_go_online;
double portion_go_offline;
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
static void
profiler_cb (struct RPSPeer *rps_peer)
{
- if (GNUNET_YES == in_shutdown)
+ if (GNUNET_YES == in_shutdown || GNUNET_YES == post_test)
{
return;
}
return evaluate ();
}
/**
 * @brief Factorial of @a x.
 *
 * NOTE: overflows uint32_t for x > 12; callers pass view-size-derived
 * arguments that are expected to stay small.
 *
 * @param x value to take the factorial of
 * @return x!  (with 0! == 1)
 */
static uint32_t fac (uint32_t x)
{
  uint32_t result = 1;

  /* iterative form avoids recursion; also fixes fac(0), which the
   * recursive version returned as 0 instead of 1 */
  while (x > 1)
  {
    result *= x;
    x--;
  }
  return result;
}

/**
 * @brief Binomial coefficient "n choose k".
 *
 * @param n size of the set
 * @param k size of the subset
 * @return number of k-element subsets of an n-element set;
 *         0 if k > n
 */
static uint32_t binom (uint32_t n, uint32_t k)
{
  /* n and k are unsigned, so only k > n needs rejecting */
  if (k > n) return 0;
  if (0 == k || k == n) return 1;
  /* parenthesize the divisor: fac(n) / fac(k) * fac(n-k) would compute
   * (n!/k!) * (n-k)! due to left-to-right evaluation */
  return fac (n) / (fac (k) * fac (n - k));
}
+
/**
 * @brief is b in view of a?
 *
 * Linear scan of peer a's currently known view for peer b's identity.
 *
 * @param a index (into #rps_peers) of the peer whose view is searched
 * @param b index (into #rps_peers) of the peer to look for
 *
 * @return #GNUNET_YES if b occurs in a's view, #GNUNET_NO otherwise
 */
static int is_in_view (uint32_t a, uint32_t b)
{
  uint32_t i;
  for (i = 0; i < rps_peers[a].cur_view_count; i++)
  {
    if (0 == memcmp (rps_peers[b].peer_id,
                     &rps_peers[a].cur_view[i],
                     sizeof (struct GNUNET_PeerIdentity)))
    {
      return GNUNET_YES;
    }
  }
  return GNUNET_NO;
}
+
/**
 * @brief Map a peer identity back to its index in #rps_peers.
 *
 * @param pid the identity to look up
 *
 * @return index of the peer with identity @a pid;
 *         aborts via GNUNET_assert if the identity is unknown
 */
static uint32_t get_idx_of_pid (const struct GNUNET_PeerIdentity *pid)
{
  uint32_t i;

  for (i = 0; i < num_peers; i++)
  {
    if (0 == memcmp (pid,
                     rps_peers[i].peer_id,
                     sizeof (struct GNUNET_PeerIdentity)))
    {
      return i;
    }
  }
  /* An unknown identity indicates a logic error elsewhere; fail hard
   * rather than returning a bogus index. */
  //return 0; /* Should not happen - make compiler happy */
  GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
             "No known _PeerIdentity %s!\n",
             GNUNET_i2s_full (pid));
  GNUNET_assert (0);
}
+
/**
 * @brief Counts number of peers in view of a that have b in their view
 *
 * @param a index of the peer whose view is traversed
 * @param b index of the peer to look for in the neighbours' views
 *
 * @return number of entries in a's view whose own view contains b
 */
static uint32_t count_containing_views (uint32_t a, uint32_t b)
{
  uint32_t i;
  uint32_t peer_idx;
  uint32_t count = 0;

  for (i = 0; i < rps_peers[a].cur_view_count; i++)
  {
    peer_idx = get_idx_of_pid (&rps_peers[a].cur_view[i]);
    if (GNUNET_YES == is_in_view (peer_idx, b))
    {
      count++;
    }
  }
  return count;
}
+
/**
 * @brief Computes the probability for each other peer to be selected by the
 * sampling process based on the views of all peers
 *
 * For every peer i the probability is combined from two independent events:
 * receiving a push from i and receiving i in a pull reply
 * (P = p_push + p_pull - p_push * p_pull).  The result vector is
 * normalized over the peers with non-zero probability and appended as one
 * line to the peer's "probs" file.
 *
 * NOTE(review): the binom()/fac() arithmetic overflows uint32_t for view
 * sizes above ~12 - assumed small test topologies; confirm.
 *
 * @param peer_idx index of the peer that is about to sample
 */
static void compute_probabilities (uint32_t peer_idx)
{
  //double probs[num_peers] = { 0 };
  double probs[num_peers];
  /* 10 chars per formatted probability (" %7.6f") plus terminator */
  size_t probs_as_str_size = (num_peers * 10 + 1) * sizeof (char);
  char *probs_as_str = GNUNET_malloc (probs_as_str_size);
  char *probs_as_str_cpy;
  uint32_t i;
  double prob_push;
  double prob_pull;
  uint32_t view_size;
  uint32_t cont_views;
  uint32_t number_of_being_in_pull_events;
  int tmp;
  uint32_t count_non_zero_prob;

  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
      "Computing probabilities for peer %" PRIu32 "\n", peer_idx);
  /* Firstly without knowledge of old views */
  for (i = 0; i < num_peers; i++)
  {
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
        "\tfor peer %" PRIu32 ":\n", i);
    view_size = rps_peers[i].cur_view_count;
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
        "\t\tview_size: %" PRIu32 "\n", view_size);
    /* For peer i the probability of being sampled is
     * evenly distributed among all possibly observed peers. */
    /* We could have observed a peer in three cases:
     *   1. peer sent a push
     *   2. peer was contained in a pull reply
     *   3. peer was in history (sampler) - ignored for now */
    /* 1. Probability of having received a push from peer i */
    /* 0.45 * view_size is the push subset size used by the protocol */
    if ((GNUNET_YES == is_in_view (i, peer_idx)) &&
        (1 <= (0.45 * view_size)))
    {
      prob_push = 1.0 * binom (0.45 * view_size, 1)
        /
        binom (view_size, 0.45 * view_size);
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                  "\t\t%" PRIu32 " is in %" PRIu32 "'s view, prob: %f\n",
                  peer_idx,
                  i,
                  prob_push);
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                  "\t\tposs choices from view: %" PRIu32 ", containing i: %" PRIu32 "\n",
                  binom (view_size, 0.45 * view_size),
                  binom (0.45 * view_size, 1));
    } else {
      prob_push = 0;
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                  "\t\t%" PRIu32 " is not in %" PRIu32 "'s view, prob: 0\n",
                  peer_idx,
                  i);
    }
    /* 2. Probability of peer i being contained in pulls */
    view_size = rps_peers[peer_idx].cur_view_count;
    cont_views = count_containing_views (peer_idx, i);
    /* number of pull subsets that contain at least one view knowing i */
    number_of_being_in_pull_events =
      (binom (view_size, 0.45 * view_size) -
       binom (view_size - cont_views, 0.45 * view_size));
    if (0 != number_of_being_in_pull_events)
    {
      prob_pull = number_of_being_in_pull_events
        /
        (1.0 * binom (view_size, 0.45 * view_size));
    } else
    {
      prob_pull = 0;
    }
    /* union of the two independent observation events */
    probs[i] = prob_push + prob_pull - (prob_push * prob_pull);
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                "\t\t%" PRIu32 " has %" PRIu32 " of %" PRIu32
                " peers in its view who know %" PRIu32 " prob: %f\n",
                peer_idx,
                cont_views,
                view_size,
                i,
                prob_pull);
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                "\t\tnumber of possible pull combinations: %" PRIu32 "\n",
                binom (view_size, 0.45 * view_size));
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                "\t\tnumber of possible pull combinations without %" PRIu32
                ": %" PRIu32 "\n",
                i,
                binom (view_size - cont_views, 0.45 * view_size));
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                "\t\tnumber of possible pull combinations with %" PRIu32
                ": %" PRIu32 "\n",
                i,
                number_of_being_in_pull_events);

    if (0 != probs[i]) count_non_zero_prob++;
  }
  /* normalize */
  if (0 != count_non_zero_prob)
  {
    for (i = 0; i < num_peers; i++)
    {
      probs[i] = probs[i] * (1.0 / count_non_zero_prob);
    }
  } else {
    for (i = 0; i < num_peers; i++)
    {
      probs[i] = 0;
    }
  }
  /* str repr */
  for (i = 0; i < num_peers; i++)
  {
    probs_as_str_cpy = GNUNET_strndup (probs_as_str, probs_as_str_size);
    tmp = GNUNET_snprintf (probs_as_str,
                           probs_as_str_size,
                           "%s %7.6f", probs_as_str_cpy, probs[i]);
    GNUNET_free (probs_as_str_cpy);
    GNUNET_assert (0 <= tmp);
  }

  to_file_w_len (rps_peers[peer_idx].file_name_probs,
                 probs_as_str_size,
                 probs_as_str);
  GNUNET_free (probs_as_str);
}
+
/**
 * @brief This counts the number of peers in which views a given peer occurs.
 *
 * It also stores this value in the rps peer.  Each other peer contributes
 * at most once (the inner loop breaks on the first match).
 *
 * @param peer_idx the index of the peer to count the representation
 *
 * @return the number of occurrences
 */
static uint32_t count_peer_in_views_2 (uint32_t peer_idx)
{
  uint32_t i, j;
  uint32_t count = 0;

  for (i = 0; i < num_peers; i++) /* Peer in which view is counted */
  {
    for (j = 0; j < rps_peers[i].cur_view_count; j++) /* entry in view */
    {
      if (0 == memcmp (rps_peers[peer_idx].peer_id,
                       &rps_peers[i].cur_view[j],
                       sizeof (struct GNUNET_PeerIdentity)))
      {
        count++;
        break;
      }
    }
  }
  rps_peers[peer_idx].count_in_views = count;
  return count;
}
+
+static uint32_t cumulated_view_sizes ()
+{
+ uint32_t i;
+
+ view_sizes = 0;
+ for (i = 0; i < num_peers; i++) /* Peer in which view is counted */
+ {
+ view_sizes += rps_peers[i].cur_view_count;
+ }
+ return view_sizes;
+}
+
/**
 * @brief Accumulate per-peer occurrence counts into @a count_peers.
 *
 * NOTE(review): both sides of the memcmp index peer i - this increments
 * count_peers[i] only when peer i contains ITSELF in its own view, which
 * contradicts the "representation in other peers' views" semantics used by
 * count_peer_in_views_2(); confirm intent.  Counts are added on top of
 * whatever @a count_peers already holds (no reset here).
 *
 * @param count_peers array of #num_peers counters to increment
 */
static void count_peer_in_views (uint32_t *count_peers)
{
  uint32_t i, j;

  for (i = 0; i < num_peers; i++) /* Peer in which view is counted */
  {
    for (j = 0; j < rps_peers[i].cur_view_count; j++) /* entry in view */
    {
      if (0 == memcmp (rps_peers[i].peer_id,
                       &rps_peers[i].cur_view[j],
                       sizeof (struct GNUNET_PeerIdentity)))
      {
        count_peers[i]++;
      }
    }
  }
}
+
/**
 * @brief Compute how far each peer's representation in all views deviates
 * from a uniform distribution, and log the result (DEBUG level).
 *
 * NOTE(review): count_peer_in_views() is invoked once per iteration of the
 * outer loop without resetting count_peers, so the counters accumulate
 * num_peers times - verify whether a single call outside the loop was
 * intended.
 */
void compute_diversity ()
{
  uint32_t i;
  /* ith entry represents the number of occurrences in other peer's views */
  uint32_t *count_peers = GNUNET_new_array (num_peers, uint32_t);
  uint32_t views_total_size;
  /* expected occurrences per peer under a uniform distribution */
  double expected;
  /* deviation from expected number of peers */
  double *deviation = GNUNET_new_array (num_peers, double);

  views_total_size = 0;
  expected = 0;

  /* For each peer count its representation in other peer's views*/
  for (i = 0; i < num_peers; i++) /* Peer to count */
  {
    views_total_size += rps_peers[i].cur_view_count;
    count_peer_in_views (count_peers);
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                "Counted representation of %" PRIu32 "th peer [%s]: %" PRIu32"\n",
                i,
                GNUNET_i2s (rps_peers[i].peer_id),
                count_peers[i]);
  }

  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
             "size of all views combined: %" PRIu32 "\n",
             views_total_size);
  expected = ((double) 1/num_peers) * views_total_size;
  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
             "Expected number of occurrences of each peer in all views: %f\n",
             expected);
  for (i = 0; i < num_peers; i++) /* Peer to count */
  {
    deviation[i] = expected - count_peers[i];
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
               "Deviation from expectation: %f\n", deviation[i]);
  }
  GNUNET_free (count_peers);
  GNUNET_free (deviation);
}
+
+void print_view_sizes()
+{
+ uint32_t i;
+
+ for (i = 0; i < num_peers; i++) /* Peer to count */
+ {
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "View size of %" PRIu32 ". [%s] is %" PRIu32 "\n",
+ i,
+ GNUNET_i2s (rps_peers[i].peer_id),
+ rps_peers[i].cur_view_count);
+ }
+}
+
/**
 * @brief Hook run whenever any peer's view changed:
 * recompute diversity metrics and log all view sizes.
 */
void all_views_updated_cb ()
{
  compute_diversity ();
  print_view_sizes ();
}
+
/**
 * @brief Callback from GNUNET_RPS_view_request(): a peer's view changed.
 *
 * Stores the new view on the peer, appends view-size / representation
 * metrics to files under /tmp/rps/, and recomputes the sampling
 * probabilities.
 *
 * NOTE(review): the to_file format strings pair "%" PRIu64 with
 * rps_peer->index and "%" PRIu32 with the uint64_t view_size - the
 * specifiers look swapped relative to the argument types; confirm the
 * declared type of RPSPeer.index.
 *
 * @param cls the #RPSPeer this view belongs to
 * @param view_size number of entries in @a peers
 * @param peers the new view content
 */
void view_update_cb (void *cls,
                     uint64_t view_size,
                     const struct GNUNET_PeerIdentity *peers)
{
  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
              "View was updated (%" PRIu64 ")\n", view_size);
  struct RPSPeer *rps_peer = (struct RPSPeer *) cls;
  to_file ("/tmp/rps/view_sizes.txt",
           "%" PRIu64 " %" PRIu32 "",
           rps_peer->index,
           view_size);
  /* NOTE(review): int loop variable compared against uint64_t view_size */
  for (int i = 0; i < view_size; i++)
  {
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                "\t%s\n", GNUNET_i2s (&peers[i]));
  }
  /* resize the stored view, then copy the new content over */
  GNUNET_array_grow (rps_peer->cur_view,
                     rps_peer->cur_view_count,
                     view_size);
  //*rps_peer->cur_view = *peers;
  GNUNET_memcpy (rps_peer->cur_view,
                 peers,
                 view_size * sizeof (struct GNUNET_PeerIdentity));
  to_file ("/tmp/rps/count_in_views.txt",
           "%" PRIu64 " %" PRIu32 "",
           rps_peer->index,
           count_peer_in_views_2 (rps_peer->index));
  cumulated_view_sizes ();
  /* guard against division by zero for empty views */
  if (0 != view_size)
  {
    to_file ("/tmp/rps/repr.txt",
             "%" PRIu64 /* index */
             " %" PRIu32 /* occurrence in views */
             " %" PRIu32 /* view sizes */
             " %f" /* fraction of repr in views */
             " %f" /* average view size */
             " %f" /* prob of occurrence in view slot */
             " %f" "", /* exp frac of repr in views */
             rps_peer->index,
             count_peer_in_views_2 (rps_peer->index),
             view_sizes,
             count_peer_in_views_2 (rps_peer->index) / (view_size * 1.0), /* fraction of representation in views */
             view_sizes / (view_size * 1.0), /* average view size */
             1.0 /view_size, /* prob of occurrence in view slot */
             (1.0/view_size) * (view_sizes/view_size) /* expected fraction of repr in views */
             );
  }
  compute_probabilities (rps_peer->index);
  all_views_updated_cb ();
}
+
/**
 * @brief Profiler pre-test hook: set up the per-peer probability output
 * file and subscribe to view updates (0 = unlimited updates).
 *
 * @param rps_peer the peer being set up
 * @param h the peer's RPS handle
 */
static void
pre_profiler (struct RPSPeer *rps_peer, struct GNUNET_RPS_Handle *h)
{
  rps_peer->file_name_probs =
    store_prefix_file_name (rps_peer->peer_id, "probs");
  GNUNET_RPS_view_request (h, 0, view_update_cb, rps_peer);
}
+
/**
 * @brief Append one line per peer with all collected statistics values to
 * /tmp/rps/final_stats.dat.
 *
 * Column order matches the #STAT_TYPE enum: index, id, rounds, the six
 * blocking counters, issued, sent and received push/pull counters.
 */
void write_final_stats (void){
  uint32_t i;

  for (i = 0; i < num_peers; i++)
  {
    to_file ("/tmp/rps/final_stats.dat",
             "%" PRIu32 " " /* index */
             "%s %" /* id */
             PRIu64 " %" /* rounds */
             PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" /* blocking */
             PRIu64 " %" PRIu64 " %" PRIu64 " %" /* issued */
             PRIu64 " %" PRIu64 " %" PRIu64 " %" /* sent */
             PRIu64 " %" PRIu64 " %" PRIu64 /* recv */,
             i,
             GNUNET_i2s (rps_peers[i].peer_id),
             rps_peers[i].num_rounds,
             rps_peers[i].num_blocks,
             rps_peers[i].num_blocks_many_push,
             rps_peers[i].num_blocks_no_push,
             rps_peers[i].num_blocks_no_pull,
             rps_peers[i].num_blocks_many_push_no_pull,
             rps_peers[i].num_blocks_no_push_no_pull,
             rps_peers[i].num_issued_push,
             rps_peers[i].num_issued_pull_req,
             rps_peers[i].num_issued_pull_rep,
             rps_peers[i].num_sent_push,
             rps_peers[i].num_sent_pull_req,
             rps_peers[i].num_sent_pull_rep,
             rps_peers[i].num_recv_push,
             rps_peers[i].num_recv_pull_req,
             rps_peers[i].num_recv_pull_rep);
  }
}
+
+/**
+ * Continuation called by #GNUNET_STATISTICS_get() functions.
+ *
+ * Remembers that this specific statistics value was received for this peer.
+ * Checks whether all peers received their statistics yet.
+ * Issues the shutdown.
+ *
+ * @param cls closure
+ * @param success #GNUNET_OK if statistics were
+ * successfully obtained, #GNUNET_SYSERR if not.
+ */
+void
+post_test_shutdown_ready_cb (void *cls,
+ int success)
+{
+ struct STATcls *stat_cls = (struct STATcls *) cls;
+ struct RPSPeer *rps_peer = stat_cls->rps_peer;
+ if (GNUNET_OK == success)
+ {
+ /* set flag that we we got the value */
+ rps_peer->stat_collected_flags |= stat_cls->stat_type;
+ } else {
+ GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
+ "Peer %u did not receive statistics value\n",
+ rps_peer->index);
+ GNUNET_free (stat_cls);
+ GNUNET_break (0);
+ }
+
+ if (NULL != rps_peer->stat_op &&
+ GNUNET_YES == check_statistics_collect_completed_single_peer (rps_peer))
+ {
+ GNUNET_TESTBED_operation_done (rps_peer->stat_op);
+ }
+
+ write_final_stats ();
+ if (GNUNET_YES == check_statistics_collect_completed())
+ {
+ //write_final_stats ();
+ GNUNET_free (stat_cls);
+ GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+ "Shutting down\n");
+ GNUNET_SCHEDULER_shutdown ();
+ } else {
+ GNUNET_free (stat_cls);
+ }
+}
+
/**
 * @brief Converts string representation to the corresponding #STAT_TYPE enum.
 *
 * Matching is by prefix (strncmp with the pattern's own length), so the
 * checks MUST test longer/more specific strings before their prefixes
 * (e.g. "# rounds blocked - ..." before "# rounds blocked" before
 * "# rounds") - keep that order when adding entries.
 *
 * @param stat_str string representation of statistics specifier
 *
 * @return corresponding enum, or #STAT_TYPE_MAX if unknown
 */
enum STAT_TYPE stat_str_2_type (const char *stat_str)
{
  if (0 == strncmp ("# rounds blocked - no pull replies", stat_str, strlen ("# rounds blocked - no pull replies")))
  {
    return STAT_TYPE_BLOCKS_NO_PULL;
  }
  else if (0 == strncmp ("# rounds blocked - too many pushes, no pull replies", stat_str, strlen ("# rounds blocked - too many pushes, no pull replies")))
  {
    return STAT_TYPE_BLOCKS_MANY_PUSH_NO_PULL;
  }
  else if (0 == strncmp ("# rounds blocked - too many pushes", stat_str, strlen ("# rounds blocked - too many pushes")))
  {
    return STAT_TYPE_BLOCKS_MANY_PUSH;
  }
  else if (0 == strncmp ("# rounds blocked - no pushes, no pull replies", stat_str, strlen ("# rounds blocked - no pushes, no pull replies")))
  {
    return STAT_TYPE_BLOCKS_NO_PUSH_NO_PULL;
  }
  else if (0 == strncmp ("# rounds blocked - no pushes", stat_str, strlen ("# rounds blocked - no pushes")))
  {
    return STAT_TYPE_BLOCKS_NO_PUSH;
  }
  else if (0 == strncmp ("# rounds blocked", stat_str, strlen ("# rounds blocked")))
  {
    return STAT_TYPE_BLOCKS;
  }
  else if (0 == strncmp ("# rounds", stat_str, strlen ("# rounds")))
  {
    return STAT_TYPE_ROUNDS;
  }
  else if (0 == strncmp ("# push send issued", stat_str, strlen ("# push send issued")))
  {
    return STAT_TYPE_ISSUED_PUSH_SEND;
  }
  else if (0 == strncmp ("# pull request send issued", stat_str, strlen ("# pull request send issued")))
  {
    return STAT_TYPE_ISSUED_PULL_REQ;
  }
  else if (0 == strncmp ("# pull reply send issued", stat_str, strlen ("# pull reply send issued")))
  {
    return STAT_TYPE_ISSUED_PULL_REP;
  }
  else if (0 == strncmp ("# pushes sent", stat_str, strlen ("# pushes sent")))
  {
    return STAT_TYPE_SENT_PUSH_SEND;
  }
  else if (0 == strncmp ("# pull requests sent", stat_str, strlen ("# pull requests sent")))
  {
    return STAT_TYPE_SENT_PULL_REQ;
  }
  else if (0 == strncmp ("# pull replys sent", stat_str, strlen ("# pull replys sent")))
  {
    return STAT_TYPE_SENT_PULL_REP;
  }
  else if (0 == strncmp ("# push message received", stat_str, strlen ("# push message received")))
  {
    return STAT_TYPE_RECV_PUSH_SEND;
  }
  else if (0 == strncmp ("# pull request message received", stat_str, strlen ("# pull request message received")))
  {
    return STAT_TYPE_RECV_PULL_REQ;
  }
  else if (0 == strncmp ("# pull reply messages received", stat_str, strlen ("# pull reply messages received")))
  {
    return STAT_TYPE_RECV_PULL_REP;
  }
  return STAT_TYPE_MAX;
}
+
+
/**
 * @brief Converts #STAT_TYPE enum to the equivalent string representation that
 * is stored with the statistics service.
 *
 * Inverse of stat_str_2_type(); the returned strings are literals and must
 * not be modified or freed by the caller.
 *
 * @param stat_type #STAT_TYPE enum
 *
 * @return string representation that matches statistics value;
 *         "ERROR" for #STAT_TYPE_MAX or unknown values
 */
char* stat_type_2_str (enum STAT_TYPE stat_type)
{
  switch (stat_type)
  {
    case STAT_TYPE_ROUNDS:
      return "# rounds";
    case STAT_TYPE_BLOCKS:
      return "# rounds blocked";
    case STAT_TYPE_BLOCKS_MANY_PUSH:
      return "# rounds blocked - too many pushes";
    case STAT_TYPE_BLOCKS_NO_PUSH:
      return "# rounds blocked - no pushes";
    case STAT_TYPE_BLOCKS_NO_PULL:
      return "# rounds blocked - no pull replies";
    case STAT_TYPE_BLOCKS_MANY_PUSH_NO_PULL:
      return "# rounds blocked - too many pushes, no pull replies";
    case STAT_TYPE_BLOCKS_NO_PUSH_NO_PULL:
      return "# rounds blocked - no pushes, no pull replies";
    case STAT_TYPE_ISSUED_PUSH_SEND:
      return "# push send issued";
    case STAT_TYPE_ISSUED_PULL_REQ:
      return "# pull request send issued";
    case STAT_TYPE_ISSUED_PULL_REP:
      return "# pull reply send issued";
    case STAT_TYPE_SENT_PUSH_SEND:
      return "# pushes sent";
    case STAT_TYPE_SENT_PULL_REQ:
      return "# pull requests sent";
    case STAT_TYPE_SENT_PULL_REP:
      return "# pull replys sent";
    case STAT_TYPE_RECV_PUSH_SEND:
      return "# push message received";
    case STAT_TYPE_RECV_PULL_REQ:
      return "# pull request message received";
    case STAT_TYPE_RECV_PULL_REP:
      return "# pull reply messages received";
    case STAT_TYPE_MAX:
    default:
      return "ERROR";
      ;
  }
}
+
/**
 * Callback function to process statistic values.
 *
 * Appends the raw name/value pair to the peer's stats file and stores the
 * value in the matching counter on the peer (dispatch via
 * stat_str_2_type()).
 *
 * @param cls closure - the #STATcls identifying peer and requested value
 * @param subsystem name of subsystem that created the statistic
 * @param name the name of the datum
 * @param value the current value
 * @param is_persistent #GNUNET_YES if the value is persistent, #GNUNET_NO if not
 * @return #GNUNET_OK to continue, #GNUNET_SYSERR to abort iteration
 */
int
stat_iterator (void *cls,
               const char *subsystem,
               const char *name,
               uint64_t value,
               int is_persistent)
{
  const struct STATcls *stat_cls = (const struct STATcls *) cls;
  struct RPSPeer *rps_peer = (struct RPSPeer *) stat_cls->rps_peer;
  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Got stat value: %s - %" PRIu64 "\n",
              //stat_type_2_str (stat_cls->stat_type),
              name,
              value);
  to_file (rps_peer->file_name_stats,
           "%s: %" PRIu64 "\n",
           name,
           value);
  switch (stat_str_2_type (name))
  {
    case STAT_TYPE_ROUNDS:
      rps_peer->num_rounds = value;
      break;
    case STAT_TYPE_BLOCKS:
      rps_peer->num_blocks = value;
      break;
    case STAT_TYPE_BLOCKS_MANY_PUSH:
      rps_peer->num_blocks_many_push = value;
      break;
    case STAT_TYPE_BLOCKS_NO_PUSH:
      rps_peer->num_blocks_no_push = value;
      break;
    case STAT_TYPE_BLOCKS_NO_PULL:
      rps_peer->num_blocks_no_pull = value;
      break;
    case STAT_TYPE_BLOCKS_MANY_PUSH_NO_PULL:
      rps_peer->num_blocks_many_push_no_pull = value;
      break;
    case STAT_TYPE_BLOCKS_NO_PUSH_NO_PULL:
      rps_peer->num_blocks_no_push_no_pull = value;
      break;
    case STAT_TYPE_ISSUED_PUSH_SEND:
      rps_peer->num_issued_push = value;
      break;
    case STAT_TYPE_ISSUED_PULL_REQ:
      rps_peer->num_issued_pull_req = value;
      break;
    case STAT_TYPE_ISSUED_PULL_REP:
      rps_peer->num_issued_pull_rep = value;
      break;
    case STAT_TYPE_SENT_PUSH_SEND:
      rps_peer->num_sent_push = value;
      break;
    case STAT_TYPE_SENT_PULL_REQ:
      rps_peer->num_sent_pull_req = value;
      break;
    case STAT_TYPE_SENT_PULL_REP:
      rps_peer->num_sent_pull_rep = value;
      break;
    case STAT_TYPE_RECV_PUSH_SEND:
      rps_peer->num_recv_push = value;
      break;
    case STAT_TYPE_RECV_PULL_REQ:
      rps_peer->num_recv_pull_req = value;
      break;
    case STAT_TYPE_RECV_PULL_REP:
      rps_peer->num_recv_pull_rep = value;
      break;
    case STAT_TYPE_MAX:
    default:
      GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
                  "Unknown statistics string: %s\n",
                  name);
      break;
  }
  return GNUNET_OK;
}
+
/**
 * @brief Profiler post-test hook: request every statistics value selected
 * in cur_test_run.stat_collect_flags for @a rps_peer.
 *
 * Each request gets its own #STATcls, freed by
 * post_test_shutdown_ready_cb().  No-op unless statistics collection is
 * enabled for this run.
 *
 * NOTE(review): file_name_stats is (re)assigned once per requested flag
 * inside the loop; if store_prefix_file_name() allocates, the earlier
 * results leak - consider hoisting it out of the loop (confirm ownership).
 *
 * @param rps_peer the peer to collect statistics for
 */
void post_profiler (struct RPSPeer *rps_peer)
{
  if (COLLECT_STATISTICS != cur_test_run.have_collect_statistics)
  {
    return;
  }

  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
              "Going to request statistic values with mask 0x%" PRIx32 "\n",
              cur_test_run.stat_collect_flags);

  struct STATcls *stat_cls;
  uint32_t stat_type;
  /* walk all single-bit #STAT_TYPE values up to the sentinel */
  for (stat_type = STAT_TYPE_ROUNDS;
       stat_type < STAT_TYPE_MAX;
       stat_type = stat_type <<1)
  {
    if (stat_type & cur_test_run.stat_collect_flags)
    {
      stat_cls = GNUNET_malloc (sizeof (struct STATcls));
      stat_cls->rps_peer = rps_peer;
      stat_cls->stat_type = stat_type;
      rps_peer->file_name_stats =
        store_prefix_file_name (rps_peer->peer_id, "stats");
      GNUNET_STATISTICS_get (rps_peer->stats_h,
                             "rps",
                             stat_type_2_str (stat_type),
                             post_test_shutdown_ready_cb,
                             stat_iterator,
                             (struct STATcls *) stat_cls);
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                  "Requested statistics for %s (peer %" PRIu32 ")\n",
                  stat_type_2_str (stat_type),
                  rps_peer->index);
    }
  }
}
+
/***********************************************************************
* /Definition of tests
{
unsigned int i;
struct OpListEntry *entry;
- uint32_t num_mal_peers;
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "RUN was called\n");
rps_peers[i].index = i;
if (NULL != cur_test_run.init_peer)
cur_test_run.init_peer (&rps_peers[i]);
+    if (NO_COLLECT_VIEW == cur_test_run.have_collect_view)
+    {
+      /* Reset the view of the peer currently being initialised.  The
+       * previous code wrote through rps_peers-> (i.e. rps_peers[0]) on
+       * every iteration, leaving peers 1..n-1 uninitialised. */
+      rps_peers[i].cur_view_count = 0;
+      rps_peers[i].cur_view = NULL;
+    }
entry->op = GNUNET_TESTBED_peer_get_information (peers[i],
GNUNET_TESTBED_PIT_IDENTITY,
&info_cb,
}
/* Bring peers up */
- num_mal_peers = round (portion * num_peers);
GNUNET_assert (num_peers == n_peers);
for (i = 0; i < n_peers; i++)
{
rps_peers[i].index = i;
- if ( (rps_peers[i].num_recv_ids < rps_peers[i].num_ids_to_request) ||
- (i < num_mal_peers) )
+ rps_peers[i].op =
+ GNUNET_TESTBED_service_connect (&rps_peers[i],
+ peers[i],
+ "rps",
+ &rps_connect_complete_cb,
+ &rps_peers[i],
+ &rps_connect_adapter,
+ &rps_disconnect_adapter,
+ &rps_peers[i]);
+ /* Connect all peers to statistics service */
+ if (COLLECT_STATISTICS == cur_test_run.have_collect_statistics)
{
- rps_peers[i].op =
- GNUNET_TESTBED_service_connect (&rps_peers[i],
+ rps_peers[i].stat_op =
+ GNUNET_TESTBED_service_connect (NULL,
peers[i],
- "rps",
- &rps_connect_complete_cb,
+ "statistics",
+ stat_complete_cb,
&rps_peers[i],
- &rps_connect_adapter,
- &rps_disconnect_adapter,
+ &stat_connect_adapter,
+ &stat_disconnect_adapter,
&rps_peers[i]);
}
}
  if (NULL != churn_task)
    GNUNET_SCHEDULER_cancel (churn_task);
+  /* Schedule the post-test hook after the plain test duration; the global
+   * @a timeout is then enlarged below so the final shutdown fires later
+   * than the post-test phase. */
+  post_test_task = GNUNET_SCHEDULER_add_delayed (timeout, &post_test_op, NULL);
+  timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS,
+                                           (timeout_s * 1.2) + 0.1 * num_peers);
  shutdown_task = GNUNET_SCHEDULER_add_delayed (timeout, &shutdown_op, NULL);
+  /* NOTE(review): this overwrites the handle of the delayed shutdown task
+   * scheduled on the previous line.  That task can then never be cancelled
+   * and shutdown_op may run twice (once via the delayed task, once via the
+   * shutdown handler) -- confirm this double scheduling is intended. */
+  shutdown_task = GNUNET_SCHEDULER_add_shutdown (shutdown_op, NULL);
+
}
{
int ret_value;
+ /* Defaults for tests */
num_peers = 5;
cur_test_run.name = "test-rps-default";
cur_test_run.init_peer = default_init_peer;
cur_test_run.pre_test = NULL;
cur_test_run.reply_handle = default_reply_handle;
cur_test_run.eval_cb = default_eval_cb;
+ cur_test_run.post_test = NULL;
cur_test_run.have_churn = HAVE_CHURN;
+ cur_test_run.have_collect_statistics = NO_COLLECT_STATISTICS;
+ cur_test_run.stat_collect_flags = 0;
+ cur_test_run.have_collect_view = NO_COLLECT_VIEW;
churn_task = NULL;
- timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 30);
+ timeout_s = 30;
if (strstr (argv[0], "malicious") != NULL)
{
cur_test_run.main_test = seed_big_cb;
cur_test_run.eval_cb = no_eval;
cur_test_run.have_churn = HAVE_NO_CHURN;
- timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10);
+ timeout_s = 10;
}
else if (strstr (argv[0], "_single_peer_seed") != NULL)
cur_test_run.main_test = req_cancel_cb;
cur_test_run.eval_cb = no_eval;
cur_test_run.have_churn = HAVE_NO_CHURN;
- timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10);
+ timeout_s = 10;
}
else if (strstr (argv[0], "_churn") != NULL)
cur_test_run.main_test = churn_test_cb;
cur_test_run.reply_handle = default_reply_handle;
cur_test_run.eval_cb = default_eval_cb;
- cur_test_run.have_churn = HAVE_CHURN;
+ cur_test_run.have_churn = HAVE_NO_CHURN;
cur_test_run.have_quick_quit = HAVE_NO_QUICK_QUIT;
- timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10);
+ timeout_s = 10;
}
else if (strstr (argv[0], "profiler") != NULL)
{
GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "This is the profiler\n");
cur_test_run.name = "test-rps-profiler";
- num_peers = 10;
+ num_peers = 16;
mal_type = 3;
cur_test_run.init_peer = profiler_init_peer;
- cur_test_run.pre_test = mal_pre;
+ //cur_test_run.pre_test = mal_pre;
+ cur_test_run.pre_test = pre_profiler;
cur_test_run.main_test = profiler_cb;
cur_test_run.reply_handle = profiler_reply_handle;
cur_test_run.eval_cb = profiler_eval;
+ cur_test_run.post_test = post_profiler;
cur_test_run.request_interval = 2;
cur_test_run.num_requests = 5;
- cur_test_run.have_churn = HAVE_CHURN;
+ //cur_test_run.have_churn = HAVE_CHURN;
+ cur_test_run.have_churn = HAVE_NO_CHURN;
cur_test_run.have_quick_quit = HAVE_NO_QUICK_QUIT;
- timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 300);
+ cur_test_run.have_collect_statistics = COLLECT_STATISTICS;
+ cur_test_run.stat_collect_flags = STAT_TYPE_ROUNDS |
+ STAT_TYPE_BLOCKS |
+ STAT_TYPE_BLOCKS_MANY_PUSH |
+ STAT_TYPE_BLOCKS_NO_PUSH |
+ STAT_TYPE_BLOCKS_NO_PULL |
+ STAT_TYPE_BLOCKS_MANY_PUSH_NO_PULL |
+ STAT_TYPE_BLOCKS_NO_PUSH_NO_PULL |
+ STAT_TYPE_ISSUED_PUSH_SEND |
+ STAT_TYPE_ISSUED_PULL_REQ |
+ STAT_TYPE_ISSUED_PULL_REP |
+ STAT_TYPE_SENT_PUSH_SEND |
+ STAT_TYPE_SENT_PULL_REQ |
+ STAT_TYPE_SENT_PULL_REP |
+ STAT_TYPE_RECV_PUSH_SEND |
+ STAT_TYPE_RECV_PULL_REQ |
+ STAT_TYPE_RECV_PULL_REP;
+ cur_test_run.have_collect_view = COLLECT_VIEW;
+ timeout_s = 150;
/* 'Clean' directory */
(void) GNUNET_DISK_directory_remove ("/tmp/rps/");
GNUNET_DISK_directory_create ("/tmp/rps/");
}
+ timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, timeout_s);
rps_peers = GNUNET_new_array (num_peers, struct RPSPeer);
peer_map = GNUNET_CONTAINER_multipeermap_create (num_peers, GNUNET_NO);
}
  ret_value = cur_test_run.eval_cb();
+
+  /* NOTE(review): this releases the view array of peer 0 only (rps_peers->
+   * is rps_peers[0]) and only when views were NOT collected -- i.e. when
+   * cur_view is already NULL -- so the grow-to-0 is a no-op.  Views
+   * gathered under COLLECT_VIEW appear to leak for every peer; confirm,
+   * and consider freeing per-peer with the condition inverted. */
+  if (NO_COLLECT_VIEW == cur_test_run.have_collect_view)
+  {
+    GNUNET_array_grow (rps_peers->cur_view,
+                       rps_peers->cur_view_count,
+                       0);
+  }
  GNUNET_free (rps_peers);
  GNUNET_free (rps_peer_ids);
  GNUNET_CONTAINER_multipeermap_destroy (peer_map);