fix crash on unexpected client disconnect on incoming message, remove bogus assertion
[oweals/gnunet.git] / src / rps / test_rps.c
index b8f7588e101f78d96026321d97984c1cfd9a62d8..542919425da19ca90984fe8603233794f3157e12 100644 (file)
@@ -62,6 +62,19 @@ static unsigned int mal_type = 0;
  */
 static struct GNUNET_TESTBED_Peer **testbed_peers;
 
+/**
+ * @brief Indicates whether peer should go off- or online
+ */
+enum PEER_ONLINE_DELTA {
+  /**
+   * @brief Indicates peer going online
+   */
+  PEER_GO_ONLINE = 1,
+  /**
+   * @brief Indicates peer going offline
+   */
+  PEER_GO_OFFLINE = -1,
+};
 
 /**
  * Operation map entry
@@ -84,10 +97,10 @@ struct OpListEntry
   struct GNUNET_TESTBED_Operation *op;
 
   /**
-   * Depending on whether we start or stop NSE service at the peer set this to 1
-   * or -1
+   * Depending on whether we start or stop RPS service at the peer, set this to
+   * #PEER_GO_ONLINE (1) or #PEER_GO_OFFLINE (-1)
    */
-  int delta;
+  enum PEER_ONLINE_DELTA delta;
 
   /**
    * Index of the regarding peer
@@ -188,7 +201,7 @@ struct RPSPeer
   int online;
 
   /**
-   * Number of Peer IDs to request
+   * Number of Peer IDs to request during the whole test
    */
   unsigned int num_ids_to_request;
 
@@ -218,6 +231,100 @@ struct RPSPeer
    * Number of received PeerIDs
    */
   unsigned int num_recv_ids;
+
+  /**
+   * Pending operation on that peer
+   */
+  const struct OpListEntry *entry_op_manage;
+
+  /**
+   * Testbed operation to connect to statistics service
+   */
+  struct GNUNET_TESTBED_Operation *stat_op;
+
+  /**
+   * Handle to the statistics service
+   */
+  struct GNUNET_STATISTICS_Handle *stats_h;
+
+  /**
+   * @brief Flags to indicate which statistics values have already been
+   * collected from the statistics service.
+   * Used to check whether we are able to shutdown.
+   */
+  uint32_t stat_collected_flags;
+
+  /**
+   * @brief File name of the file the stats are finally written to
+   */
+  const char *file_name_stats;
+
+  /**
+   * @brief File name of the file the sampling probabilities are written to
+   */
+  const char *file_name_probs;
+
+  /**
+   * @brief The current view
+   */
+  struct GNUNET_PeerIdentity *cur_view;
+
+  /**
+   * @brief Number of peers in the #cur_view.
+   */
+  uint32_t cur_view_count;
+
+  /**
+   * @brief Number of occurrences in other peers' views
+   */
+  uint32_t count_in_views;
+
+  /**
+   * @brief statistics values
+   */
+  uint64_t num_rounds;
+  uint64_t num_blocks;
+  uint64_t num_blocks_many_push;
+  uint64_t num_blocks_no_push;
+  uint64_t num_blocks_no_pull;
+  uint64_t num_blocks_many_push_no_pull;
+  uint64_t num_blocks_no_push_no_pull;
+  uint64_t num_issued_push;
+  uint64_t num_issued_pull_req;
+  uint64_t num_issued_pull_rep;
+  uint64_t num_sent_push;
+  uint64_t num_sent_pull_req;
+  uint64_t num_sent_pull_rep;
+  uint64_t num_recv_push;
+  uint64_t num_recv_pull_req;
+  uint64_t num_recv_pull_rep;
+};
+
+enum STAT_TYPE
+{
+  STAT_TYPE_ROUNDS                    =    0x1, /*   1 */
+  STAT_TYPE_BLOCKS                    =    0x2, /*   2 */
+  STAT_TYPE_BLOCKS_MANY_PUSH          =    0x4, /*   3 */
+  STAT_TYPE_BLOCKS_NO_PUSH            =    0x8, /*   4 */
+  STAT_TYPE_BLOCKS_NO_PULL            =   0x10, /*   5 */
+  STAT_TYPE_BLOCKS_MANY_PUSH_NO_PULL  =   0x20, /*   6 */
+  STAT_TYPE_BLOCKS_NO_PUSH_NO_PULL    =   0x40, /*   7 */
+  STAT_TYPE_ISSUED_PUSH_SEND          =   0x80, /*   8 */
+  STAT_TYPE_ISSUED_PULL_REQ           =  0x100, /*   9 */
+  STAT_TYPE_ISSUED_PULL_REP           =  0x200, /*  10 */
+  STAT_TYPE_SENT_PUSH_SEND            =  0x400, /*  11 */
+  STAT_TYPE_SENT_PULL_REQ             =  0x800, /*  12 */
+  STAT_TYPE_SENT_PULL_REP             = 0x1000, /*  13 */
+  STAT_TYPE_RECV_PUSH_SEND            = 0x2000, /*  14 */
+  STAT_TYPE_RECV_PULL_REQ             = 0x4000, /*  15 */
+  STAT_TYPE_RECV_PULL_REP             = 0x8000, /*  16 */
+  STAT_TYPE_MAX                       = 0x80000000, /*  32 */
+};
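+
+/*
+ * The STAT_TYPE values are single-bit flags.  They are OR'ed together into
+ * cur_test_run.stat_collect_flags to select which statistics to request,
+ * set one by one in RPSPeer.stat_collected_flags as the values arrive, and
+ * compared against stat_collect_flags to decide whether shutdown may proceed,
+ * e.g.:
+ *   cur_test_run.stat_collect_flags = STAT_TYPE_ROUNDS | STAT_TYPE_BLOCKS;
+ *   rps_peer->stat_collected_flags |= STAT_TYPE_ROUNDS;
+ */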
+
+struct STATcls
+{
+  struct RPSPeer *rps_peer;
+  enum STAT_TYPE stat_type;
 };
 
 
@@ -251,11 +358,21 @@ static struct RPSPeer *eval_peer;
  */
 static unsigned int num_peers_online;
 
+/**
+ * @brief The summed sizes of all peers' views
+ */
+static unsigned int view_sizes;
+
 /**
  * Return value from 'main'.
  */
 static int ok;
 
+/**
+ * Identifier for the task that shuts the test down on timeout
+ */
+static struct GNUNET_SCHEDULER_Task *shutdown_task;
+
 /**
  * Identifier for the churn task that runs periodically
  */
@@ -267,13 +384,21 @@ static struct GNUNET_SCHEDULER_Task *churn_task;
 typedef void (*InitPeer) (struct RPSPeer *rps_peer);
 
 /**
- * Called directly after connecting to the service
+ * @brief Called directly after connecting to the service
+ *
+ * @param rps_peer Specific peer the function is called on
+ * @param h the handle to the rps service
  */
-typedef void (*PreTest) (void *cls, struct GNUNET_RPS_Handle *h);
+typedef void (*PreTest) (struct RPSPeer *rps_peer, struct GNUNET_RPS_Handle *h);
 
 /**
+ * @brief Executes functions to test the api/service for a given peer
+ *
  * Called from within #rps_connect_complete_cb ()
- * Executes functions to test the api/service
+ * Implemented by #churn_test_cb, #profiler_cb, #mal_cb, #single_req_cb,
+ * #delay_req_cb, #seed_big_cb, #single_peer_seed_cb, #seed_cb, #req_cancel_cb
+ *
+ * @param rps_peer the peer the task runs on
  */
 typedef void (*MainTest) (struct RPSPeer *rps_peer);
 
@@ -287,13 +412,71 @@ typedef void (*ReplyHandle) (void *cls,
 /**
  * Called directly before disconnecting from the service
  */
-typedef void (*PostTest) (void *cls, struct GNUNET_RPS_Handle *h);
+typedef void (*PostTest) (struct RPSPeer *peer);
 
 /**
  * Function called after disconnect to evaluate test success
  */
 typedef int (*EvaluationCallback) (void);
 
+/**
+ * @brief Do we have Churn?
+ */
+enum OPTION_CHURN {
+  /**
+   * @brief If we have churn this is set
+   */
+  HAVE_CHURN,
+  /**
+   * @brief If we have no churn this is set
+   */
+  HAVE_NO_CHURN,
+};
+
+/**
+ * @brief Is it ok to quit the test before the timeout?
+ */
+enum OPTION_QUICK_QUIT {
+  /**
+   * @brief It is ok for the test to quit before the timeout triggers
+   */
+  HAVE_QUICK_QUIT,
+
+  /**
+   * @brief It is NOT ok for the test to quit before the timeout triggers
+   */
+  HAVE_NO_QUICK_QUIT,
+};
+
+/**
+ * @brief Do we collect statistics at the end?
+ */
+enum OPTION_COLLECT_STATISTICS {
+  /**
+   * @brief We collect statistics at the end
+   */
+  COLLECT_STATISTICS,
+
+  /**
+   * @brief We do not collect statistics at the end
+   */
+  NO_COLLECT_STATISTICS,
+};
+
+/**
+ * @brief Do we collect views during run?
+ */
+enum OPTION_COLLECT_VIEW {
+  /**
+   * @brief We collect view during run
+   */
+  COLLECT_VIEW,
+
+  /**
+   * @brief We do not collect the view during run
+   */
+  NO_COLLECT_VIEW,
+};
 
 /**
  * Structure to define a single test
@@ -306,7 +489,7 @@ struct SingleTestRun
   char *name;
 
   /**
-   * Called to initialise peer
+   * Called with a single peer in order to initialise that peer
    */
   InitPeer init_peer;
 
@@ -316,7 +499,7 @@ struct SingleTestRun
   PreTest pre_test;
 
   /**
-   * Function to execute the functions to be tested
+   * Main function for each peer
    */
   MainTest main_test;
 
@@ -346,9 +529,30 @@ struct SingleTestRun
   uint32_t num_requests;
 
   /**
-   * Run with churn
+   * Run with or without churn
+   */
+  enum OPTION_CHURN have_churn;
+
+  /**
+   * Quit test before timeout?
+   */
+  enum OPTION_QUICK_QUIT have_quick_quit;
+
+  /**
+   * Collect statistics at the end?
+   */
+  enum OPTION_COLLECT_STATISTICS have_collect_statistics;
+
+  /**
+   * Collect view during run?
+   */
+  enum OPTION_COLLECT_VIEW have_collect_view;
+
+  /**
+   * @brief Mark which values from the statistics service to collect at the end
+   * of the run
    */
-  int have_churn;
+  uint32_t stat_collect_flags;
 } cur_test_run;
 
 /**
@@ -494,6 +698,52 @@ make_oplist_entry ()
 }
 
 
+/**
+ * @brief Checks if given peer already received its statistics value from the
+ * statistics service.
+ *
+ * @param rps_peer the peer to check for
+ *
+ * @return #GNUNET_YES if so
+ *         #GNUNET_NO otherwise
+ */
+static int check_statistics_collect_completed_single_peer (
+    const struct RPSPeer *rps_peer)
+{
+  if (cur_test_run.stat_collect_flags !=
+        (cur_test_run.stat_collect_flags &
+          rps_peer->stat_collected_flags))
+  {
+    return GNUNET_NO;
+  }
+  return GNUNET_YES;
+}
+
+/**
+ * @brief Checks if all peers already received their statistics value from the
+ * statistics service.
+ *
+ * @return #GNUNET_YES if so
+ *         #GNUNET_NO otherwise
+ */
+static int check_statistics_collect_completed ()
+{
+  uint32_t i;
+
+  for (i = 0; i < num_peers; i++)
+  {
+    if (GNUNET_NO == check_statistics_collect_completed_single_peer (&rps_peers[i]))
+    {
+      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+          "At least Peer %" PRIu32 " did not yet receive all statistics values\n",
+          i);
+      return GNUNET_NO;
+    }
+  }
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+      "All peers received their statistics values\n");
+  return GNUNET_YES;
+}
+
 /**
  * Task run on timeout to shut everything down.
  */
@@ -502,6 +752,8 @@ shutdown_op (void *cls)
 {
   unsigned int i;
 
+  GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+              "Shutdown task running, going down.\n");
   in_shutdown = GNUNET_YES;
   if (NULL != churn_task)
   {
@@ -509,9 +761,21 @@ shutdown_op (void *cls)
     churn_task = NULL;
   }
   for (i = 0; i < num_peers; i++)
+  {
     if (NULL != rps_peers[i].op)
       GNUNET_TESTBED_operation_done (rps_peers[i].op);
-  GNUNET_SCHEDULER_shutdown ();
+    if (NULL != cur_test_run.post_test)
+    {
+      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Executing post_test for peer %u\n", i);
+      cur_test_run.post_test (&rps_peers[i]);
+    }
+  }
+  /* If we do not collect statistics, shut down directly */
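+  /* Otherwise the shutdown is triggered from post_test_shutdown_ready_cb()
+   * once all peers have delivered their statistics values. */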
+  if (NO_COLLECT_STATISTICS == cur_test_run.have_collect_statistics ||
+      GNUNET_YES == check_statistics_collect_completed())
+  {
+    GNUNET_SCHEDULER_shutdown ();
+  }
 }
 
 
@@ -583,6 +847,11 @@ info_cb (void *cb_cls,
 {
   struct OpListEntry *entry = (struct OpListEntry *) cb_cls;
 
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   if (NULL == pinfo || NULL != emsg)
   {
     GNUNET_log (GNUNET_ERROR_TYPE_ERROR, "Got Error: %s\n", emsg);
@@ -632,6 +901,11 @@ rps_connect_complete_cb (void *cls,
   struct RPSPeer *rps_peer = cls;
   struct GNUNET_RPS_Handle *rps = ca_result;
 
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   rps_peer->rps_handle = rps;
   rps_peer->online = GNUNET_YES;
   num_peers_online++;
@@ -677,6 +951,76 @@ rps_connect_adapter (void *cls,
   return h;
 }
 
+/**
+ * Called to open a connection to the peer's statistics service
+ *
+ * @param cls peer context
+ * @param cfg configuration of the peer to connect to; will be available until
+ *          GNUNET_TESTBED_operation_done() is called on the operation returned
+ *          from GNUNET_TESTBED_service_connect()
+ * @return service handle to return in 'op_result', NULL on error
+ */
+static void *
+stat_connect_adapter (void *cls,
+                      const struct GNUNET_CONFIGURATION_Handle *cfg)
+{
+  struct RPSPeer *peer = cls;
+
+  peer->stats_h = GNUNET_STATISTICS_create ("rps-profiler", cfg);
+  return peer->stats_h;
+}
+
+/**
+ * Called to disconnect from peer's statistics service
+ *
+ * @param cls peer context
+ * @param op_result service handle returned from the connect adapter
+ */
+static void
+stat_disconnect_adapter (void *cls, void *op_result)
+{
+  struct RPSPeer *peer = cls;
+
+  //GNUNET_break (GNUNET_OK == GNUNET_STATISTICS_watch_cancel
+  //              (peer->stats_h, "core", "# peers connected",
+  //               stat_iterator, peer));
+  //GNUNET_break (GNUNET_OK == GNUNET_STATISTICS_watch_cancel
+  //              (peer->stats_h, "nse", "# peers connected",
+  //               stat_iterator, peer));
+  GNUNET_STATISTICS_destroy (op_result, GNUNET_NO);
+  peer->stats_h = NULL;
+}
+
+/**
+ * Called after successfully opening a connection to a peer's statistics
+ * service; statistics watches (e.g. for CORE and NSE) could be registered
+ * here, but are currently commented out.
+ *
+ * @param cls the callback closure from functions generating an operation
+ * @param op the operation that has been finished
+ * @param ca_result the service handle returned from GNUNET_TESTBED_ConnectAdapter()
+ * @param emsg error message in case the operation has failed; will be NULL if
+ *          operation has executed successfully.
+ */
+static void
+stat_complete_cb (void *cls, struct GNUNET_TESTBED_Operation *op,
+                  void *ca_result, const char *emsg)
+{
+  //struct GNUNET_STATISTICS_Handle *sh = ca_result;
+  //struct RPSPeer *peer = (struct RPSPeer *) cls;
+
+  if (NULL != emsg)
+  {
+    GNUNET_break (0);
+    return;
+  }
+  //GNUNET_break (GNUNET_OK == GNUNET_STATISTICS_watch
+  //              (sh, "core", "# peers connected",
+  //               stat_iterator, peer));
+  //GNUNET_break (GNUNET_OK == GNUNET_STATISTICS_watch
+  //              (sh, "nse", "# peers connected",
+  //               stat_iterator, peer));
+}
+
 
 /**
  * Adapter function called to destroy connection to
@@ -758,9 +1102,13 @@ default_reply_handle (void *cls,
     rps_peer->num_recv_ids++;
   }
 
-  if (0 == evaluate ())
+  if (0 == evaluate () && HAVE_QUICK_QUIT == cur_test_run.have_quick_quit)
   {
-    GNUNET_SCHEDULER_shutdown ();
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test succeeded before timeout\n");
+    GNUNET_assert (NULL != shutdown_task);
+    GNUNET_SCHEDULER_cancel (shutdown_task);
+    shutdown_task = GNUNET_SCHEDULER_add_now (&shutdown_op, NULL);
+    GNUNET_assert (NULL != shutdown_task);
   }
 }
 
@@ -904,12 +1252,20 @@ static void mal_init_peer (struct RPSPeer *rps_peer)
     rps_peer->num_ids_to_request = 1;
 }
 
+
+/**
+ * @brief Set peers to (non-)malicious before execution
+ *
+ * Of signature #PreTest
+ *
+ * @param rps_peer the peer to set (non-) malicious
+ * @param h the handle to the service
+ */
 static void
-mal_pre (void *cls, struct GNUNET_RPS_Handle *h)
+mal_pre (struct RPSPeer *rps_peer, struct GNUNET_RPS_Handle *h)
 {
   #ifdef ENABLE_MALICIOUS
   uint32_t num_mal_peers;
-  struct RPSPeer *rps_peer = (struct RPSPeer *) cls;
 
   GNUNET_assert ( (1 >= portion) &&
                   (0 <  portion) );
@@ -934,6 +1290,11 @@ mal_cb (struct RPSPeer *rps_peer)
 {
   uint32_t num_mal_peers;
 
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   #ifdef ENABLE_MALICIOUS
   GNUNET_assert ( (1 >= portion) &&
                   (0 <  portion) );
@@ -956,6 +1317,11 @@ mal_cb (struct RPSPeer *rps_peer)
 static void
 single_req_cb (struct RPSPeer *rps_peer)
 {
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   schedule_missing_requests (rps_peer);
 }
 
@@ -965,6 +1331,11 @@ single_req_cb (struct RPSPeer *rps_peer)
 static void
 delay_req_cb (struct RPSPeer *rps_peer)
 {
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   schedule_missing_requests (rps_peer);
 }
 
@@ -974,6 +1345,11 @@ delay_req_cb (struct RPSPeer *rps_peer)
 static void
 seed_cb (struct RPSPeer *rps_peer)
 {
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   GNUNET_SCHEDULER_add_delayed (
       GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10),
       seed_peers, rps_peer);
@@ -985,6 +1361,11 @@ seed_cb (struct RPSPeer *rps_peer)
 static void
 seed_big_cb (struct RPSPeer *rps_peer)
 {
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   // TODO test seeding > GNUNET_MAX_MESSAGE_SIZE peers
   GNUNET_SCHEDULER_add_delayed (
       GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 2),
@@ -1006,6 +1387,11 @@ single_peer_seed_cb (struct RPSPeer *rps_peer)
 static void
 seed_req_cb (struct RPSPeer *rps_peer)
 {
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   GNUNET_SCHEDULER_add_delayed (
       GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 2),
       seed_peers, rps_peer);
@@ -1020,6 +1406,11 @@ seed_req_cb (struct RPSPeer *rps_peer)
 static void
 req_cancel_cb (struct RPSPeer *rps_peer)
 {
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   schedule_missing_requests (rps_peer);
   GNUNET_SCHEDULER_add_delayed (
       GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS,
@@ -1027,6 +1418,49 @@ req_cancel_cb (struct RPSPeer *rps_peer)
       cancel_request_cb, rps_peer);
 }
 
+/***********************************
+ * CHURN
+***********************************/
+
+static void
+churn (void *cls);
+
+/**
+ * @brief Starts churn
+ *
+ * Has signature of #MainTest
+ *
+ * This is not implemented very nicely: it is called for each peer, although
+ * the churn task only needs to be scheduled once. (We do check that it is
+ * only scheduled once.)
+ *
+ * @param rps_peer The peer it's called for
+ */
+static void
+churn_test_cb (struct RPSPeer *rps_peer)
+{
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
+  /* Start churn */
+  if (HAVE_CHURN == cur_test_run.have_churn && NULL == churn_task)
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                "Starting churn task\n");
+    churn_task = GNUNET_SCHEDULER_add_delayed (
+          GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 5),
+          churn,
+          NULL);
+  } else {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                "Not starting churn task\n");
+  }
+
+  schedule_missing_requests (rps_peer);
+}
+
 /***********************************
  * PROFILER
 ***********************************/
@@ -1046,6 +1480,11 @@ churn_cb (void *cls,
   // FIXME
   struct OpListEntry *entry = cls;
 
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   GNUNET_TESTBED_operation_done (entry->op);
   if (NULL != emsg)
   {
@@ -1057,7 +1496,7 @@ churn_cb (void *cls,
 
   num_peers_online += entry->delta;
 
-  if (0 > entry->delta)
+  if (PEER_GO_OFFLINE == entry->delta)
   { /* Peer hopefully just went offline */
     if (GNUNET_YES != rps_peers[entry->index].online)
     {
@@ -1075,7 +1514,7 @@ churn_cb (void *cls,
     rps_peers[entry->index].online = GNUNET_NO;
   }
 
-  else if (0 < entry->delta)
+  else if (PEER_GO_ONLINE == entry->delta)
   { /* Peer hopefully just went online */
     if (GNUNET_NO != rps_peers[entry->index].online)
     {
@@ -1098,20 +1537,42 @@ churn_cb (void *cls,
     }
     rps_peers[entry->index].online = GNUNET_YES;
   }
-
+  else
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
+        "Invalid value for delta: %i\n", entry->delta);
+    GNUNET_break (0);
+  }
+
   GNUNET_CONTAINER_DLL_remove (oplist_head, oplist_tail, entry);
+  rps_peers[entry->index].entry_op_manage = NULL;
   GNUNET_free (entry);
   //if (num_peers_in_round[current_round] == peers_running)
   //  run_round ();
 }
 
+/**
+ * @brief Set the rps-service up or down for a specific peer
+ *
+ * @param i index of action
+ * @param j index of peer
+ * @param delta (#PEER_ONLINE_DELTA) down (-1) or up (1)
+ * @param prob_go_on_off the probability of the action
+ */
 static void
-manage_service_wrapper (unsigned int i, unsigned int j, int delta,
-    double prob_go_on_off)
+manage_service_wrapper (unsigned int i, unsigned int j,
+                        enum PEER_ONLINE_DELTA delta,
+                        double prob_go_on_off)
 {
-  struct OpListEntry *entry;
+  struct OpListEntry *entry = NULL;
   uint32_t prob;
 
+  /* make sure that management operation is not already scheduled */
+  if (NULL != rps_peers[j].entry_op_manage)
+  {
+    return;
+  }
+
   prob = GNUNET_CRYPTO_random_u32 (GNUNET_CRYPTO_QUALITY_WEAK,
                                    UINT32_MAX);
   GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
@@ -1119,15 +1580,15 @@ manage_service_wrapper (unsigned int i, unsigned int j, int delta,
               i,
               j,
               GNUNET_i2s (rps_peers[j].peer_id),
-              (0 > delta) ? "online" : "offline");
+              (PEER_GO_ONLINE == delta) ? "online" : "offline");
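+  /* prob is uniform over [0, UINT32_MAX]; the comparison below realizes the
+   * probability prob_go_on_off of actually turning the peer on or off. */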
   if (prob < prob_go_on_off * UINT32_MAX)
   {
     GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                 "%s goes %s\n",
                 GNUNET_i2s (rps_peers[j].peer_id),
-                (0 > delta) ? "offline" : "online");
+                (PEER_GO_OFFLINE == delta) ? "offline" : "online");
 
-    if (0 > delta)
+    if (PEER_GO_OFFLINE == delta)
       cancel_pending_req_rep (&rps_peers[j]);
     entry = make_oplist_entry ();
     entry->delta = delta;
@@ -1137,7 +1598,8 @@ manage_service_wrapper (unsigned int i, unsigned int j, int delta,
                                                     "rps",
                                                     &churn_cb,
                                                     entry,
-                                                    (0 > delta) ? 0 : 1);
+                                                    (PEER_GO_OFFLINE == delta) ? 0 : 1);
+    rps_peers[j].entry_op_manage = entry;
   }
 }
 
@@ -1153,6 +1615,15 @@ churn (void *cls)
   double portion_go_online;
   double portion_go_offline;
 
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+              "Churn function executing\n");
+
+  churn_task = NULL; /* Should be invalid by now */
+
   /* Compute the probability for an online peer to go offline
    * this round */
   portion_online = num_peers_online * 1.0 / num_peers;
@@ -1260,13 +1731,23 @@ profiler_reply_handle (void *cls,
 static void
 profiler_cb (struct RPSPeer *rps_peer)
 {
+  if (GNUNET_YES == in_shutdown)
+  {
+    return;
+  }
+
   /* Start churn */
-  if (GNUNET_YES == cur_test_run.have_churn && NULL == churn_task)
+  if (HAVE_CHURN == cur_test_run.have_churn && NULL == churn_task)
   {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                "Starting churn task\n");
     churn_task = GNUNET_SCHEDULER_add_delayed (
           GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 5),
           churn,
           NULL);
+  } else {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                "Not starting churn task\n");
   }
 
   /* Only request peer ids at one peer.
@@ -1328,6 +1809,743 @@ profiler_eval (void)
   return evaluate ();
 }
 
+/**
+ * @brief Factorial; note that with 32-bit arithmetic this overflows for
+ * x > 12 (13! does not fit into a uint32_t).
+ */
+static uint32_t fac (uint32_t x)
+{
+  if (1 >= x)
+  {
+    return 1;
+  }
+  return x * fac (x - 1);
+}
+
+/**
+ * @brief Binomial coefficient (n choose k); computed via factorials and thus
+ * subject to the same 32-bit overflow limits as fac().
+ */
+static uint32_t binom (uint32_t n, uint32_t k)
+{
+  //GNUNET_assert (n >= k);
+  if (k > n) return 0;
+  if (0 == k) return 1;
+  return fac (n) / (fac (k) * fac (n - k));
+}
+
+/**
+ * @brief is b in view of a?
+ *
+ * @param a index of the peer whose view is searched
+ * @param b index of the peer to look for in a's view
+ *
+ * @return #GNUNET_YES if b is in a's view, #GNUNET_NO otherwise
+ */
+static int is_in_view (uint32_t a, uint32_t b)
+{
+  uint32_t i;
+  for (i = 0; i < rps_peers[a].cur_view_count; i++)
+  {
+    if (0 == memcmp (rps_peers[b].peer_id,
+          &rps_peers[a].cur_view[i],
+          sizeof (struct GNUNET_PeerIdentity)))
+    {
+      return GNUNET_YES;
+    }
+  }
+  return GNUNET_NO;
+}
+
+static uint32_t get_idx_of_pid (const struct GNUNET_PeerIdentity *pid)
+{
+  uint32_t i;
+
+  for (i = 0; i < num_peers; i++)
+  {
+    if (0 == memcmp (pid,
+          rps_peers[i].peer_id,
+          sizeof (struct GNUNET_PeerIdentity)))
+    {
+      return i;
+    }
+  }
+  GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
+             "No known _PeerIdentity %s!\n",
+             GNUNET_i2s_full (pid));
+  GNUNET_assert (0);
+  return 0; /* Should not happen - make compiler happy */
+}
+
+/**
+ * @brief Counts number of peers in view of a that have b in their view
+ *
+ * @param a index of the peer whose view is traversed
+ * @param b index of the peer to look for in the views
+ *
+ * @return number of peers in a's view that have b in their view
+ */
+static uint32_t count_containing_views (uint32_t a, uint32_t b)
+{
+  uint32_t i;
+  uint32_t peer_idx;
+  uint32_t count = 0;
+
+  for (i = 0; i < rps_peers[a].cur_view_count; i++)
+  {
+    peer_idx = get_idx_of_pid (&rps_peers[a].cur_view[i]);
+    if (GNUNET_YES == is_in_view (peer_idx, b))
+    {
+      count++;
+    }
+  }
+  return count;
+}
+
+/**
+ * @brief Computes the probability for each other peer to be selected by the
+ * sampling process based on the views of all peers
+ *
+ * @param peer_idx index of the peer that is about to sample
+ */
+static void compute_probabilities (uint32_t peer_idx)
+{
+  //double probs[num_peers] = { 0 };
+  double probs[num_peers];
+  size_t probs_as_str_size = (num_peers * 10 + 1) * sizeof (char);
+  char *probs_as_str = GNUNET_malloc (probs_as_str_size);
+  char *probs_as_str_cpy;
+  uint32_t i;
+  double prob_push;
+  double prob_pull;
+  uint32_t view_size;
+  uint32_t cont_views;
+  uint32_t number_of_being_in_pull_events;
+  int tmp;
+  uint32_t count_non_zero_prob = 0;
+
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+      "Computing probabilities for peer %" PRIu32 "\n", peer_idx);
+  /* Firstly without knowledge of old views */
+  for (i = 0; i < num_peers; i++)
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+        "\tfor peer %" PRIu32 ":\n", i);
+    view_size = rps_peers[i].cur_view_count;
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+        "\t\tview_size: %" PRIu32 "\n", view_size);
+    /* For peer i the probability of being sampled is
+     * evenly distributed among all possibly observed peers. */
+    /* We could have observed a peer in three cases:
+     *   1. peer sent a push
+     *   2. peer was contained in a pull reply
+     *   3. peer was in history (sampler) - ignored for now */
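+    /* The push and pull probabilities computed below are combined via
+     * inclusion-exclusion (assuming the two events are independent):
+     *   probs[i] = prob_push + prob_pull - prob_push * prob_pull */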
+    /* 1. Probability of having received a push from peer i */
+    if ((GNUNET_YES == is_in_view (i, peer_idx)) &&
+        (1 <= (0.45 * view_size)))
+    {
+      prob_push = 1.0 * binom (0.45 * view_size, 1)
+        /
+        binom (view_size, 0.45 * view_size);
+      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                 "\t\t%" PRIu32 " is in %" PRIu32 "'s view, prob: %f\n",
+                 peer_idx,
+                 i,
+                 prob_push);
+      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                 "\t\tposs choices from view: %" PRIu32 ", containing i: %" PRIu32 "\n",
+                 binom (view_size, 0.45 * view_size),
+                 binom (0.45 * view_size, 1));
+    } else {
+      prob_push = 0;
+      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+                 "\t\t%" PRIu32 " is not in %" PRIu32 "'s view, prob: 0\n",
+                 peer_idx,
+                 i);
+    }
+    /* 2. Probability of peer i being contained in pulls */
+    view_size = rps_peers[peer_idx].cur_view_count;
+    cont_views = count_containing_views (peer_idx, i);
+    number_of_being_in_pull_events =
+      (binom (view_size, 0.45 * view_size) -
+       binom (view_size - cont_views, 0.45 * view_size));
+    if (0 != number_of_being_in_pull_events)
+    {
+      prob_pull = number_of_being_in_pull_events
+        /
+        (1.0 * binom (view_size, 0.45 * view_size));
+    } else
+    {
+      prob_pull = 0;
+    }
+    probs[i] = prob_push + prob_pull - (prob_push * prob_pull);
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+               "\t\t%" PRIu32 " has %" PRIu32 " of %" PRIu32
+               " peers in its view who know %" PRIu32 " prob: %f\n",
+               peer_idx,
+               cont_views,
+               view_size,
+               i,
+               prob_pull);
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+               "\t\tnumber of possible pull combinations: %" PRIu32 "\n",
+               binom (view_size, 0.45 * view_size));
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+               "\t\tnumber of possible pull combinations without %" PRIu32
+               ": %" PRIu32 "\n",
+               i,
+               binom (view_size - cont_views, 0.45 * view_size));
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+               "\t\tnumber of possible pull combinations with %" PRIu32
+               ": %" PRIu32 "\n",
+               i,
+               number_of_being_in_pull_events);
+
+    if (0 != probs[i]) count_non_zero_prob++;
+  }
+  /* Normalize by the number of non-zero entries */
+  if (0 != count_non_zero_prob)
+  {
+    for (i = 0; i < num_peers; i++)
+    {
+      probs[i] = probs[i] * (1.0 / count_non_zero_prob);
+    }
+  } else {
+    for (i = 0; i < num_peers; i++)
+    {
+      probs[i] = 0;
+    }
+  }
+  /* str repr */
+  for (i = 0; i < num_peers; i++)
+  {
+    probs_as_str_cpy = GNUNET_strndup (probs_as_str, probs_as_str_size);
+    tmp = GNUNET_snprintf (probs_as_str,
+                           probs_as_str_size,
+                           "%s %7.6f", probs_as_str_cpy, probs[i]);
+    GNUNET_free (probs_as_str_cpy);
+    GNUNET_assert (0 <= tmp);
+  }
+
+  to_file_w_len (rps_peers[peer_idx].file_name_probs,
+                 probs_as_str_size,
+                 probs_as_str);
+  GNUNET_free (probs_as_str);
+}
+
+/**
+ * @brief Counts the number of peers in whose views a given peer occurs.
+ *
+ * It also stores this value in the rps peer.
+ *
+ * @param peer_idx index of the peer whose representation is counted
+ *
+ * @return the number of occurrences
+ */
+static uint32_t count_peer_in_views_2 (uint32_t peer_idx)
+{
+  uint32_t i, j;
+  uint32_t count = 0;
+
+  for (i = 0; i < num_peers; i++) /* Peer in which view is counted */
+  {
+    for (j = 0; j < rps_peers[i].cur_view_count; j++) /* entry in view */
+    {
+      if (0 == memcmp (rps_peers[peer_idx].peer_id,
+            &rps_peers[i].cur_view[j],
+            sizeof (struct GNUNET_PeerIdentity)))
+      {
+        count++;
+        break;
+      }
+    }
+  }
+  rps_peers[peer_idx].count_in_views = count;
+  return count;
+}
+
+static uint32_t cumulated_view_sizes ()
+{
+  uint32_t i;
+
+  view_sizes = 0;
+  for (i = 0; i < num_peers; i++) /* Peer in which view is counted */
+  {
+    view_sizes += rps_peers[i].cur_view_count;
+  }
+  return view_sizes;
+}
+
+static void count_peer_in_views (uint32_t *count_peers)
+{
+  uint32_t i, j;
+
+  for (i = 0; i < num_peers; i++) /* Peer in which view is counted */
+  {
+    for (j = 0; j < rps_peers[i].cur_view_count; j++) /* entry in view */
+    {
+      /* Attribute this view entry to the peer it identifies */
+      count_peers[get_idx_of_pid (&rps_peers[i].cur_view[j])]++;
+    }
+  }
+}
+
+void compute_diversity ()
+{
+  uint32_t i;
+  /* ith entry represents the number of occurrences in other peers' views */
+  uint32_t *count_peers = GNUNET_new_array (num_peers, uint32_t);
+  uint32_t views_total_size;
+  double expected;
+  /* deviation from expected number of peers */
+  double *deviation = GNUNET_new_array (num_peers, double);
+
+  views_total_size = 0;
+  expected = 0;
+
+  /* For each peer count its representation in other peers' views */
+  count_peer_in_views (count_peers);
+  for (i = 0; i < num_peers; i++) /* Peer to count */
+  {
+    views_total_size += rps_peers[i].cur_view_count;
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+               "Counted representation of %" PRIu32 "th peer [%s]: %" PRIu32"\n",
+               i,
+               GNUNET_i2s (rps_peers[i].peer_id),
+               count_peers[i]);
+  }
+
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+             "size of all views combined: %" PRIu32 "\n",
+             views_total_size);
+  expected = ((double) 1/num_peers) * views_total_size;
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+             "Expected number of occurrences of each peer in all views: %f\n",
+             expected);
+  for (i = 0; i < num_peers; i++) /* Peer to count */
+  {
+    deviation[i] = expected - count_peers[i];
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+               "Deviation from expectation: %f\n", deviation[i]);
+  }
+  GNUNET_free (count_peers);
+  GNUNET_free (deviation);
+}
+
+void print_view_sizes()
+{
+  uint32_t i;
+
+  for (i = 0; i < num_peers; i++) /* Peer to count */
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+               "View size of %" PRIu32 ". [%s] is %" PRIu32 "\n",
+               i,
+               GNUNET_i2s (rps_peers[i].peer_id),
+               rps_peers[i].cur_view_count);
+  }
+}
+
+void all_views_updated_cb()
+{
+  compute_diversity();
+  print_view_sizes();
+}
+
+void view_update_cb (void *cls,
+                     uint64_t view_size,
+                     const struct GNUNET_PeerIdentity *peers)
+{
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+              "View was updated (%" PRIu64 ")\n", view_size);
+  struct RPSPeer *rps_peer = (struct RPSPeer *) cls;
+  to_file ("/tmp/rps/view_sizes.txt",
+         "%" PRIu64 " %" PRIu32 "",
+         rps_peer->index,
+         view_size);
+  for (int i = 0; i < view_size; i++)
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+               "\t%s\n", GNUNET_i2s (&peers[i]));
+  }
+  GNUNET_array_grow (rps_peer->cur_view,
+                     rps_peer->cur_view_count,
+                     view_size);
+  //*rps_peer->cur_view = *peers;
+  GNUNET_memcpy (rps_peer->cur_view,
+                 peers,
+                 view_size * sizeof (struct GNUNET_PeerIdentity));
+  to_file ("/tmp/rps/count_in_views.txt",
+         "%" PRIu64 " %" PRIu32 "",
+         rps_peer->index,
+         count_peer_in_views_2 (rps_peer->index));
+  cumulated_view_sizes();
+  if (0 != view_size)
+  {
+    to_file ("/tmp/rps/repr.txt",
+           "%" PRIu64 /* index */
+           " %" PRIu32 /* occurrence in views */
+           " %" PRIu32 /* view sizes */
+           " %f" /* fraction of repr in views */
+           " %f" /* average view size */
+           " %f" /* prob of occurrence in view slot */
+           " %f" "", /* exp frac of repr in views */
+           rps_peer->index,
+           count_peer_in_views_2 (rps_peer->index),
+           view_sizes,
+           count_peer_in_views_2 (rps_peer->index) / (view_size * 1.0), /* fraction of representation in views */
+           view_sizes / (view_size * 1.0), /* average view size */
+           1.0 /view_size, /* prob of occurrence in view slot */
+           (1.0/view_size) * (view_sizes/view_size) /* expected fraction of repr in views */
+           );
+  }
+  compute_probabilities (rps_peer->index);
+  all_views_updated_cb();
+}
+
+static void
+pre_profiler (struct RPSPeer *rps_peer, struct GNUNET_RPS_Handle *h)
+{
+  rps_peer->file_name_probs =
+    store_prefix_file_name (rps_peer->peer_id, "probs");
+  GNUNET_RPS_view_request (h, 0, view_update_cb, rps_peer);
+}
+
+void write_final_stats (void)
+{
+  uint32_t i;
+
+  for (i = 0; i < num_peers; i++)
+  {
+    to_file ("/tmp/rps/final_stats.dat",
+             "%" PRIu32 " " /* index */
+             "%s %" /* id */
+             PRIu64 " %" /* rounds */
+             PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" /* blocking */
+             PRIu64 " %" PRIu64 " %" PRIu64 " %" /* issued */
+             PRIu64 " %" PRIu64 " %" PRIu64 " %" /* sent */
+             PRIu64 " %" PRIu64 " %" PRIu64 /* recv */,
+             i,
+             GNUNET_i2s (rps_peers[i].peer_id),
+             rps_peers[i].num_rounds,
+             rps_peers[i].num_blocks,
+             rps_peers[i].num_blocks_many_push,
+             rps_peers[i].num_blocks_no_push,
+             rps_peers[i].num_blocks_no_pull,
+             rps_peers[i].num_blocks_many_push_no_pull,
+             rps_peers[i].num_blocks_no_push_no_pull,
+             rps_peers[i].num_issued_push,
+             rps_peers[i].num_issued_pull_req,
+             rps_peers[i].num_issued_pull_rep,
+             rps_peers[i].num_sent_push,
+             rps_peers[i].num_sent_pull_req,
+             rps_peers[i].num_sent_pull_rep,
+             rps_peers[i].num_recv_push,
+             rps_peers[i].num_recv_pull_req,
+             rps_peers[i].num_recv_pull_rep);
+  }
+}
+
+/**
+ * Continuation called by #GNUNET_STATISTICS_get() functions.
+ *
+ * Remembers that this specific statistics value was received for this peer.
+ * Checks whether all peers received their statistics yet.
+ * Issues the shutdown.
+ *
+ * @param cls closure
+ * @param success #GNUNET_OK if statistics were
+ *        successfully obtained, #GNUNET_SYSERR if not.
+ */
+void
+post_test_shutdown_ready_cb (void *cls,
+                             int success)
+{
+  struct STATcls *stat_cls = (struct STATcls *) cls;
+  struct RPSPeer *rps_peer = stat_cls->rps_peer;
+  if (GNUNET_OK == success)
+  {
+    /* set flag that we got the value */
+    rps_peer->stat_collected_flags |= stat_cls->stat_type;
+  } else {
+    GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
+        "Peer %u did not receive statistics value\n",
+        rps_peer->index);
+    GNUNET_break (0);
+  }
+
+  if (NULL != rps_peer->stat_op &&
+      GNUNET_YES == check_statistics_collect_completed_single_peer (rps_peer))
+  {
+    GNUNET_TESTBED_operation_done (rps_peer->stat_op);
+  }
+
+  write_final_stats ();
+  if (GNUNET_YES == check_statistics_collect_completed())
+  {
+    //write_final_stats ();
+    GNUNET_free (stat_cls);
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+        "Shutting down\n");
+    GNUNET_SCHEDULER_shutdown ();
+  } else {
+    GNUNET_free (stat_cls);
+  }
+}
+
+/**
+ * @brief Converts string representation to the corresponding #STAT_TYPE enum.
+ *
+ * @param stat_str string representation of statistics specifier
+ *
+ * @return corresponding enum
+ */
+enum STAT_TYPE stat_str_2_type (const char *stat_str)
+{
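+  /* Note: prefixes are matched with strncmp, so the longer, more specific
+   * strings have to be tested before their shorter prefixes (e.g.
+   * "# rounds blocked - too many pushes" before "# rounds blocked" before
+   * "# rounds"). */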
+  if (0 == strncmp ("# rounds blocked - no pull replies", stat_str, strlen ("# rounds blocked - no pull replies")))
+  {
+    return STAT_TYPE_BLOCKS_NO_PULL;
+  }
+  else if (0 == strncmp ("# rounds blocked - too many pushes, no pull replies", stat_str, strlen ("# rounds blocked - too many pushes, no pull replies")))
+  {
+    return STAT_TYPE_BLOCKS_MANY_PUSH_NO_PULL;
+  }
+  else if (0 == strncmp ("# rounds blocked - too many pushes", stat_str, strlen ("# rounds blocked - too many pushes")))
+  {
+    return STAT_TYPE_BLOCKS_MANY_PUSH;
+  }
+  else if (0 == strncmp ("# rounds blocked - no pushes, no pull replies", stat_str, strlen ("# rounds blocked - no pushes, no pull replies")))
+  {
+    return STAT_TYPE_BLOCKS_NO_PUSH_NO_PULL;
+  }
+  else if (0 == strncmp ("# rounds blocked - no pushes", stat_str, strlen ("# rounds blocked - no pushes")))
+  {
+    return STAT_TYPE_BLOCKS_NO_PUSH;
+  }
+  else if (0 == strncmp ("# rounds blocked", stat_str, strlen ("# rounds blocked")))
+  {
+    return STAT_TYPE_BLOCKS;
+  }
+  else if (0 == strncmp ("# rounds", stat_str, strlen ("# rounds")))
+  {
+    return STAT_TYPE_ROUNDS;
+  }
+  else if (0 == strncmp ("# push send issued", stat_str, strlen ("# push send issued")))
+  {
+    return STAT_TYPE_ISSUED_PUSH_SEND;
+  }
+  else if (0 == strncmp ("# pull request send issued", stat_str, strlen ("# pull request send issued")))
+  {
+    return STAT_TYPE_ISSUED_PULL_REQ;
+  }
+  else if (0 == strncmp ("# pull reply send issued", stat_str, strlen ("# pull reply send issued")))
+  {
+    return STAT_TYPE_ISSUED_PULL_REP;
+  }
+  else if (0 == strncmp ("# pushes sent", stat_str, strlen ("# pushes sent")))
+  {
+    return STAT_TYPE_SENT_PUSH_SEND;
+  }
+  else if (0 == strncmp ("# pull requests sent", stat_str, strlen ("# pull requests sent")))
+  {
+    return STAT_TYPE_SENT_PULL_REQ;
+  }
+  else if (0 == strncmp ("# pull replys sent", stat_str, strlen ("# pull replys sent")))
+  {
+    return STAT_TYPE_SENT_PULL_REP;
+  }
+  else if (0 == strncmp ("# push message received", stat_str, strlen ("# push message received")))
+  {
+    return STAT_TYPE_RECV_PUSH_SEND;
+  }
+  else if (0 == strncmp ("# pull request message received", stat_str, strlen ("# pull request message received")))
+  {
+    return STAT_TYPE_RECV_PULL_REQ;
+  }
+  else if (0 == strncmp ("# pull reply messages received", stat_str, strlen ("# pull reply messages received")))
+  {
+    return STAT_TYPE_RECV_PULL_REP;
+  }
+  return STAT_TYPE_MAX;
+}
+
+
+/**
+ * @brief Converts #STAT_TYPE enum to the equivalent string representation that
+ * is stored with the statistics service.
+ *
+ * @param stat_type #STAT_TYPE enum
+ *
+ * @return string representation that matches statistics value
+ */
+char* stat_type_2_str (enum STAT_TYPE stat_type)
+{
+  switch (stat_type)
+  {
+    case STAT_TYPE_ROUNDS:
+      return "# rounds";
+    case STAT_TYPE_BLOCKS:
+      return "# rounds blocked";
+    case STAT_TYPE_BLOCKS_MANY_PUSH:
+      return "# rounds blocked - too many pushes";
+    case STAT_TYPE_BLOCKS_NO_PUSH:
+      return "# rounds blocked - no pushes";
+    case STAT_TYPE_BLOCKS_NO_PULL:
+      return "# rounds blocked - no pull replies";
+    case STAT_TYPE_BLOCKS_MANY_PUSH_NO_PULL:
+      return "# rounds blocked - too many pushes, no pull replies";
+    case STAT_TYPE_BLOCKS_NO_PUSH_NO_PULL:
+      return "# rounds blocked - no pushes, no pull replies";
+    case STAT_TYPE_ISSUED_PUSH_SEND:
+      return "# push send issued";
+    case STAT_TYPE_ISSUED_PULL_REQ:
+      return "# pull request send issued";
+    case STAT_TYPE_ISSUED_PULL_REP:
+      return "# pull reply send issued";
+    case STAT_TYPE_SENT_PUSH_SEND:
+      return "# pushes sent";
+    case STAT_TYPE_SENT_PULL_REQ:
+      return "# pull requests sent";
+    case STAT_TYPE_SENT_PULL_REP:
+      return "# pull replys sent";
+    case STAT_TYPE_RECV_PUSH_SEND:
+      return "# push message received";
+    case STAT_TYPE_RECV_PULL_REQ:
+      return "# pull request message received";
+    case STAT_TYPE_RECV_PULL_REP:
+      return "# pull reply messages received";
+    case STAT_TYPE_MAX:
+    default:
+      return "ERROR";
+  }
+}
+
+/**
+ * Callback function to process statistic values.
+ *
+ * @param cls closure
+ * @param subsystem name of subsystem that created the statistic
+ * @param name the name of the datum
+ * @param value the current value
+ * @param is_persistent #GNUNET_YES if the value is persistent, #GNUNET_NO if not
+ * @return #GNUNET_OK to continue, #GNUNET_SYSERR to abort iteration
+ */
+int
+stat_iterator (void *cls,
+               const char *subsystem,
+               const char *name,
+               uint64_t value,
+               int is_persistent)
+{
+  const struct STATcls *stat_cls = (const struct STATcls *) cls;
+  struct RPSPeer *rps_peer = (struct RPSPeer *) stat_cls->rps_peer;
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Got stat value: %s - %" PRIu64 "\n",
+      //stat_type_2_str (stat_cls->stat_type),
+      name,
+      value);
+  to_file (rps_peer->file_name_stats,
+          "%s: %" PRIu64 "\n",
+          name,
+          value);
+  switch (stat_str_2_type (name))
+  {
+    case STAT_TYPE_ROUNDS:
+      rps_peer->num_rounds = value;
+      break;
+    case STAT_TYPE_BLOCKS:
+      rps_peer->num_blocks = value;
+      break;
+    case STAT_TYPE_BLOCKS_MANY_PUSH:
+      rps_peer->num_blocks_many_push = value;
+      break;
+    case STAT_TYPE_BLOCKS_NO_PUSH:
+      rps_peer->num_blocks_no_push = value;
+      break;
+    case STAT_TYPE_BLOCKS_NO_PULL:
+      rps_peer->num_blocks_no_pull = value;
+      break;
+    case STAT_TYPE_BLOCKS_MANY_PUSH_NO_PULL:
+      rps_peer->num_blocks_many_push_no_pull = value;
+      break;
+    case STAT_TYPE_BLOCKS_NO_PUSH_NO_PULL:
+      rps_peer->num_blocks_no_push_no_pull = value;
+      break;
+    case STAT_TYPE_ISSUED_PUSH_SEND:
+      rps_peer->num_issued_push = value;
+      break;
+    case STAT_TYPE_ISSUED_PULL_REQ:
+      rps_peer->num_issued_pull_req = value;
+      break;
+    case STAT_TYPE_ISSUED_PULL_REP:
+      rps_peer->num_issued_pull_rep = value;
+      break;
+    case STAT_TYPE_SENT_PUSH_SEND:
+      rps_peer->num_sent_push = value;
+      break;
+    case STAT_TYPE_SENT_PULL_REQ:
+      rps_peer->num_sent_pull_req = value;
+      break;
+    case STAT_TYPE_SENT_PULL_REP:
+      rps_peer->num_sent_pull_rep = value;
+      break;
+    case STAT_TYPE_RECV_PUSH_SEND:
+      rps_peer->num_recv_push = value;
+      break;
+    case STAT_TYPE_RECV_PULL_REQ:
+      rps_peer->num_recv_pull_req = value;
+      break;
+    case STAT_TYPE_RECV_PULL_REP:
+      rps_peer->num_recv_pull_rep = value;
+      break;
+    case STAT_TYPE_MAX:
+    default:
+      GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+                 "Unknown statistics string: %s\n",
+                 name);
+      break;
+  }
+  return GNUNET_OK;
+}
+
+void post_profiler (struct RPSPeer *rps_peer)
+{
+  if (COLLECT_STATISTICS != cur_test_run.have_collect_statistics)
+  {
+    return;
+  }
+
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+      "Going to request statistic values with mask 0x%" PRIx32 "\n",
+      cur_test_run.stat_collect_flags);
+
+  struct STATcls *stat_cls;
+  uint32_t stat_type;
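+
+  /* Walk over the single-bit STAT_TYPE flags and request only those that
+   * are selected in cur_test_run.stat_collect_flags. */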
+  for (stat_type = STAT_TYPE_ROUNDS;
+      stat_type < STAT_TYPE_MAX;
+      stat_type = stat_type << 1)
+  {
+    if (stat_type & cur_test_run.stat_collect_flags)
+    {
+      stat_cls = GNUNET_malloc (sizeof (struct STATcls));
+      stat_cls->rps_peer = rps_peer;
+      stat_cls->stat_type = stat_type;
+      rps_peer->file_name_stats =
+        store_prefix_file_name (rps_peer->peer_id, "stats");
+      GNUNET_STATISTICS_get (rps_peer->stats_h,
+                             "rps",
+                             stat_type_2_str (stat_type),
+                             post_test_shutdown_ready_cb,
+                             stat_iterator,
+                             (struct STATcls *) stat_cls);
+      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
+          "Requested statistics for %s (peer %" PRIu32 ")\n",
+          stat_type_2_str (stat_type),
+          rps_peer->index);
+    }
+  }
+}
+
 
 /***********************************************************************
  * /Definition of tests
@@ -1356,8 +2574,25 @@ run (void *cls,
 {
   unsigned int i;
   struct OpListEntry *entry;
-  uint32_t num_mal_peers;
 
+  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "RUN was called\n");
+
+  /* Check whether we timed out */
+  if (n_peers != num_peers ||
+      NULL == peers ||
+      0 == links_succeeded)
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Going down due to args (e.g. timeout)\n");
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "\tn_peers: %u\n", n_peers);
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "\tnum_peers: %" PRIu32 "\n", num_peers);
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "\tpeers: %p\n", peers);
+    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "\tlinks_succeeded: %u\n", links_succeeded);
+    GNUNET_SCHEDULER_shutdown ();
+    return;
+  }
+
+
+  /* Initialize peers */
   testbed_peers = peers;
   num_peers_online = 0;
   for (i = 0; i < num_peers; i++)
@@ -1367,35 +2602,49 @@ run (void *cls,
     rps_peers[i].index = i;
     if (NULL != cur_test_run.init_peer)
       cur_test_run.init_peer (&rps_peers[i]);
+    if (NO_COLLECT_VIEW == cur_test_run.have_collect_view)
+    {
+      rps_peers[i].cur_view_count = 0;
+      rps_peers[i].cur_view = NULL;
+    }
     entry->op = GNUNET_TESTBED_peer_get_information (peers[i],
                                                      GNUNET_TESTBED_PIT_IDENTITY,
                                                      &info_cb,
                                                      entry);
   }
 
-  num_mal_peers = round (portion * num_peers);
+  /* Bring peers up */
   GNUNET_assert (num_peers == n_peers);
   for (i = 0; i < n_peers; i++)
   {
     rps_peers[i].index = i;
-    if ( (rps_peers[i].num_recv_ids < rps_peers[i].num_ids_to_request) ||
-         (i < num_mal_peers) )
+    rps_peers[i].op =
+      GNUNET_TESTBED_service_connect (&rps_peers[i],
+                                      peers[i],
+                                      "rps",
+                                      &rps_connect_complete_cb,
+                                      &rps_peers[i],
+                                      &rps_connect_adapter,
+                                      &rps_disconnect_adapter,
+                                      &rps_peers[i]);
+    /* Connect all peers to statistics service */
+    if (COLLECT_STATISTICS == cur_test_run.have_collect_statistics)
     {
-      rps_peers[i].op =
-        GNUNET_TESTBED_service_connect (&rps_peers[i],
+      rps_peers[i].stat_op =
+        GNUNET_TESTBED_service_connect (NULL,
                                         peers[i],
-                                        "rps",
-                                        &rps_connect_complete_cb,
+                                        "statistics",
+                                        stat_complete_cb,
                                         &rps_peers[i],
-                                        &rps_connect_adapter,
-                                        &rps_disconnect_adapter,
+                                        &stat_connect_adapter,
+                                        &stat_disconnect_adapter,
                                         &rps_peers[i]);
     }
   }
 
   if (NULL != churn_task)
     GNUNET_SCHEDULER_cancel (churn_task);
-  GNUNET_SCHEDULER_add_delayed (timeout, &shutdown_op, NULL);
+  shutdown_task = GNUNET_SCHEDULER_add_delayed (timeout, &shutdown_op, NULL);
 }
 
 
@@ -1411,13 +2660,18 @@ main (int argc, char *argv[])
 {
   int ret_value;
 
+  /* Defaults for tests */
   num_peers = 5;
   cur_test_run.name = "test-rps-default";
   cur_test_run.init_peer = default_init_peer;
   cur_test_run.pre_test = NULL;
   cur_test_run.reply_handle = default_reply_handle;
   cur_test_run.eval_cb = default_eval_cb;
-  cur_test_run.have_churn = GNUNET_YES;
+  cur_test_run.post_test = NULL;
+  cur_test_run.have_churn = HAVE_CHURN;
+  cur_test_run.have_collect_statistics = NO_COLLECT_STATISTICS;
+  cur_test_run.stat_collect_flags = 0;
+  cur_test_run.have_collect_view = NO_COLLECT_VIEW;
   churn_task = NULL;
   timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 30);
 
@@ -1452,7 +2706,7 @@ main (int argc, char *argv[])
     GNUNET_log (GNUNET_ERROR_TYPE_ERROR, "Test single request\n");
     cur_test_run.name = "test-rps-single-req";
     cur_test_run.main_test = single_req_cb;
-    cur_test_run.have_churn = GNUNET_NO;
+    cur_test_run.have_churn = HAVE_NO_CHURN;
   }
 
   else if (strstr (argv[0], "_delayed_reqs") != NULL)
@@ -1460,7 +2714,7 @@ main (int argc, char *argv[])
     GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test delayed requests\n");
     cur_test_run.name = "test-rps-delayed-reqs";
     cur_test_run.main_test = delay_req_cb;
-    cur_test_run.have_churn = GNUNET_NO;
+    cur_test_run.have_churn = HAVE_NO_CHURN;
   }
 
   else if (strstr (argv[0], "_seed_big") != NULL)
@@ -1470,7 +2724,7 @@ main (int argc, char *argv[])
     cur_test_run.name = "test-rps-seed-big";
     cur_test_run.main_test = seed_big_cb;
     cur_test_run.eval_cb = no_eval;
-    cur_test_run.have_churn = GNUNET_NO;
+    cur_test_run.have_churn = HAVE_NO_CHURN;
     timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10);
   }
 
@@ -1479,7 +2733,7 @@ main (int argc, char *argv[])
     GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test seeding and requesting on a single peer\n");
     cur_test_run.name = "test-rps-single-peer-seed";
     cur_test_run.main_test = single_peer_seed_cb;
-    cur_test_run.have_churn = GNUNET_NO;
+    cur_test_run.have_churn = HAVE_NO_CHURN;
   }
 
   else if (strstr (argv[0], "_seed_request") != NULL)
@@ -1487,7 +2741,7 @@ main (int argc, char *argv[])
     GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test seeding and requesting on multiple peers\n");
     cur_test_run.name = "test-rps-seed-request";
     cur_test_run.main_test = seed_req_cb;
-    cur_test_run.have_churn = GNUNET_NO;
+    cur_test_run.have_churn = HAVE_NO_CHURN;
   }
 
   else if (strstr (argv[0], "_seed") != NULL)
@@ -1496,7 +2750,7 @@ main (int argc, char *argv[])
     cur_test_run.name = "test-rps-seed";
     cur_test_run.main_test = seed_cb;
     cur_test_run.eval_cb = no_eval;
-    cur_test_run.have_churn = GNUNET_NO;
+    cur_test_run.have_churn = HAVE_NO_CHURN;
   }
 
   else if (strstr (argv[0], "_req_cancel") != NULL)
@@ -1506,7 +2760,7 @@ main (int argc, char *argv[])
     num_peers = 1;
     cur_test_run.main_test = req_cancel_cb;
     cur_test_run.eval_cb = no_eval;
-    cur_test_run.have_churn = GNUNET_NO;
+    cur_test_run.have_churn = HAVE_NO_CHURN;
     timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10);
   }
 
@@ -1515,8 +2769,12 @@ main (int argc, char *argv[])
     GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Test churn\n");
     cur_test_run.name = "test-rps-churn";
     num_peers = 5;
-    cur_test_run.main_test = single_req_cb;
-    cur_test_run.have_churn = GNUNET_YES;
+    cur_test_run.init_peer = default_init_peer;
+    cur_test_run.main_test = churn_test_cb;
+    cur_test_run.reply_handle = default_reply_handle;
+    cur_test_run.eval_cb = default_eval_cb;
+    cur_test_run.have_churn = HAVE_NO_CHURN;
+    cur_test_run.have_quick_quit = HAVE_NO_QUICK_QUIT;
     timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 10);
   }
 
@@ -1527,14 +2785,36 @@ main (int argc, char *argv[])
     num_peers = 10;
     mal_type = 3;
     cur_test_run.init_peer = profiler_init_peer;
-    cur_test_run.pre_test = mal_pre;
+    //cur_test_run.pre_test = mal_pre;
+    cur_test_run.pre_test = pre_profiler;
     cur_test_run.main_test = profiler_cb;
     cur_test_run.reply_handle = profiler_reply_handle;
     cur_test_run.eval_cb = profiler_eval;
+    cur_test_run.post_test = post_profiler;
     cur_test_run.request_interval = 2;
     cur_test_run.num_requests = 5;
-    cur_test_run.have_churn = GNUNET_YES;
-    timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 90);
+    //cur_test_run.have_churn = HAVE_CHURN;
+    cur_test_run.have_churn = HAVE_NO_CHURN;
+    cur_test_run.have_quick_quit = HAVE_NO_QUICK_QUIT;
+    cur_test_run.have_collect_statistics = COLLECT_STATISTICS;
+    cur_test_run.stat_collect_flags = STAT_TYPE_ROUNDS |
+                                      STAT_TYPE_BLOCKS |
+                                      STAT_TYPE_BLOCKS_MANY_PUSH |
+                                      STAT_TYPE_BLOCKS_NO_PUSH |
+                                      STAT_TYPE_BLOCKS_NO_PULL |
+                                      STAT_TYPE_BLOCKS_MANY_PUSH_NO_PULL |
+                                      STAT_TYPE_BLOCKS_NO_PUSH_NO_PULL |
+                                      STAT_TYPE_ISSUED_PUSH_SEND |
+                                      STAT_TYPE_ISSUED_PULL_REQ |
+                                      STAT_TYPE_ISSUED_PULL_REP |
+                                      STAT_TYPE_SENT_PUSH_SEND |
+                                      STAT_TYPE_SENT_PULL_REQ |
+                                      STAT_TYPE_SENT_PULL_REP |
+                                      STAT_TYPE_RECV_PUSH_SEND |
+                                      STAT_TYPE_RECV_PULL_REQ |
+                                      STAT_TYPE_RECV_PULL_REP;
+    cur_test_run.have_collect_view = COLLECT_VIEW;
+    timeout = GNUNET_TIME_relative_multiply (GNUNET_TIME_UNIT_SECONDS, 300);
 
     /* 'Clean' directory */
     (void) GNUNET_DISK_directory_remove ("/tmp/rps/");
@@ -1553,14 +2833,27 @@ main (int argc, char *argv[])
                                                  with the malicious portion */
 
   ok = 1;
-  (void) GNUNET_TESTBED_test_run (cur_test_run.name,
-                                  "test_rps.conf",
-                                  num_peers,
-                                  0, NULL, NULL,
-                                  &run, NULL);
+  ret_value = GNUNET_TESTBED_test_run (cur_test_run.name,
+                                       "test_rps.conf",
+                                       num_peers,
+                                       0, NULL, NULL,
+                                       &run, NULL);
+  GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+              "_test_run returned.\n");
+  if (GNUNET_OK != ret_value)
+  {
+    GNUNET_log (GNUNET_ERROR_TYPE_WARNING,
+                "Test did not run successfully!\n");
+  }
 
   ret_value = cur_test_run.eval_cb();
-  GNUNET_free (rps_peers );
+  if (COLLECT_VIEW == cur_test_run.have_collect_view)
+  {
+    uint32_t i;
+
+    for (i = 0; i < num_peers; i++)
+    {
+      GNUNET_array_grow (rps_peers[i].cur_view,
+                         rps_peers[i].cur_view_count,
+                         0);
+    }
+  }
+  GNUNET_free (rps_peers);
   GNUNET_free (rps_peer_ids);
   GNUNET_CONTAINER_multipeermap_destroy (peer_map);
   return ret_value;