X-Git-Url: https://git.librecmc.org/?a=blobdiff_plain;f=src%2Frps%2Fgnunet-service-rps.c;h=fd398b863c02fd60f1c8c18ad18850a1387a92eb;hb=200fd67bcca55ad08351719b154516d91443a298;hp=b8bfe8f9e0c1c79618b7ebc09315864559a14fc1;hpb=c8be0a0d42d7bf5ca96cc050fc12d71a43d8dca3;p=oweals%2Fgnunet.git diff --git a/src/rps/gnunet-service-rps.c b/src/rps/gnunet-service-rps.c index b8bfe8f9e..fd398b863 100644 --- a/src/rps/gnunet-service-rps.c +++ b/src/rps/gnunet-service-rps.c @@ -14,8 +14,8 @@ You should have received a copy of the GNU General Public License along with GNUnet; see the file COPYING. If not, write to the - Free Software Foundation, Inc., 59 Temple Place - Suite 330, - Boston, MA 02111-1307, USA. + Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ /** @@ -26,8 +26,10 @@ #include "platform.h" #include "gnunet_util_lib.h" #include "gnunet_cadet_service.h" +#include "gnunet_peerinfo_service.h" #include "gnunet_nse_service.h" #include "rps.h" +#include "rps-test_util.h" #include "gnunet-service-rps_sampler.h" @@ -50,6 +52,8 @@ // TODO API request_cancel +// TODO don't pass peermap all the time + // hist_size_init, hist_size_max /** @@ -96,7 +100,14 @@ enum PeerFlags /** * We set this bit when we can be sure the other peer is/was live. */ - VALID = 0x10 + VALID = 0x10, + + /** + * We set this bit when we are going to destroy the channel to this peer. + * When cleanup_channel is called, we know that we wanted to destroy it. + * Otherwise the channel to the other peer was destroyed. + */ + TO_DESTROY = 0x20, }; @@ -130,7 +141,7 @@ struct PeerOutstandingOp struct PeerContext { /** - * In own gossip/sampler list, in other's gossip/sampler list + * Flags indicating status of peer */ uint32_t peer_flags; @@ -208,15 +219,22 @@ static struct GNUNET_CONTAINER_MultiPeerMap *peer_map; /** - * The gossiped list of peers. + * Name to log view to + */ +static char *file_name_view_log; + +/** + * The "local view" containing peers we learned from gossip and history */ -static struct GNUNET_PeerIdentity *gossip_list; +static struct GNUNET_CONTAINER_MultiPeerMap *view; /** - * Size of the gossiped list + * An array containing the peers of the local view. + * + * This is created every time we send a pull reply if it has changed since the + * last pull reply we sent. */ -//static unsigned int gossip_list_size; -static uint32_t gossip_list_size; +static struct GNUNET_PeerIdentity *view_array; /** @@ -229,22 +247,19 @@ static unsigned int sampler_size_client_need; * The size of sampler we need to be able to satisfy the Brahms protocol's * need of random peers. * - * This is directly taken as the #gossip_list_size on update of the - * #gossip_list - * * This is one minimum size the sampler grows to. */ static unsigned int sampler_size_est_need; /** - * Percentage of total peer number in the gossip list + * Percentage of total peer number in the view * to send random PUSHes to */ static float alpha; /** - * Percentage of total peer number in the gossip list + * Percentage of total peer number in the view * to send random PULLs to */ static float beta; @@ -304,6 +319,16 @@ static struct GNUNET_NSE_Handle *nse; */ static struct GNUNET_CADET_Handle *cadet_handle; +/** + * Handler to PEERINFO. + */ +static struct GNUNET_PEERINFO_Handle *peerinfo_handle; + +/** + * Handle for cancellation of iteration over peers. + */ +struct GNUNET_PEERINFO_NotifyContext *peerinfo_notify_handle; + /** * Request counter. 
@@ -337,17 +362,6 @@ static struct GNUNET_TIME_Relative request_deltas[REQUEST_DELTAS_SIZE]; static struct GNUNET_TIME_Relative request_rate; -/** - * List with the peers we sent requests to. - */ -struct GNUNET_PeerIdentity *pending_pull_reply_list; - -/** - * Size of #pending_pull_reply_list. - */ -uint32_t pending_pull_reply_list_size; - - /** * Number of history update tasks. */ @@ -599,7 +613,7 @@ get_rand_peer_ignore_list (const struct GNUNET_PeerIdentity *peer_list, /**; * Choose the r_index of the peer we want to return - * at random from the interval of the gossip list + * at random from the interval of the view */ r_index = GNUNET_CRYPTO_random_u64 (GNUNET_CRYPTO_QUALITY_STRONG, tmp_size); @@ -619,7 +633,7 @@ get_rand_peer_ignore_list (const struct GNUNET_PeerIdentity *peer_list, /**; * Choose the r_index of the peer we want to return - * at random from the interval of the gossip list + * at random from the interval of the view */ r_index = GNUNET_CRYPTO_random_u64 (GNUNET_CRYPTO_QUALITY_STRONG, tmp_size); @@ -665,15 +679,37 @@ get_peer_ctx (struct GNUNET_CONTAINER_MultiPeerMap *peer_map, /** - * Put random peer from sampler into the gossip list as history update. + * Put random peer from sampler into the view as history update. */ void hist_update (void *cls, struct GNUNET_PeerIdentity *ids, uint32_t num_peers) { - GNUNET_assert (1 == num_peers); + unsigned int i; + + for (i = 0; i < GNUNET_MIN ( + sampler_size_est_need - GNUNET_CONTAINER_multipeermap_size (view), + num_peers); i++) + { + if (GNUNET_OK != GNUNET_CONTAINER_multipeermap_put (view, + &ids[i], + NULL, + GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_FAST)) + { + LOG (GNUNET_ERROR_TYPE_WARNING, + "Failed to put peer in peermap. (hist_update)\n"); + } + + /* Might want to check that we really updated the view */ + if (NULL != view_array) + { + GNUNET_free (view_array); + view_array = NULL; + } - if (gossip_list_size < sampler_size_est_need) - GNUNET_array_append (gossip_list, gossip_list_size, *ids); + to_file (file_name_view_log, + "+%s\t(hist)", + GNUNET_i2s_full (ids)); + } if (0 < num_hist_update_tasks) num_hist_update_tasks--; @@ -778,12 +814,6 @@ get_channel (struct GNUNET_CONTAINER_MultiPeerMap *peer_map, GNUNET_RPS_CADET_PORT, GNUNET_CADET_OPTION_RELIABLE); - // do I have to explicitly put it in the peer_map? 
- (void) GNUNET_CONTAINER_multipeermap_put - (peer_map, - peer, - peer_ctx, - GNUNET_CONTAINER_MULTIHASHMAPOPTION_REPLACE); } return peer_ctx->send_channel; } @@ -825,12 +855,14 @@ get_mq (struct GNUNET_CONTAINER_MultiPeerMap *peer_map, void check_peer_live (struct PeerContext *peer_ctx) { - (void) get_channel (peer_map, &peer_ctx->peer_id); LOG (GNUNET_ERROR_TYPE_DEBUG, "Get informed about peer %s getting live\n", GNUNET_i2s (&peer_ctx->peer_id)); - if (NULL == peer_ctx->is_live_task) + + if (NULL == peer_ctx->is_live_task && + NULL == peer_ctx->send_channel) { + (void) get_channel (peer_map, &peer_ctx->peer_id); peer_ctx->is_live_task = GNUNET_CADET_notify_transmit_ready (peer_ctx->send_channel, GNUNET_NO, @@ -839,11 +871,12 @@ check_peer_live (struct PeerContext *peer_ctx) cadet_ntfy_tmt_rdy_cb, peer_ctx); } - else - { + else if (NULL != peer_ctx->is_live_task) LOG (GNUNET_ERROR_TYPE_DEBUG, "Already waiting for notification\n"); - } + else if (NULL != peer_ctx->send_channel) + LOG (GNUNET_ERROR_TYPE_DEBUG, + "Already have established channel to peer\n"); } @@ -885,8 +918,6 @@ insert_in_pull_list (void *cls, const struct GNUNET_PeerIdentity *peer) { if (GNUNET_NO == in_arr (pull_list, pull_list_size, peer)) GNUNET_array_append (pull_list, pull_list_size, *peer); - - peer_clean (peer); } /** @@ -905,29 +936,42 @@ insert_in_pull_list_scheduled (const struct PeerContext *peer_ctx) /** - * Insert PeerID in #gossip_list + * Insert PeerID in #view * * Called once we know a peer is live. */ void -insert_in_gossip_list (void *cls, const struct GNUNET_PeerIdentity *peer) +insert_in_view (void *cls, const struct GNUNET_PeerIdentity *peer) { - if (GNUNET_NO == in_arr (gossip_list, gossip_list_size, peer)) - GNUNET_array_append (gossip_list, gossip_list_size, *peer); + if (GNUNET_YES != GNUNET_CONTAINER_multipeermap_put (view, + peer, + NULL, + GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_FAST)) + { + LOG (GNUNET_ERROR_TYPE_WARNING, + "Failed to put peer into view. 
(insert_in_view)\n"); + } + + /* Might want to check whether we really modified the view */ + if (NULL != view_array) + { + GNUNET_free (view_array); + view_array = NULL; + } (void) get_channel (peer_map, peer); } /** - * Check whether #insert_in_gossip_list was already scheduled + * Check whether #insert_in_view was already scheduled */ int -insert_in_gossip_list_scheduled (const struct PeerContext *peer_ctx) +insert_in_view_scheduled (const struct PeerContext *peer_ctx) { unsigned int i; for ( i = 0 ; i < peer_ctx->num_outstanding_ops ; i++ ) - if (insert_in_gossip_list == peer_ctx->outstanding_ops[i].op) + if (insert_in_view == peer_ctx->outstanding_ops[i].op) return GNUNET_YES; return GNUNET_NO; } @@ -1068,9 +1112,13 @@ add_peer_array_to_set (const struct GNUNET_PeerIdentity *peer_array, { unsigned int i; if (NULL == peer_map) - peer_map = GNUNET_CONTAINER_multipeermap_create (num_peers + 1, - GNUNET_NO); - for (i = 0 ; i < num_peers ; i++) + { + LOG (GNUNET_ERROR_TYPE_WARNING, + "Trying to add peers to an empty peermap.\n"); + return; + } + + for (i = 0; i < num_peers; i++) { GNUNET_CONTAINER_multipeermap_put (peer_map, &peer_array[i], @@ -1142,14 +1190,14 @@ new_peer_id (const struct GNUNET_PeerIdentity *peer_id) struct PeerOutstandingOp out_op; struct PeerContext *peer_ctx; - if (NULL != peer_id - && 0 != GNUNET_CRYPTO_cmp_peer_identity (&own_identity, peer_id)) + if (NULL != peer_id && + 0 != GNUNET_CRYPTO_cmp_peer_identity (&own_identity, peer_id)) { LOG (GNUNET_ERROR_TYPE_DEBUG, - "Got peer_id %s (at %p, gossip_list_size: %u)\n", + "Got peer_id %s (at %p, view size: %u)\n", GNUNET_i2s (peer_id), peer_id, - gossip_list_size); + GNUNET_CONTAINER_multipeermap_size (view)); peer_ctx = get_peer_ctx (peer_map, peer_id); if (GNUNET_YES != get_peer_flag (peer_ctx, VALID)) @@ -1163,9 +1211,9 @@ new_peer_id (const struct GNUNET_PeerIdentity *peer_id) out_op); } - if (GNUNET_NO == insert_in_gossip_list_scheduled (peer_ctx)) + if (GNUNET_NO == insert_in_view_scheduled (peer_ctx)) { - out_op.op = insert_in_gossip_list; + out_op.op = insert_in_view; out_op.op_cls = NULL; GNUNET_array_append (peer_ctx->outstanding_ops, peer_ctx->num_outstanding_ops, @@ -1193,7 +1241,7 @@ new_peer_id (const struct GNUNET_PeerIdentity *peer_id) /** * Function called by NSE. * - * Updates sizes of sampler list and gossip list and adapt those lists + * Updates sizes of sampler list and view and adapt those lists * accordingly. */ void @@ -1281,8 +1329,8 @@ void client_respond (void *cls, */ static void handle_client_request (void *cls, - struct GNUNET_SERVER_Client *client, - const struct GNUNET_MessageHeader *message) + struct GNUNET_SERVER_Client *client, + const struct GNUNET_MessageHeader *message) { struct GNUNET_RPS_CS_RequestMessage *msg; uint32_t num_peers; @@ -1316,8 +1364,7 @@ handle_client_request (void *cls, RPS_sampler_get_n_rand_peers (client_sampler, client_respond, reply_cls, - num_peers, - GNUNET_YES); + num_peers); GNUNET_SERVER_receive_done (client, GNUNET_OK); @@ -1439,6 +1486,10 @@ handle_peer_push (void *cls, { /* We attack one single well-known peer - simply ignore */ return GNUNET_OK; } + else + { + GNUNET_free (tmp_att_peer); + } #endif /* ENABLE_MALICIOUS */ @@ -1450,13 +1501,65 @@ handle_peer_push (void *cls, } +/** + * Iterator over hash map entries. + * Called from #generate_view_array and writes every peer id into #view_array. 
+ * + * @param cls closure - the pointer to the counter + * @param key current public key + * @param value value in the hash map + * @return #GNUNET_YES if we should continue to + * iterate, + * #GNUNET_NO if not. + */ +static int +dump_id_to_view_array (void *cls, + const struct GNUNET_PeerIdentity *key, + void *value) +{ + unsigned int *i = (unsigned int *) cls; + + view_array[(*i)++] = *key; + return GNUNET_YES; +} + + +/** + * Makes sure the view_array is filled with the peer ids currently in #view. + * Called from within #do_round before sending pushes and pulls and from + * #handle_peer_pull_request when a reply is sent. + */ +static void +generate_view_array (unsigned int view_size) +{ + unsigned int *i; + int ret; + + if (NULL == view_array) + { + view_array = GNUNET_new_array (view_size, + struct GNUNET_PeerIdentity); + i = GNUNET_new (unsigned int); + *i = 0; + + ret = GNUNET_CONTAINER_multipeermap_iterate (view, + dump_id_to_view_array, + i); + GNUNET_assert (view_size == ret); + GNUNET_assert (view_size == *i); + + GNUNET_free (i); + } +} + + /** * Handle PULL REQUEST request message from another peer. * - * Reply with the gossip list of PeerIDs. + * Reply with the view of PeerIDs. * * @param cls Closure - * @param channel The channel the PUSH was received over + * @param channel The channel the PULL REQUEST was received over * @param channel_ctx The context associated with this channel * @param msg The message header */ @@ -1467,12 +1570,15 @@ handle_peer_pull_request (void *cls, const struct GNUNET_MessageHeader *msg) { struct GNUNET_PeerIdentity *peer; + unsigned int view_size; peer = (struct GNUNET_PeerIdentity *) GNUNET_CADET_channel_get_info (channel, GNUNET_CADET_OPTION_PEER); // FIXME wait for cadet to change this function + LOG (GNUNET_ERROR_TYPE_DEBUG, "PULL REQUEST received (%s)\n", GNUNET_i2s (peer)); + #ifdef ENABLE_MALICIOUS if (1 == mal_type || 3 == mal_type) @@ -1483,7 +1589,7 @@ handle_peer_pull_request (void *cls, else if (2 == mal_type) { /* Try to partition network */ - if (GNUNET_YES == GNUNET_CRYPTO_cmp_peer_identity (&attacked_peer, peer)) + if (0 == GNUNET_CRYPTO_cmp_peer_identity (&attacked_peer, peer)) { send_pull_reply (peer, mal_peers, num_mal_peers); } @@ -1491,7 +1597,10 @@ handle_peer_pull_request (void *cls, } #endif /* ENABLE_MALICIOUS */ - send_pull_reply (peer, gossip_list, gossip_list_size); + view_size = GNUNET_CONTAINER_multipeermap_size (view); + generate_view_array (view_size); + + send_pull_reply (peer, view_array, view_size); return GNUNET_OK; } @@ -1551,8 +1660,12 @@ handle_peer_pull_reply (void *cls, // FIXME wait for cadet to change this function sender_ctx = get_peer_ctx (peer_map, sender); - if (GNUNET_YES == get_peer_flag (sender_ctx, PULL_REPLY_PENDING)) + LOG (GNUNET_ERROR_TYPE_DEBUG, "PULL REPLY received (%s)\n", GNUNET_i2s (sender)); + + if (GNUNET_YES != get_peer_flag (sender_ctx, PULL_REPLY_PENDING)) { + LOG (GNUNET_ERROR_TYPE_WARNING, + "Received a pull reply from a peer we didn't request one from!\n"); GNUNET_break_op (0); return GNUNET_OK; } @@ -1565,10 +1678,11 @@ handle_peer_pull_reply (void *cls, #endif /* ENABLE_MALICIOUS */ /* Do actual logic */ - peers = (struct GNUNET_PeerIdentity *) &msg[1]; + peers = (struct GNUNET_PeerIdentity *) &in_msg[1]; LOG (GNUNET_ERROR_TYPE_DEBUG, - "PULL REPLY received, got following peers:\n"); + "PULL REPLY received, got following %u peers:\n", + ntohl (in_msg->num_peers)); for (i = 0 ; i < ntohl (in_msg->num_peers) ; i++) { @@ -1586,8 +1700,8 @@ handle_peer_pull_reply (void *cls, 
&peers[i]) && GNUNET_NO == GNUNET_CONTAINER_multipeermap_contains (mal_peer_set, &peers[i]) - && GNUNET_NO == GNUNET_CRYPTO_cmp_peer_identity (&peers[i], - &own_identity)) + && 0 != GNUNET_CRYPTO_cmp_peer_identity (&peers[i], + &own_identity)) { tmp_att_peer = GNUNET_new (struct AttackedPeer); tmp_att_peer->peer_id = peers[i]; @@ -1599,28 +1713,28 @@ handle_peer_pull_reply (void *cls, continue; } #endif /* ENABLE_MALICIOUS */ - peer_ctx = get_peer_ctx (peer_map, &peers[i]); - if (GNUNET_YES == get_peer_flag (peer_ctx, VALID) - || NULL != peer_ctx->send_channel - || NULL != peer_ctx->recv_channel) - { - if (GNUNET_NO == in_arr (pull_list, pull_list_size, &peers[i]) - && 0 != GNUNET_CRYPTO_cmp_peer_identity (&own_identity, &peers[i])) - GNUNET_array_append (pull_list, pull_list_size, peers[i]); - } - else if (GNUNET_NO == insert_in_pull_list_scheduled (peer_ctx)) + if (0 != GNUNET_CRYPTO_cmp_peer_identity (&own_identity, + &peers[i])) { - out_op.op = insert_in_pull_list; - out_op.op_cls = NULL; - GNUNET_array_append (peer_ctx->outstanding_ops, - peer_ctx->num_outstanding_ops, - out_op); - check_peer_live (peer_ctx); + peer_ctx = get_peer_ctx (peer_map, &peers[i]); + if (GNUNET_YES == get_peer_flag (peer_ctx, VALID)) + { + if (GNUNET_NO == in_arr (pull_list, pull_list_size, &peers[i])) + GNUNET_array_append (pull_list, pull_list_size, peers[i]); + } + else if (GNUNET_NO == insert_in_pull_list_scheduled (peer_ctx)) + { + out_op.op = insert_in_pull_list; + out_op.op_cls = NULL; + GNUNET_array_append (peer_ctx->outstanding_ops, + peer_ctx->num_outstanding_ops, + out_op); + check_peer_live (peer_ctx); + } } } unset_peer_flag (sender_ctx, PULL_REPLY_PENDING); - rem_from_list (&pending_pull_reply_list, &pending_pull_reply_list_size, sender); return GNUNET_OK; } @@ -1682,13 +1796,16 @@ send_pull_request (struct GNUNET_PeerIdentity *peer_id) { struct GNUNET_MQ_Envelope *ev; struct GNUNET_MQ_Handle *mq; + struct PeerContext *peer_ctx; + + peer_ctx = get_peer_ctx (peer_map, peer_id); + GNUNET_assert (GNUNET_NO == get_peer_flag (peer_ctx, PULL_REPLY_PENDING)); + set_peer_flag (peer_ctx, PULL_REPLY_PENDING); LOG (GNUNET_ERROR_TYPE_DEBUG, - "Sending PULL request to peer %s of gossiped list.\n", + "Sending PULL request to peer %s of view.\n", GNUNET_i2s (peer_id)); - GNUNET_array_append (pending_pull_reply_list, pending_pull_reply_list_size, *peer_id); - ev = GNUNET_MQ_msg_header (GNUNET_MESSAGE_TYPE_RPS_PP_PULL_REQUEST); mq = get_mq (peer_map, peer_id); GNUNET_MQ_send (mq, ev); @@ -1707,7 +1824,7 @@ send_push (struct GNUNET_PeerIdentity *peer_id) struct GNUNET_MQ_Handle *mq; LOG (GNUNET_ERROR_TYPE_DEBUG, - "Sending PUSH to peer %s of gossiped list.\n", + "Sending PUSH to peer %s of view.\n", GNUNET_i2s (peer_id)); ev = GNUNET_MQ_msg_header (GNUNET_MESSAGE_TYPE_RPS_PP_PUSH); @@ -1728,8 +1845,7 @@ do_mal_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc); * Turn RPS service to act malicious. 
* * @param cls Closure - * @param channel The channel the PUSH was received over - * @param channel_ctx The context associated with this channel + * @param client The client that sent the message * @param msg The message header */ static void @@ -1852,8 +1968,10 @@ do_mal_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc) uint32_t i; struct GNUNET_TIME_Relative time_next_round; struct AttackedPeer *tmp_att_peer; + struct PeerContext *peer_ctx; - LOG (GNUNET_ERROR_TYPE_DEBUG, "Going to execute next round maliciously.\n"); + LOG (GNUNET_ERROR_TYPE_DEBUG, "Going to execute next round maliciously type %" PRIu32 ".\n", + mal_type); /* Do malicious actions */ if (1 == mal_type) @@ -1906,6 +2024,18 @@ do_mal_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc) if (3 == mal_type) { /* Combined attack */ + /* Send PUSH to attacked peers */ + peer_ctx = get_peer_ctx (peer_map, &attacked_peer); + if (GNUNET_YES == get_peer_flag (peer_ctx, VALID)) + { + LOG (GNUNET_ERROR_TYPE_DEBUG, + "Goding to send push to attacked peer (%s)\n", + GNUNET_i2s (&attacked_peer)); + send_push (&attacked_peer); + } + else + check_peer_live (peer_ctx); + /* The maximum of pushes we're going to send this round */ num_pushes = GNUNET_MIN (GNUNET_MIN (push_limit - 1, num_attacked_peers), @@ -1915,9 +2045,6 @@ do_mal_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc) "Going to send %" PRIu32 " pushes\n", num_pushes); - /* Send PUSHes to attacked peers */ - send_push (&attacked_peer); - for (i = 0 ; i < num_pushes ; i++) { if (att_peers_tail == att_peer_index) @@ -1929,6 +2056,7 @@ do_mal_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc) } /* Send PULLs to some peers to learn about additional peers to attack */ + tmp_att_peer = att_peer_index; for (i = 0 ; i < num_pushes * alpha ; i++) { if (att_peers_tail == tmp_att_peer) @@ -1951,7 +2079,7 @@ do_mal_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc) /** - * Send out PUSHes and PULLs, possibly update #gossip_list, samplers. + * Send out PUSHes and PULLs, possibly update #view, samplers. * * This is executed regylary. */ @@ -1961,144 +2089,189 @@ do_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc) LOG (GNUNET_ERROR_TYPE_DEBUG, "Going to execute next round.\n"); uint32_t i; + unsigned int view_size; unsigned int *permut; - unsigned int n_peers; /* Number of peers we send pushes/pulls to */ + unsigned int a_peers; /* Number of peers we send pushes to */ + unsigned int b_peers; /* Number of peers we send pull requests to */ + uint32_t first_border; + uint32_t second_border; struct GNUNET_PeerIdentity peer; - struct GNUNET_PeerIdentity *tmp_peer; + struct PeerContext *peer_ctx; LOG (GNUNET_ERROR_TYPE_DEBUG, - "Printing gossip list:\n"); - for (i = 0 ; i < gossip_list_size ; i++) + "Printing view:\n"); + to_file (file_name_view_log, + "___ new round ___"); + view_size = GNUNET_CONTAINER_multipeermap_size (view); + generate_view_array (view_size); + for (i = 0 ; i < view_size ; i++) + { LOG (GNUNET_ERROR_TYPE_DEBUG, - "\t%s\n", GNUNET_i2s (&gossip_list[i])); - // TODO log lists, ... + "\t%s\n", GNUNET_i2s (&view_array[i])); + to_file (file_name_view_log, + "=%s\t(do round)", + GNUNET_i2s_full (&view_array[i])); + } - /* Would it make sense to have one shuffeled gossip list and then - * to send PUSHes to first alpha peers, PULL requests to next beta peers and - * use the rest to update sampler? 
- * in essence get random peers with consumption */ - /* Send PUSHes */ - if (0 < gossip_list_size) + /* Send pushes and pull requests */ + if (0 < view_size) { permut = GNUNET_CRYPTO_random_permute (GNUNET_CRYPTO_QUALITY_STRONG, - (unsigned int) gossip_list_size); - n_peers = ceil (alpha * gossip_list_size); + (unsigned int) view_size); + + /* Send PUSHes */ + a_peers = ceil (alpha * view_size); + LOG (GNUNET_ERROR_TYPE_DEBUG, "Going to send pushes to %u (ceil (%f * %u)) peers.\n", - n_peers, alpha, gossip_list_size); - for (i = 0 ; i < n_peers ; i++) + a_peers, alpha, view_size); + for (i = 0; i < a_peers; i++) { - peer = gossip_list[permut[i]]; + peer = view_array[permut[i]]; if (0 != GNUNET_CRYPTO_cmp_peer_identity (&own_identity, &peer)) // TODO { // FIXME if this fails schedule/loop this for later send_push (&peer); } } - GNUNET_free (permut); - } - - /* Send PULL requests */ - //permut = GNUNET_CRYPTO_random_permute (GNUNET_CRYPTO_QUALITY_STRONG, (unsigned int) sampler_list->size); - n_peers = ceil (beta * gossip_list_size); - LOG (GNUNET_ERROR_TYPE_DEBUG, - "Going to send pulls to %u (ceil (%f * %u)) peers.\n", - n_peers, beta, gossip_list_size); - for (i = 0 ; i < n_peers ; i++) - { - tmp_peer = get_rand_peer_ignore_list (gossip_list, gossip_list_size, - pending_pull_reply_list, pending_pull_reply_list_size); - if (NULL != tmp_peer) + /* Send PULL requests */ + b_peers = ceil (beta * view_size); + first_border = a_peers; + second_border = a_peers + b_peers; + if (second_border > view_size) { - peer = *tmp_peer; - GNUNET_free (tmp_peer); - - if (0 != GNUNET_CRYPTO_cmp_peer_identity (&own_identity, &peer)) - { + first_border = view_size - b_peers; + second_border = view_size; + } + LOG (GNUNET_ERROR_TYPE_DEBUG, + "Going to send pulls to %u (ceil (%f * %u)) peers.\n", + b_peers, beta, view_size); + for (i = first_border; i < second_border; i++) + { + peer = view_array[permut[i]]; + peer_ctx = get_peer_ctx (peer_map, &peer); + if (0 != GNUNET_CRYPTO_cmp_peer_identity (&own_identity, &peer) && + GNUNET_NO == get_peer_flag (peer_ctx, PULL_REPLY_PENDING)) // TODO + { // FIXME if this fails schedule/loop this for later send_pull_request (&peer); } } + + GNUNET_free (permut); + permut = NULL; } - /* Update gossip list */ + /* Update view */ + /* TODO see how many peers are in push-/pull- list! 
*/ - if (push_list_size <= alpha * gossip_list_size - && push_list_size > 0 - && pull_list_size > 0) + if (push_list_size <= alpha * view_size && + 0 < push_list_size && + 0 < pull_list_size) { - LOG (GNUNET_ERROR_TYPE_DEBUG, "Update of the gossip list.\n"); + LOG (GNUNET_ERROR_TYPE_DEBUG, "Update of the view.\n"); - uint32_t first_border; - uint32_t second_border; - uint32_t r_index; + uint32_t final_size; uint32_t peers_to_clean_size; struct GNUNET_PeerIdentity *peers_to_clean; peers_to_clean = NULL; peers_to_clean_size = 0; - GNUNET_array_grow (peers_to_clean, peers_to_clean_size, gossip_list_size); + GNUNET_array_grow (peers_to_clean, peers_to_clean_size, view_size); memcpy (peers_to_clean, - gossip_list, - gossip_list_size * sizeof (struct GNUNET_PeerIdentity)); - - first_border = ceil (alpha * sampler_size_est_need); - second_border = first_border + ceil (beta * sampler_size_est_need); - - GNUNET_array_grow (gossip_list, gossip_list_size, second_border); + view_array, + view_size * sizeof (struct GNUNET_PeerIdentity)); + + /* Seems like recreating is the easiest way of emptying the peermap */ + GNUNET_CONTAINER_multipeermap_destroy (view); + view = GNUNET_CONTAINER_multipeermap_create (view_size, GNUNET_NO); + to_file (file_name_view_log, + "--- emptied ---"); + + first_border = GNUNET_MIN (ceil (alpha * sampler_size_est_need), + push_list_size); + second_border = first_border + + GNUNET_MIN (floor (beta * sampler_size_est_need), + pull_list_size); + final_size = second_border + + ceil ((1 - (alpha + beta)) * sampler_size_est_need); + + GNUNET_array_grow (view_array, view_size, second_border); + + /* Update view with peers received through PUSHes */ + permut = GNUNET_CRYPTO_random_permute (GNUNET_CRYPTO_QUALITY_STRONG, + push_list_size); + for (i = 0; i < first_border; i++) + { + view_array[i] = push_list[permut[i]]; + GNUNET_CONTAINER_multipeermap_put (view, &push_list[permut[i]], NULL, + GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_FAST); - for (i = 0 ; i < first_border ; i++) - {/* Update gossip list with peers received through PUSHes */ - r_index = GNUNET_CRYPTO_random_u64 (GNUNET_CRYPTO_QUALITY_STRONG, - push_list_size); - gossip_list[i] = push_list[r_index]; + to_file (file_name_view_log, + "+%s\t(push list)", + GNUNET_i2s_full (&view_array[i])); // TODO change the peer_flags accordingly } + GNUNET_free (permut); + permut = NULL; - for (i = first_border ; i < second_border ; i++) - {/* Update gossip list with peers received through PULLs */ - r_index = GNUNET_CRYPTO_random_u64 (GNUNET_CRYPTO_QUALITY_STRONG, - pull_list_size); - gossip_list[i] = pull_list[r_index]; - // TODO change the peer_flags accordingly - } + /* Update view with peers received through PULLs */ + permut = GNUNET_CRYPTO_random_permute (GNUNET_CRYPTO_QUALITY_STRONG, + pull_list_size); + for (i = first_border; i < second_border; i++) + { + view_array[i] = pull_list[permut[i]]; + GNUNET_CONTAINER_multipeermap_put (view, &pull_list[permut[i]], NULL, + GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_FAST); - for (i = second_border ; i < sampler_size_est_need ; i++) - {/* Update gossip list with peers from history */ - RPS_sampler_get_n_rand_peers (prot_sampler, - hist_update, - NULL, - 1, - GNUNET_NO); - num_hist_update_tasks++; + to_file (file_name_view_log, + "+%s\t(pull list)", + GNUNET_i2s_full (&view_array[i])); // TODO change the peer_flags accordingly } + GNUNET_free (permut); + permut = NULL; + + /* Update view with peers from history */ + RPS_sampler_get_n_rand_peers (prot_sampler, + hist_update, + NULL, + 
final_size - second_border); + num_hist_update_tasks = final_size - second_border; + // TODO change the peer_flags accordingly - for (i = 0 ; i < gossip_list_size ; i++) - rem_from_list (&peers_to_clean, &peers_to_clean_size, &gossip_list[i]); + for (i = 0; i < view_size; i++) + rem_from_list (&peers_to_clean, &peers_to_clean_size, &view_array[i]); - for (i = 0 ; i < peers_to_clean_size ; i++) + /* Clean peers that were removed from the view */ + for (i = 0; i < peers_to_clean_size; i++) + { peer_clean (&peers_to_clean[i]); + to_file (file_name_view_log, + "-%s", + GNUNET_i2s_full (&peers_to_clean[i])); + } GNUNET_free (peers_to_clean); + peers_to_clean = NULL; } else { - LOG (GNUNET_ERROR_TYPE_DEBUG, "No update of the gossip list.\n"); + LOG (GNUNET_ERROR_TYPE_DEBUG, "No update of the view.\n"); } // TODO independent of that also get some peers from CADET_get_peers()? LOG (GNUNET_ERROR_TYPE_DEBUG, - "Received %u pushes and %u pulls last round (alpha (%.2f) * gossip_list_size (%u) = %.2f)\n", + "Received %u pushes and %u pulls last round (alpha (%.2f) * view_size (%u) = %.2f)\n", push_list_size, pull_list_size, alpha, - gossip_list_size, - alpha * gossip_list_size); + view_size, + alpha * view_size); /* Update samplers */ - for ( i = 0 ; i < push_list_size ; i++ ) + for (i = 0; i < push_list_size; i++) { LOG (GNUNET_ERROR_TYPE_DEBUG, "Updating with peer %s from push list\n", @@ -2108,7 +2281,7 @@ do_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc) // TODO set in_flag? } - for ( i = 0 ; i < pull_list_size ; i++ ) + for (i = 0; i < pull_list_size; i++) { LOG (GNUNET_ERROR_TYPE_DEBUG, "Updating with peer %s from pull list\n", @@ -2128,7 +2301,6 @@ do_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc) time_next_round = compute_rand_delay (round_interval, 2); /* Schedule next round */ - //do_round_task = GNUNET_SCHEDULER_add_delayed (round_interval, &do_round, NULL); do_round_task = GNUNET_SCHEDULER_add_delayed (time_next_round, &do_round, NULL); LOG (GNUNET_ERROR_TYPE_DEBUG, "Finished round\n"); } @@ -2163,24 +2335,25 @@ init_peer_cb (void *cls, /** - * Clean the send channel of a peer + * Iterator over peers from peerinfo. 
+ * + * @param cls closure + * @param peer id of the peer, NULL for last call + * @param hello hello message for the peer (can be NULL) + * @param error message */ void -peer_clean (const struct GNUNET_PeerIdentity *peer) +process_peerinfo_peers (void *cls, + const struct GNUNET_PeerIdentity *peer, + const struct GNUNET_HELLO_Message *hello, + const char *err_msg) { - struct PeerContext *peer_ctx; - struct GNUNET_CADET_Channel *channel; - - if (GNUNET_YES != in_arr (gossip_list, gossip_list_size, peer) - && GNUNET_YES == GNUNET_CONTAINER_multipeermap_contains (peer_map, peer)) + if (NULL != peer) { - peer_ctx = get_peer_ctx (peer_map, peer); - if (NULL != peer_ctx->send_channel) - { - channel = peer_ctx->send_channel; - peer_ctx->send_channel = NULL; - GNUNET_CADET_channel_destroy (channel); - } + LOG (GNUNET_ERROR_TYPE_DEBUG, + "Got peer_id %s from peerinfo\n", + GNUNET_i2s (peer)); + new_peer_id (peer); } } @@ -2194,10 +2367,9 @@ peer_remove_cb (void *cls, const struct GNUNET_PeerIdentity *key, void *value) struct PeerContext *peer_ctx; const struct GNUNET_CADET_Channel *channel = (const struct GNUNET_CADET_Channel *) cls; - struct GNUNET_CADET_Channel *recv; - struct GNUNET_CADET_Channel *send; peer_ctx = (struct PeerContext *) value; + set_peer_flag (peer_ctx, TO_DESTROY); LOG (GNUNET_ERROR_TYPE_DEBUG, "Going to clean peer %s\n", @@ -2205,29 +2377,29 @@ peer_remove_cb (void *cls, const struct GNUNET_PeerIdentity *key, void *value) /* If operations are still scheduled for this peer cancel those */ if (0 != peer_ctx->num_outstanding_ops) + { GNUNET_array_grow (peer_ctx->outstanding_ops, peer_ctx->num_outstanding_ops, 0); + } - /* If we are still waiting for notification whether this peer is live + /* If we are still waiting for notification whether this peer is live * cancel the according task */ if (NULL != peer_ctx->is_live_task) { - LOG (GNUNET_ERROR_TYPE_DEBUG, - "Trying to cancle is_live_task for peer %s\n", - GNUNET_i2s (key)); + LOG (GNUNET_ERROR_TYPE_DEBUG, + "Trying to cancle is_live_task for peer %s\n", + GNUNET_i2s (key)); GNUNET_CADET_notify_transmit_ready_cancel (peer_ctx->is_live_task); peer_ctx->is_live_task = NULL; } + unset_peer_flag (peer_ctx, PULL_REPLY_PENDING); - recv = peer_ctx->recv_channel; - peer_ctx->recv_channel = NULL; - if (NULL != recv - && channel != recv) - { - GNUNET_CADET_channel_destroy (recv); - } + to_file (file_name_view_log, + "-%s\t(cleanup channel, other peer)", + GNUNET_i2s_full (key)); + GNUNET_CONTAINER_multipeermap_remove_all (view, key); /* If there is still a mq destroy it */ if (NULL != peer_ctx->mq) @@ -2236,28 +2408,63 @@ peer_remove_cb (void *cls, const struct GNUNET_PeerIdentity *key, void *value) peer_ctx->mq = NULL; } + /* Remove the send_channel - * The peer map entry should be removed - * from the callback #cleanup_channel */ - send = peer_ctx->send_channel; - peer_ctx->send_channel = NULL; - if (NULL != send - && channel != send) + * This function should be called again from #cleanup_channel (callback + * called on the destruction of channels) and clean up the rest. 
*/ + if (NULL != peer_ctx->send_channel && + channel != peer_ctx->send_channel) { - GNUNET_CADET_channel_destroy (send); + GNUNET_CADET_channel_destroy (peer_ctx->send_channel); + peer_ctx->send_channel = NULL; } - else - { /* If there is no channel we have to remove it now */ - if (GNUNET_YES != GNUNET_CONTAINER_multipeermap_remove_all (peer_map, key)) - LOG (GNUNET_ERROR_TYPE_WARNING, "removing peer from peer_map failed\n"); - else - GNUNET_free (peer_ctx); + + /* Remove the recv_channel + * This function should be called again from #cleanup_channel (callback + * called on the destruction of channels) and clean up the rest. */ + if (NULL != peer_ctx->recv_channel && + channel != peer_ctx->recv_channel) + { + GNUNET_CADET_channel_destroy (peer_ctx->recv_channel); + peer_ctx->recv_channel = NULL; } + /* If there is no channel we have to remove the context now */ + if (GNUNET_YES != GNUNET_CONTAINER_multipeermap_remove_all (peer_map, key)) + LOG (GNUNET_ERROR_TYPE_WARNING, "removing peer from peer_map failed\n"); + + GNUNET_free (peer_ctx); + return GNUNET_YES; } +/** + * Clean the send channel of a peer + * If there is also no channel to receive messages from that peer, remove it + * from the peermap. + */ +void +peer_clean (const struct GNUNET_PeerIdentity *peer) +{ + struct PeerContext *peer_ctx; + /* struct GNUNET_CADET_Channel *channel; */ + + if (GNUNET_YES != GNUNET_CONTAINER_multipeermap_contains (view, peer) && + GNUNET_YES == GNUNET_CONTAINER_multipeermap_contains (peer_map, peer)) + { + peer_ctx = get_peer_ctx (peer_map, peer); + GNUNET_CADET_channel_destroy (peer_ctx->send_channel); + peer_ctx->send_channel = NULL; + + if (NULL == peer_ctx->recv_channel) + { + peer_remove_cb (NULL, peer, peer_ctx); + } + } +} + + /** * Task run during shutdown. * @@ -2271,13 +2478,15 @@ shutdown_task (void *cls, LOG (GNUNET_ERROR_TYPE_DEBUG, "RPS is going down\n"); + GNUNET_PEERINFO_notify_cancel (peerinfo_notify_handle); + GNUNET_PEERINFO_disconnect (peerinfo_handle); + if (NULL != do_round_task) { GNUNET_SCHEDULER_cancel (do_round_task); do_round_task = NULL; } - { if (GNUNET_SYSERR == GNUNET_CONTAINER_multipeermap_iterate (peer_map, peer_remove_cb, NULL)) @@ -2292,9 +2501,10 @@ shutdown_task (void *cls, "Size of the peermap: %u\n", GNUNET_CONTAINER_multipeermap_size (peer_map)); GNUNET_break (0 == GNUNET_CONTAINER_multipeermap_size (peer_map)); - GNUNET_CONTAINER_multipeermap_destroy (peer_map); GNUNET_CADET_disconnect (cadet_handle); - GNUNET_array_grow (gossip_list, gossip_list_size, 0); + GNUNET_CONTAINER_multipeermap_destroy (peer_map); + GNUNET_CONTAINER_multipeermap_destroy (view); + view = NULL; GNUNET_array_grow (push_list, push_list_size, 0); GNUNET_array_grow (pull_list, pull_list_size, 0); #ifdef ENABLE_MALICIOUS @@ -2366,6 +2576,12 @@ handle_inbound_channel (void *cls, (void) GNUNET_CONTAINER_multipeermap_put (peer_map, &peer, peer_ctx, GNUNET_CONTAINER_MULTIHASHMAPOPTION_REPLACE); + /* This would make the push-message unnecessary */ + LOG (GNUNET_ERROR_TYPE_DEBUG, + "Got peer_id %s from peerinfo\n", + GNUNET_i2s (&peer)); + new_peer_id (&peer); + peer_is_live (peer_ctx); return NULL; // TODO @@ -2373,7 +2589,7 @@ handle_inbound_channel (void *cls, /** - * This is called when a remote peer destroys a channel. + * This is called when a channel is destroyed. * * @param cls The closure * @param channel The channel being closed @@ -2392,39 +2608,58 @@ cleanup_channel (void *cls, // Guess simply casting isn't the nicest way... 
// FIXME wait for cadet to change this function - //if (GNUNET_YES == GNUNET_CONTAINER_multipeermap_contains (peer_map, peer)) - //{ + if (GNUNET_YES == GNUNET_CONTAINER_multipeermap_contains (peer_map, peer)) + {/* We don't want to implicitly create a context that we're about to kill */ peer_ctx = GNUNET_CONTAINER_multipeermap_get (peer_map, peer); - if (NULL == peer_ctx) /* It could have been removed by shutdown_task */ return; - if (channel == peer_ctx->send_channel) - { /* Peer went down or we killd the channel */ - LOG (GNUNET_ERROR_TYPE_DEBUG, - "send channel (%s) was destroyed - cleaning up\n", - GNUNET_i2s (peer)); - rem_from_list (&gossip_list, &gossip_list_size, peer); - rem_from_list (&pending_pull_reply_list, &pending_pull_reply_list_size, peer); - - peer_ctx->send_channel = NULL; - /* Somwewhat {ab,re}use the iterator function */ - /* Cast to void is ok, because it's used as void in peer_remove_cb */ - (void) peer_remove_cb ((void *) channel, peer, peer_ctx); - - //if (GNUNET_YES != GNUNET_CONTAINER_multipeermap_remove_all (peer_map, key)) - // LOG (GNUNET_ERROR_TYPE_WARNING, "Removing peer from peer_map failed\n"); - //else - // GNUNET_free (peer_ctx); + if (get_peer_flag (peer_ctx, TO_DESTROY)) + {/* We initiatad the destruction of this particular peer */ + if (channel == peer_ctx->send_channel) + peer_ctx->send_channel = NULL; + else if (channel == peer_ctx->recv_channel) + peer_ctx->recv_channel = NULL; + + to_file (file_name_view_log, + "-%s\t(cleanup channel, ourself)", + GNUNET_i2s_full (peer)); } - else if (channel == peer_ctx->recv_channel) - { /* Other peer doesn't want to send us messages anymore */ - LOG (GNUNET_ERROR_TYPE_DEBUG, - "Peer %s destroyed recv channel - cleaning up channel\n", - GNUNET_i2s (peer)); - peer_ctx->recv_channel = NULL; + + else + { /* We did not initiate the destruction of this peer */ + if (channel == peer_ctx->send_channel) + { /* Something (but us) killd the channel - clean up peer */ + LOG (GNUNET_ERROR_TYPE_DEBUG, + "send channel (%s) was destroyed - cleaning up\n", + GNUNET_i2s (peer)); + peer_ctx->send_channel = NULL; + /* Somwewhat {ab,re}use the iterator function */ + /* Cast to void is ok, because it's used as void in peer_remove_cb */ + (void) peer_remove_cb ((void *) channel, peer, peer_ctx); + } + else if (channel == peer_ctx->recv_channel) + { /* Other peer doesn't want to send us messages anymore */ + LOG (GNUNET_ERROR_TYPE_DEBUG, + "Peer %s destroyed recv channel - cleaning up channel\n", + GNUNET_i2s (peer)); + peer_ctx->recv_channel = NULL; + } + else + { + LOG (GNUNET_ERROR_TYPE_WARNING, + "unknown channel (%s) was destroyed\n", + GNUNET_i2s (peer)); + } } - //} + } + + else + { /* We don't know a context to that peer */ + LOG (GNUNET_ERROR_TYPE_DEBUG, + "channel (%s) without associated context was destroyed\n", + GNUNET_i2s (peer)); + } } @@ -2472,6 +2707,9 @@ run (void *cls, struct GNUNET_SERVER_Handle *server, const struct GNUNET_CONFIGURATION_Handle *c) { + int size; + int out_size; + // TODO check what this does -- copied from gnunet-boss // - seems to work as expected GNUNET_log_setup ("rps", GNUNET_error_type_to_string (GNUNET_ERROR_TYPE_DEBUG), NULL); @@ -2495,24 +2733,48 @@ run (void *cls, "ROUNDINTERVAL", &round_interval)) { - LOG (GNUNET_ERROR_TYPE_DEBUG, "Failed to read ROUNDINTERVAL from config\n"); + GNUNET_log_config_missing (GNUNET_ERROR_TYPE_ERROR, + "RPS", "ROUNDINTERVAL"); GNUNET_SCHEDULER_shutdown (); return; } - /* Get initial size of sampler/gossip list from the configuration */ - if (GNUNET_OK != 
GNUNET_CONFIGURATION_get_value_number (cfg, "RPS", - "INITSIZE", - (long long unsigned int *) &sampler_size_est_need)) + /* Get initial size of sampler/view from the configuration */ + if (GNUNET_OK != + GNUNET_CONFIGURATION_get_value_number (cfg, "RPS", "INITSIZE", + (long long unsigned int *) &sampler_size_est_need)) { - LOG (GNUNET_ERROR_TYPE_DEBUG, "Failed to read INITSIZE from config\n"); + GNUNET_log_config_missing (GNUNET_ERROR_TYPE_ERROR, + "RPS", "INITSIZE"); GNUNET_SCHEDULER_shutdown (); return; } LOG (GNUNET_ERROR_TYPE_DEBUG, "INITSIZE is %" PRIu64 "\n", sampler_size_est_need); - gossip_list = NULL; + view = GNUNET_CONTAINER_multipeermap_create (4, GNUNET_NO); + + /* file_name_view_log */ + if (GNUNET_OK != GNUNET_DISK_directory_create ("/tmp/rps/")) + { + LOG (GNUNET_ERROR_TYPE_WARNING, + "Failed to create directory /tmp/rps/\n"); + } + + size = (14 + strlen (GNUNET_i2s_full (&own_identity)) + 1) * sizeof (char); + file_name_view_log = GNUNET_malloc (size); + out_size = GNUNET_snprintf (file_name_view_log, + size, + "/tmp/rps/view-%s", + GNUNET_i2s_full (&own_identity)); + if (size < out_size || + 0 > out_size) + { + LOG (GNUNET_ERROR_TYPE_WARNING, + "Failed to write string to buffer (size: %i, out_size: %i)\n", + size, + out_size); + } /* connect to NSE */ @@ -2543,6 +2805,7 @@ run (void *cls, cadet_handlers, ports); + peerinfo_handle = GNUNET_PEERINFO_connect (cfg); /* Initialise sampler */ struct GNUNET_TIME_Relative half_round_interval; @@ -2551,16 +2814,14 @@ run (void *cls, half_round_interval = GNUNET_TIME_relative_multiply (round_interval, .5); max_round_interval = GNUNET_TIME_relative_add (round_interval, half_round_interval); - prot_sampler = RPS_sampler_init (sampler_size_est_need, max_round_interval); - client_sampler = RPS_sampler_init (sampler_size_est_need, max_round_interval); + prot_sampler = RPS_sampler_init (sampler_size_est_need, max_round_interval); + client_sampler = RPS_sampler_mod_init (sampler_size_est_need, max_round_interval); /* Initialise push and pull maps */ push_list = NULL; push_list_size = 0; pull_list = NULL; pull_list_size = 0; - pending_pull_reply_list = NULL; - pending_pull_reply_list_size = 0; num_hist_update_tasks = 0; @@ -2570,6 +2831,10 @@ run (void *cls, GNUNET_CADET_get_peers (cadet_handle, &init_peer_cb, NULL); // TODO send push/pull to each of those peers? + peerinfo_notify_handle = GNUNET_PEERINFO_notify (cfg, + GNUNET_NO, + process_peerinfo_peers, + NULL); rps_start (server); }
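
For reference, the view-update logic added to do_round above rebuilds the view in three consecutive segments: peers taken from the push list, peers taken from the pull list, and peers drawn from the history sampler via hist_update. The borders between those segments are clamped by how many pushes and pull-reply peers actually arrived this round. Below is a minimal standalone sketch of that border arithmetic only; the concrete values for alpha, beta, sampler_size_est_need and the push/pull list sizes are assumptions for the example (in the service they come from the configuration, NSE and the current round's lists), and the sketch is not part of the patch itself.

```c
/*
 * Standalone illustration of the view-update border arithmetic used in the
 * patched do_round().  Example values are made up; only the computation of
 * first_border, second_border and final_size mirrors the patch.
 */
#include <stdio.h>
#include <math.h>

static unsigned int
min_uint (unsigned int a, unsigned int b)
{
  return (a < b) ? a : b;
}

int
main (void)
{
  const float alpha = 0.45f;                      /* share filled from pushes   */
  const float beta = 0.45f;                       /* share filled from pulls    */
  const unsigned int sampler_size_est_need = 20;  /* target view size           */
  const unsigned int push_list_size = 7;          /* pushes received this round */
  const unsigned int pull_list_size = 12;         /* peers from pull replies    */

  /* End of the segment taken from the push list */
  unsigned int first_border =
    min_uint ((unsigned int) ceil (alpha * sampler_size_est_need),
              push_list_size);

  /* End of the segment taken from the pull list */
  unsigned int second_border =
    first_border + min_uint ((unsigned int) floor (beta * sampler_size_est_need),
                             pull_list_size);

  /* The remainder is filled from the history sampler (hist_update) */
  unsigned int final_size =
    second_border
    + (unsigned int) ceil ((1 - (alpha + beta)) * sampler_size_est_need);

  printf ("push peers:    [0, %u)\n", first_border);
  printf ("pull peers:    [%u, %u)\n", first_border, second_border);
  printf ("history peers: [%u, %u)\n", second_border, final_size);
  return 0;
}
```

Compiled with `gcc -lm`, the example prints the three index ranges that do_round fills from the push list, the pull list and the history sampler respectively; when too few pushes or pull peers arrived, the corresponding segment simply shrinks and the history segment tops the view up towards the estimated size.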