/**
* Other malicious peers
*/
-static struct GNUNET_PeerIdentity *mal_peers;
+static struct GNUNET_PeerIdentity *mal_peers = NULL;
+
+/**
+ * Hashmap of malicious peers used as set.
+ * Used to more efficiently check whether we know that peer.
+ */
+static struct GNUNET_CONTAINER_MultiPeerMap *mal_peer_set = NULL;
/**
* Number of other malicious peers
*/
-static uint32_t num_mal_peers;
+static uint32_t num_mal_peers = 0;
+
/**
- * If type is 2 this is the attacked peer
+ * This struct is used to store the attacked peers in a DLL
+ */
+struct AttackedPeer
+{
+ /**
+ * DLL
+ */
+ struct AttackedPeer *next;
+ struct AttackedPeer *prev;
+
+ /**
+ * PeerID
+ */
+ struct GNUNET_PeerIdentity peer_id;
+};
+
+/**
+ * If type is 2 this is the DLL of attacked peers
+ */
+static struct AttackedPeer *att_peers_head = NULL;
+static struct AttackedPeer *att_peers_tail = NULL;
+
+/**
+ * This index is used to point to an attacked peer to
+ * implement the round-robin-ish way to select attacked peers.
+ */
+static struct AttackedPeer *att_peer_index = NULL;
+
+/**
+ * Hashmap of attacked peers used as set.
+ * Used to more efficiently check whether we know that peer.
+ */
+static struct GNUNET_CONTAINER_MultiPeerMap *att_peer_set = NULL;
+
+/**
+ * Number of attacked peers
+ */
+static uint32_t num_attacked_peers = 0;
+
+
+/**
+ * If type is 1 this is the attacked peer
*/
static struct GNUNET_PeerIdentity attacked_peer;
+
+/**
+ * The limit of PUSHes we can send in one round.
+ * This is an assumption of the Brahms protocol and either implemented
+ * via proof of work
+ * or
+ * assumed to be the bandwidth limitation.
+ */
+static uint32_t push_limit = 10000;
#endif /* ENABLE_MALICIOUS */
*/
#define unset_peer_flag(peer_ctx, mask) (peer_ctx->peer_flags &= (~mask))
+/**
+ * Compute the minimum of two ints
+ *
+ * Beware: both arguments are evaluated more than once -
+ * do not pass expressions with side effects.
+ */
+#define min(x, y) (((x) < (y)) ? (x) : (y))
/**
* Clean the send channel of a peer
{
struct GNUNET_PeerIdentity *peer;
+  /* Cancel is_live_task if still scheduled */
+ if (NULL != peer_ctx->is_live_task)
+ {
+ GNUNET_CADET_notify_transmit_ready_cancel (peer_ctx->is_live_task);
+ peer_ctx->is_live_task = NULL;
+ }
+
peer = &peer_ctx->peer_id;
set_peer_flag (peer_ctx, VALID);
LOG (GNUNET_ERROR_TYPE_DEBUG, "Peer %s is live\n", GNUNET_i2s (peer));
- if (0 != peer_ctx->num_outstanding_ops)
+ if (0 < peer_ctx->num_outstanding_ops)
{ /* Call outstanding operations */
unsigned int i;
GNUNET_array_grow (peer_ctx->outstanding_ops, peer_ctx->num_outstanding_ops, 0);
}
- if (NULL != peer_ctx->is_live_task)
- {
- GNUNET_CADET_notify_transmit_ready_cancel (peer_ctx->is_live_task);
- peer_ctx->is_live_task = NULL;
- }
-
return 0;
}
{
struct PeerContext *peer_ctx = (struct PeerContext *) cls;
+ peer_ctx->is_live_task = NULL;
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "Set ->is_live_task = NULL for peer %s\n",
+ GNUNET_i2s (&peer_ctx->peer_id));
+
if (NULL != buf
&& 0 != size)
{
- peer_ctx->is_live_task = NULL;
peer_is_live (peer_ctx);
}
+ else
+ {
+ LOG (GNUNET_ERROR_TYPE_WARNING,
+ "Problems establishing a connection to peer %s in order to check liveliness\n",
+ GNUNET_i2s (&peer_ctx->peer_id));
+ // TODO reschedule? cleanup?
+ }
//if (NULL != peer_ctx->is_live_task)
//{
struct PeerContext *peer_ctx;
peer_ctx = get_peer_ctx (peer_map, peer);
+
+ GNUNET_assert (NULL == peer_ctx->is_live_task);
+
if (NULL == peer_ctx->send_channel)
{
LOG (GNUNET_ERROR_TYPE_DEBUG,
peer_ctx = get_peer_ctx (peer_map, peer_id);
+ GNUNET_assert (NULL == peer_ctx->is_live_task);
+
if (NULL == peer_ctx->mq)
{
(void) get_channel (peer_map, peer_id);
LOG (GNUNET_ERROR_TYPE_DEBUG,
"Get informed about peer %s getting live\n",
GNUNET_i2s (&peer_ctx->peer_id));
- peer_ctx->is_live_task =
- GNUNET_CADET_notify_transmit_ready (peer_ctx->send_channel,
- GNUNET_NO,
- GNUNET_TIME_UNIT_FOREVER_REL,
- sizeof (struct GNUNET_MessageHeader),
- cadet_ntfy_tmt_rdy_cb,
- peer_ctx);
- // FIXME check whether this is NULL
+ if (NULL == peer_ctx->is_live_task)
+ {
+ peer_ctx->is_live_task =
+ GNUNET_CADET_notify_transmit_ready (peer_ctx->send_channel,
+ GNUNET_NO,
+ GNUNET_TIME_UNIT_FOREVER_REL,
+ sizeof (struct GNUNET_MessageHeader),
+ cadet_ntfy_tmt_rdy_cb,
+ peer_ctx);
+ }
+ else
+ {
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "Already waiting for notification\n");
+ }
}
{
unsigned int i;
- for ( i = 0 ; i < peer_ctx->num_outstanding_ops ; i++ )
+ for (i = 0 ; i < peer_ctx->num_outstanding_ops ; i++)
if (insert_in_sampler== peer_ctx->outstanding_ops[i].op)
return GNUNET_YES;
return GNUNET_NO;
}
+/**
+ * Add all peers in @a peer_array to @a peer_map used as set.
+ *
+ * @param peer_array array containing the peers
+ * @param num_peers number of peers in @a peer_array
+ * @param peer_map the peermap to use as set
+ *
+ * NOTE(review): @a peer_map is passed by value, so when NULL is given
+ * the map created below is never seen by the caller and leaks - callers
+ * must create the map themselves before calling this function. Verify
+ * all call sites pass a non-NULL map.
+ */
+static void
+add_peer_array_to_set (const struct GNUNET_PeerIdentity *peer_array,
+                       unsigned int num_peers,
+                       struct GNUNET_CONTAINER_MultiPeerMap *peer_map)
+{
+  unsigned int i;
+  if (NULL == peer_map)
+    peer_map = GNUNET_CONTAINER_multipeermap_create (num_peers,
+                                                     GNUNET_NO);
+  /* UNIQUE_FAST assumes the key is not yet present in the map -
+   * TODO confirm duplicates cannot reach this point */
+  for (i = 0 ; i < num_peers ; i++)
+  {
+    GNUNET_CONTAINER_multipeermap_put (peer_map,
+                                       &peer_array[i],
+                                       NULL,
+                                       GNUNET_CONTAINER_MULTIHASHMAPOPTION_UNIQUE_FAST);
+  }
+}
+
+
+/**
+ * Send a PULL REPLY to @a peer_id
+ *
+ * @param peer_id the peer to send the reply to.
+ * @param peer_ids the peers to send to @a peer_id
+ * @param num_peer_ids the number of peers to send to @a peer_id
+ */
+static void
+send_pull_reply (const struct GNUNET_PeerIdentity *peer_id,
+                 const struct GNUNET_PeerIdentity *peer_ids,
+                 unsigned int num_peer_ids)
+{
+  uint32_t send_size;
+  uint32_t max_ids;
+  struct GNUNET_MQ_Handle *mq;
+  struct GNUNET_MQ_Envelope *ev;
+  struct GNUNET_RPS_P2P_PullReplyMessage *out_msg;
+
+  /* Largest number of peer IDs that still fits into one CADET message */
+  max_ids =
+    (GNUNET_CONSTANTS_MAX_CADET_MESSAGE_SIZE -
+     sizeof (struct GNUNET_RPS_P2P_PullReplyMessage)) /
+    sizeof (struct GNUNET_PeerIdentity);
+
+  /* If not all peers fit, simply truncate */
+  // TODO select random ones via permutation
+  //      or even better: do good protocol design
+  if (num_peer_ids > max_ids)
+    send_size = max_ids;
+  else
+    send_size = num_peer_ids;
+
+  LOG (GNUNET_ERROR_TYPE_DEBUG,
+       "PULL REQUEST from peer %s received, going to send %u peers\n",
+       GNUNET_i2s (peer_id), send_size);
+
+  mq = get_mq (peer_map, peer_id);
+
+  ev = GNUNET_MQ_msg_extra (out_msg,
+                            send_size * sizeof (struct GNUNET_PeerIdentity),
+                            GNUNET_MESSAGE_TYPE_RPS_PP_PULL_REPLY);
+  out_msg->num_peers = htonl (send_size);
+  memcpy (&out_msg[1], peer_ids,
+          send_size * sizeof (struct GNUNET_PeerIdentity));
+
+  GNUNET_MQ_send (mq, ev);
+}
+
+
/***********************************************************************
* /Util functions
***********************************************************************/
GNUNET_free (ids);
cli_ctx = GNUNET_SERVER_client_get_user_context (client, struct client_ctx);
- if ( NULL == cli_ctx ) {
+ if (NULL == cli_ctx) {
cli_ctx = GNUNET_new (struct client_ctx);
cli_ctx->mq = GNUNET_MQ_queue_for_server_client (client);
GNUNET_SERVER_client_set_user_context (client, cli_ctx);
*/
static void
handle_client_seed (void *cls,
- struct GNUNET_SERVER_Client *client,
- const struct GNUNET_MessageHeader *message)
+ struct GNUNET_SERVER_Client *client,
+ const struct GNUNET_MessageHeader *message)
{
struct GNUNET_RPS_CS_SeedMessage *in_msg;
struct GNUNET_PeerIdentity *peers;
// (check the proof of work)
- peer = (const struct GNUNET_PeerIdentity *) GNUNET_CADET_channel_get_info (channel, GNUNET_CADET_OPTION_PEER);
+ peer = (const struct GNUNET_PeerIdentity *)
+ GNUNET_CADET_channel_get_info (channel, GNUNET_CADET_OPTION_PEER);
// FIXME wait for cadet to change this function
+
LOG (GNUNET_ERROR_TYPE_DEBUG, "PUSH received (%s)\n", GNUNET_i2s (peer));
+  #ifdef ENABLE_MALICIOUS
+  struct AttackedPeer *tmp_att_peer;
+
+  if (1 == mal_type)
+  { /* Try to maximise representation */
+    if (NULL == att_peer_set)
+      att_peer_set = GNUNET_CONTAINER_multipeermap_create (1, GNUNET_NO);
+    if (GNUNET_NO == GNUNET_CONTAINER_multipeermap_contains (att_peer_set,
+                                                             peer))
+    {
+      /* Allocate only when the peer is actually kept - allocating
+       * up front leaked the context on every other path */
+      tmp_att_peer = GNUNET_new (struct AttackedPeer);
+      memcpy (&tmp_att_peer->peer_id, peer, sizeof (struct GNUNET_PeerIdentity));
+      GNUNET_CONTAINER_DLL_insert (att_peers_head,
+                                   att_peers_tail,
+                                   tmp_att_peer);
+      add_peer_array_to_set (peer, 1, att_peer_set);
+      /* NOTE(review): counter was never incremented anywhere visible;
+       * without this, do_mal_round's type-1 push loop never runs */
+      num_attacked_peers++;
+    }
+    return GNUNET_OK;
+  }
+  else if (2 == mal_type)
+  { /* We attack one single well-known peer - simply ignore */
+    return GNUNET_OK;
+  }
+  #endif /* ENABLE_MALICIOUS */
+
/* Add the sending peer to the push_list */
if (GNUNET_NO == in_arr (push_list, push_list_size, peer))
GNUNET_array_append (push_list, push_list_size, *peer);
return GNUNET_OK;
}
+
/**
* Handle PULL REQUEST request message from another peer.
*
const struct GNUNET_MessageHeader *msg)
{
struct GNUNET_PeerIdentity *peer;
- uint32_t send_size;
- struct GNUNET_MQ_Handle *mq;
- struct GNUNET_MQ_Envelope *ev;
- struct GNUNET_RPS_P2P_PullReplyMessage *out_msg;
-
- peer = (struct GNUNET_PeerIdentity *) GNUNET_CADET_channel_get_info (channel,
- GNUNET_CADET_OPTION_PEER);
+ peer = (struct GNUNET_PeerIdentity *)
+ GNUNET_CADET_channel_get_info (channel,
+ GNUNET_CADET_OPTION_PEER);
// FIXME wait for cadet to change this function
- /* Compute actual size */
- send_size = sizeof (struct GNUNET_RPS_P2P_PullReplyMessage) +
- gossip_list_size * sizeof (struct GNUNET_PeerIdentity);
-
- if (GNUNET_CONSTANTS_MAX_CADET_MESSAGE_SIZE < send_size)
- /* Compute number of peers to send
- * If too long, simply truncate */
- send_size =
- (GNUNET_CONSTANTS_MAX_CADET_MESSAGE_SIZE -
- sizeof (struct GNUNET_RPS_P2P_PullReplyMessage)) /
- sizeof (struct GNUNET_PeerIdentity);
- else
- send_size = gossip_list_size;
-
- LOG (GNUNET_ERROR_TYPE_DEBUG,
- "PULL REQUEST from peer %s received, going to send %u peers\n",
- GNUNET_i2s (peer), send_size);
-
- mq = get_mq (peer_map, peer);
+ #ifdef ENABLE_MALICIOUS
+ if (1 == mal_type)
+ { /* Try to maximise representation */
+ send_pull_reply (peer, mal_peers, num_mal_peers);
+ return GNUNET_OK;
+ }
- ev = GNUNET_MQ_msg_extra (out_msg,
- send_size * sizeof (struct GNUNET_PeerIdentity),
- GNUNET_MESSAGE_TYPE_RPS_PP_PULL_REPLY);
- //out_msg->num_peers = htonl (gossip_list_size);
- out_msg->num_peers = htonl (send_size);
- memcpy (&out_msg[1], gossip_list,
- send_size * sizeof (struct GNUNET_PeerIdentity));
+  else if (2 == mal_type)
+  { /* Try to partition network */
+    /* GNUNET_CRYPTO_cmp_peer_identity () is memcmp-style: 0 means equal.
+     * Comparing against GNUNET_YES (1) never matched the attacked peer. */
+    if (0 == GNUNET_CRYPTO_cmp_peer_identity (&attacked_peer, peer))
+    { /* The attacked peer only learns about our malicious friends */
+      send_pull_reply (peer, mal_peers, num_mal_peers);
+    }
+    return GNUNET_OK;
+  }
+  #endif /* ENABLE_MALICIOUS */
- GNUNET_MQ_send (mq, ev);
+ send_pull_reply (peer, gossip_list, gossip_list_size);
return GNUNET_OK;
}
return GNUNET_OK;
}
+
/* Do actual logic */
peers = (struct GNUNET_PeerIdentity *) &msg[1];
for (i = 0 ; i < ntohl (in_msg->num_peers) ; i++)
}
+/**
+ * Compute a random delay.
+ * Returns a uniformly distributed value in the interval
+ * [mean/spread, mean + mean/spread).
+ *
+ * For example for mean 4 min and spread 2 the minimum is
+ * (4 min - (1/2 * 4 min)); it would return a random value
+ * between 2 and 6 min.
+ *
+ * @param mean the mean
+ * @param spread the inverse amount of deviation from the mean
+ * @return the computed delay, or @a mean if @a spread is invalid
+ */
+static struct GNUNET_TIME_Relative
+compute_rand_delay (struct GNUNET_TIME_Relative mean, unsigned int spread)
+{
+  struct GNUNET_TIME_Relative half_interval;
+  struct GNUNET_TIME_Relative ret;
+  unsigned int rand_delay;
+  unsigned int max_rand_delay;
+
+  if (0 == spread)
+  {
+    LOG (GNUNET_ERROR_TYPE_WARNING,
+         "Not accepting spread of 0\n");
+    GNUNET_break (0);
+    /* Falling through would divide by zero below - return the mean */
+    return mean;
+  }
+
+  /* Offset added at the end: mean/spread */
+  half_interval = GNUNET_TIME_relative_divide (mean, spread);
+
+  /* Multiply before dividing: the former "* (2/spread)" truncated to 0
+   * for any spread > 2 and caused a division by zero below.
+   * NOTE(review): result may exceed UINT32_MAX and be truncated on
+   * assignment - confirm the resulting granularity is acceptable. */
+  max_rand_delay =
+    GNUNET_TIME_UNIT_FOREVER_REL.rel_value_us / mean.rel_value_us * 2 / spread;
+  /**
+   * Compute random value between (0 and 1) * round_interval
+   * via multiplying round_interval with a 'fraction' (0 to value)/value
+   */
+  rand_delay = GNUNET_CRYPTO_random_u32 (GNUNET_CRYPTO_QUALITY_WEAK, max_rand_delay);
+  ret = GNUNET_TIME_relative_multiply (mean, rand_delay);
+  ret = GNUNET_TIME_relative_divide (ret, max_rand_delay);
+  ret = GNUNET_TIME_relative_add (ret, half_interval);
+
+  if (GNUNET_TIME_UNIT_FOREVER_REL.rel_value_us == ret.rel_value_us)
+    LOG (GNUNET_ERROR_TYPE_WARNING,
+         "Returning FOREVER_REL\n");
+
+  return ret;
+}
+
+
+/**
+ * Send single pull request
+ *
+ * Side effect: appends @a peer_id to pending_pull_reply_list
+ * (presumably used to match the incoming PULL REPLY - TODO confirm
+ * against the reply handler).
+ *
+ * @param peer_id the peer to send the pull request to.
+ */
+static void
+send_pull_request (struct GNUNET_PeerIdentity *peer_id)
+{
+  struct GNUNET_MQ_Envelope *ev;
+  struct GNUNET_MQ_Handle *mq;
+
+  LOG (GNUNET_ERROR_TYPE_DEBUG,
+       "Sending PULL request to peer %s of gossiped list.\n",
+       GNUNET_i2s (peer_id));
+
+  /* Remember that we expect a reply from this peer */
+  GNUNET_array_append (pending_pull_reply_list, pending_pull_reply_list_size, *peer_id);
+
+  ev = GNUNET_MQ_msg_header (GNUNET_MESSAGE_TYPE_RPS_PP_PULL_REQUEST);
+  mq = get_mq (peer_map, peer_id);
+  GNUNET_MQ_send (mq, ev);
+}
+
+
+/**
+ * Send single push
+ *
+ * Sends a header-only PUSH message to @a peer_id via its message queue.
+ *
+ * @param peer_id the peer to send the push to.
+ */
+static void
+send_push (struct GNUNET_PeerIdentity *peer_id)
+{
+  struct GNUNET_MQ_Envelope *ev;
+  struct GNUNET_MQ_Handle *mq;
+
+  LOG (GNUNET_ERROR_TYPE_DEBUG,
+       "Sending PUSH to peer %s of gossiped list.\n",
+       GNUNET_i2s (peer_id));
+
+  ev = GNUNET_MQ_msg_header (GNUNET_MESSAGE_TYPE_RPS_PP_PUSH);
+  mq = get_mq (peer_map, peer_id);
+  GNUNET_MQ_send (mq, ev);
+}
+
+
+static void
+do_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc);
+
+static void
+do_mal_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc);
#ifdef ENABLE_MALICIOUS
* @param channel_ctx The context associated with this channel
* @param msg The message header
*/
- static int
-handle_peer_act_malicious (void *cls,
- struct GNUNET_CADET_Channel *channel,
- void **channel_ctx,
- const struct GNUNET_MessageHeader *msg)
+ static void
+handle_client_act_malicious (void *cls,
+ struct GNUNET_SERVER_Client *client,
+ const struct GNUNET_MessageHeader *msg)
{
struct GNUNET_RPS_CS_ActMaliciousMessage *in_msg;
- struct GNUNET_PeerIdentity *sender;
- struct PeerContext *sender_ctx;
struct GNUNET_PeerIdentity *peers;
+ uint32_t num_mal_peers_sent;
+ uint32_t num_mal_peers_old;
/* Check for protocol violation */
if (sizeof (struct GNUNET_RPS_CS_ActMaliciousMessage) > ntohs (msg->size))
{
GNUNET_break_op (0);
- return GNUNET_SYSERR;
}
in_msg = (struct GNUNET_RPS_CS_ActMaliciousMessage *) msg;
(ntohs (msg->size) - sizeof (struct GNUNET_RPS_CS_ActMaliciousMessage)) /
sizeof (struct GNUNET_PeerIdentity));
GNUNET_break_op (0);
- return GNUNET_SYSERR;
- }
-
- sender = (struct GNUNET_PeerIdentity *) GNUNET_CADET_channel_get_info (
- (struct GNUNET_CADET_Channel *) channel, GNUNET_CADET_OPTION_PEER);
- // Guess simply casting isn't the nicest way...
- // FIXME wait for cadet to change this function
- sender_ctx = get_peer_ctx (peer_map, sender);
-
- if (GNUNET_YES == get_peer_flag (sender_ctx, PULL_REPLY_PENDING))
- {
- GNUNET_break_op (0);
- return GNUNET_OK;
}
/* Do actual logic */
peers = (struct GNUNET_PeerIdentity *) &msg[1];
- num_mal_peers = ntohl (in_msg->num_peers);
mal_type = ntohl (in_msg->type);
LOG (GNUNET_ERROR_TYPE_DEBUG,
- "Now acting malicious type %" PRIX32 "\n",
+ "Now acting malicious type %" PRIu32 "\n",
mal_type);
if (1 == mal_type)
{ /* Try to maximise representation */
- num_mal_peers = ntohl (in_msg->num_peers);
- mal_peers = GNUNET_new_array (num_mal_peers,
- struct GNUNET_PeerIdentity);
- memcpy (mal_peers, peers, num_mal_peers);
+ /* Add other malicious peers to those we already know */
+
+ num_mal_peers_sent = ntohl (in_msg->num_peers);
+ num_mal_peers_old = num_mal_peers;
+ GNUNET_array_grow (mal_peers,
+ num_mal_peers,
+ num_mal_peers + num_mal_peers_sent);
+ memcpy (&mal_peers[num_mal_peers_old],
+ peers,
+ num_mal_peers_sent * sizeof (struct GNUNET_PeerIdentity));
+
+    /* Add all mal peers to mal_peer_set.
+     * Create the set here: add_peer_array_to_set () takes the map by
+     * value, so a map it creates itself never reaches us (and leaks). */
+    if (NULL == mal_peer_set)
+      mal_peer_set = GNUNET_CONTAINER_multipeermap_create (num_mal_peers,
+                                                           GNUNET_NO);
+    add_peer_array_to_set (&mal_peers[num_mal_peers_old],
+                           num_mal_peers_sent,
+                           mal_peer_set);
/* Substitute do_round () with do_mal_round () */
GNUNET_SCHEDULER_cancel (do_round_task);
do_round_task = GNUNET_SCHEDULER_add_now (&do_mal_round, NULL);
}
+
else if (2 == mal_type)
{ /* Try to partition the network */
- num_mal_peers = ntohl (in_msg->num_peers) - 1;
- mal_peers = GNUNET_new_array (num_mal_peers,
- struct GNUNET_PeerIdentity);
- memcpy (mal_peers, peers, num_mal_peers);
- attacked_peer = peers[num_mal_peers];
+ /* Add other malicious peers to those we already know */
+ num_mal_peers_sent = ntohl (in_msg->num_peers) - 1;
+ num_mal_peers_old = num_mal_peers;
+ GNUNET_array_grow (mal_peers,
+ num_mal_peers,
+ num_mal_peers + num_mal_peers_sent);
+ memcpy (&mal_peers[num_mal_peers_old],
+ peers,
+ num_mal_peers_sent * sizeof (struct GNUNET_PeerIdentity));
+
+    /* Add all mal peers to mal_peer_set.
+     * Create the set here: add_peer_array_to_set () takes the map by
+     * value, so a map it creates itself never reaches us (and leaks). */
+    if (NULL == mal_peer_set)
+      mal_peer_set = GNUNET_CONTAINER_multipeermap_create (num_mal_peers,
+                                                           GNUNET_NO);
+    add_peer_array_to_set (&mal_peers[num_mal_peers_old],
+                           num_mal_peers_sent,
+                           mal_peer_set);
+
+ /* Store the one attacked peer */
+ memcpy (&attacked_peer,
+ &peers[num_mal_peers_sent],
+ sizeof (struct GNUNET_PeerIdentity));
+
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "Attacked peer is %s\n",
+ GNUNET_i2s (&attacked_peer));
/* Substitute do_round () with do_mal_round () */
GNUNET_SCHEDULER_cancel (do_round_task);
{
GNUNET_break (0);
}
-
- return GNUNET_OK;
}
static void
do_mal_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
{
+ uint32_t num_pushes;
+ uint32_t i;
+ struct GNUNET_TIME_Relative time_next_round;
+ struct AttackedPeer *tmp_att_peer;
+
LOG (GNUNET_ERROR_TYPE_DEBUG, "Going to execute next round maliciously.\n");
- /* Do stuff */
+ /* Do malicious actions */
+ if (1 == mal_type)
+ { /* Try to maximise representation */
+
+ /* The maximum of pushes we're going to send this round */
+ num_pushes = min (min (push_limit,
+ num_attacked_peers),
+ GNUNET_CONSTANTS_MAX_CADET_MESSAGE_SIZE);
+
+ /* Send PUSHes to attacked peers */
+ for (i = 0 ; i < num_pushes ; i++)
+ {
+ if (att_peers_tail == att_peer_index)
+ att_peer_index = att_peers_head;
+ else
+ att_peer_index = att_peer_index->next;
+
+ send_push (&att_peer_index->peer_id);
+ }
+
+  /* Send PULLs to some peers to learn about additional peers to attack.
+   * Start from the current round-robin position; tmp_att_peer was
+   * previously read uninitialized (UB), and the advance step wrongly
+   * assigned att_peer_index instead of tmp_att_peer. */
+  tmp_att_peer = att_peer_index;
+  for (i = 0 ; i < num_pushes * alpha ; i++)
+  {
+    if ((NULL == tmp_att_peer) ||
+        (att_peers_tail == tmp_att_peer))
+      tmp_att_peer = att_peers_head;
+    else
+      tmp_att_peer = tmp_att_peer->next;
+
+    send_pull_request (&tmp_att_peer->peer_id);
+  }
+ }
+
+
+ else if (2 == mal_type)
+ { /**
+ * Try to partition the network
+ * Send as many pushes to the attacked peer as possible
+ * That is one push per round as it will ignore more.
+ */
+ send_push (&attacked_peer);
+ }
- /* Compute random time value between .5 * round_interval and 1.5 *round_interval */
- half_round_interval = GNUNET_TIME_relative_divide (round_interval, 2);
- do
- {
- /*
- * Compute random value between (0 and 1) * round_interval
- * via multiplying round_interval with a 'fraction' (0 to value)/value
- */
- rand_delay = GNUNET_CRYPTO_random_u32 (GNUNET_CRYPTO_QUALITY_WEAK, UINT_MAX/10);
- time_next_round = GNUNET_TIME_relative_multiply (round_interval, rand_delay);
- time_next_round = GNUNET_TIME_relative_divide (time_next_round, UINT_MAX/10);
- time_next_round = GNUNET_TIME_relative_add (time_next_round, half_round_interval);
- } while (GNUNET_TIME_UNIT_FOREVER_REL.rel_value_us == time_next_round.rel_value_us);
/* Schedule next round */
- do_round_task = GNUNET_SCHEDULER_add_delayed (round_interval, &do_mal_round, NULL);
+ time_next_round = compute_rand_delay (round_interval, 2);
+
+ //do_round_task = GNUNET_SCHEDULER_add_delayed (round_interval, &do_mal_round, NULL);
+ do_round_task = GNUNET_SCHEDULER_add_delayed (time_next_round, &do_mal_round, NULL);
LOG (GNUNET_ERROR_TYPE_DEBUG, "Finished round\n");
}
#endif /* ENABLE_MALICIOUS */
uint32_t i;
unsigned int *permut;
unsigned int n_peers; /* Number of peers we send pushes/pulls to */
- struct GNUNET_MQ_Envelope *ev;
struct GNUNET_PeerIdentity peer;
struct GNUNET_PeerIdentity *tmp_peer;
- struct GNUNET_MQ_Handle *mq;
LOG (GNUNET_ERROR_TYPE_DEBUG,
"Printing gossip list:\n");
peer = gossip_list[permut[i]];
if (0 != GNUNET_CRYPTO_cmp_peer_identity (&own_identity, &peer)) // TODO
{ // FIXME if this fails schedule/loop this for later
- LOG (GNUNET_ERROR_TYPE_DEBUG,
- "Sending PUSH to peer %s of gossiped list.\n",
- GNUNET_i2s (&peer));
-
- ev = GNUNET_MQ_msg_header (GNUNET_MESSAGE_TYPE_RPS_PP_PUSH);
- mq = get_mq (peer_map, &peer);
- GNUNET_MQ_send (mq, ev);
+ send_push (&peer);
}
}
GNUNET_free (permut);
peer = *tmp_peer;
GNUNET_free (tmp_peer);
- GNUNET_array_append (pending_pull_reply_list, pending_pull_reply_list_size, peer);
-
if (0 != GNUNET_CRYPTO_cmp_peer_identity (&own_identity, &peer))
- { // FIXME if this fails schedule/loop this for later
- LOG (GNUNET_ERROR_TYPE_DEBUG,
- "Sending PULL request to peer %s of gossiped list.\n",
- GNUNET_i2s (&peer));
-
- ev = GNUNET_MQ_msg_header (GNUNET_MESSAGE_TYPE_RPS_PP_PULL_REQUEST);
- mq = get_mq (peer_map, &peer);
- GNUNET_MQ_send (mq, ev);
+ {
+ send_pull_request (&peer);
}
}
}
LOG (GNUNET_ERROR_TYPE_DEBUG,
"Updating with peer %s from pull list\n",
GNUNET_i2s (&pull_list[i]));
- RPS_sampler_update (prot_sampler, &push_list[i]);
- RPS_sampler_update (client_sampler, &push_list[i]);
+ RPS_sampler_update (prot_sampler, &pull_list[i]);
+ RPS_sampler_update (client_sampler, &pull_list[i]);
// TODO set in_flag?
}
GNUNET_array_grow (pull_list, pull_list_size, 0);
struct GNUNET_TIME_Relative time_next_round;
- struct GNUNET_TIME_Relative half_round_interval;
- unsigned int rand_delay;
-
- /* Compute random time value between .5 * round_interval and 1.5 *round_interval */
- half_round_interval = GNUNET_TIME_relative_divide (round_interval, 2);
- do
- {
- /*
- * Compute random value between (0 and 1) * round_interval
- * via multiplying round_interval with a 'fraction' (0 to value)/value
- */
- rand_delay = GNUNET_CRYPTO_random_u32 (GNUNET_CRYPTO_QUALITY_WEAK, UINT_MAX/10);
- time_next_round = GNUNET_TIME_relative_multiply (round_interval, rand_delay);
- time_next_round = GNUNET_TIME_relative_divide (time_next_round, UINT_MAX/10);
- time_next_round = GNUNET_TIME_relative_add (time_next_round, half_round_interval);
- } while (GNUNET_TIME_UNIT_FOREVER_REL.rel_value_us == time_next_round.rel_value_us);
+ time_next_round = compute_rand_delay (round_interval, 2);
/* Schedule next round */
- do_round_task = GNUNET_SCHEDULER_add_delayed (round_interval, &do_round, NULL);
+ //do_round_task = GNUNET_SCHEDULER_add_delayed (round_interval, &do_round, NULL);
+ do_round_task = GNUNET_SCHEDULER_add_delayed (time_next_round, &do_round, NULL);
LOG (GNUNET_ERROR_TYPE_DEBUG, "Finished round\n");
}
0);
if (NULL != peer_ctx->mq)
+ {
GNUNET_MQ_destroy (peer_ctx->mq);
+ peer_ctx->mq = NULL;
+ }
+
if (NULL != peer_ctx->is_live_task)
{
GNUNET_array_grow (gossip_list, gossip_list_size, 0);
GNUNET_array_grow (push_list, push_list_size, 0);
GNUNET_array_grow (pull_list, pull_list_size, 0);
+  #ifdef ENABLE_MALICIOUS
+  GNUNET_array_grow (mal_peers, num_mal_peers, 0);
+  if (NULL != mal_peer_set)
+    GNUNET_CONTAINER_multipeermap_destroy (mal_peer_set);
+  if (NULL != att_peer_set)
+    GNUNET_CONTAINER_multipeermap_destroy (att_peer_set);
+  /* Empty the DLL of attacked peers (was a TODO / memory leak) */
+  while (NULL != att_peers_head)
+  {
+    struct AttackedPeer *tmp_att_peer = att_peers_head;
+    GNUNET_CONTAINER_DLL_remove (att_peers_head,
+                                 att_peers_tail,
+                                 tmp_att_peer);
+    GNUNET_free (tmp_att_peer);
+  }
+  num_attacked_peers = 0;
+  #endif /* ENABLE_MALICIOUS */
}
//}
peer_ctx->recv_channel = channel;
- peer_ctx->mq = NULL;
-
(void) GNUNET_CONTAINER_multipeermap_put (peer_map, &peer, peer_ctx,
GNUNET_CONTAINER_MULTIHASHMAPOPTION_REPLACE);
(struct GNUNET_CADET_Channel *) channel, GNUNET_CADET_OPTION_PEER);
// Guess simply casting isn't the nicest way...
// FIXME wait for cadet to change this function
- LOG (GNUNET_ERROR_TYPE_DEBUG, "Cleaning up channel to peer %s\n",
- GNUNET_i2s (peer));
if (GNUNET_YES == GNUNET_CONTAINER_multipeermap_contains (peer_map, peer))
{
if (channel == peer_ctx->send_channel)
{ /* Peer probably went down */
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "Peer %s destroyed send channel - probably went down, cleaning up\n",
+ GNUNET_i2s (peer));
rem_from_list (&gossip_list, &gossip_list_size, peer);
rem_from_list (&pending_pull_reply_list, &pending_pull_reply_list_size, peer);
+ peer_ctx->send_channel = NULL;
/* Somwewhat {ab,re}use the iterator function */
/* Cast to void is ok, because it's used as void in peer_remove_cb */
(void) peer_remove_cb ((void *) channel, peer, peer_ctx);
}
- else /* Other peer doesn't want to send us messages anymore */
+ else if (channel == peer_ctx->recv_channel)
+ { /* Other peer doesn't want to send us messages anymore */
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "Peer %s destroyed recv channel - cleaning up channel\n",
+ GNUNET_i2s (peer));
peer_ctx->recv_channel = NULL;
+ }
}
}
rps_start (struct GNUNET_SERVER_Handle *server)
{
static const struct GNUNET_SERVER_MessageHandler handlers[] = {
- {&handle_client_request, NULL, GNUNET_MESSAGE_TYPE_RPS_CS_REQUEST,
+ {&handle_client_request, NULL, GNUNET_MESSAGE_TYPE_RPS_CS_REQUEST,
sizeof (struct GNUNET_RPS_CS_RequestMessage)},
- {&handle_client_seed, NULL, GNUNET_MESSAGE_TYPE_RPS_CS_SEED, 0},
+ {&handle_client_seed, NULL, GNUNET_MESSAGE_TYPE_RPS_CS_SEED, 0},
+ #ifdef ENABLE_MALICIOUS
+ {&handle_client_act_malicious, NULL, GNUNET_MESSAGE_TYPE_RPS_ACT_MALICIOUS , 0},
+ #endif /* ENABLE_MALICIOUS */
{NULL, NULL, 0, 0}
};
{&handle_peer_pull_request, GNUNET_MESSAGE_TYPE_RPS_PP_PULL_REQUEST,
sizeof (struct GNUNET_MessageHeader)},
{&handle_peer_pull_reply , GNUNET_MESSAGE_TYPE_RPS_PP_PULL_REPLY , 0},
- #if ENABLE_MALICIOUS
- {&handle_peer_act_malicious, GNUNET_MESSAGE_TYPE_RPS_ACT_MALICIOUS , 0},
- #endif /* ENABLE_MALICIOUS */
{NULL, 0, 0}
};