 * Compute a random delay.
 * A uniformly distributed value between mean - (mean / spread) and
 * mean + (mean / spread).
 *
 * For example, for a mean of 4 min and a spread of 2, the minimum is
 * (4 min - (1/2 * 4 min)) = 2 min, so a random value between 2 and 6 min
 * is returned.
 *
 * @param mean the mean
 * @param spread the inverse amount of deviation from the mean
 */
+static struct GNUNET_TIME_Relative
+compute_rand_delay (struct GNUNET_TIME_Relative mean, unsigned int spread)
+{
+ struct GNUNET_TIME_Relative half_interval;
+ struct GNUNET_TIME_Relative ret;
+ unsigned int rand_delay;
+ unsigned int max_rand_delay;
+
+ if (0 == spread)
+ {
+ LOG (GNUNET_ERROR_TYPE_WARNING,
+ "Not accepting spread of 0\n");
+ GNUNET_break (0);
+ }
+
+ /* Compute random time value between spread * mean and spread * mean */
+ half_interval = GNUNET_TIME_relative_divide (mean, spread);
+
+ max_rand_delay = GNUNET_TIME_UNIT_FOREVER_REL.rel_value_us / mean.rel_value_us * (2/spread);
+ /**
+ * Compute random value between (0 and 1) * round_interval
+ * via multiplying round_interval with a 'fraction' (0 to value)/value
+ */
+ rand_delay = GNUNET_CRYPTO_random_u32 (GNUNET_CRYPTO_QUALITY_WEAK, max_rand_delay);
+ ret = GNUNET_TIME_relative_multiply (mean, rand_delay);
+ ret = GNUNET_TIME_relative_divide (ret, max_rand_delay);
+ ret = GNUNET_TIME_relative_add (ret, half_interval);
+
+ if (GNUNET_TIME_UNIT_FOREVER_REL.rel_value_us == ret.rel_value_us)
+ LOG (GNUNET_ERROR_TYPE_WARNING,
+ "Returning FOREVER_REL\n");
+
+ return ret;
+}
+
+
+/**
+ * Send single pull request
+ *
+ * @param peer_id the peer to send the pull request to.
+ */
+static void
+send_pull_request (struct GNUNET_PeerIdentity *peer_id)
+{
+ struct GNUNET_MQ_Envelope *ev;
+ struct GNUNET_MQ_Handle *mq;
+
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "Sending PULL request to peer %s of gossiped list.\n",
+ GNUNET_i2s (peer_id));
+
+ GNUNET_array_append (pending_pull_reply_list, pending_pull_reply_list_size, *peer_id);
+
+ ev = GNUNET_MQ_msg_header (GNUNET_MESSAGE_TYPE_RPS_PP_PULL_REQUEST);
+ mq = get_mq (peer_map, peer_id);
+ GNUNET_MQ_send (mq, ev);
+}
+
+
+/**
+ * Send single push
+ *
+ * @param peer_id the peer to send the push to.
+ */
+static void
+send_push (struct GNUNET_PeerIdentity *peer_id)
+{
+ struct GNUNET_MQ_Envelope *ev;
+ struct GNUNET_MQ_Handle *mq;
+
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "Sending PUSH to peer %s of gossiped list.\n",
+ GNUNET_i2s (peer_id));
+
+ ev = GNUNET_MQ_msg_header (GNUNET_MESSAGE_TYPE_RPS_PP_PUSH);
+ mq = get_mq (peer_map, peer_id);
+ GNUNET_MQ_send (mq, ev);
+}
+
+
/* Forward declarations: the regular and the malicious round tasks
 * substitute each other when the service toggles malicious behaviour. */
static void
do_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc);

static void
do_mal_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc);
+
+
+#ifdef ENABLE_MALICIOUS
+/**
+ * Turn RPS service to act malicious.
+ *
+ * @param cls Closure
+ * @param channel The channel the PUSH was received over
+ * @param channel_ctx The context associated with this channel
+ * @param msg The message header
+ */
+ static void
+handle_client_act_malicious (void *cls,
+ struct GNUNET_SERVER_Client *client,
+ const struct GNUNET_MessageHeader *msg)
+{
+ struct GNUNET_RPS_CS_ActMaliciousMessage *in_msg;
+ struct GNUNET_PeerIdentity *peers;
+ uint32_t num_mal_peers_sent;
+ uint32_t num_mal_peers_old;
+
+ /* Check for protocol violation */
+ if (sizeof (struct GNUNET_RPS_CS_ActMaliciousMessage) > ntohs (msg->size))
+ {
+ GNUNET_break_op (0);
+ }
+
+ in_msg = (struct GNUNET_RPS_CS_ActMaliciousMessage *) msg;
+ if ((ntohs (msg->size) - sizeof (struct GNUNET_RPS_CS_ActMaliciousMessage)) /
+ sizeof (struct GNUNET_PeerIdentity) != ntohl (in_msg->num_peers))
+ {
+ LOG (GNUNET_ERROR_TYPE_ERROR,
+ "message says it sends %" PRIu64 " peers, have space for %i peers\n",
+ ntohl (in_msg->num_peers),
+ (ntohs (msg->size) - sizeof (struct GNUNET_RPS_CS_ActMaliciousMessage)) /
+ sizeof (struct GNUNET_PeerIdentity));
+ GNUNET_break_op (0);
+ }
+
+
+ /* Do actual logic */
+ peers = (struct GNUNET_PeerIdentity *) &msg[1];
+ mal_type = ntohl (in_msg->type);
+
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "Now acting malicious type %" PRIu32 "\n",
+ mal_type);
+
+ if (1 == mal_type)
+ { /* Try to maximise representation */
+ /* Add other malicious peers to those we already know */
+
+ num_mal_peers_sent = ntohl (in_msg->num_peers);
+ num_mal_peers_old = num_mal_peers;
+ GNUNET_array_grow (mal_peers,
+ num_mal_peers,
+ num_mal_peers + num_mal_peers_sent);
+ memcpy (&mal_peers[num_mal_peers_old],
+ peers,
+ num_mal_peers_sent * sizeof (struct GNUNET_PeerIdentity));
+
+ /* Add all mal peers to mal_peer_set */
+ add_peer_array_to_set (&mal_peers[num_mal_peers_old],
+ num_mal_peers_sent,
+ mal_peer_set);
+
+ /* Substitute do_round () with do_mal_round () */
+ GNUNET_SCHEDULER_cancel (do_round_task);
+ do_round_task = GNUNET_SCHEDULER_add_now (&do_mal_round, NULL);
+ }
+
+ else if (2 == mal_type)
+ { /* Try to partition the network */
+ /* Add other malicious peers to those we already know */
+ num_mal_peers_sent = ntohl (in_msg->num_peers) - 1;
+ num_mal_peers_old = num_mal_peers;
+ GNUNET_array_grow (mal_peers,
+ num_mal_peers,
+ num_mal_peers + num_mal_peers_sent);
+ memcpy (&mal_peers[num_mal_peers_old],
+ peers,
+ num_mal_peers_sent * sizeof (struct GNUNET_PeerIdentity));
+
+ /* Add all mal peers to mal_peer_set */
+ add_peer_array_to_set (&mal_peers[num_mal_peers_old],
+ num_mal_peers_sent,
+ mal_peer_set);
+
+ /* Store the one attacked peer */
+ memcpy (&attacked_peer,
+ &peers[num_mal_peers_sent],
+ sizeof (struct GNUNET_PeerIdentity));
+
+ LOG (GNUNET_ERROR_TYPE_DEBUG,
+ "Attacked peer is %s\n",
+ GNUNET_i2s (&attacked_peer));
+
+ /* Substitute do_round () with do_mal_round () */
+ GNUNET_SCHEDULER_cancel (do_round_task);
+ do_round_task = GNUNET_SCHEDULER_add_now (&do_mal_round, NULL);
+ }
+ else if (0 == mal_type)
+ { /* Stop acting malicious */
+ num_mal_peers = 0;
+ GNUNET_free (mal_peers);
+
+ /* Substitute do_mal_round () with do_round () */
+ GNUNET_SCHEDULER_cancel (do_round_task);
+ do_round_task = GNUNET_SCHEDULER_add_now (&do_round, NULL);
+ }
+ else
+ {
+ GNUNET_break (0);
+ }
+}
+
+
+/**
+ * Send out PUSHes and PULLs maliciously.
+ *
+ * This is executed regylary.
+ */
+static void
+do_mal_round (void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
+{
+ uint32_t num_pushes;
+ uint32_t i;
+ struct GNUNET_TIME_Relative time_next_round;
+ struct AttackedPeer *tmp_att_peer;
+
+ LOG (GNUNET_ERROR_TYPE_DEBUG, "Going to execute next round maliciously.\n");
+
+ /* Do malicious actions */
+ if (1 == mal_type)
+ { /* Try to maximise representation */
+
+ /* The maximum of pushes we're going to send this round */
+ num_pushes = min (min (push_limit,
+ num_attacked_peers),
+ GNUNET_CONSTANTS_MAX_CADET_MESSAGE_SIZE);
+
+ /* Send PUSHes to attacked peers */
+ for (i = 0 ; i < num_pushes ; i++)
+ {
+ if (att_peers_tail == att_peer_index)
+ att_peer_index = att_peers_head;
+ else
+ att_peer_index = att_peer_index->next;
+
+ send_push (&att_peer_index->peer_id);
+ }
+
+ /* Send PULLs to some peers to learn about additional peers to attack */
+ for (i = 0 ; i < num_pushes * alpha ; i++)
+ {
+ if (att_peers_tail == tmp_att_peer)
+ tmp_att_peer = att_peers_head;
+ else
+ att_peer_index = tmp_att_peer->next;
+
+ send_pull_request (&tmp_att_peer->peer_id);
+ }
+ }
+
+
+ else if (2 == mal_type)
+ { /**
+ * Try to partition the network
+ * Send as many pushes to the attacked peer as possible
+ * That is one push per round as it will ignore more.
+ */
+ send_push (&attacked_peer);
+ }
+
+
+ /* Schedule next round */
+ time_next_round = compute_rand_delay (round_interval, 2);
+
+ //do_round_task = GNUNET_SCHEDULER_add_delayed (round_interval, &do_mal_round, NULL);
+ do_round_task = GNUNET_SCHEDULER_add_delayed (time_next_round, &do_mal_round, NULL);
+ LOG (GNUNET_ERROR_TYPE_DEBUG, "Finished round\n");
+}
+#endif /* ENABLE_MALICIOUS */
+
+
+/**
+ * Send out PUSHes and PULLs, possibly update #gossip_list, samplers.