+ * Context for the #garbage_collect_cb().
+ */
struct GarbageContext
{

  /**
   * Map for which we are garbage collecting removed elements.
   * (Set to the set's `content->elements` map by
   * collect_generation_garbage().)
   */
  struct GNUNET_CONTAINER_MultiHashMap *map;

  /**
   * Lowest generation for which an operation is still pending.
   * UINT_MAX if no operation is pending at all.
   */
  unsigned int min_op_generation;

  /**
   * Largest generation for which an operation is still pending.
   * 0 if no operation is pending at all.
   */
  unsigned int max_op_generation;

};
+
+
+/**
+ * Function invoked to check if an element can be removed from
+ * the set's history because it is no longer needed.
+ *
+ * @param cls the `struct GarbageContext *`
+ * @param key key of the element in the map
+ * @param value the `struct ElementEntry *`
+ * @return #GNUNET_OK (continue to iterate)
+ */
+static int
+garbage_collect_cb (void *cls,
+ const struct GNUNET_HashCode *key,
+ void *value)
+{
+ //struct GarbageContext *gc = cls;
+ //struct ElementEntry *ee = value;
+
+ //if (GNUNET_YES != ee->removed)
+ // return GNUNET_OK;
+ //if ( (gc->max_op_generation < ee->generation_added) ||
+ // (ee->generation_removed > gc->min_op_generation) )
+ //{
+ // GNUNET_assert (GNUNET_YES ==
+ // GNUNET_CONTAINER_multihashmap_remove (gc->map,
+ // key,
+ // ee));
+ // GNUNET_free (ee);
+ //}
+ return GNUNET_OK;
+}
+
+
+/**
+ * Collect and destroy elements that are not needed anymore, because
+ * their lifetime (as determined by their generation) does not overlap
+ * with any active set operation.
+ *
+ * @param set set to garbage collect
+ */
+static void
+collect_generation_garbage (struct Set *set)
+{
+ struct Operation *op;
+ struct GarbageContext gc;
+
+ gc.min_op_generation = UINT_MAX;
+ gc.max_op_generation = 0;
+ for (op = set->ops_head; NULL != op; op = op->next)
+ {
+ gc.min_op_generation = GNUNET_MIN (gc.min_op_generation,
+ op->generation_created);
+ gc.max_op_generation = GNUNET_MAX (gc.max_op_generation,
+ op->generation_created);
+ }
+ gc.map = set->content->elements;
+ GNUNET_CONTAINER_multihashmap_iterate (set->content->elements,
+ &garbage_collect_cb,
+ &gc);
+}
+
+
+static int
+is_excluded_generation (unsigned int generation,
+ struct GenerationRange *excluded,
+ unsigned int excluded_size)
+{
+ unsigned int i;
+
+ for (i = 0; i < excluded_size; i++)
+ {
+ if ( (generation >= excluded[i].start) && (generation < excluded[i].end) )
+ return GNUNET_YES;
+ }
+
+ return GNUNET_NO;
+}
+
+
/**
 * Determine whether @a ee is an element of the set at generation
 * @a query_generation by replaying the element's mutation history
 * (add/remove events), ignoring mutations from excluded generations.
 *
 * @param ee element entry whose membership is tested; must have a
 *        non-NULL mutation list
 * @param query_generation generation to test membership for; must not
 *        itself be an excluded generation
 * @param excluded array of excluded generation ranges (from lazy-copy forks)
 * @param excluded_size number of entries in @a excluded
 * @return #GNUNET_YES if the element is present at @a query_generation,
 *         #GNUNET_NO otherwise
 */
static int
is_element_of_generation (struct ElementEntry *ee,
                          unsigned int query_generation,
                          struct GenerationRange *excluded,
                          unsigned int excluded_size)
{
  struct MutationEvent *mut;
  int is_present;
  unsigned int i;

  GNUNET_assert (NULL != ee->mutations);

  /* Querying an excluded generation is a caller bug; report absent. */
  if (GNUNET_YES == is_excluded_generation (query_generation, excluded, excluded_size))
  {
    GNUNET_break (0);
    return GNUNET_NO;
  }

  is_present = GNUNET_NO;

  /* Could be made faster with binary search, but lists
     are small, so why bother. */
  for (i = 0; i < ee->mutations_size; i++)
  {
    mut = &ee->mutations[i];

    if (mut->generation > query_generation)
    {
      /* The mutation doesn't apply to our generation
         anymore. We can't break here, since mutations aren't
         sorted by generation. */
      continue;
    }

    if (GNUNET_YES == is_excluded_generation (mut->generation, excluded, excluded_size))
    {
      /* The generation is excluded (because it belongs to another
         fork via a lazy copy) and thus mutations aren't considered
         for membership testing. */
      continue;
    }

    /* This would be an inconsistency in how we manage mutations:
       an element must not be added twice without a removal in between. */
    if ( (GNUNET_YES == is_present) && (GNUNET_YES == mut->added) )
      GNUNET_assert (0);

    /* Likewise: an element must not be removed while absent. */
    if ( (GNUNET_NO == is_present) && (GNUNET_NO == mut->added) )
      GNUNET_assert (0);

    /* The latest applicable mutation seen so far decides presence. */
    is_present = mut->added;
  }

  return is_present;
}
+
+
+int
+_GSS_is_element_of_set (struct ElementEntry *ee,
+ struct Set *set)
+{
+ return is_element_of_generation (ee,
+ set->current_generation,
+ set->excluded_generations,
+ set->excluded_generations_size);
+}
+
+
+static int
+is_element_of_iteration (struct ElementEntry *ee,
+ struct Set *set)
+{
+ return is_element_of_generation (ee,
+ set->iter_generation,
+ set->excluded_generations,
+ set->excluded_generations_size);
+}
+
+
+int
+_GSS_is_element_of_operation (struct ElementEntry *ee,
+ struct Operation *op)
+{
+ return is_element_of_generation (ee,
+ op->generation_created,
+ op->spec->set->excluded_generations,
+ op->spec->set->excluded_generations_size);
+}
+
+
/**
 * Destroy the given operation. Call the implementation-specific
 * cancel function of the operation. Disconnects from the remote
 * peer. Does not disconnect the client, as there may be multiple
 * operations per set.
 *
 * @param op operation to destroy
 * @param gc #GNUNET_YES to perform garbage collection on the set
 */
void
_GSS_operation_destroy (struct Operation *op,
                        int gc)
{
  struct Set *set;
  struct GNUNET_CADET_Channel *channel;

  /* op->vt doubles as the "destruction in progress" marker:
     it is cleared below before cancel() runs, so a re-entrant
     call bails out here instead of double-freeing. */
  if (NULL == op->vt)
  {
    /* already in #_GSS_operation_destroy() */
    return;
  }
  GNUNET_assert (GNUNET_NO == op->is_incoming);
  GNUNET_assert (NULL != op->spec);
  set = op->spec->set;
  GNUNET_CONTAINER_DLL_remove (set->ops_head,
                               set->ops_tail,
                               op);
  /* Clear vt BEFORE cancel(): anything cancel() triggers that
     re-enters this function must see the marker already set. */
  op->vt->cancel (op);
  op->vt = NULL;
  /* NOTE(review): the assert above already guarantees
     op->spec != NULL, so this guard is redundant (but harmless). */
  if (NULL != op->spec)
  {
    if (NULL != op->spec->context_msg)
    {
      GNUNET_free (op->spec->context_msg);
      op->spec->context_msg = NULL;
    }
    GNUNET_free (op->spec);
    op->spec = NULL;
  }
  if (NULL != op->mq)
  {
    GNUNET_MQ_destroy (op->mq);
    op->mq = NULL;
  }
  /* Null op->channel BEFORE destroying it, so the CADET channel
     end handler (which fires during destroy) sees no channel and
     does not try to destroy it again. */
  if (NULL != (channel = op->channel))
  {
    op->channel = NULL;
    GNUNET_CADET_channel_destroy (channel);
  }
  if (GNUNET_YES == gc)
    collect_generation_garbage (set);
  /* We rely on the channel end handler to free 'op'. When 'op->channel' was NULL,
   * there was a channel end handler that will free 'op' on the call stack. */
}
+
+
+/**
+ * Iterator over hash map entries to free element entries.