net/batman-adv/send.c (Linux-libre 3.4.8-gnu1)
/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
                    const uint8_t *dst_addr)
{
        struct ethhdr *ethhdr;

        if (hard_iface->if_status != IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warning("Interface %s is not up - can't send packet via that interface!\n",
                           hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header. */
        if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error. */

        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

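/* resize the OGM packet buffer of the given interface; the existing
 * BATMAN_OGM_LEN bytes are preserved and the old buffer is only replaced
 * if the new allocation succeeds */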
static void realloc_packet_buffer(struct hard_iface *hard_iface,
                                  int new_len)
{
        unsigned char *new_buff;

        new_buff = kmalloc(new_len, GFP_ATOMIC);

        /* keep old buffer if kmalloc should fail */
        if (new_buff) {
                memcpy(new_buff, hard_iface->packet_buff,
                       BATMAN_OGM_LEN);

                kfree(hard_iface->packet_buff);
                hard_iface->packet_buff = new_buff;
                hard_iface->packet_len = new_len;
        }
}

/* when calling this function (hard_iface == primary_if) has to be true */
static int prepare_packet_buffer(struct bat_priv *bat_priv,
                                 struct hard_iface *hard_iface)
{
        int new_len;

        new_len = BATMAN_OGM_LEN +
                  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

        /* if we have too many changes for one packet don't send any
         * and wait for the tt table request which will be fragmented */
        if (new_len > hard_iface->soft_iface->mtu)
                new_len = BATMAN_OGM_LEN;

        realloc_packet_buffer(hard_iface, new_len);

        atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

        /* reset the sending counter */
        atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

        return tt_changes_fill_buffer(bat_priv,
                                      hard_iface->packet_buff + BATMAN_OGM_LEN,
                                      hard_iface->packet_len - BATMAN_OGM_LEN);
}

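/* shrink the packet buffer back to the plain OGM size, dropping any
 * appended tt changes; returns 0 as the new tt change count */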
static int reset_packet_buffer(struct bat_priv *bat_priv,
                               struct hard_iface *hard_iface)
{
        realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
        return 0;
}

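/* prepare an OGM for the given interface and hand it to the routing
 * algorithm for scheduling; on the primary interface pending tt changes
 * are committed and appended to the packet buffer first */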
void schedule_bat_ogm(struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hard_iface *primary_if;
        int tt_num_changes = -1;

        if ((hard_iface->if_status == IF_NOT_IN_USE) ||
            (hard_iface->if_status == IF_TO_BE_REMOVED))
                return;

        /**
         * the interface gets activated here to avoid a race condition
         * between the moment the interface is activated in
         * hardif_activate_interface() (where the originator mac is set) and
         * outdated packets (especially with uninitialized mac addresses)
         * still sitting in the packet queue
         */
        if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
                hard_iface->if_status = IF_ACTIVE;

        primary_if = primary_if_get_selected(bat_priv);

        if (hard_iface == primary_if) {
                /* if at least one change happened */
                if (atomic_read(&bat_priv->tt_local_changes) > 0) {
                        tt_commit_changes(bat_priv);
                        tt_num_changes = prepare_packet_buffer(bat_priv,
                                                               hard_iface);
                }

                /* if the changes have been sent often enough */
                if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
                        tt_num_changes = reset_packet_buffer(bat_priv,
                                                             hard_iface);
        }

        if (primary_if)
                hardif_free_ref(primary_if);

        bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
}

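/* free a forwarding packet together with its skb and the reference it
 * holds on the incoming interface */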
static void forw_packet_free(struct forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
}

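/* enqueue an already prepared broadcast packet and schedule its
 * (re)transmission after send_time jiffies */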
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
                                      struct forw_packet *forw_packet,
                                      unsigned long send_time)
{
        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          send_outstanding_bcast_packet);
        queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
                             const struct sk_buff *skb, unsigned long delay)
{
        struct hard_iface *primary_if = NULL;
        struct forw_packet *forw_packet;
        struct bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
                goto out;
        }

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct bcast_packet *)newskb->data;
        bcast_packet->header.ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;

        /* how often did we send the bcast packet? */
        forw_packet->num_packets = 0;

        _add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
}

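/* delayed work callback: rebroadcast the queued packet on all interfaces
 * belonging to the soft interface and re-arm the timer until the packet
 * has been sent three times */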
static void send_outstanding_bcast_packet(struct work_struct *work)
{
        struct hard_iface *hard_iface;
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct sk_buff *skb1;
        struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
        struct bat_priv *bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        send_skb_packet(skb1, hard_iface, broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < 3) {
                _add_bcast_packet_to_list(bat_priv, forw_packet,
                                          ((5 * HZ) / 1000));
                return;
        }

out:
        forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

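/* delayed work callback: emit a queued OGM via the routing algorithm and,
 * for our own OGMs, schedule the next one on the same interface */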
void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct bat_priv *bat_priv;

        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

        /**
         * we have to have at least one packet in the queue
         * to determine the queue's wake up time unless we are
         * shutting down
         */
        if (forw_packet->own)
                schedule_bat_ogm(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        forw_packet_free(forw_packet);
}

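/* cancel all scheduled broadcast and OGM packets, or only those queued on
 * the given interface if hard_iface is not NULL */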
void purge_outstanding_packets(struct bat_priv *bat_priv,
                               const struct hard_iface *hard_iface)
{
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
        bool pending;

        if (hard_iface)
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets(): %s\n",
                        hard_iface->net_dev->name);
        else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {

                /**
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /**
                 * send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {

                /**
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /**
                 * send_outstanding_bat_ogm_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}