ath79/mikrotik: use routerbootpart partitions
[oweals/openwrt.git] / target / linux / layerscape / patches-5.4 / 802-can-0006-can-rx-offload-Prepare-for-CAN-FD-support.patch
1 From 42abc4a8a97a87734c759c02c5ba255ed5124a2c Mon Sep 17 00:00:00 2001
2 From: Joakim Zhang <qiangqing.zhang@nxp.com>
3 Date: Fri, 12 Jul 2019 08:02:38 +0000
4 Subject: [PATCH] can: rx-offload: Prepare for CAN FD support
5
6 The skbs for classic CAN and CAN FD frames are allocated with separate
7 functions: alloc_can_skb() and alloc_canfd_skb().
8
9 In order to support CAN FD frames via the rx-offload helper, the driver
10 itself has to allocate the skb (depending whether it received a classic
11 CAN or CAN FD frame), as the rx-offload helper cannot know which kind of
12 CAN frame the driver has received.
13
14 This patch moves the allocation of the skb into the struct
15 can_rx_offload::mailbox_read callbacks of the flexcan and ti_hecc
16 driver and adjusts the rx-offload helper accordingly.
17
18 Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
19 Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
20 ---
21  drivers/net/can/flexcan.c      | 27 +++++++++++-----
22  drivers/net/can/rx-offload.c   | 70 ++++++++++--------------------------------
23  include/linux/can/rx-offload.h |  6 ++--
24  3 files changed, 40 insertions(+), 63 deletions(-)
25
26 --- a/drivers/net/can/flexcan.c
27 +++ b/drivers/net/can/flexcan.c
28 @@ -784,16 +784,23 @@ static inline struct flexcan_priv *rx_of
29         return container_of(offload, struct flexcan_priv, offload);
30  }
31  
32 -static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
33 -                                        struct can_frame *cf,
34 -                                        u32 *timestamp, unsigned int n)
35 +static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
36 +                                           unsigned int n, u32 *timestamp,
37 +                                           bool drop)
38  {
39         struct flexcan_priv *priv = rx_offload_to_priv(offload);
40         struct flexcan_regs __iomem *regs = priv->regs;
41         struct flexcan_mb __iomem *mb;
42 +       struct sk_buff *skb;
43 +       struct can_frame *cf;
44         u32 reg_ctrl, reg_id, reg_iflag1;
45         int i;
46  
47 +       if (unlikely(drop)) {
48 +               skb = ERR_PTR(-ENOBUFS);
49 +               goto mark_as_read;
50 +       }
51 +
52         mb = flexcan_get_mb(priv, n);
53  
54         if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
55 @@ -807,7 +814,7 @@ static unsigned int flexcan_mailbox_read
56                 code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
57                 if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
58                     (code != FLEXCAN_MB_CODE_RX_OVERRUN))
59 -                       return 0;
60 +                       return NULL;
61  
62                 if (code == FLEXCAN_MB_CODE_RX_OVERRUN) {
63                         /* This MB was overrun, we lost data */
64 @@ -817,11 +824,17 @@ static unsigned int flexcan_mailbox_read
65         } else {
66                 reg_iflag1 = priv->read(&regs->iflag1);
67                 if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
68 -                       return 0;
69 +                       return NULL;
70  
71                 reg_ctrl = priv->read(&mb->can_ctrl);
72         }
73  
74 +       skb = alloc_can_skb(offload->dev, &cf);
75 +       if (!skb) {
76 +               skb = ERR_PTR(-ENOMEM);
77 +               goto mark_as_read;
78 +       }
79 +
80         /* increase timstamp to full 32 bit */
81         *timestamp = reg_ctrl << 16;
82  
83 @@ -840,7 +853,7 @@ static unsigned int flexcan_mailbox_read
84                 *(__be32 *)(cf->data + i) = data;
85         }
86  
87 -       /* mark as read */
88 + mark_as_read:
89         if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
90                 /* Clear IRQ */
91                 if (n < 32)
92 @@ -857,7 +870,7 @@ static unsigned int flexcan_mailbox_read
93          */
94         priv->read(&regs->timer);
95  
96 -       return 1;
97 +       return skb;
98  }
99  
100  
101 --- a/drivers/net/can/rx-offload.c
102 +++ b/drivers/net/can/rx-offload.c
103 @@ -139,71 +139,35 @@ static int can_rx_offload_compare(struct
104  static struct sk_buff *
105  can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
106  {
107 -       struct sk_buff *skb = NULL, *skb_error = NULL;
108 +       struct sk_buff *skb;
109         struct can_rx_offload_cb *cb;
110 -       struct can_frame *cf;
111 -       int ret;
112 +       bool drop = false;
113 +       u32 timestamp;
114  
115 -       if (likely(skb_queue_len(&offload->skb_queue) <
116 -                  offload->skb_queue_len_max)) {
117 -               skb = alloc_can_skb(offload->dev, &cf);
118 -               if (unlikely(!skb))
119 -                       skb_error = ERR_PTR(-ENOMEM);   /* skb alloc failed */
120 -       } else {
121 -               skb_error = ERR_PTR(-ENOBUFS);          /* skb_queue is full */
122 -       }
123 -
124 -       /* If queue is full or skb not available, drop by reading into
125 -        * overflow buffer.
126 -        */
127 -       if (unlikely(skb_error)) {
128 -               struct can_frame cf_overflow;
129 -               u32 timestamp;
130 -
131 -               ret = offload->mailbox_read(offload, &cf_overflow,
132 -                                           &timestamp, n);
133 -
134 -               /* Mailbox was empty. */
135 -               if (unlikely(!ret))
136 -                       return NULL;
137 -
138 -               /* Mailbox has been read and we're dropping it or
139 -                * there was a problem reading the mailbox.
140 -                *
141 -                * Increment error counters in any case.
142 -                */
143 -               offload->dev->stats.rx_dropped++;
144 -               offload->dev->stats.rx_fifo_errors++;
145 -
146 -               /* There was a problem reading the mailbox, propagate
147 -                * error value.
148 -                */
149 -               if (unlikely(ret < 0))
150 -                       return ERR_PTR(ret);
151 -
152 -               return skb_error;
153 -       }
154 -
155 -       cb = can_rx_offload_get_cb(skb);
156 -       ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
157 +       /* If queue is full drop frame */
158 +       if (unlikely(skb_queue_len(&offload->skb_queue) >
159 +                    offload->skb_queue_len_max))
160 +               drop = true;
161  
162 +       skb = offload->mailbox_read(offload, n, &timestamp, drop);
163         /* Mailbox was empty. */
164 -       if (unlikely(!ret)) {
165 -               kfree_skb(skb);
166 +       if (unlikely(!skb))
167                 return NULL;
168 -       }
169 -
170 -       /* There was a problem reading the mailbox, propagate error value. */
171 -       if (unlikely(ret < 0)) {
172 -               kfree_skb(skb);
173  
174 +       /* There was a problem reading the mailbox, propagate
175 +        * error value.
176 +        */
177 +       if (unlikely(IS_ERR(skb))) {
178                 offload->dev->stats.rx_dropped++;
179                 offload->dev->stats.rx_fifo_errors++;
180  
181 -               return ERR_PTR(ret);
182 +               return skb;
183         }
184  
185         /* Mailbox was read. */
186 +       cb = can_rx_offload_get_cb(skb);
187 +       cb->timestamp = timestamp;
188 +
189         return skb;
190  }
191  
192 --- a/include/linux/can/rx-offload.h
193 +++ b/include/linux/can/rx-offload.h
194 @@ -15,9 +15,9 @@
195  struct can_rx_offload {
196         struct net_device *dev;
197  
198 -       unsigned int (*mailbox_read)(struct can_rx_offload *offload,
199 -                                    struct can_frame *cf,
200 -                                    u32 *timestamp, unsigned int mb);
201 +       struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload,
202 +                                       unsigned int mb, u32 *timestamp,
203 +                                       bool drop);
204  
205         struct sk_buff_head skb_queue;
206         u32 skb_queue_len_max;