512d323ac6e9c488bca27845d137d9f78679e68e
[oweals/openwrt.git] / target / linux / layerscape / patches-4.14 / 202-core-linux-support-layerscape.patch
1 From d2ef9f2f6d16d34d7eee74cb8efd269341fec5a1 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 6 May 2019 16:54:17 +0800
4 Subject: [PATCH] core-linux: support layerscape
5
6 This is an integrated patch of core-linux for layerscape
7
8 Signed-off-by: Aaron Lu <aaron.lu@intel.com>
9 Signed-off-by: Abhijit Ayarekar <abhijit.ayarekar@caviumnetworks.com>
10 Signed-off-by: Amrita Kumari <amrita.kumari@nxp.com>
11 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
12 Signed-off-by: Ashish Kumar <Ashish.Kumar@nxp.com>
13 Signed-off-by: Biwen Li <biwen.li@nxp.com>
14 Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
15 Signed-off-by: Christoph Hellwig <hch@lst.de>
16 Signed-off-by: David Ahern <dsahern@gmail.com>
17 Signed-off-by: David S. Miller <davem@davemloft.net>
18 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
19 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
20 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
21 Signed-off-by: Jiri Pirko <jiri@mellanox.com>
22 Signed-off-by: Joel Fernandes <joelaf@google.com>
23 Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
24 Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
25 Signed-off-by: Li Yang <leoyang.li@nxp.com>
26 Signed-off-by: Lukas Wunner <lukas@wunner.de>
27 Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
28 Signed-off-by: Mark Brown <broonie@kernel.org>
29 Signed-off-by: Nikhil Badola <nikhil.badola@freescale.com>
30 Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
31 Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
32 Signed-off-by: pascal paillet <p.paillet@st.com>
33 Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
34 Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
35 Signed-off-by: Robin Murphy <robin.murphy@arm.com>
36 Signed-off-by: Suresh Gupta <suresh.gupta@freescale.com>
37 Signed-off-by: Vivek Gautam <vivek.gautam@codeaurora.org>
38 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
39 ---
40  drivers/base/core.c                       | 122 ++++++++++++++++++++++++++----
41  drivers/base/dma-mapping.c                |   7 ++
42  drivers/gpu/ipu-v3/ipu-pre.c              |   3 +-
43  drivers/gpu/ipu-v3/ipu-prg.c              |   3 +-
44  drivers/iommu/dma-iommu.c                 |   3 +
45  drivers/mux/Kconfig                       |  12 +--
46  drivers/mux/mmio.c                        |   6 +-
47  drivers/of/device.c                       |  14 +++-
48  drivers/soc/imx/gpc.c                     |   2 +-
49  include/linux/device.h                    |  20 +++--
50  include/linux/fsl_devices.h               |   2 +
51  include/linux/netdevice.h                 |  10 ++-
52  include/linux/skbuff.h                    |   2 +
53  lib/dma-noop.c                            |  19 +++++
54  mm/page_alloc.c                           |  10 ++-
55  net/core/dev.c                            |  81 ++++++++++++--------
56  net/core/skbuff.c                         |  29 ++++++-
57  samples/bpf/Makefile                      |  12 ++-
58  samples/bpf/map_perf_test_kern.c          |   2 +-
59  samples/bpf/map_perf_test_user.c          |   2 +-
60  tools/testing/selftests/bpf/bpf_helpers.h |  56 ++++++++++++--
61  21 files changed, 337 insertions(+), 80 deletions(-)
62
63 --- a/drivers/base/core.c
64 +++ b/drivers/base/core.c
65 @@ -162,10 +162,10 @@ static int device_reorder_to_tail(struct
66   * of the link.  If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
67   * ignored.
68   *
69 - * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
70 - * when the consumer device driver unbinds from it.  The combination of both
71 - * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
72 - * to be returned.
73 + * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed
74 + * automatically when the consumer device driver unbinds from it.
75 + * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS
76 + * set is invalid and will cause NULL to be returned.
77   *
78   * A side effect of the link creation is re-ordering of dpm_list and the
79   * devices_kset list by moving the consumer device and all devices depending
80 @@ -182,7 +182,8 @@ struct device_link *device_link_add(stru
81         struct device_link *link;
82  
83         if (!consumer || !supplier ||
84 -           ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
85 +           ((flags & DL_FLAG_STATELESS) &&
86 +            (flags & DL_FLAG_AUTOREMOVE_CONSUMER)))
87                 return NULL;
88  
89         device_links_write_lock();
90 @@ -200,8 +201,10 @@ struct device_link *device_link_add(stru
91         }
92  
93         list_for_each_entry(link, &supplier->links.consumers, s_node)
94 -               if (link->consumer == consumer)
95 +               if (link->consumer == consumer) {
96 +                       kref_get(&link->kref);
97                         goto out;
98 +               }
99  
100         link = kzalloc(sizeof(*link), GFP_KERNEL);
101         if (!link)
102 @@ -233,6 +236,7 @@ struct device_link *device_link_add(stru
103         link->consumer = consumer;
104         INIT_LIST_HEAD(&link->c_node);
105         link->flags = flags;
106 +       kref_init(&link->kref);
107  
108         /* Determine the initial link state. */
109         if (flags & DL_FLAG_STATELESS) {
110 @@ -303,8 +307,10 @@ static void __device_link_free_srcu(stru
111         device_link_free(container_of(rhead, struct device_link, rcu_head));
112  }
113  
114 -static void __device_link_del(struct device_link *link)
115 +static void __device_link_del(struct kref *kref)
116  {
117 +       struct device_link *link = container_of(kref, struct device_link, kref);
118 +
119         dev_info(link->consumer, "Dropping the link to %s\n",
120                  dev_name(link->supplier));
121  
122 @@ -316,8 +322,10 @@ static void __device_link_del(struct dev
123         call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
124  }
125  #else /* !CONFIG_SRCU */
126 -static void __device_link_del(struct device_link *link)
127 +static void __device_link_del(struct kref *kref)
128  {
129 +       struct device_link *link = container_of(kref, struct device_link, kref);
130 +
131         dev_info(link->consumer, "Dropping the link to %s\n",
132                  dev_name(link->supplier));
133  
134 @@ -335,18 +343,50 @@ static void __device_link_del(struct dev
135   * @link: Device link to delete.
136   *
137   * The caller must ensure proper synchronization of this function with runtime
138 - * PM.
139 + * PM.  If the link was added multiple times, it needs to be deleted as often.
140 + * Care is required for hotplugged devices:  Their links are purged on removal
141 + * and calling device_link_del() is then no longer allowed.
142   */
143  void device_link_del(struct device_link *link)
144  {
145         device_links_write_lock();
146         device_pm_lock();
147 -       __device_link_del(link);
148 +       kref_put(&link->kref, __device_link_del);
149         device_pm_unlock();
150         device_links_write_unlock();
151  }
152  EXPORT_SYMBOL_GPL(device_link_del);
153  
154 +/**
155 + * device_link_remove - remove a link between two devices.
156 + * @consumer: Consumer end of the link.
157 + * @supplier: Supplier end of the link.
158 + *
159 + * The caller must ensure proper synchronization of this function with runtime
160 + * PM.
161 + */
162 +void device_link_remove(void *consumer, struct device *supplier)
163 +{
164 +       struct device_link *link;
165 +
166 +       if (WARN_ON(consumer == supplier))
167 +               return;
168 +
169 +       device_links_write_lock();
170 +       device_pm_lock();
171 +
172 +       list_for_each_entry(link, &supplier->links.consumers, s_node) {
173 +               if (link->consumer == consumer) {
174 +                       kref_put(&link->kref, __device_link_del);
175 +                       break;
176 +               }
177 +       }
178 +
179 +       device_pm_unlock();
180 +       device_links_write_unlock();
181 +}
182 +EXPORT_SYMBOL_GPL(device_link_remove);
183 +
184  static void device_links_missing_supplier(struct device *dev)
185  {
186         struct device_link *link;
187 @@ -454,8 +494,8 @@ static void __device_links_no_driver(str
188                 if (link->flags & DL_FLAG_STATELESS)
189                         continue;
190  
191 -               if (link->flags & DL_FLAG_AUTOREMOVE)
192 -                       __device_link_del(link);
193 +               if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
194 +                       kref_put(&link->kref, __device_link_del);
195                 else if (link->status != DL_STATE_SUPPLIER_UNBIND)
196                         WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
197         }
198 @@ -490,8 +530,18 @@ void device_links_driver_cleanup(struct
199                 if (link->flags & DL_FLAG_STATELESS)
200                         continue;
201  
202 -               WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
203 +               WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
204                 WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
205 +
206 +               /*
207 +                * autoremove the links between this @dev and its consumer
208 +                * devices that are not active, i.e. where the link state
209 +                * has moved to DL_STATE_SUPPLIER_UNBIND.
210 +                */
211 +               if (link->status == DL_STATE_SUPPLIER_UNBIND &&
212 +                   link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
213 +                       kref_put(&link->kref, __device_link_del);
214 +
215                 WRITE_ONCE(link->status, DL_STATE_DORMANT);
216         }
217  
218 @@ -608,13 +658,13 @@ static void device_links_purge(struct de
219  
220         list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
221                 WARN_ON(link->status == DL_STATE_ACTIVE);
222 -               __device_link_del(link);
223 +               __device_link_del(&link->kref);
224         }
225  
226         list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
227                 WARN_ON(link->status != DL_STATE_DORMANT &&
228                         link->status != DL_STATE_NONE);
229 -               __device_link_del(link);
230 +               __device_link_del(&link->kref);
231         }
232  
233         device_links_write_unlock();
234 @@ -1036,6 +1086,34 @@ static ssize_t online_store(struct devic
235  }
236  static DEVICE_ATTR_RW(online);
237  
238 +static ssize_t suppliers_show(struct device *dev, struct device_attribute *attr,
239 +                             char *buf)
240 +{
241 +       struct device_link *link;
242 +       size_t count = 0;
243 +
244 +       list_for_each_entry(link, &dev->links.suppliers, c_node)
245 +               count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
246 +                                  dev_name(link->supplier));
247 +
248 +       return count;
249 +}
250 +static DEVICE_ATTR_RO(suppliers);
251 +
252 +static ssize_t consumers_show(struct device *dev, struct device_attribute *attr,
253 +                             char *buf)
254 +{
255 +       struct device_link *link;
256 +       size_t count = 0;
257 +
258 +       list_for_each_entry(link, &dev->links.consumers, s_node)
259 +               count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
260 +                                  dev_name(link->consumer));
261 +
262 +       return count;
263 +}
264 +static DEVICE_ATTR_RO(consumers);
265 +
266  int device_add_groups(struct device *dev, const struct attribute_group **groups)
267  {
268         return sysfs_create_groups(&dev->kobj, groups);
269 @@ -1207,8 +1285,20 @@ static int device_add_attrs(struct devic
270                         goto err_remove_dev_groups;
271         }
272  
273 +       error = device_create_file(dev, &dev_attr_suppliers);
274 +       if (error)
275 +               goto err_remove_online;
276 +
277 +       error = device_create_file(dev, &dev_attr_consumers);
278 +       if (error)
279 +               goto err_remove_suppliers;
280 +
281         return 0;
282  
283 + err_remove_suppliers:
284 +       device_remove_file(dev, &dev_attr_suppliers);
285 + err_remove_online:
286 +       device_remove_file(dev, &dev_attr_online);
287   err_remove_dev_groups:
288         device_remove_groups(dev, dev->groups);
289   err_remove_type_groups:
290 @@ -1226,6 +1316,8 @@ static void device_remove_attrs(struct d
291         struct class *class = dev->class;
292         const struct device_type *type = dev->type;
293  
294 +       device_remove_file(dev, &dev_attr_consumers);
295 +       device_remove_file(dev, &dev_attr_suppliers);
296         device_remove_file(dev, &dev_attr_online);
297         device_remove_groups(dev, dev->groups);
298  
299 --- a/drivers/base/dma-mapping.c
300 +++ b/drivers/base/dma-mapping.c
301 @@ -335,6 +335,7 @@ void dma_common_free_remap(void *cpu_add
302   * Common configuration to enable DMA API use for a device
303   */
304  #include <linux/pci.h>
305 +#include <linux/fsl/mc.h>
306  
307  int dma_configure(struct device *dev)
308  {
309 @@ -350,6 +351,12 @@ int dma_configure(struct device *dev)
310                         dma_dev = dma_dev->parent;
311         }
312  
313 +       if (dev_is_fsl_mc(dev)) {
314 +               dma_dev = dev;
315 +               while (dev_is_fsl_mc(dma_dev))
316 +                       dma_dev = dma_dev->parent;
317 +       }
318 +
319         if (dma_dev->of_node) {
320                 ret = of_dma_configure(dev, dma_dev->of_node);
321         } else if (has_acpi_companion(dma_dev)) {
322 --- a/drivers/gpu/ipu-v3/ipu-pre.c
323 +++ b/drivers/gpu/ipu-v3/ipu-pre.c
324 @@ -124,7 +124,8 @@ ipu_pre_lookup_by_phandle(struct device
325         list_for_each_entry(pre, &ipu_pre_list, list) {
326                 if (pre_node == pre->dev->of_node) {
327                         mutex_unlock(&ipu_pre_list_mutex);
328 -                       device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
329 +                       device_link_add(dev, pre->dev,
330 +                                       DL_FLAG_AUTOREMOVE_CONSUMER);
331                         of_node_put(pre_node);
332                         return pre;
333                 }
334 --- a/drivers/gpu/ipu-v3/ipu-prg.c
335 +++ b/drivers/gpu/ipu-v3/ipu-prg.c
336 @@ -99,7 +99,8 @@ ipu_prg_lookup_by_phandle(struct device
337         list_for_each_entry(prg, &ipu_prg_list, list) {
338                 if (prg_node == prg->dev->of_node) {
339                         mutex_unlock(&ipu_prg_list_mutex);
340 -                       device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
341 +                       device_link_add(dev, prg->dev,
342 +                                       DL_FLAG_AUTOREMOVE_CONSUMER);
343                         prg->id = ipu_id;
344                         of_node_put(prg_node);
345                         return prg;
346 --- a/drivers/iommu/dma-iommu.c
347 +++ b/drivers/iommu/dma-iommu.c
348 @@ -381,6 +381,9 @@ static dma_addr_t iommu_dma_alloc_iova(s
349         if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
350                 iova_len = roundup_pow_of_two(iova_len);
351  
352 +       if (dev->bus_dma_mask)
353 +               dma_limit &= dev->bus_dma_mask;
354 +
355         if (domain->geometry.force_aperture)
356                 dma_limit = min(dma_limit, domain->geometry.aperture_end);
357  
358 --- a/drivers/mux/Kconfig
359 +++ b/drivers/mux/Kconfig
360 @@ -35,14 +35,14 @@ config MUX_GPIO
361           be called mux-gpio.
362  
363  config MUX_MMIO
364 -       tristate "MMIO register bitfield-controlled Multiplexer"
365 -       depends on (OF && MFD_SYSCON) || COMPILE_TEST
366 +       tristate "MMIO/Regmap register bitfield-controlled Multiplexer"
367 +       depends on OF || COMPILE_TEST
368         help
369 -         MMIO register bitfield-controlled Multiplexer controller.
370 +         MMIO/Regmap register bitfield-controlled Multiplexer controller.
371  
372 -         The driver builds multiplexer controllers for bitfields in a syscon
373 -         register. For N bit wide bitfields, there will be 2^N possible
374 -         multiplexer states.
375 +         The driver builds multiplexer controllers for bitfields in either
376 +         a syscon register or a driver regmap register. For N bit wide
377 +         bitfields, there will be 2^N possible multiplexer states.
378  
379           To compile the driver as a module, choose M here: the module will
380           be called mux-mmio.
381 --- a/drivers/mux/mmio.c
382 +++ b/drivers/mux/mmio.c
383 @@ -31,6 +31,7 @@ static const struct mux_control_ops mux_
384  
385  static const struct of_device_id mux_mmio_dt_ids[] = {
386         { .compatible = "mmio-mux", },
387 +       { .compatible = "reg-mux", },
388         { /* sentinel */ }
389  };
390  MODULE_DEVICE_TABLE(of, mux_mmio_dt_ids);
391 @@ -46,7 +47,10 @@ static int mux_mmio_probe(struct platfor
392         int ret;
393         int i;
394  
395 -       regmap = syscon_node_to_regmap(np->parent);
396 +       if (of_device_is_compatible(np, "mmio-mux"))
397 +               regmap = syscon_node_to_regmap(np->parent);
398 +       else
399 +               regmap = dev_get_regmap(dev->parent, NULL) ?: ERR_PTR(-ENODEV);
400         if (IS_ERR(regmap)) {
401                 ret = PTR_ERR(regmap);
402                 dev_err(dev, "failed to get regmap: %d\n", ret);
403 --- a/drivers/of/device.c
404 +++ b/drivers/of/device.c
405 @@ -15,6 +15,9 @@
406  
407  #include <asm/errno.h>
408  #include "of_private.h"
409 +#ifdef CONFIG_FSL_MC_BUS
410 +#include <linux/fsl/mc.h>
411 +#endif
412  
413  /**
414   * of_match_device - Tell if a struct device matches an of_device_id list
415 @@ -105,6 +108,9 @@ int of_dma_configure(struct device *dev,
416  #ifdef CONFIG_ARM_AMBA
417                     dev->bus != &amba_bustype &&
418  #endif
419 +#ifdef CONFIG_FSL_MC_BUS
420 +                   dev->bus != &fsl_mc_bus_type &&
421 +#endif
422                     dev->bus != &platform_bus_type)
423                         return ret == -ENODEV ? 0 : ret;
424  
425 @@ -152,10 +158,16 @@ int of_dma_configure(struct device *dev,
426          * set by the driver.
427          */
428         mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
429 +       dev->bus_dma_mask = mask;
430         dev->coherent_dma_mask &= mask;
431         *dev->dma_mask &= mask;
432  
433 -       coherent = of_dma_is_coherent(np);
434 +#ifdef CONFIG_FSL_MC_BUS
435 +       if (dev_is_fsl_mc(dev))
436 +               coherent = fsl_mc_is_dev_coherent(dev);
437 +       else
438 +#endif
439 +               coherent = of_dma_is_coherent(np);
440         dev_dbg(dev, "device is%sdma coherent\n",
441                 coherent ? " " : " not ");
442  
443 --- a/drivers/soc/imx/gpc.c
444 +++ b/drivers/soc/imx/gpc.c
445 @@ -209,7 +209,7 @@ static int imx_pgc_power_domain_probe(st
446                         goto genpd_err;
447         }
448  
449 -       device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE);
450 +       device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE_CONSUMER);
451  
452         return 0;
453  
454 --- a/include/linux/device.h
455 +++ b/include/linux/device.h
456 @@ -55,6 +55,8 @@ struct bus_attribute {
457         struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
458  #define BUS_ATTR_RO(_name) \
459         struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
460 +#define BUS_ATTR_WO(_name) \
461 +       struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
462  
463  extern int __must_check bus_create_file(struct bus_type *,
464                                         struct bus_attribute *);
465 @@ -750,14 +752,16 @@ enum device_link_state {
466   * Device link flags.
467   *
468   * STATELESS: The core won't track the presence of supplier/consumer drivers.
469 - * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
470 + * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
471   * PM_RUNTIME: If set, the runtime PM framework will use this link.
472   * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
473 + * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
474   */
475 -#define DL_FLAG_STATELESS      BIT(0)
476 -#define DL_FLAG_AUTOREMOVE     BIT(1)
477 -#define DL_FLAG_PM_RUNTIME     BIT(2)
478 -#define DL_FLAG_RPM_ACTIVE     BIT(3)
479 +#define DL_FLAG_STATELESS              BIT(0)
480 +#define DL_FLAG_AUTOREMOVE_CONSUMER    BIT(1)
481 +#define DL_FLAG_PM_RUNTIME             BIT(2)
482 +#define DL_FLAG_RPM_ACTIVE             BIT(3)
483 +#define DL_FLAG_AUTOREMOVE_SUPPLIER    BIT(4)
484  
485  /**
486   * struct device_link - Device link representation.
487 @@ -768,6 +772,7 @@ enum device_link_state {
488   * @status: The state of the link (with respect to the presence of drivers).
489   * @flags: Link flags.
490   * @rpm_active: Whether or not the consumer device is runtime-PM-active.
491 + * @kref: Count repeated addition of the same link.
492   * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
493   */
494  struct device_link {
495 @@ -778,6 +783,7 @@ struct device_link {
496         enum device_link_state status;
497         u32 flags;
498         bool rpm_active;
499 +       struct kref kref;
500  #ifdef CONFIG_SRCU
501         struct rcu_head rcu_head;
502  #endif
503 @@ -850,6 +856,8 @@ struct dev_links_info {
504   * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
505   *             hardware supports 64-bit addresses for consistent allocations
506   *             such descriptors.
507 + * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
508 + *             limit than the device itself supports.
509   * @dma_pfn_offset: offset of DMA memory range relatively of RAM
510   * @dma_parms: A low level driver may set these to teach IOMMU code about
511   *             segment limitations.
512 @@ -929,6 +937,7 @@ struct device {
513                                              not all hardware supports
514                                              64 bit addresses for consistent
515                                              allocations such descriptors. */
516 +       u64             bus_dma_mask;   /* upstream dma_mask constraint */
517         unsigned long   dma_pfn_offset;
518  
519         struct device_dma_parameters *dma_parms;
520 @@ -1267,6 +1276,7 @@ extern const char *dev_driver_string(con
521  struct device_link *device_link_add(struct device *consumer,
522                                     struct device *supplier, u32 flags);
523  void device_link_del(struct device_link *link);
524 +void device_link_remove(void *consumer, struct device *supplier);
525  
526  #ifdef CONFIG_PRINTK
527  
528 --- a/include/linux/fsl_devices.h
529 +++ b/include/linux/fsl_devices.h
530 @@ -99,7 +99,9 @@ struct fsl_usb2_platform_data {
531         unsigned        suspended:1;
532         unsigned        already_suspended:1;
533         unsigned        has_fsl_erratum_a007792:1;
534 +       unsigned        has_fsl_erratum_14:1;
535         unsigned        has_fsl_erratum_a005275:1;
536 +       unsigned        has_fsl_erratum_a006918:1;
537         unsigned        has_fsl_erratum_a005697:1;
538         unsigned        check_phy_clk_valid:1;
539  
540 --- a/include/linux/netdevice.h
541 +++ b/include/linux/netdevice.h
542 @@ -2344,7 +2344,8 @@ int register_netdevice_notifier(struct n
543  int unregister_netdevice_notifier(struct notifier_block *nb);
544  
545  struct netdev_notifier_info {
546 -       struct net_device *dev;
547 +       struct net_device       *dev;
548 +       struct netlink_ext_ack  *extack;
549  };
550  
551  struct netdev_notifier_info_ext {
552 @@ -2376,6 +2377,7 @@ static inline void netdev_notifier_info_
553                                              struct net_device *dev)
554  {
555         info->dev = dev;
556 +       info->extack = NULL;
557  }
558  
559  static inline struct net_device *
560 @@ -2384,6 +2386,12 @@ netdev_notifier_info_to_dev(const struct
561         return info->dev;
562  }
563  
564 +static inline struct netlink_ext_ack *
565 +netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
566 +{
567 +       return info->extack;
568 +}
569 +
570  int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
571  
572  
573 --- a/include/linux/skbuff.h
574 +++ b/include/linux/skbuff.h
575 @@ -964,6 +964,7 @@ void kfree_skb_list(struct sk_buff *segs
576  void skb_tx_error(struct sk_buff *skb);
577  void consume_skb(struct sk_buff *skb);
578  void __consume_stateless_skb(struct sk_buff *skb);
579 +void skb_recycle(struct sk_buff *skb);
580  void  __kfree_skb(struct sk_buff *skb);
581  extern struct kmem_cache *skbuff_head_cache;
582  
583 @@ -3297,6 +3298,7 @@ static inline void skb_free_datagram_loc
584  }
585  int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
586  int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
587 +void copy_skb_header(struct sk_buff *new, const struct sk_buff *old);
588  int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
589  __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
590                               int len, __wsum csum);
591 --- a/lib/dma-noop.c
592 +++ b/lib/dma-noop.c
593 @@ -58,11 +58,30 @@ static int dma_noop_map_sg(struct device
594         return nents;
595  }
596  
597 +static int dma_noop_supported(struct device *dev, u64 mask)
598 +{
599 +#ifdef CONFIG_ZONE_DMA
600 +       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
601 +       return 0;
602 +#else
603 +       /*
604 +        * Because 32-bit DMA masks are so common we expect every architecture
605 +        * to be able to satisfy them - either by not supporting more physical
606 +        * memory, or by providing a ZONE_DMA32.  If neither is the case, the
607 +        * architecture needs to use an IOMMU instead of the direct mapping.
608 +        */
609 +       if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
610 +               return 0;
611 +#endif
612 +       return 1;
613 +}
614 +
615  const struct dma_map_ops dma_noop_ops = {
616         .alloc                  = dma_noop_alloc,
617         .free                   = dma_noop_free,
618         .map_page               = dma_noop_map_page,
619         .map_sg                 = dma_noop_map_sg,
620 +       .dma_supported          = dma_noop_supported,
621  };
622  
623  EXPORT_SYMBOL(dma_noop_ops);
624 --- a/mm/page_alloc.c
625 +++ b/mm/page_alloc.c
626 @@ -4366,8 +4366,14 @@ void page_frag_free(void *addr)
627  {
628         struct page *page = virt_to_head_page(addr);
629  
630 -       if (unlikely(put_page_testzero(page)))
631 -               __free_pages_ok(page, compound_order(page));
632 +       if (unlikely(put_page_testzero(page))) {
633 +               unsigned int order = compound_order(page);
634 +
635 +               if (order == 0)         /* Via pcp? */
636 +                       free_hot_cold_page(page, false);
637 +               else
638 +                       __free_pages_ok(page, order);
639 +       }
640  }
641  EXPORT_SYMBOL(page_frag_free);
642  
643 --- a/net/core/dev.c
644 +++ b/net/core/dev.c
645 @@ -162,7 +162,6 @@ static struct list_head offload_base __r
646  
647  static int netif_rx_internal(struct sk_buff *skb);
648  static int call_netdevice_notifiers_info(unsigned long val,
649 -                                        struct net_device *dev,
650                                          struct netdev_notifier_info *info);
651  static struct napi_struct *napi_by_id(unsigned int napi_id);
652  
653 @@ -1312,10 +1311,11 @@ EXPORT_SYMBOL(netdev_features_change);
654  void netdev_state_change(struct net_device *dev)
655  {
656         if (dev->flags & IFF_UP) {
657 -               struct netdev_notifier_change_info change_info;
658 +               struct netdev_notifier_change_info change_info = {
659 +                       .info.dev = dev,
660 +               };
661  
662 -               change_info.flags_changed = 0;
663 -               call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
664 +               call_netdevice_notifiers_info(NETDEV_CHANGE,
665                                               &change_info.info);
666                 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
667         }
668 @@ -1536,9 +1536,10 @@ EXPORT_SYMBOL(dev_disable_lro);
669  static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
670                                    struct net_device *dev)
671  {
672 -       struct netdev_notifier_info info;
673 +       struct netdev_notifier_info info = {
674 +               .dev = dev,
675 +       };
676  
677 -       netdev_notifier_info_init(&info, dev);
678         return nb->notifier_call(nb, val, &info);
679  }
680  
681 @@ -1663,11 +1664,9 @@ EXPORT_SYMBOL(unregister_netdevice_notif
682   */
683  
684  static int call_netdevice_notifiers_info(unsigned long val,
685 -                                        struct net_device *dev,
686                                          struct netdev_notifier_info *info)
687  {
688         ASSERT_RTNL();
689 -       netdev_notifier_info_init(info, dev);
690         return raw_notifier_call_chain(&netdev_chain, val, info);
691  }
692  
693 @@ -1682,9 +1681,11 @@ static int call_netdevice_notifiers_info
694  
695  int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
696  {
697 -       struct netdev_notifier_info info;
698 +       struct netdev_notifier_info info = {
699 +               .dev = dev,
700 +       };
701  
702 -       return call_netdevice_notifiers_info(val, dev, &info);
703 +       return call_netdevice_notifiers_info(val, &info);
704  }
705  EXPORT_SYMBOL(call_netdevice_notifiers);
706  
707 @@ -1707,7 +1708,7 @@ static int call_netdevice_notifiers_mtu(
708  
709         BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
710  
711 -       return call_netdevice_notifiers_info(val, dev, &info.info);
712 +       return call_netdevice_notifiers_info(val, &info.info);
713  }
714  
715  #ifdef CONFIG_NET_INGRESS
716 @@ -6341,7 +6342,15 @@ static int __netdev_upper_dev_link(struc
717                                    struct net_device *upper_dev, bool master,
718                                    void *upper_priv, void *upper_info)
719  {
720 -       struct netdev_notifier_changeupper_info changeupper_info;
721 +       struct netdev_notifier_changeupper_info changeupper_info = {
722 +               .info = {
723 +                       .dev = dev,
724 +               },
725 +               .upper_dev = upper_dev,
726 +               .master = master,
727 +               .linking = true,
728 +               .upper_info = upper_info,
729 +       };
730         int ret = 0;
731  
732         ASSERT_RTNL();
733 @@ -6359,12 +6368,7 @@ static int __netdev_upper_dev_link(struc
734         if (master && netdev_master_upper_dev_get(dev))
735                 return -EBUSY;
736  
737 -       changeupper_info.upper_dev = upper_dev;
738 -       changeupper_info.master = master;
739 -       changeupper_info.linking = true;
740 -       changeupper_info.upper_info = upper_info;
741 -
742 -       ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
743 +       ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
744                                             &changeupper_info.info);
745         ret = notifier_to_errno(ret);
746         if (ret)
747 @@ -6376,7 +6380,7 @@ static int __netdev_upper_dev_link(struc
748                 return ret;
749  
750         netdev_update_addr_mask(dev);
751 -       ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
752 +       ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
753                                             &changeupper_info.info);
754         ret = notifier_to_errno(ret);
755         if (ret)
756 @@ -6440,21 +6444,25 @@ EXPORT_SYMBOL(netdev_master_upper_dev_li
757  void netdev_upper_dev_unlink(struct net_device *dev,
758                              struct net_device *upper_dev)
759  {
760 -       struct netdev_notifier_changeupper_info changeupper_info;
761 +       struct netdev_notifier_changeupper_info changeupper_info = {
762 +               .info = {
763 +                       .dev = dev,
764 +               },
765 +               .upper_dev = upper_dev,
766 +               .linking = false,
767 +       };
768  
769         ASSERT_RTNL();
770  
771 -       changeupper_info.upper_dev = upper_dev;
772         changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
773 -       changeupper_info.linking = false;
774  
775 -       call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
776 +       call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
777                                       &changeupper_info.info);
778  
779         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
780  
781         netdev_update_addr_mask(dev);
782 -       call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
783 +       call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
784                                       &changeupper_info.info);
785  }
786  EXPORT_SYMBOL(netdev_upper_dev_unlink);
787 @@ -6470,11 +6478,13 @@ EXPORT_SYMBOL(netdev_upper_dev_unlink);
788  void netdev_bonding_info_change(struct net_device *dev,
789                                 struct netdev_bonding_info *bonding_info)
790  {
791 -       struct netdev_notifier_bonding_info     info;
792 +       struct netdev_notifier_bonding_info info = {
793 +               .info.dev = dev,
794 +       };
795  
796         memcpy(&info.bonding_info, bonding_info,
797                sizeof(struct netdev_bonding_info));
798 -       call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
799 +       call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
800                                       &info.info);
801  }
802  EXPORT_SYMBOL(netdev_bonding_info_change);
803 @@ -6600,11 +6610,13 @@ EXPORT_SYMBOL(dev_get_nest_level);
804  void netdev_lower_state_changed(struct net_device *lower_dev,
805                                 void *lower_state_info)
806  {
807 -       struct netdev_notifier_changelowerstate_info changelowerstate_info;
808 +       struct netdev_notifier_changelowerstate_info changelowerstate_info = {
809 +               .info.dev = lower_dev,
810 +       };
811  
812         ASSERT_RTNL();
813         changelowerstate_info.lower_state_info = lower_state_info;
814 -       call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
815 +       call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
816                                       &changelowerstate_info.info);
817  }
818  EXPORT_SYMBOL(netdev_lower_state_changed);
819 @@ -6895,11 +6907,14 @@ void __dev_notify_flags(struct net_devic
820  
821         if (dev->flags & IFF_UP &&
822             (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
823 -               struct netdev_notifier_change_info change_info;
824 +               struct netdev_notifier_change_info change_info = {
825 +                       .info = {
826 +                               .dev = dev,
827 +                       },
828 +                       .flags_changed = changes,
829 +               };
830  
831 -               change_info.flags_changed = changes;
832 -               call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
833 -                                             &change_info.info);
834 +               call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
835         }
836  }
837  
838 --- a/net/core/skbuff.c
839 +++ b/net/core/skbuff.c
840 @@ -803,6 +803,32 @@ void napi_consume_skb(struct sk_buff *sk
841  }
842  EXPORT_SYMBOL(napi_consume_skb);
843  
844 +/**
845 + *     skb_recycle - clean up an skb for reuse
846 + *     @skb: buffer
847 + *
848 + *     Recycles the skb to be reused as a receive buffer. This
849 + *     function does any necessary reference count dropping, and
850 + *     cleans up the skbuff as if it just came from __alloc_skb().
851 + */
852 +void skb_recycle(struct sk_buff *skb)
853 +{
854 +       struct skb_shared_info *shinfo;
855 +       u8 head_frag = skb->head_frag;
856 +
857 +       skb_release_head_state(skb);
858 +
859 +       shinfo = skb_shinfo(skb);
860 +       memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
861 +       atomic_set(&shinfo->dataref, 1);
862 +
863 +       memset(skb, 0, offsetof(struct sk_buff, tail));
864 +       skb->data = skb->head + NET_SKB_PAD;
865 +       skb->head_frag = head_frag;
866 +       skb_reset_tail_pointer(skb);
867 +}
868 +EXPORT_SYMBOL(skb_recycle);
869 +
870  /* Make sure a field is enclosed inside headers_start/headers_end section */
871  #define CHECK_SKB_FIELD(field) \
872         BUILD_BUG_ON(offsetof(struct sk_buff, field) <          \
873 @@ -1322,7 +1348,7 @@ static void skb_headers_offset_update(st
874         skb->inner_mac_header += off;
875  }
876  
877 -static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
878 +void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
879  {
880         __copy_skb_header(new, old);
881  
882 @@ -1330,6 +1356,7 @@ static void copy_skb_header(struct sk_bu
883         skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
884         skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
885  }
886 +EXPORT_SYMBOL(copy_skb_header);
887  
888  static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
889  {
890 --- a/samples/bpf/Makefile
891 +++ b/samples/bpf/Makefile
892 @@ -178,6 +178,12 @@ HOSTLOADLIBES_syscall_tp += -lelf
893  LLC ?= llc
894  CLANG ?= clang
895  
896 +# Detect that we're cross compiling and use the cross compiler
897 +ifdef CROSS_COMPILE
898 +HOSTCC = $(CROSS_COMPILE)gcc
899 +CLANG_ARCH_ARGS = -target $(ARCH)
900 +endif
901 +
902  # Trick to allow make to be run from this directory
903  all: $(LIBBPF)
904         $(MAKE) -C ../../ $(CURDIR)/
905 @@ -228,9 +234,9 @@ $(obj)/tracex5_kern.o: $(obj)/syscall_nr
906  $(obj)/%.o: $(src)/%.c
907         $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
908                 -I$(srctree)/tools/testing/selftests/bpf/ \
909 -               -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
910 -               -Wno-compare-distinct-pointer-types \
911 +               -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
912 +               -D__TARGET_ARCH_$(ARCH) -Wno-compare-distinct-pointer-types \
913                 -Wno-gnu-variable-sized-type-not-at-end \
914                 -Wno-address-of-packed-member -Wno-tautological-compare \
915 -               -Wno-unknown-warning-option \
916 +               -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
917                 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
918 --- a/samples/bpf/map_perf_test_kern.c
919 +++ b/samples/bpf/map_perf_test_kern.c
920 @@ -266,7 +266,7 @@ int stress_hash_map_lookup(struct pt_reg
921         return 0;
922  }
923  
924 -SEC("kprobe/sys_getpgrp")
925 +SEC("kprobe/sys_getppid")
926  int stress_array_map_lookup(struct pt_regs *ctx)
927  {
928         u32 key = 1, i;
929 --- a/samples/bpf/map_perf_test_user.c
930 +++ b/samples/bpf/map_perf_test_user.c
931 @@ -282,7 +282,7 @@ static void test_array_lookup(int cpu)
932  
933         start_time = time_get_ns();
934         for (i = 0; i < max_cnt; i++)
935 -               syscall(__NR_getpgrp, 0);
936 +               syscall(__NR_getppid, 0);
937         printf("%d:array_lookup %lld lookups per sec\n",
938                cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
939  }
940 --- a/tools/testing/selftests/bpf/bpf_helpers.h
941 +++ b/tools/testing/selftests/bpf/bpf_helpers.h
942 @@ -110,7 +110,47 @@ static int (*bpf_skb_under_cgroup)(void
943  static int (*bpf_skb_change_head)(void *, int len, int flags) =
944         (void *) BPF_FUNC_skb_change_head;
945  
946 +/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
947 +#if defined(__TARGET_ARCH_x86)
948 +       #define bpf_target_x86
949 +       #define bpf_target_defined
950 +#elif defined(__TARGET_ARCH_s390x)
951 +	#define bpf_target_s390x
952 +       #define bpf_target_defined
953 +#elif defined(__TARGET_ARCH_arm64)
954 +       #define bpf_target_arm64
955 +       #define bpf_target_defined
956 +#elif defined(__TARGET_ARCH_mips)
957 +       #define bpf_target_mips
958 +       #define bpf_target_defined
959 +#elif defined(__TARGET_ARCH_powerpc)
960 +       #define bpf_target_powerpc
961 +       #define bpf_target_defined
962 +#elif defined(__TARGET_ARCH_sparc)
963 +       #define bpf_target_sparc
964 +       #define bpf_target_defined
965 +#else
966 +       #undef bpf_target_defined
967 +#endif
968 +
969 +/* Fall back to what the compiler says */
970 +#ifndef bpf_target_defined
971  #if defined(__x86_64__)
972 +       #define bpf_target_x86
973 +#elif defined(__s390x__)
974 +	#define bpf_target_s390x
975 +#elif defined(__aarch64__)
976 +       #define bpf_target_arm64
977 +#elif defined(__mips__)
978 +       #define bpf_target_mips
979 +#elif defined(__powerpc__)
980 +       #define bpf_target_powerpc
981 +#elif defined(__sparc__)
982 +       #define bpf_target_sparc
983 +#endif
984 +#endif
985 +
986 +#if defined(bpf_target_x86)
987  
988  #define PT_REGS_PARM1(x) ((x)->di)
989  #define PT_REGS_PARM2(x) ((x)->si)
990 @@ -123,7 +163,7 @@ static int (*bpf_skb_change_head)(void *
991  #define PT_REGS_SP(x) ((x)->sp)
992  #define PT_REGS_IP(x) ((x)->ip)
993  
994 -#elif defined(__s390x__)
995 +#elif defined(bpf_target_s390x)
996  
997  #define PT_REGS_PARM1(x) ((x)->gprs[2])
998  #define PT_REGS_PARM2(x) ((x)->gprs[3])
999 @@ -136,7 +176,7 @@ static int (*bpf_skb_change_head)(void *
1000  #define PT_REGS_SP(x) ((x)->gprs[15])
1001  #define PT_REGS_IP(x) ((x)->psw.addr)
1002  
1003 -#elif defined(__aarch64__)
1004 +#elif defined(bpf_target_arm64)
1005  
1006  #define PT_REGS_PARM1(x) ((x)->regs[0])
1007  #define PT_REGS_PARM2(x) ((x)->regs[1])
1008 @@ -149,7 +189,7 @@ static int (*bpf_skb_change_head)(void *
1009  #define PT_REGS_SP(x) ((x)->sp)
1010  #define PT_REGS_IP(x) ((x)->pc)
1011  
1012 -#elif defined(__mips__)
1013 +#elif defined(bpf_target_mips)
1014  
1015  #define PT_REGS_PARM1(x) ((x)->regs[4])
1016  #define PT_REGS_PARM2(x) ((x)->regs[5])
1017 @@ -162,7 +202,7 @@ static int (*bpf_skb_change_head)(void *
1018  #define PT_REGS_SP(x) ((x)->regs[29])
1019  #define PT_REGS_IP(x) ((x)->cp0_epc)
1020  
1021 -#elif defined(__powerpc__)
1022 +#elif defined(bpf_target_powerpc)
1023  
1024  #define PT_REGS_PARM1(x) ((x)->gpr[3])
1025  #define PT_REGS_PARM2(x) ((x)->gpr[4])
1026 @@ -173,7 +213,7 @@ static int (*bpf_skb_change_head)(void *
1027  #define PT_REGS_SP(x) ((x)->sp)
1028  #define PT_REGS_IP(x) ((x)->nip)
1029  
1030 -#elif defined(__sparc__)
1031 +#elif defined(bpf_target_sparc)
1032  
1033  #define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
1034  #define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
1035 @@ -183,6 +223,8 @@ static int (*bpf_skb_change_head)(void *
1036  #define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
1037  #define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
1038  #define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
1039 +
1040 +/* Should this also be a bpf_target check for the sparc case? */
1041  #if defined(__arch64__)
1042  #define PT_REGS_IP(x) ((x)->tpc)
1043  #else
1044 @@ -191,10 +233,10 @@ static int (*bpf_skb_change_head)(void *
1045  
1046  #endif
1047  
1048 -#ifdef __powerpc__
1049 +#ifdef bpf_target_powerpc
1050  #define BPF_KPROBE_READ_RET_IP(ip, ctx)                ({ (ip) = (ctx)->link; })
1051  #define BPF_KRETPROBE_READ_RET_IP              BPF_KPROBE_READ_RET_IP
1052 -#elif defined(__sparc__)
1053 +#elif defined(bpf_target_sparc)
1054  #define BPF_KPROBE_READ_RET_IP(ip, ctx)                ({ (ip) = PT_REGS_RET(ctx); })
1055  #define BPF_KRETPROBE_READ_RET_IP              BPF_KPROBE_READ_RET_IP
1056  #else