target/linux/generic/hack-4.9/220-gc_sections.patch
From e3d8676f5722b7622685581e06e8f53e6138e3ab Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Sat, 15 Jul 2017 23:42:36 +0200
Subject: use -ffunction-sections, -fdata-sections and --gc-sections

In combination with kernel symbol export stripping this significantly reduces
the kernel image size. Used on both ARM and MIPS architectures.
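
For illustration only (not part of the upstream change): a minimal,
hypothetical user-space sketch of the mechanism. With -ffunction-sections
every function lands in its own .text.<name> section, and the linker's
--gc-sections pass discards sections that are not reachable from the entry
point. Sections that are only walked at runtime through start/stop symbols
(initcalls, __ex_table, earlycon tables, ...) look unreachable to the
linker, which is why the hunks below wrap them in KEEP().

  /* demo.c -- hypothetical example, not kernel code */
  #include <stdio.h>

  /* External linkage, so the compiler must emit it; nothing calls it,
   * so --gc-sections can drop the whole .text.unused_helper section. */
  int unused_helper(int x)
  {
          return x + 40;
  }

  static int used_helper(int x)
  {
          return x * 2;
  }

  int main(void)
  {
          printf("%d\n", used_helper(21));
          return 0;
  }

Built with something like:

  gcc -ffunction-sections -fdata-sections -c demo.c
  gcc -Wl,--gc-sections -Wl,--print-gc-sections demo.o -o demo

--print-gc-sections should report .text.unused_helper being removed,
while main() and used_helper() survive.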

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
 Makefile                          | 10 +++----
 arch/arm/Kconfig                  |  1 +
 arch/arm/boot/compressed/Makefile |  1 +
 arch/arm/kernel/vmlinux.lds.S     | 26 ++++++++--------
 arch/mips/Kconfig                 |  1 +
 arch/mips/kernel/vmlinux.lds.S    |  4 +--
 include/asm-generic/vmlinux.lds.h | 63 ++++++++++++++++++++-------------------
 7 files changed, 55 insertions(+), 51 deletions(-)

--- a/Makefile
+++ b/Makefile
@@ -409,6 +409,11 @@ KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds $(if $(CONFIG_PROFILING),,-s)
 
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+KBUILD_CFLAGS_KERNEL   += $(call cc-option,-ffunction-sections,)
+KBUILD_CFLAGS_KERNEL   += $(call cc-option,-fdata-sections,)
+endif
+
 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
 KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
 KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
@@ -633,11 +638,6 @@ KBUILD_CFLAGS      += $(call cc-disable-warni
 KBUILD_CFLAGS  += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS  += $(call cc-disable-warning, int-in-bool-context)
 
-ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
-KBUILD_CFLAGS  += $(call cc-option,-ffunction-sections,)
-KBUILD_CFLAGS  += $(call cc-option,-fdata-sections,)
-endif
-
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS  += -Os $(call cc-disable-warning,maybe-uninitialized,) $(EXTRA_OPTIMIZATION)
 else
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -81,6 +81,7 @@ config ARM
        select HAVE_UID16
        select HAVE_VIRT_CPU_ACCOUNTING_GEN
        select IRQ_FORCED_THREADING
+       select LD_DEAD_CODE_DATA_ELIMINATION
        select MODULES_USE_ELF_REL
        select NO_BOOTMEM
        select OF_EARLY_FLATTREE if OF
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -102,6 +102,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
 
 # -fstack-protector-strong triggers protection checks in this code,
 # but it is being used too early to link to meaningful stack_chk logic.
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -17,7 +17,7 @@
 #define PROC_INFO                                                      \
        . = ALIGN(4);                                                   \
        VMLINUX_SYMBOL(__proc_info_begin) = .;                          \
-       *(.proc.info.init)                                              \
+       KEEP(*(.proc.info.init))                                        \
        VMLINUX_SYMBOL(__proc_info_end) = .;
 
 #define HYPERVISOR_TEXT                                                        \
@@ -28,11 +28,11 @@
 #define IDMAP_TEXT                                                     \
        ALIGN_FUNCTION();                                               \
        VMLINUX_SYMBOL(__idmap_text_start) = .;                         \
-       *(.idmap.text)                                                  \
+       KEEP(*(.idmap.text))                                            \
        VMLINUX_SYMBOL(__idmap_text_end) = .;                           \
        . = ALIGN(PAGE_SIZE);                                           \
        VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;                     \
-       *(.hyp.idmap.text)                                              \
+       KEEP(*(.hyp.idmap.text))                                        \
        VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -105,7 +105,7 @@ SECTIONS
                _stext = .;             /* Text and read-only data      */
                        IDMAP_TEXT
                        __exception_text_start = .;
-                       *(.exception.text)
+                       KEEP(*(.exception.text))
                        __exception_text_end = .;
                        IRQENTRY_TEXT
                        SOFTIRQENTRY_TEXT
@@ -134,7 +134,7 @@ SECTIONS
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
 #ifdef CONFIG_MMU
-               *(__ex_table)
+               KEEP(*(__ex_table))
 #endif
                __stop___ex_table = .;
        }
@@ -146,12 +146,12 @@ SECTIONS
        . = ALIGN(8);
        .ARM.unwind_idx : {
                __start_unwind_idx = .;
-               *(.ARM.exidx*)
+               KEEP(*(.ARM.exidx*))
                __stop_unwind_idx = .;
        }
        .ARM.unwind_tab : {
                __start_unwind_tab = .;
-               *(.ARM.extab*)
+               KEEP(*(.ARM.extab*))
                __stop_unwind_tab = .;
        }
 #endif
@@ -171,14 +171,14 @@ SECTIONS
         */
        __vectors_start = .;
        .vectors 0xffff0000 : AT(__vectors_start) {
-               *(.vectors)
+               KEEP(*(.vectors))
        }
        . = __vectors_start + SIZEOF(.vectors);
        __vectors_end = .;
 
        __stubs_start = .;
        .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
-               *(.stubs)
+               KEEP(*(.stubs))
        }
        . = __stubs_start + SIZEOF(.stubs);
        __stubs_end = .;
@@ -194,24 +194,24 @@ SECTIONS
        }
        .init.arch.info : {
                __arch_info_begin = .;
-               *(.arch.info.init)
+               KEEP(*(.arch.info.init))
                __arch_info_end = .;
        }
        .init.tagtable : {
                __tagtable_begin = .;
-               *(.taglist.init)
+               KEEP(*(.taglist.init))
                __tagtable_end = .;
        }
 #ifdef CONFIG_SMP_ON_UP
        .init.smpalt : {
                __smpalt_begin = .;
-               *(.alt.smp.init)
+               KEEP(*(.alt.smp.init))
                __smpalt_end = .;
        }
 #endif
        .init.pv_table : {
                __pv_table_begin = .;
-               *(.pv_table)
+               KEEP(*(.pv_table))
                __pv_table_end = .;
        }
        .init.data : {
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -55,6 +55,7 @@ config MIPS
        select CLONE_BACKWARDS
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_CC_STACKPROTECTOR
+       select LD_DEAD_CODE_DATA_ELIMINATION
        select CPU_PM if CPU_IDLE
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_BINFMT_ELF_STATE
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -71,7 +71,7 @@ SECTIONS
        /* Exception table for data bus errors */
        __dbe_table : {
                __start___dbe_table = .;
-               *(__dbe_table)
+               KEEP(*(__dbe_table))
                __stop___dbe_table = .;
        }
 
@@ -121,7 +121,7 @@ SECTIONS
        . = ALIGN(4);
        .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
                __mips_machines_start = .;
-               *(.mips.machines.init)
+               KEEP(*(.mips.machines.init))
                __mips_machines_end = .;
        }
 
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -130,7 +130,7 @@
 #ifdef CONFIG_KPROBES
 #define KPROBE_BLACKLIST()     . = ALIGN(8);                                 \
                                VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
-                               *(_kprobe_blacklist)                          \
+                               KEEP(*(_kprobe_blacklist))                    \
                                VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
 #else
 #define KPROBE_BLACKLIST()
@@ -139,10 +139,10 @@
 #ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS()        . = ALIGN(8);                                   \
                        VMLINUX_SYMBOL(__start_ftrace_events) = .;      \
-                       *(_ftrace_events)                               \
+                       KEEP(*(_ftrace_events))                         \
                        VMLINUX_SYMBOL(__stop_ftrace_events) = .;       \
                        VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;   \
-                       *(_ftrace_enum_map)                             \
+                       KEEP(*(_ftrace_enum_map))                       \
                        VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
 #else
 #define FTRACE_EVENTS()
@@ -163,7 +163,7 @@
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define TRACE_SYSCALLS() . = ALIGN(8);                                 \
                         VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
-                        *(__syscalls_metadata)                         \
+                        KEEP(*(__syscalls_metadata))                   \
                         VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
 #define TRACE_SYSCALLS()
@@ -172,7 +172,7 @@
 #ifdef CONFIG_SERIAL_EARLYCON
 #define EARLYCON_TABLE() STRUCT_ALIGN();                       \
                         VMLINUX_SYMBOL(__earlycon_table) = .;  \
-                        *(__earlycon_table)                    \
+                        KEEP(*(__earlycon_table))              \
                         VMLINUX_SYMBOL(__earlycon_table_end) = .;
 #else
 #define EARLYCON_TABLE()
@@ -185,8 +185,8 @@
 #define _OF_TABLE_1(name)                                              \
        . = ALIGN(8);                                                   \
        VMLINUX_SYMBOL(__##name##_of_table) = .;                        \
-       *(__##name##_of_table)                                          \
-       *(__##name##_of_table_end)
+       KEEP(*(__##name##_of_table))                                    \
+       KEEP(*(__##name##_of_table_end))
 
 #define CLKSRC_OF_TABLES()     OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -209,7 +209,7 @@
 #define KERNEL_DTB()                                                   \
        STRUCT_ALIGN();                                                 \
        VMLINUX_SYMBOL(__dtb_start) = .;                                \
-       *(.dtb.init.rodata)                                             \
+       KEEP(*(.dtb.init.rodata))                                       \
        VMLINUX_SYMBOL(__dtb_end) = .;
 
 /*
@@ -227,16 +227,17 @@
        /* implement dynamic printk debug */                            \
        . = ALIGN(8);                                                   \
        VMLINUX_SYMBOL(__start___jump_table) = .;                       \
-       *(__jump_table)                                                 \
+       KEEP(*(__jump_table))                                           \
        VMLINUX_SYMBOL(__stop___jump_table) = .;                        \
        . = ALIGN(8);                                                   \
        VMLINUX_SYMBOL(__start___verbose) = .;                          \
-       *(__verbose)                                                    \
+       KEEP(*(__verbose))                                              \
        VMLINUX_SYMBOL(__stop___verbose) = .;                           \
        LIKELY_PROFILE()                                                \
        BRANCH_PROFILE()                                                \
        TRACE_PRINTKS()                                                 \
-       TRACEPOINT_STR()
+       TRACEPOINT_STR()                                                \
+       *(.data.[a-zA-Z_]*)
 
 /*
  * Data section helpers
@@ -304,35 +305,35 @@
        /* PCI quirks */                                                \
        .pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {        \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .;           \
-               *(.pci_fixup_early)                                     \
+               KEEP(*(.pci_fixup_early))                               \
                VMLINUX_SYMBOL(__end_pci_fixups_early) = .;             \
                VMLINUX_SYMBOL(__start_pci_fixups_header) = .;          \
-               *(.pci_fixup_header)                                    \
+               KEEP(*(.pci_fixup_header))                              \
                VMLINUX_SYMBOL(__end_pci_fixups_header) = .;            \
                VMLINUX_SYMBOL(__start_pci_fixups_final) = .;           \
-               *(.pci_fixup_final)                                     \
+               KEEP(*(.pci_fixup_final))                               \
                VMLINUX_SYMBOL(__end_pci_fixups_final) = .;             \
                VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;          \
-               *(.pci_fixup_enable)                                    \
+               KEEP(*(.pci_fixup_enable))                              \
                VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;            \
                VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;          \
-               *(.pci_fixup_resume)                                    \
+               KEEP(*(.pci_fixup_resume))                              \
                VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;            \
                VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;    \
-               *(.pci_fixup_resume_early)                              \
+               KEEP(*(.pci_fixup_resume_early))                        \
                VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;      \
                VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;         \
-               *(.pci_fixup_suspend)                                   \
+               KEEP(*(.pci_fixup_suspend))                             \
                VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;           \
                VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;    \
-               *(.pci_fixup_suspend_late)                              \
+               KEEP(*(.pci_fixup_suspend_late))                        \
                VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;      \
        }                                                               \
                                                                        \
        /* Built-in firmware blobs */                                   \
        .builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {      \
                VMLINUX_SYMBOL(__start_builtin_fw) = .;                 \
-               *(.builtin_fw)                                          \
+               KEEP(*(.builtin_fw))                                    \
                VMLINUX_SYMBOL(__end_builtin_fw) = .;                   \
        }                                                               \
                                                                        \
@@ -410,7 +411,7 @@
                                                                        \
        /* Kernel symbol table: strings */                              \
        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {        \
-               KEEP(*(__ksymtab_strings))                              \
+               *(__ksymtab_strings)                                    \
        }                                                               \
                                                                        \
        /* __*init sections */                                          \
@@ -423,14 +424,14 @@
        /* Built-in module parameters. */                               \
        __param : AT(ADDR(__param) - LOAD_OFFSET) {                     \
                VMLINUX_SYMBOL(__start___param) = .;                    \
-               *(__param)                                              \
+               KEEP(*(__param))                                        \
                VMLINUX_SYMBOL(__stop___param) = .;                     \
        }                                                               \
                                                                        \
        /* Built-in module versions. */                                 \
        __modver : AT(ADDR(__modver) - LOAD_OFFSET) {                   \
                VMLINUX_SYMBOL(__start___modver) = .;                   \
-               *(__modver)                                             \
+               KEEP(*(__modver))                                       \
                VMLINUX_SYMBOL(__stop___modver) = .;                    \
                . = ALIGN((align));                                     \
                VMLINUX_SYMBOL(__end_rodata) = .;                       \
@@ -496,7 +497,7 @@
 #define ENTRY_TEXT                                                     \
                ALIGN_FUNCTION();                                       \
                VMLINUX_SYMBOL(__entry_text_start) = .;                 \
-               *(.entry.text)                                          \
+               KEEP(*(.entry.text))                                    \
                VMLINUX_SYMBOL(__entry_text_end) = .;
 
 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
@@ -534,7 +535,7 @@
        . = ALIGN(align);                                               \
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {               \
                VMLINUX_SYMBOL(__start___ex_table) = .;                 \
-               *(__ex_table)                                           \
+               KEEP(*(__ex_table))                                             \
                VMLINUX_SYMBOL(__stop___ex_table) = .;                  \
        }
 
@@ -550,9 +551,9 @@
 #ifdef CONFIG_CONSTRUCTORS
 #define KERNEL_CTORS() . = ALIGN(8);                      \
                        VMLINUX_SYMBOL(__ctors_start) = .; \
-                       *(.ctors)                          \
+                       KEEP(*(.ctors))                    \
                        *(SORT(.init_array.*))             \
-                       *(.init_array)                     \
+                       KEEP(*(.init_array))               \
                        VMLINUX_SYMBOL(__ctors_end) = .;
 #else
 #define KERNEL_CTORS()
@@ -609,7 +610,7 @@
 #define SBSS(sbss_align)                                               \
        . = ALIGN(sbss_align);                                          \
        .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {                         \
-               *(.sbss)                                                \
+               *(.sbss .sbss.*)                                        \
                *(.scommon)                                             \
        }
 
@@ -676,7 +677,7 @@
        . = ALIGN(8);                                                   \
        __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {             \
                VMLINUX_SYMBOL(__start___bug_table) = .;                \
-               *(__bug_table)                                          \
+               KEEP(*(__bug_table))                                    \
                VMLINUX_SYMBOL(__stop___bug_table) = .;                 \
        }
 #else
@@ -688,7 +689,7 @@
        . = ALIGN(4);                                                   \
        .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {               \
                VMLINUX_SYMBOL(__tracedata_start) = .;                  \
-               *(.tracedata)                                           \
+               KEEP(*(.tracedata))                                     \
                VMLINUX_SYMBOL(__tracedata_end) = .;                    \
        }
 #else
@@ -705,7 +706,7 @@
 #define INIT_SETUP(initsetup_align)                                    \
                . = ALIGN(initsetup_align);                             \
                VMLINUX_SYMBOL(__setup_start) = .;                      \
-               *(.init.setup)                                          \
+               KEEP(*(.init.setup))                                    \
                VMLINUX_SYMBOL(__setup_end) = .;
 
 #define INIT_CALLS_LEVEL(level)                                                \