From: Piotr Dymacz
Date: Tue, 29 Mar 2016 22:28:28 +0000 (+0200)
Subject: Fix new gcc compile errors (do not use extern inline functions)
X-Git-Url: https://git.librecmc.org/?a=commitdiff_plain;h=6d509b9219dc0e87f06ac2f24df60505fc1af707;p=oweals%2Fu-boot_mod.git

Fix new gcc compile errors (do not use extern inline functions)
---

diff --git a/u-boot/include/asm-mips/bitops.h b/u-boot/include/asm-mips/bitops.h
index ae2693a..fec57f5 100644
--- a/u-boot/include/asm-mips/bitops.h
+++ b/u-boot/include/asm-mips/bitops.h
@@ -60,7 +60,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-extern __inline__ void
+static __inline__ void
 set_bit(int nr, volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
@@ -84,7 +84,7 @@ set_bit(int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-extern __inline__ void __set_bit(int nr, volatile void * addr)
+static __inline__ void __set_bit(int nr, volatile void * addr)
 {
 	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);
 
@@ -101,7 +101,7 @@ extern __inline__ void __set_bit(int nr, volatile void * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-extern __inline__ void
+static __inline__ void
 clear_bit(int nr, volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
@@ -125,7 +125,7 @@ clear_bit(int nr, volatile void *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-extern __inline__ void
+static __inline__ void
 change_bit(int nr, volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
@@ -149,7 +149,7 @@ change_bit(int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-extern __inline__ void __change_bit(int nr, volatile void * addr)
+static __inline__ void __change_bit(int nr, volatile void * addr)
 {
 	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);
 
@@ -164,7 +164,7 @@ extern __inline__ void __change_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern __inline__ int
+static __inline__ int
 test_and_set_bit(int nr, volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
@@ -194,7 +194,7 @@ test_and_set_bit(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile int *a = addr;
@@ -215,7 +215,7 @@ extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern __inline__ int
+static __inline__ int
 test_and_clear_bit(int nr, volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
@@ -246,7 +246,7 @@ test_and_clear_bit(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile int *a = addr;
@@ -267,7 +267,7 @@ extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern __inline__ int
+static __inline__ int
 test_and_change_bit(int nr, volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
@@ -297,7 +297,7 @@ test_and_change_bit(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile int *a = addr;
@@ -322,7 +322,7 @@ extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-extern __inline__ void set_bit(int nr, volatile void * addr)
+static __inline__ void set_bit(int nr, volatile void * addr)
 {
 	int mask;
 	volatile int *a = addr;
@@ -344,7 +344,7 @@ extern __inline__ void set_bit(int nr, volatile void * addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-extern __inline__ void __set_bit(int nr, volatile void * addr)
+static __inline__ void __set_bit(int nr, volatile void * addr)
 {
 	int mask;
 	volatile int *a = addr;
@@ -364,7 +364,7 @@ extern __inline__ void __set_bit(int nr, volatile void * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-extern __inline__ void clear_bit(int nr, volatile void * addr)
+static __inline__ void clear_bit(int nr, volatile void * addr)
 {
 	int mask;
 	volatile int *a = addr;
@@ -386,7 +386,7 @@ extern __inline__ void clear_bit(int nr, volatile void * addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-extern __inline__ void change_bit(int nr, volatile void * addr)
+static __inline__ void change_bit(int nr, volatile void * addr)
 {
 	int mask;
 	volatile int *a = addr;
@@ -408,7 +408,7 @@ extern __inline__ void change_bit(int nr, volatile void * addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-extern __inline__ void __change_bit(int nr, volatile void * addr)
+static __inline__ void __change_bit(int nr, volatile void * addr)
 {
 	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);
 
@@ -423,7 +423,7 @@ extern __inline__ void __change_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile int *a = addr;
@@ -448,7 +448,7 @@ extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile int *a = addr;
@@ -469,7 +469,7 @@ extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile int *a = addr;
@@ -494,7 +494,7 @@ extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile int *a = addr;
@@ -515,7 +515,7 @@ extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile int *a = addr;
@@ -540,7 +540,7 @@ extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
 {
 	int mask, retval;
 	volatile int *a = addr;
@@ -565,7 +565,7 @@ extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-extern __inline__ int test_bit(int nr, volatile void *addr)
+static __inline__ int test_bit(int nr, volatile void *addr)
 {
 	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
 }
@@ -582,7 +582,7 @@ extern __inline__ int test_bit(int nr, volatile void *addr)
  * Returns the bit-number of the first zero bit, not the number of the byte
  * containing a bit.
  */
-extern __inline__ int find_first_zero_bit (void *addr, unsigned size)
+static __inline__ int find_first_zero_bit (void *addr, unsigned size)
 {
 	unsigned long dummy;
 	int res;
@@ -633,7 +633,7 @@ extern __inline__ int find_first_zero_bit (void *addr, unsigned size)
  * @offset: The bitnumber to start searching at
  * @size: The maximum size to search
  */
-extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
+static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
 {
 	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
 	int set = 0, bit = offset & 31, res;
@@ -679,7 +679,7 @@ extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-extern __inline__ unsigned long ffz(unsigned long word)
+static __inline__ unsigned long ffz(unsigned long word)
 {
 	unsigned int __res;
 	unsigned int mask = 1;
@@ -736,7 +736,7 @@ extern __inline__ unsigned long ffz(unsigned long word)
  * @offset: The bitnumber to start searching at
  * @size: The maximum size to search
  */
-extern __inline__ int find_next_zero_bit(void *addr, int size, int offset)
+static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
 {
 	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
 	unsigned long result = offset & ~31UL;
@@ -796,7 +796,7 @@ extern int find_first_zero_bit (void *addr, unsigned size);
 /* Now for the ext2 filesystem bit operations and helper routines. */
 #if (CONFIG_COMMANDS & CFG_CMD_EXT2)
 #ifdef __MIPSEB__
-extern __inline__ int ext2_set_bit(int nr, void * addr)
+static __inline__ int ext2_set_bit(int nr, void * addr)
 {
 	int mask, retval, flags;
 	unsigned char *ADDR = (unsigned char *) addr;
@@ -810,7 +810,7 @@ extern __inline__ int ext2_set_bit(int nr, void * addr)
 	return retval;
 }
 
-extern __inline__ int ext2_clear_bit(int nr, void * addr)
+static __inline__ int ext2_clear_bit(int nr, void * addr)
 {
 	int mask, retval, flags;
 	unsigned char *ADDR = (unsigned char *) addr;
@@ -824,7 +824,7 @@ extern __inline__ int ext2_clear_bit(int nr, void * addr)
 	return retval;
 }
 
-extern __inline__ int ext2_test_bit(int nr, const void * addr)
+static __inline__ int ext2_test_bit(int nr, const void * addr)
 {
 	int mask;
 	const unsigned char *ADDR = (const unsigned char *) addr;
@@ -837,7 +837,7 @@
 #define ext2_find_first_zero_bit(addr, size) \
 	ext2_find_next_zero_bit((addr), (size), 0)
 
-extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
 {
 	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
 	unsigned long result = offset & ~31UL;
diff --git a/u-boot/include/asm-mips/io.h b/u-boot/include/asm-mips/io.h
index 857fb03..1942d83 100644
--- a/u-boot/include/asm-mips/io.h
+++ b/u-boot/include/asm-mips/io.h
@@ -104,12 +104,12 @@ extern unsigned long mips_io_port_base;
  * Change virtual addresses to physical addresses and vv.
  * These are trivial on the 1:1 Linux/MIPS mapping
  */
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
 {
 	return PHYSADDR(address);
 }
 
-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
 {
 	return (void *)KSEG0ADDR(address);
 }
@@ -117,12 +117,12 @@ extern inline void * phys_to_virt(unsigned long address)
 /*
  * IO bus memory addresses are also 1:1 with the physical address
  */
-extern inline unsigned long virt_to_bus(volatile void * address)
+static inline unsigned long virt_to_bus(volatile void * address)
 {
 	return PHYSADDR(address);
 }
 
-extern inline void * bus_to_virt(unsigned long address)
+static inline void * bus_to_virt(unsigned long address)
 {
 	return (void *)KSEG0ADDR(address);
 }
@@ -224,7 +224,7 @@ out:
  */
 
 #define __OUT1(s) \
-extern inline void __out##s(unsigned int value, unsigned int port) {
+static inline void __out##s(unsigned int value, unsigned int port) {
 
 #define __OUT2(m) \
 __asm__ __volatile__ ("s" #m "\t%0,%1(%2)"
@@ -238,7 +238,7 @@ __OUT1(s##c_p) __OUT2(m) : : "r" (__ioswab##w(value)), "ir" (port), "r" (mips_io
 	SLOW_DOWN_IO; }
 
 #define __IN1(t,s) \
-extern __inline__ t __in##s(unsigned int port) { t _v;
+static __inline__ t __in##s(unsigned int port) { t _v;
 
 /*
  * Required nops will be inserted by the assembler
@@ -253,7 +253,7 @@ __IN1(t,s##_p) __IN2(m) : "=r" (_v) : "i" (0), "r" (mips_io_port_base+port)); SL
 __IN1(t,s##c_p) __IN2(m) : "=r" (_v) : "ir" (port), "r" (mips_io_port_base)); SLOW_DOWN_IO; return __ioswab##w(_v); }
 
 #define __INS1(s) \
-extern inline void __ins##s(unsigned int port, void * addr, unsigned long count) {
+static inline void __ins##s(unsigned int port, void * addr, unsigned long count) {
 
 #define __INS2(m) \
 	if (count) \
@@ -281,7 +281,7 @@ __INS1(s##c) __INS2(m) \
 	: "$1");}
 
 #define __OUTS1(s) \
-extern inline void __outs##s(unsigned int port, const void * addr, unsigned long count) {
+static inline void __outs##s(unsigned int port, const void * addr, unsigned long count) {
 
 #define __OUTS2(m) \
 	if (count) \
diff --git a/u-boot/include/asm-mips/mipsregs.h b/u-boot/include/asm-mips/mipsregs.h
index 09a641d..e41d29b 100644
--- a/u-boot/include/asm-mips/mipsregs.h
+++ b/u-boot/include/asm-mips/mipsregs.h
@@ -291,7 +291,7 @@
  * Mostly used to access the interrupt bits.
  */
 #define __BUILD_SET_CP0(name,register)				\
-extern __inline__ unsigned int					\
+static __inline__ unsigned int					\
 set_cp0_##name(unsigned int set)				\
 {								\
 	unsigned int res;					\
@@ -303,7 +303,7 @@ set_cp0_##name(unsigned int set)				\
 	return res;						\
 }								\
 								\
-extern __inline__ unsigned int					\
+static __inline__ unsigned int					\
 clear_cp0_##name(unsigned int clear)				\
 {								\
 	unsigned int res;					\
@@ -315,7 +315,7 @@ clear_cp0_##name(unsigned int clear)				\
 	return res;						\
 }								\
 								\
-extern __inline__ unsigned int					\
+static __inline__ unsigned int					\
 change_cp0_##name(unsigned int change, unsigned int new)	\
 {								\
 	unsigned int res;					\
diff --git a/u-boot/include/asm-mips/processor.h b/u-boot/include/asm-mips/processor.h
index 6838aee..8b3f0eb 100644
--- a/u-boot/include/asm-mips/processor.h
+++ b/u-boot/include/asm-mips/processor.h
@@ -219,7 +219,7 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 /*
  * Return saved PC of a blocked thread.
  */
-extern inline unsigned long thread_saved_pc(struct thread_struct *t)
+static inline unsigned long thread_saved_pc(struct thread_struct *t)
 {
 	extern void ret_from_fork(void);
 
diff --git a/u-boot/include/asm-mips/string.h b/u-boot/include/asm-mips/string.h
index dab4e70..4955615 100644
--- a/u-boot/include/asm-mips/string.h
+++ b/u-boot/include/asm-mips/string.h
@@ -12,7 +12,7 @@
 #include
 
 #define __HAVE_ARCH_STRCPY
-extern __inline__ char *strcpy(char *__dest, __const__ char *__src)
+static __inline__ char *strcpy(char *__dest, __const__ char *__src)
 {
 	char *__xdest = __dest;
 
@@ -34,7 +34,7 @@ extern __inline__ char *strcpy(char *__dest, __const__ char *__src)
 }
 
 #define __HAVE_ARCH_STRNCPY
-extern __inline__ char *strncpy(char *__dest, __const__ char *__src, size_t __n)
+static __inline__ char *strncpy(char *__dest, __const__ char *__src, size_t __n)
 {
 	char *__xdest = __dest;
 
@@ -62,7 +62,7 @@ extern __inline__ char *strncpy(char *__dest, __const__ char *__src, size_t __n)
 }
 
 #define __HAVE_ARCH_STRCMP
-extern __inline__ int strcmp(__const__ char *__cs, __const__ char *__ct)
+static __inline__ int strcmp(__const__ char *__cs, __const__ char *__ct)
 {
 	int __res;
 
@@ -91,7 +91,7 @@ extern __inline__ int strcmp(__const__ char *__cs, __const__ char *__ct)
 }
 
 #define __HAVE_ARCH_STRNCMP
-extern __inline__ int
+static __inline__ int
 strncmp(__const__ char *__cs, __const__ char *__ct, size_t __count)
 {
 	int __res;
@@ -135,7 +135,7 @@ extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
 
 #define __HAVE_ARCH_BCOPY
 #define __HAVE_ARCH_MEMSCAN
-extern __inline__ void *memscan(void *__addr, int __c, size_t __size)
+static __inline__ void *memscan(void *__addr, int __c, size_t __size)
 {
 	char *__end = (char *)__addr + __size;
 
diff --git a/u-boot/include/asm-mips/system.h b/u-boot/include/asm-mips/system.h
index b6d50e2..992dd44 100644
--- a/u-boot/include/asm-mips/system.h
+++ b/u-boot/include/asm-mips/system.h
@@ -23,7 +23,7 @@
 #include
 #endif
 
-extern __inline__ void
+static __inline__ void
 __sti(void)
 {
 	__asm__ __volatile__(
@@ -47,7 +47,7 @@ __sti(void)
  * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
  * no nops at all.
  */
-extern __inline__ void
+static __inline__ void
 __cli(void)
 {
 	__asm__ __volatile__(
@@ -208,7 +208,7 @@ do { \
  * For 32 and 64 bit operands we can take advantage of ll and sc.
  * FIXME: This doesn't work for R3000 machines.
  */
-extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
 {
 #ifdef CONFIG_CPU_HAS_LLSC
 	unsigned long dummy;
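
Background on the fix: up to gcc 4.x the compiler defaulted to the GNU89
dialect, where "extern inline" means "use this body for inlining only and
never emit a standalone symbol". gcc 5 switched the default to gnu11, which
follows the C99 inline model, where "extern inline" means the opposite:
every translation unit that compiles the definition emits an externally
visible symbol. A header full of "extern inline" functions, included from
many .c files, therefore fails to link with "multiple definition" errors
under the newer compiler. "static inline" has internal linkage under both
dialects, so each object file simply carries its own private copy, which is
why this patch substitutes it throughout.

A minimal sketch of the failure mode (the header, file and function names
below are hypothetical, chosen only for illustration):

    /* bits_demo.h -- included from more than one translation unit */
    #ifndef BITS_DEMO_H
    #define BITS_DEMO_H

    /* GNU89 (gcc <= 4.x default): inline-only, no symbol is emitted.
     * gnu11 (gcc >= 5 default): every includer emits a global
     * definition, so linking a.o and b.o fails with:
     *     multiple definition of `bit_is_set'
     * Replacing "extern __inline__" with "static __inline__", as this
     * patch does, is safe in both dialects; building with
     * -fgnu89-inline would be the alternative workaround.
     */
    extern __inline__ int bit_is_set(unsigned long word, int nr)
    {
            return (int)((word >> nr) & 1UL);
    }

    #endif

    /* a.c */
    #include "bits_demo.h"
    int a_uses(unsigned long w) { return bit_is_set(w, 3); }

    /* b.c -- "gcc a.c b.c" links with gcc 4.x, fails with gcc >= 5 */
    #include "bits_demo.h"
    extern int a_uses(unsigned long w);
    int main(void) { return a_uses(8UL); }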