This will allow the compiler to cache and reuse the result, meaning we
no longer have to take care not to load it more than once for the sake
of architectures where the load may be expensive.
This change depends on commit
1c84c99913bf1cd47b866ed31e665848a0da84a2 for
correctness, since otherwise the compiler could hoist the loads during
stage 3 of dynamic linking, before the initial thread-pointer setup.
static inline struct pthread *__pthread_self()
{
char *self;
- __asm__ __volatile__ ("mrs %0,tpidr_el0" : "=r"(self));
+ __asm__ ("mrs %0,tpidr_el0" : "=r"(self));
return (void*)(self - sizeof(struct pthread));
}
static inline pthread_t __pthread_self()
{
char *p;
- __asm__ __volatile__ ( "mrc p15,0,%0,c13,c0,3" : "=r"(p) );
+ __asm__ ( "mrc p15,0,%0,c13,c0,3" : "=r"(p) );
return (void *)(p-sizeof(struct pthread));
}
{
extern hidden uintptr_t __a_gettp_ptr;
register uintptr_t p __asm__("r0");
- __asm__ __volatile__ ( BLX " %1" : "=r"(p) : "r"(__a_gettp_ptr) : "cc", "lr" );
+ __asm__ ( BLX " %1" : "=r"(p) : "r"(__a_gettp_ptr) : "cc", "lr" );
return (void *)(p-sizeof(struct pthread));
}
static inline struct pthread *__pthread_self()
{
struct pthread *self;
- __asm__ __volatile__ ("movl %%gs:0,%0" : "=r" (self) );
+ __asm__ ("movl %%gs:0,%0" : "=r" (self) );
return self;
}
static inline struct pthread *__pthread_self()
{
struct pthread *self;
- __asm__ __volatile__ ("ori %0, r21, 0" : "=r" (self) );
+ __asm__ ("ori %0, r21, 0" : "=r" (self) );
return self;
}
{
#if __mips_isa_rev < 2
register char *tp __asm__("$3");
- __asm__ __volatile__ (".word 0x7c03e83b" : "=r" (tp) );
+ __asm__ (".word 0x7c03e83b" : "=r" (tp) );
#else
char *tp;
- __asm__ __volatile__ ("rdhwr %0, $29" : "=r" (tp) );
+ __asm__ ("rdhwr %0, $29" : "=r" (tp) );
#endif
return (pthread_t)(tp - 0x7000 - sizeof(struct pthread));
}
{
#if __mips_isa_rev < 2
register char *tp __asm__("$3");
- __asm__ __volatile__ (".word 0x7c03e83b" : "=r" (tp) );
+ __asm__ (".word 0x7c03e83b" : "=r" (tp) );
#else
char *tp;
- __asm__ __volatile__ ("rdhwr %0, $29" : "=r" (tp) );
+ __asm__ ("rdhwr %0, $29" : "=r" (tp) );
#endif
return (pthread_t)(tp - 0x7000 - sizeof(struct pthread));
}
{
#if __mips_isa_rev < 2
register char *tp __asm__("$3");
- __asm__ __volatile__ (".word 0x7c03e83b" : "=r" (tp) );
+ __asm__ (".word 0x7c03e83b" : "=r" (tp) );
#else
char *tp;
- __asm__ __volatile__ ("rdhwr %0, $29" : "=r" (tp) );
+ __asm__ ("rdhwr %0, $29" : "=r" (tp) );
#endif
return (pthread_t)(tp - 0x7000 - sizeof(struct pthread));
}
{
#ifdef __clang__
char *tp;
- __asm__ __volatile__ ("l.ori %0, r10, 0" : "=r" (tp) );
+ __asm__ ("l.ori %0, r10, 0" : "=r" (tp) );
#else
register char *tp __asm__("r10");
- __asm__ __volatile__ ("" : "=r" (tp) );
+ __asm__ ("" : "=r" (tp) );
#endif
return (struct pthread *) (tp - sizeof(struct pthread));
}
static inline struct pthread *__pthread_self()
{
register char *tp __asm__("r2");
- __asm__ __volatile__ ("" : "=r" (tp) );
+ __asm__ ("" : "=r" (tp) );
return (pthread_t)(tp - 0x7000 - sizeof(struct pthread));
}
static inline struct pthread *__pthread_self()
{
register char *tp __asm__("r13");
- __asm__ __volatile__ ("" : "=r" (tp) );
+ __asm__ ("" : "=r" (tp) );
return (pthread_t)(tp - 0x7000 - sizeof(struct pthread));
}
static inline struct pthread *__pthread_self()
{
struct pthread *self;
- __asm__ __volatile__ (
+ __asm__ (
"ear %0, %%a0\n"
"sllg %0, %0, 32\n"
"ear %0, %%a1\n"
static inline struct pthread *__pthread_self()
{
char *self;
- __asm__ __volatile__ ("stc gbr,%0" : "=r" (self) );
+ __asm__ ("stc gbr,%0" : "=r" (self) );
return (struct pthread *) (self - sizeof(struct pthread));
}
static inline struct pthread *__pthread_self()
{
struct pthread *self;
- __asm__ __volatile__ ("mov %%fs:0,%0" : "=r" (self) );
+ __asm__ ("mov %%fs:0,%0" : "=r" (self) );
return self;
}
static inline struct pthread *__pthread_self()
{
struct pthread *self;
- __asm__ __volatile__ ("mov %%fs:0,%0" : "=r" (self) );
+ __asm__ ("mov %%fs:0,%0" : "=r" (self) );
return self;
}