From 92637bb0d8f13940aebd2a8116cc935c3e7d8266 Mon Sep 17 00:00:00 2001
From: Rich Felker
Date: Thu, 15 Oct 2015 12:08:51 -0400
Subject: [PATCH] prevent reordering of or1k and powerpc thread pointer loads

other archs use asm for the thread pointer load, so making that asm
volatile is sufficient to inform the compiler that it has a "side
effect" (crashing or giving the wrong result if the thread pointer was
not yet initialized) that prevents reordering. however, powerpc and
or1k have dedicated general purpose registers for the thread pointer
and did not need to use any asm to access it; instead, "local register
variables with a specified register" were used. but there is no
specification of ordering constraints for this type of usage, and
presumably use of the thread pointer could be reordered across its
initialization.

to impose an ordering, I have added empty volatile asm blocks that
produce the "local register variable with a specified register" as an
output.
---
 arch/or1k/pthread_arch.h    | 1 +
 arch/powerpc/pthread_arch.h | 1 +
 2 files changed, 2 insertions(+)

diff --git a/arch/or1k/pthread_arch.h b/arch/or1k/pthread_arch.h
index e826997e..ad631694 100644
--- a/arch/or1k/pthread_arch.h
+++ b/arch/or1k/pthread_arch.h
@@ -6,6 +6,7 @@ static inline struct pthread *__pthread_self()
 	__asm__ __volatile__ ("l.ori %0, r10, 0" : "=r" (tp) );
 #else
 	register char *tp __asm__("r10");
+	__asm__ __volatile__ ("" : "=r" (tp) );
 #endif
 	return (struct pthread *) (tp - sizeof(struct pthread));
 }
diff --git a/arch/powerpc/pthread_arch.h b/arch/powerpc/pthread_arch.h
index 1cbfc223..bb7405d1 100644
--- a/arch/powerpc/pthread_arch.h
+++ b/arch/powerpc/pthread_arch.h
@@ -5,6 +5,7 @@ static inline struct pthread *__pthread_self()
 	__asm__ __volatile__ ("mr %0, 2" : "=r"(tp) : : );
 #else
 	register char *tp __asm__("r2");
+	__asm__ __volatile__ ("" : "=r" (tp) );
 #endif
 	return (pthread_t)(tp - 0x7000 - sizeof(struct pthread));
 }
-- 
2.25.1
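
Note (not part of the patch): a minimal standalone sketch of the pattern the
commit message describes, assuming a powerpc-style target where r2 is the
dedicated thread-pointer register; the helper name read_thread_pointer is
made up for illustration.

static inline char *read_thread_pointer(void)
{
	/* local register variable bound to the dedicated thread-pointer register */
	register char *tp __asm__("r2");
	/* empty volatile asm producing tp as an output: the compiler must treat
	 * this as a point with a side effect that yields tp, so the read of r2
	 * cannot be hoisted above the thread pointer's initialization */
	__asm__ __volatile__ ("" : "=r" (tp));
	return tp;
}

The empty asm emits no instructions; it only anchors the point at which the
compiler considers the register value to be produced, which is what imposes
the ordering the commit message asks for.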