From d36cd9e0dddff35021e2c86261641b76a1684368 Mon Sep 17 00:00:00 2001
From: Tobias Powalowski
Date: Thu, 13 Feb 2014 19:58:32 +0000
Subject: [PATCH] add correct patch for asm issues

---
 0001-quirk-asm_volatile_goto.patch |  51 ++++
 0001-revert-avmfritz-breaker.patch | 417 -----------------------------
 PKGBUILD                           |  13 +-
 3 files changed, 57 insertions(+), 424 deletions(-)
 create mode 100644 0001-quirk-asm_volatile_goto.patch
 delete mode 100644 0001-revert-avmfritz-breaker.patch

diff --git a/0001-quirk-asm_volatile_goto.patch b/0001-quirk-asm_volatile_goto.patch
new file mode 100644
index 0000000..c9ee404
--- /dev/null
+++ b/0001-quirk-asm_volatile_goto.patch
@@ -0,0 +1,51 @@
+From a9f180345f5378ac87d80ed0bea55ba421d83859 Mon Sep 17 00:00:00 2001
+From: Steven Noonan
+Date: Thu, 13 Feb 2014 07:01:07 +0000
+Subject: compiler/gcc4: Make quirk for asm_volatile_goto() unconditional
+
+I started noticing problems with KVM guest destruction on Linux
+3.12+, where guest memory wasn't being cleaned up. I bisected it
+down to the commit introducing the new 'asm goto'-based atomics,
+and found this quirk was later applied to those.
+
+Unfortunately, even with GCC 4.8.2 (which ostensibly fixed the
+known 'asm goto' bug) I am still getting some kind of
+miscompilation. If I enable the asm_volatile_goto quirk for my
+compiler, KVM guests are destroyed correctly and the memory is
+cleaned up.
+
+So make the quirk unconditional for now, until bug is found
+and fixed.
+
+Suggested-by: Linus Torvalds
+Signed-off-by: Steven Noonan
+Cc: Peter Zijlstra
+Cc: Steven Rostedt
+Cc: Jakub Jelinek
+Cc: Richard Henderson
+Cc: Andrew Morton
+Cc: Oleg Nesterov
+Cc:
+Link: http://lkml.kernel.org/r/1392274867-15236-1-git-send-email-steven@uplinklabs.net
+Link: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+Signed-off-by: Ingo Molnar
+---
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+index ded4299..2507fd2 100644
+--- a/include/linux/compiler-gcc4.h
++++ b/include/linux/compiler-gcc4.h
+@@ -75,11 +75,7 @@
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+-#if GCC_VERSION <= 40801
+-# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+-#else
+-# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
+-#endif
++#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+ #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+ #if GCC_VERSION >= 40400
+--
+cgit v0.9.2
diff --git a/0001-revert-avmfritz-breaker.patch b/0001-revert-avmfritz-breaker.patch
deleted file mode 100644
index 769be2d..0000000
--- a/0001-revert-avmfritz-breaker.patch
+++ /dev/null
@@ -1,417 +0,0 @@
-From e0f6dec35f9286e78879fe1ac92803fd69fc4fdc Mon Sep 17 00:00:00 2001
-From: H. Peter Anvin
-Date: Wed, 04 Dec 2013 22:31:28 +0000
-Subject: x86, bitops: Correct the assembly constraints to testing bitops
-
-In checkin:
-
-0c44c2d0f459 x86: Use asm goto to implement better modify_and_test() functions
-
-the various functions which do modify and test were unified and
-optimized using "asm goto". However, this change missed the detail
-that the bitops require an "Ir" constraint rather than an "er"
-constraint ("I" = integer constant from 0-31, "e" = signed 32-bit
-integer constant). This would cause code to miscompile if these
-functions were used on constant bit positions 32-255 and the build to
-fail if used on constant bit positions above 255.
-
-Add the constraints as a parameter to the GEN_BINARY_RMWcc() macro to
-avoid this problem.
-
-Reported-by: Jesse Brandeburg
-Signed-off-by: H. Peter Anvin
-Cc: Peter Zijlstra
-Link: http://lkml.kernel.org/r/529E8719.4070202@zytor.com
----
-diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
-index da31c8b..b17f4f4 100644
---- a/arch/x86/include/asm/atomic.h
-+++ b/arch/x86/include/asm/atomic.h
-@@ -77,7 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v)
- */
- static inline int atomic_sub_and_test(int i, atomic_t *v)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
- }
-
- /**
-@@ -141,7 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
- */
- static inline int atomic_add_negative(int i, atomic_t *v)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
- }
-
- /**
-diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
-index 3f065c9..46e9052 100644
---- a/arch/x86/include/asm/atomic64_64.h
-+++ b/arch/x86/include/asm/atomic64_64.h
-@@ -72,7 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
- */
- static inline int atomic64_sub_and_test(long i, atomic64_t *v)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
- }
-
- /**
-@@ -138,7 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
- */
- static inline int atomic64_add_negative(long i, atomic64_t *v)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
- }
-
- /**
-diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
-index 6d76d09..9fc1af7 100644
---- a/arch/x86/include/asm/bitops.h
-+++ b/arch/x86/include/asm/bitops.h
-@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
- */
- static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
- }
-
- /**
-@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
- */
- static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
- }
-
- /**
-@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
- */
- static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
- {
-- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c");
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
- }
-
- static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
-diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
-index 5b23e60..4ad6560 100644
---- a/arch/x86/include/asm/local.h
-+++ b/arch/x86/include/asm/local.h
-@@ -52,7 +52,7 @@ static inline void local_sub(long i, local_t *l)
- */
- static inline int local_sub_and_test(long i, local_t *l)
- {
-- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e");
-+ GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
- }
-
- /**
-@@ -92,7 +92,7 @@ static inline int local_inc_and_test(local_t *l)
- */
- static inline int local_add_negative(long i, local_t *l)
- {
-- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s");
-+ GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
- }
-
- /**
-diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
-index 1ff990f..8f7866a 100644
---- a/arch/x86/include/asm/rmwcc.h
-+++ b/arch/x86/include/asm/rmwcc.h
-@@ -16,8 +16,8 @@ cc_label: \
- #define GEN_UNARY_RMWcc(op, var, arg0, cc) \
- __GEN_RMWcc(op " " arg0, var, cc)
-
---#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \
--- __GEN_RMWcc(op " %1, " arg0, var, cc, "er" (val))
--+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
--+ __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
-
- #else /* !CC_HAVE_ASM_GOTO */
-
-@@ -33,8 +33,8 @@ do { \
- #define GEN_UNARY_RMWcc(op, var, arg0, cc) \
- __GEN_RMWcc(op " " arg0, var, cc)
-
---#define GEN_BINARY_RMWcc(op, var, val, arg0, cc) \
--- __GEN_RMWcc(op " %2, " arg0, var, cc, "er" (val))
--+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
--+ __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
-
- #endif /* CC_HAVE_ASM_GOTO */
-
----
-cgit v0.9.2
-
-From 0c44c2d0f459cd7e275242b72f500137c4fa834d Mon Sep 17 00:00:00 2001
-From: Peter Zijlstra
-Date: Wed, 11 Sep 2013 13:19:24 +0000
-Subject: x86: Use asm goto to implement better modify_and_test() functions
-
-Linus suggested using asm goto to get rid of the typical SETcc + TEST
-instruction pair -- which also clobbers an extra register -- for our
-typical modify_and_test() functions.
-
-Because asm goto doesn't allow output fields it has to include an
-unconditinal memory clobber when it changes a memory variable to force
-a reload.
-
-Luckily all atomic ops already imply a compiler barrier to go along
-with their memory barrier semantics.
-
-Suggested-by: Linus Torvalds
-Signed-off-by: Peter Zijlstra
-Link: http://lkml.kernel.org/n/tip-0mtn9siwbeo1d33bap1422se@git.kernel.org
-Signed-off-by: Ingo Molnar
----
-diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
-index 722aa3b..da31c8b 100644
---- a/arch/x86/include/asm/atomic.h
-+++ b/arch/x86/include/asm/atomic.h
-@@ -6,6 +6,7 @@
- #include
- #include
- #include
-+#include
-
- /*
- * Atomic operations that C can't guarantee us. Useful for
-@@ -76,12 +77,7 @@ static inline void atomic_sub(int i, atomic_t *v)
- */
- static inline int atomic_sub_and_test(int i, atomic_t *v)
- {
-- unsigned char c;
--
-- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
-- : "+m" (v->counter), "=qm" (c)
-- : "ir" (i) : "memory");
-- return c;
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, i, "%0", "e");
- }
-
- /**
-@@ -118,12 +114,7 @@ static inline void atomic_dec(atomic_t *v)
- */
- static inline int atomic_dec_and_test(atomic_t *v)
- {
-- unsigned char c;
--
-- asm volatile(LOCK_PREFIX "decl %0; sete %1"
-- : "+m" (v->counter), "=qm" (c)
-- : : "memory");
-- return c != 0;
-+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
- }
-
- /**
-@@ -136,12 +127,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
- */
- static inline int atomic_inc_and_test(atomic_t *v)
- {
-- unsigned char c;
--
-- asm volatile(LOCK_PREFIX "incl %0; sete %1"
-- : "+m" (v->counter), "=qm" (c)
-- : : "memory");
-- return c != 0;
-+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
- }
-
- /**
-@@ -155,12 +141,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
- */
- static inline int atomic_add_negative(int i, atomic_t *v)
- {
-- unsigned char c;
--
-- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
-- : "+m" (v->counter), "=qm" (c)
-- : "ir" (i) : "memory");
-- return c;
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, i, "%0", "s");
- }
-
- /**
-diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
-index 0e1cbfc..3f065c9 100644
---- a/arch/x86/include/asm/atomic64_64.h
-+++ b/arch/x86/include/asm/atomic64_64.h
-@@ -72,12 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
- */
- static inline int atomic64_sub_and_test(long i, atomic64_t *v)
- {
-- unsigned char c;
--
-- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
-- : "=m" (v->counter), "=qm" (c)
-- : "er" (i), "m" (v->counter) : "memory");
-- return c;
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, i, "%0", "e");
- }
-
- /**
-@@ -116,12 +111,7 @@ static inline void atomic64_dec(atomic64_t *v)
- */
- static inline int atomic64_dec_and_test(atomic64_t *v)
- {
-- unsigned char c;
--
-- asm volatile(LOCK_PREFIX "decq %0; sete %1"
-- : "=m" (v->counter), "=qm" (c)
-- : "m" (v->counter) : "memory");
-- return c != 0;
-+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
- }
-
- /**
-@@ -134,12 +124,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
- */
- static inline int atomic64_inc_and_test(atomic64_t *v)
- {
-- unsigned char c;
--
-- asm volatile(LOCK_PREFIX "incq %0; sete %1"
-- : "=m" (v->counter), "=qm" (c)
-- : "m" (v->counter) : "memory");
-- return c != 0;
-+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
- }
-
- /**
-@@ -153,12 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
- */
- static inline int atomic64_add_negative(long i, atomic64_t *v)
- {
-- unsigned char c;
--
-- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
-- : "=m" (v->counter), "=qm" (c)
-- : "er" (i), "m" (v->counter) : "memory");
-- return c;
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, i, "%0", "s");
- }
-
- /**
-diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
-index 41639ce..6d76d09 100644
---- a/arch/x86/include/asm/bitops.h
-+++ b/arch/x86/include/asm/bitops.h
-@@ -14,6 +14,7 @@
-
- #include
- #include
-+#include
-
- #if BITS_PER_LONG == 32
- # define _BITOPS_LONG_SHIFT 5
-@@ -204,12 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
- */
- static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
- {
-- int oldbit;
--
-- asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
-- "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
--
-- return oldbit;
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, nr, "%0", "c");
- }
-
- /**
-@@ -255,13 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
- */
- static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
- {
-- int oldbit;
--
-- asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
-- "sbb %0,%0"
-- : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
--
-- return oldbit;
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, nr, "%0", "c");
- }
-
- /**
-@@ -314,13 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
- */
- static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
- {
-- int oldbit;
--
-- asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
-- "sbb %0,%0"
-- : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
--
-- return oldbit;
-+ GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, nr, "%0", "c");
- }
-
- static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
-diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
-index 2d89e39..5b23e60 100644
---- a/arch/x86/include/asm/local.h
-+++ b/arch/x86/include/asm/local.h
-@@ -52,12 +52,7 @@ static inline void local_sub(long i, local_t *l)
- */
- static inline int local_sub_and_test(long i, local_t *l)
- {
-- unsigned char c;
--
-- asm volatile(_ASM_SUB "%2,%0; sete %1"
-- : "+m" (l->a.counter), "=qm" (c)
-- : "ir" (i) : "memory");
-- return c;
-+ GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, i, "%0", "e");
- }
-
- /**
-@@ -70,12 +65,7 @@ static inline int local_sub_and_test(long i, local_t *l)
- */
- static inline int local_dec_and_test(local_t *l)
- {
-- unsigned char c;
--
-- asm volatile(_ASM_DEC "%0; sete %1"
-- : "+m" (l->a.counter), "=qm" (c)
-- : : "memory");
-- return c != 0;
-+ GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
- }
-
- /**
-@@ -88,12 +78,7 @@ static inline int local_dec_and_test(local_t *l)
- */
- static inline int local_inc_and_test(local_t *l)
- {
-- unsigned char c;
--
-- asm volatile(_ASM_INC "%0; sete %1"
-- : "+m" (l->a.counter), "=qm" (c)
-- : : "memory");
-- return c != 0;
-+ GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
- }
-
- /**
-@@ -107,12 +92,7 @@ static inline int local_inc_and_test(local_t *l)
- */
- static inline int local_add_negative(long i, local_t *l)
- {
-- unsigned char c;
--
-- asm volatile(_ASM_ADD "%2,%0; sets %1"
-- : "+m" (l->a.counter), "=qm" (c)
-- : "ir" (i) : "memory");
-- return c;
-+ GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, i, "%0", "s");
- }
-
- /**
-cgit v0.9.2
diff --git a/PKGBUILD b/PKGBUILD
index 8fde002..fcc478f 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -5,7 +5,7 @@ pkgbase=linux # Build stock -ARCH kernel
 #pkgbase=linux-custom # Build kernel with a different name
 _srcname=linux-3.13
 pkgver=3.13.2
-pkgrel=2
+pkgrel=3
 arch=('i686' 'x86_64')
 url="http://www.kernel.org/"
 license=('GPL2')
@@ -26,7 +26,7 @@ source=("http://www.kernel.org/pub/linux/kernel/v3.x/${_srcname}.tar.xz"
 '0005-sunrpc-add-an-info-file-for-the-dummy-gssd-pipe.patch'
 '0006-rpc_pipe-fix-cleanup-of-dummy-gssd-directory-when-no.patch'
 '0001-syscalls.h-use-gcc-alias-instead-of-assembler-aliase.patch'
- '0001-revert-avmfritz-breaker.patch'
+ '0001-quirk-asm_volatile_goto.patch'
 'i8042-fix-aliases.patch'
 )
 md5sums=('0ecbaf65c00374eb4a826c2f9f37606f'
@@ -43,7 +43,7 @@ md5sums=('0ecbaf65c00374eb4a826c2f9f37606f'
 'd5907a721b97299f0685c583499f7820'
 'a724515b350b29c53f20e631c6cf9a14'
 'e6fa278c092ad83780e2dd0568e24ca6'
- 'bc1917dd2a0f9e4f511f120c85fa0c49'
+ '6baa312bc166681f48e972824f3f6649'
 '93dbf73af819b77f03453a9c6de2bb47')

 _kernelname=${pkgbase#linux}
@@ -86,10 +86,9 @@ prepare() {
 # Fix i8042 aliases
 patch -p1 -i "${srcdir}/i8042-fix-aliases.patch"

- # Revert avmfritz breaker
- # https://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git/commit/?id=e0f6dec35f9286e78879fe1ac92803fd69fc4fdc
- # https://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git/commit/?id=0c44c2d0f459cd7e275242b72f500137c4fa834d
- patch -Rp1 -i "${srcdir}/0001-revert-avmfritz-breaker.patch"
+ # Fix compile issues
+ # http://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/patch/?id=a9f180345f5378ac87d80ed0bea55ba421d83859
+ patch -Np1 -i "${srcdir}/0001-quirk-asm_volatile_goto.patch"

 if [ "${CARCH}" = "x86_64" ]; then
 cat "${srcdir}/config.x86_64" > ./.config