

Commit 03b9730

amluto authored and ingomolnar committed
x86/asm/tsc: Add rdtsc_ordered() and use it in trivial call sites
rdtsc_barrier(); rdtsc() is an unnecessary mouthful and requires more
thought than should be necessary. Add an rdtsc_ordered() helper and
replace the trivial call sites with it. This should not change
generated code.

The duplication of the fence asm is temporary.

Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Huang Rui <[email protected]>
Cc: John Stultz <[email protected]>
Cc: Len Brown <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: kvm ML <[email protected]>
Link: http://lkml.kernel.org/r/dddbf98a2af53312e9aa73a5a2b1622fe5d6f52b.1434501121.git.luto@kernel.org
Signed-off-by: Ingo Molnar <[email protected]>
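At each trivial call site the change is mechanical; a minimal sketch of the before/after pattern (schematic C, not copied verbatim from any single file in the diff below; "cycles" is just an illustrative variable):

        /* Before: explicit barrier, then the unordered TSC read. */
        rdtsc_barrier();
        cycles = rdtsc();

        /* After: a single helper that orders the read itself. */
        cycles = rdtsc_ordered();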
1 parent 4ea1636 commit 03b9730

5 files changed: 34 additions & 40 deletions


arch/x86/entry/vdso/vclock_gettime.c

Lines changed: 2 additions & 14 deletions
@@ -175,20 +175,8 @@ static notrace cycle_t vread_pvclock(int *mode)
 
 notrace static cycle_t vread_tsc(void)
 {
-        cycle_t ret;
-        u64 last;
-
-        /*
-         * Empirically, a fence (of type that depends on the CPU)
-         * before rdtsc is enough to ensure that rdtsc is ordered
-         * with respect to loads. The various CPU manuals are unclear
-         * as to whether rdtsc can be reordered with later loads,
-         * but no one has ever seen it happen.
-         */
-        rdtsc_barrier();
-        ret = (cycle_t)rdtsc();
-
-        last = gtod->cycle_last;
+        cycle_t ret = (cycle_t)rdtsc_ordered();
+        u64 last = gtod->cycle_last;
 
         if (likely(ret >= last))
                 return ret;

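The ret >= last comparison that closes the hunk is the vDSO clock's monotonicity clamp; the fallback branch sits below the hunk boundary. A hedged sketch of the intent of the full pattern, assuming the surrounding gtod definitions (the kernel writes the fallback as an explicit branch for codegen reasons):

        /*
         * Compare the ordered TSC read against the last timekeeper
         * update. If the read appears to lag cycle_last, return last
         * instead so the clock never steps backwards.
         */
        cycle_t ret = (cycle_t)rdtsc_ordered();
        u64 last = gtod->cycle_last;

        return ret >= last ? ret : last;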
arch/x86/include/asm/msr.h

Lines changed: 26 additions & 0 deletions
@@ -127,6 +127,32 @@ static __always_inline unsigned long long rdtsc(void)
         return EAX_EDX_VAL(val, low, high);
 }
 
+/**
+ * rdtsc_ordered() - read the current TSC in program order
+ *
+ * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
+ * It is ordered like a load to a global in-memory counter. It should
+ * be impossible to observe non-monotonic rdtsc_unordered() behavior
+ * across multiple CPUs as long as the TSC is synced.
+ */
+static __always_inline unsigned long long rdtsc_ordered(void)
+{
+        /*
+         * The RDTSC instruction is not ordered relative to memory
+         * access. The Intel SDM and the AMD APM are both vague on
+         * this point, but empirically an RDTSC instruction can be
+         * speculatively executed before prior loads. An RDTSC
+         * immediately after an appropriate barrier appears to be
+         * ordered as a normal load, that is, it provides the same
+         * ordering guarantees as reading from a global memory location
+         * that some other imaginary CPU is updating continuously with a
+         * time stamp.
+         */
+        alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
+                      "lfence", X86_FEATURE_LFENCE_RDTSC);
+        return rdtsc();
+}
+
 static inline unsigned long long native_read_pmc(int counter)
 {
         DECLARE_ARGS(val, low, high);

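The alternative_2() above is boot-time patching: the kernel rewrites the empty instruction sequence into MFENCE on CPUs flagged X86_FEATURE_MFENCE_RDTSC or into LFENCE on CPUs flagged X86_FEATURE_LFENCE_RDTSC, so each machine pays for exactly one fence flavor. Outside the kernel, a similar ordered TSC read can be approximated with compiler intrinsics; a sketch assuming LFENCE is serializing on the target CPU (rdtsc_ordered_user is a hypothetical name, not a kernel or libc API):

        #include <stdint.h>
        #include <x86intrin.h>        /* __rdtsc(), _mm_lfence() */

        /*
         * Userspace approximation of rdtsc_ordered(): fence first so
         * the TSC read cannot be speculated ahead of earlier loads.
         * Unlike the kernel, we cannot patch the fence per-CPU, so
         * LFENCE is issued unconditionally (assumed serializing,
         * which holds on reasonably modern Intel and AMD parts).
         */
        static inline uint64_t rdtsc_ordered_user(void)
        {
                _mm_lfence();
                return __rdtsc();
        }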
arch/x86/kernel/trace_clock.c

Lines changed: 1 addition & 6 deletions
@@ -12,10 +12,5 @@
  */
 u64 notrace trace_clock_x86_tsc(void)
 {
-        u64 ret;
-
-        rdtsc_barrier();
-        ret = rdtsc();
-
-        return ret;
+        return rdtsc_ordered();
 }

arch/x86/kvm/x86.c

Lines changed: 2 additions & 14 deletions
@@ -1444,20 +1444,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
 static cycle_t read_tsc(void)
 {
-        cycle_t ret;
-        u64 last;
-
-        /*
-         * Empirically, a fence (of type that depends on the CPU)
-         * before rdtsc is enough to ensure that rdtsc is ordered
-         * with respect to loads. The various CPU manuals are unclear
-         * as to whether rdtsc can be reordered with later loads,
-         * but no one has ever seen it happen.
-         */
-        rdtsc_barrier();
-        ret = (cycle_t)rdtsc();
-
-        last = pvclock_gtod_data.clock.cycle_last;
+        cycle_t ret = (cycle_t)rdtsc_ordered();
+        u64 last = pvclock_gtod_data.clock.cycle_last;
 
         if (likely(ret >= last))
                 return ret;

arch/x86/lib/delay.c

Lines changed: 3 additions & 6 deletions
@@ -54,11 +54,9 @@ static void delay_tsc(unsigned long __loops)
 
         preempt_disable();
         cpu = smp_processor_id();
-        rdtsc_barrier();
-        bclock = rdtsc();
+        bclock = rdtsc_ordered();
         for (;;) {
-                rdtsc_barrier();
-                now = rdtsc();
+                now = rdtsc_ordered();
                 if ((now - bclock) >= loops)
                         break;
 
@@ -79,8 +77,7 @@ static void delay_tsc(unsigned long __loops)
                 if (unlikely(cpu != smp_processor_id())) {
                         loops -= (now - bclock);
                         cpu = smp_processor_id();
-                        rdtsc_barrier();
-                        bclock = rdtsc();
+                        bclock = rdtsc_ordered();
                 }
         }
         preempt_enable();

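This loop also shows why the helper matters: every TSC read in a timing loop needs the fence, and rdtsc_ordered() keeps each of the three read sites to one line. A simplified userspace analogue of the delay loop (delay_tsc_user is a hypothetical name, reusing the rdtsc_ordered_user sketch from the msr.h section; no preempt_disable()/migration handling, since plain userspace code cannot pin itself the way the kernel loop does):

        /*
         * Busy-wait for roughly 'loops' TSC cycles. The unsigned
         * subtraction keeps the comparison correct even across a TSC
         * wrap, the same trick the kernel loop relies on.
         */
        static void delay_tsc_user(uint64_t loops)
        {
                uint64_t bclock = rdtsc_ordered_user();

                while ((rdtsc_ordered_user() - bclock) < loops)
                        ;        /* spin */
        }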