# HG changeset patch
# User "Eduardo Habkost <ehabkost@redhat.com>"
# Date 1190313016 10800
# Node ID 8792a16b6dd531cad0ed28d9bef86e3e597ea8db
# Parent 35983eae211a531bd50bcd177a9da77d48631e2c
Fix sleazy-fpu under Xen:

- Make it reset fpu_counter when needed (like __unlazy_fpu() does)
- Make it call clts() before restoring the FPU state during task switch
  (clts() will still not be called during the device_not_available trap,
  because Xen has already done it during the virtual trap)
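
In outline, the change splits math_state_restore() in two:
__math_state_restore() performs the actual FPU restore and assumes CR0.TS is
already clear, while math_state_restore() clears TS via clts() first and then
delegates to it. The native device_not_available trap keeps using
math_state_restore(); the Xen entry path calls __math_state_restore()
directly, because the hypervisor has already cleared TS during the virtual
trap. A minimal standalone C sketch of that control flow (clts(),
restore_fpu() and switch_out() below are hypothetical stand-ins for
illustration, not the kernel implementations):

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical stand-ins for the kernel primitives involved. */
    static bool cr0_ts = true;  /* CR0.TS: trap on next FPU use while set */
    static int  fpu_counter;    /* consecutive slices the task used the FPU */

    static void clts(void)        { cr0_ts = false; puts("clts: TS cleared"); }
    static void restore_fpu(void) { puts("restore_fpu: FPU state reloaded"); }

    /* Core restore: assumes TS is already clear, or the first FPU
     * instruction would fault again and we would recurse. */
    static void __math_state_restore(void)
    {
        if (cr0_ts)
            puts("(would fault: TS still set!)");  /* the recursion hazard */
        restore_fpu();
        fpu_counter++;
    }

    /* Native device_not_available path: must clear TS itself first. */
    static void math_state_restore(void)
    {
        clts();                 /* allow maths ops (or we recurse) */
        __math_state_restore();
    }

    /* Switch-out side of the fix: a task that did not touch the FPU
     * this slice gets its counter reset, as __unlazy_fpu() does. */
    static void switch_out(bool task_used_fpu)
    {
        if (!task_used_fpu)
            fpu_counter = 0;    /* the reset this patch adds */
        /* else: fnsave the state, queue the fpu_taskswitch hypercall */
    }

    int main(void)
    {
        math_state_restore();   /* native trap: kernel clears TS itself */

        cr0_ts = false;         /* Xen virtual trap: hypervisor did clts() */
        __math_state_restore(); /* so the entry code skips its own clts() */

        switch_out(false);      /* idle slice: counter drops back to 0 */
        return 0;
    }

The point of the split is that the Xen trap path can skip a clts() the
hypervisor has already performed, without changing native behaviour.
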
diff -r 35983eae211a -r 8792a16b6dd5 arch/i386/kernel/entry-xen.S
--- a/arch/i386/kernel/entry-xen.S Wed Aug 08 16:15:30 2007 -0300
+++ b/arch/i386/kernel/entry-xen.S Thu Sep 20 15:30:16 2007 -0300
@@ -950,7 +950,9 @@ device_available_emulate:
device_available_emulate:
#endif
preempt_stop(CLBR_ANY)
- call math_state_restore
+ # 'clts' is done by Xen during virtual trap, so we can call
+ # __math_state_restore instead of math_state_restore
+ call __math_state_restore
jmp ret_from_exception
CFI_ENDPROC
diff -r 35983eae211a -r 8792a16b6dd5 arch/i386/kernel/process-xen.c
--- a/arch/i386/kernel/process-xen.c Wed Aug 08 16:15:30 2007 -0300
+++ b/arch/i386/kernel/process-xen.c Thu Sep 20 15:30:16 2007 -0300
@@ -597,7 +597,9 @@ struct task_struct fastcall * __switch_t
mcl->op = __HYPERVISOR_fpu_taskswitch;
mcl->args[0] = 1;
mcl++;
- }
+ } else
+ prev_p->fpu_counter = 0;
+
#if 0 /* lazy fpu sanity check */
else BUG_ON(!(read_cr0() & 8));
#endif
diff -r 35983eae211a -r 8792a16b6dd5 arch/i386/kernel/traps-xen.c
--- a/arch/i386/kernel/traps-xen.c Wed Aug 08 16:15:30 2007 -0300
+++ b/arch/i386/kernel/traps-xen.c Thu Sep 20 15:30:16 2007 -0300
@@ -1012,17 +1012,22 @@ fastcall unsigned long patch_espfix_desc
* Must be called with kernel preemption disabled (in this case,
* local interrupts are disabled at the call-site in entry.S).
*/
-asmlinkage void math_state_restore(void)
+asmlinkage void __math_state_restore(void)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = thread->task;
- /* NB. 'clts' is done for us by Xen during virtual trap. */
if (!tsk_used_math(tsk))
init_fpu(tsk);
restore_fpu(tsk);
thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
tsk->fpu_counter++;
+}
+
+asmlinkage void math_state_restore(void)
+{
+ clts(); /* Allow maths ops (or we recurse) */
+ __math_state_restore();
}
#ifndef CONFIG_MATH_EMULATION
diff -r 35983eae211a -r 8792a16b6dd5 arch/i386/kernel/traps.c
--- a/arch/i386/kernel/traps.c Wed Aug 08 16:15:30 2007 -0300
+++ b/arch/i386/kernel/traps.c Thu Sep 20 15:30:16 2007 -0300
@@ -1041,17 +1041,22 @@ fastcall unsigned long patch_espfix_desc
* Must be called with kernel preemption disabled (in this case,
* local interrupts are disabled at the call-site in entry.S).
*/
-asmlinkage void math_state_restore(void)
+asmlinkage void __math_state_restore(void)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = thread->task;
- clts(); /* Allow maths ops (or we recurse) */
if (!tsk_used_math(tsk))
init_fpu(tsk);
restore_fpu(tsk);
thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
tsk->fpu_counter++;
+}
+
+asmlinkage void math_state_restore(void)
+{
+ clts(); /* Allow maths ops (or we recurse) */
+ __math_state_restore();
}
#ifndef CONFIG_MATH_EMULATION