path: root/arch/sh/kernel/cpu/fpu.c
blob: f059ed62cf57176cbae60d679a16436dff2793c3
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/fpu.h>

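/*
 * Lazily set up the FPU state for @tsk. If the task has already used
 * math, any live FPU context for the current task is flushed back to
 * its thread structure. Otherwise the xstate area is allocated on
 * first use and FPSCR is reset to its default value.
 */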
int init_fpu(struct task_struct *tsk)
{
	if (tsk_used_math(tsk)) {
		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
			unlazy_fpu(tsk, task_pt_regs(tsk));
		return 0;
	}

	/*
	 * Memory for the FPU and other extended state is allocated on
	 * first use.
	 */
	if (!tsk->thread.xstate) {
		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!tsk->thread.xstate)
			return -ENOMEM;
	}

	if (boot_cpu_data.flags & CPU_HAS_FPU) {
		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	} else {
		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	}

	set_stopped_child_used_math(tsk);
	return 0;
}

#ifdef CONFIG_SH_FPU
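/*
 * Reload the saved FPU registers for the current task and mark the
 * thread as actively using the FPU.
 */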
void __fpu_state_restore(void)
{
	struct task_struct *tsk = current;

	restore_fpu(tsk);

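	/* The FPU is now live for this thread; count another use. */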
	task_thread_info(tsk)->status |= TS_USEDFPU;
	tsk->fpu_counter++;
}

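/*
 * Restore the FPU state lazily when a task touches the FPU while it is
 * disabled. Using the FPU from kernel mode is a bug. If the task has
 * not used math yet, its state area is allocated first -- the slab
 * allocation may sleep, so interrupts are enabled around init_fpu().
 * The FPU is then re-enabled for the faulting context and the saved
 * registers are reloaded.
 */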
void fpu_state_restore(struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (unlikely(!user_mode(regs))) {
		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
		BUG();
		return;
	}

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * init_fpu() does a slab allocation which may sleep,
		 * hence the local_irq_enable() above.
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

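	/* Re-enable the FPU for the faulting user context. */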
	grab_fpu(regs);

	__fpu_state_restore();
}

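/*
 * Trap handler entry built via BUILD_TRAP_HANDLER(); it simply
 * forwards the trapping register context to fpu_state_restore().
 */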
BUILD_TRAP_HANDLER(fpu_state_restore)
{
	TRAP_HANDLER_DECL;

	fpu_state_restore(regs);
}
#endif /* CONFIG_SH_FPU */