author    Oleg Nesterov <oleg@redhat.com>  2013-03-24 18:58:04 +0100
committer Oleg Nesterov <oleg@redhat.com>  2013-04-04 13:57:05 +0200
commit    5669ccee21d87622f30a724b3fe0d04ec5b0afae (patch)
tree      b8eda48d364795c9ccb07c9df9f29fbf9fa7199e /kernel/events
parent    98763a1bb1515f8a8d7f1d9ae42604e19872364b (diff)
uprobes: Introduce copy_to_page()
Extract the kmap_atomic/memcpy/kunmap_atomic code from xol_get_insn_slot()
into the new simple helper, copy_to_page(). It will have more users soon.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Anton Arapov <anton@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
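For reference, the existing copy_from_page() helper that the new function mirrors (it only appears as hunk context below) looks roughly like this; a sketch of the surrounding uprobes.c code, not part of this patch:

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	/* Map the page, copy len bytes from the in-page offset of vaddr, unmap. */
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}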
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/uprobes.c | 14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 093866547fe..b8255eaca19 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -194,6 +194,13 @@ static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, in
 	kunmap_atomic(kaddr);
 }
 
+static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
+{
+	void *kaddr = kmap_atomic(page);
+	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
+	kunmap_atomic(kaddr);
+}
+
 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
 {
 	uprobe_opcode_t old_opcode;
@@ -1227,9 +1234,7 @@ static unsigned long xol_take_insn_slot(struct xol_area *area)
 static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 {
 	struct xol_area *area;
-	unsigned long offset;
 	unsigned long xol_vaddr;
-	void *vaddr;
 
 	area = get_xol_area();
 	if (!area)
@@ -1240,10 +1245,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 		return 0;
 
 	/* Initialize the slot */
-	offset = xol_vaddr & ~PAGE_MASK;
-	vaddr = kmap_atomic(area->page);
-	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
-	kunmap_atomic(vaddr);
+	copy_to_page(area->page, xol_vaddr, uprobe->arch.insn, MAX_UINSN_BYTES);
 	/*
 	 * We probably need flush_icache_user_range() but it needs vma.
 	 * This should work on supported architectures too.
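The changelog's note that copy_to_page() will have more users soon refers to call sites that still open-code the kmap_atomic/memcpy/kunmap_atomic pattern for writes. A hypothetical follow-up conversion (function name and call site are illustrative only, not part of this patch) could look like:

/*
 * Hypothetical sketch: write the breakpoint opcode into a page with the
 * new helper instead of an open-coded kmap_atomic/memcpy/kunmap_atomic
 * sequence. set_swbp_in_page() is an illustrative name, not a real caller.
 */
static void set_swbp_in_page(struct page *page, unsigned long vaddr)
{
	uprobe_opcode_t opcode = UPROBE_SWBP_INSN;

	copy_to_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
}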