[S390] lockless get_user_pages_fast()

Implement get_user_pages_fast() without locking in the fast path on
s390. To keep the lockless page table walk safe, page tables are no
longer released directly from the mmu_gather batch: pte and crst tables
go through page_table_free_rcu()/crst_table_free_rcu(), and
tlb_finish_mmu() completes the delayed freeing with
rcu_table_freelist_finish().

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
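
For orientation, this is roughly the shape of the innermost step of such
a lockless walk. It is only a sketch, modeled on the generic x86-style
gup_pte_range() pattern rather than the actual s390 walker added by this
series; gup_pte_range_sketch() is a made-up name, and the caller is
assumed to keep the pte table from being freed for the duration of the
walk, which is exactly what the delayed freeing below provides:

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch only: walk one pte table without mmap_sem.  A real
 * implementation must read each pte atomically; the plain dereference
 * here is good enough on 64-bit.  Anything unusual simply falls back
 * to the locked slow path by returning 0.
 */
static int gup_pte_range_sketch(pmd_t pmd, unsigned long addr,
				unsigned long end, int write,
				struct page **pages, int *nr)
{
	pte_t *ptep;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = *ptep;
		struct page *page;

		if (!pte_present(pte) || (write && !pte_write(pte))) {
			pte_unmap(ptep);
			return 0;
		}
		page = pte_page(pte);
		/* speculative reference: fails if the page is going away */
		if (!page_cache_get_speculative(page)) {
			pte_unmap(ptep);
			return 0;
		}
		/* recheck: if the pte changed, drop the reference and give up */
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			pte_unmap(ptep);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}
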
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index fd1c00d..f1f644f 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -64,10 +64,9 @@
 	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
 		__tlb_flush_mm(tlb->mm);
 	while (tlb->nr_ptes > 0)
-		pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
+		page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
 	while (tlb->nr_pxds < TLB_NR_PTRS)
-		/* pgd_free frees the pointer as region or segment table */
-		pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]);
+		crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
@@ -75,6 +74,8 @@
 {
 	tlb_flush_mmu(tlb, start, end);
 
+	rcu_table_freelist_finish();
+
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
@@ -103,7 +104,7 @@
 		if (tlb->nr_ptes >= tlb->nr_pxds)
 			tlb_flush_mmu(tlb, 0, 0);
 	} else
-		pte_free(tlb->mm, pte);
+		page_table_free(tlb->mm, (unsigned long *) pte);
 }
 
 /*
@@ -124,7 +125,7 @@
 		if (tlb->nr_ptes >= tlb->nr_pxds)
 			tlb_flush_mmu(tlb, 0, 0);
 	} else
-		pmd_free(tlb->mm, pmd);
+		crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
 }
 
@@ -146,7 +147,7 @@
 		if (tlb->nr_ptes >= tlb->nr_pxds)
 			tlb_flush_mmu(tlb, 0, 0);
 	} else
-		pud_free(tlb->mm, pud);
+		crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
 }