/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

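/*
 * Subcommand numbers for HYPERVISOR_tmem_op(); each is placed in
 * op.cmd by the wrappers below.
 */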
#define TMEM_CONTROL		0
#define TMEM_NEW_POOL		1
#define TMEM_DESTROY_POOL	2
#define TMEM_NEW_PAGE		3
#define TMEM_PUT_PAGE		4
#define TMEM_GET_PAGE		5
#define TMEM_FLUSH_PAGE		6
#define TMEM_FLUSH_OBJECT	7
#define TMEM_READ		8
#define TMEM_WRITE		9
#define TMEM_XCHG		10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST		1
#define TMEM_POOL_SHARED		2
#define TMEM_POOL_PAGESIZE_SHIFT	4
#define TMEM_VERSION_SHIFT		24

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* xen tmem foundation ops/hypercalls */

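/*
 * Marshal a generic tmem operation into a struct tmem_op and issue
 * the hypercall.  For get/put the hypervisor returns 1 on success;
 * the callers below translate this to the Linux 0/-1 convention.
 */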
static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
	u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

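	/*
	 * Compute pageshift = log2(pagesize); e.g. pagesize == 4096
	 * gives pageshift == 12, which is encoded as 0 because pool
	 * page sizes are expressed relative to the 4K minimum.
	 */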
	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

/* xen generic tmem ops */

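/*
 * PV guests address pages by machine frame number; auto-translated
 * (e.g. HVM) guests pass the guest pfn through unchanged.
 */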
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
	u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
	u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
		0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

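/*
 * When tmem is built into the kernel it defaults to off and is enabled
 * with the "tmem" boot parameter; a modular build gets tmem_enabled
 * from <xen/tmem.h> instead.
 */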
#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

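/*
 * tmem indexes are 32 bits wide, so a pgoff_t that does not fit in a
 * u32 is never offered to tmem (the "ind != index" checks below).
 */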
static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
	pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
	pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
	pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static bool disable_cleancache __read_mostly;
static bool disable_selfballooning __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_cleancache, bool, S_IRUGO);
module_param(disable_selfballooning, bool, S_IRUGO);
#else
static int __init no_cleancache(char *s)
{
	disable_cleancache = true;
	return 1;
}
__setup("nocleancache", no_cleancache);
#endif

static struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)
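/*
 * Example with SWIZ_BITS == 4: type 2, offset 0x12345 maps to object
 * id _oswiz(2, 0x12345) == 0x25 and index iswiz(0x12345) == 0x1234,
 * spreading consecutive offsets across 16 tmem objects per swaptype.
 */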

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
	struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
	struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void)xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/*
 * flush all pages from the passed swaptype; a swaptype's pages are
 * spread across SWIZ_MASK + 1 swizzled objects, so flush each one
 */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
			xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

static bool disable_frontswap __read_mostly;
static bool disable_frontswap_selfshrinking __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_frontswap, bool, S_IRUGO);
module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
#else
static int __init no_frontswap(char *s)
{
	disable_frontswap = true;
	return 1;
}
__setup("nofrontswap", no_frontswap);
#endif

static struct frontswap_ops tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#else /* CONFIG_FRONTSWAP */
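/* without frontswap there is nothing to selfshrink, so force it off */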
#define disable_frontswap_selfshrinking 1
#endif

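/*
 * Register the backends with frontswap/cleancache.  Nothing happens
 * unless the kernel is running in a Xen domain with tmem enabled.
 */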
static int xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && !disable_frontswap) {
		char *s = "";
		struct frontswap_ops *old_ops =
			frontswap_register_ops(&tmem_frontswap_ops);

		tmem_frontswap_poolid = -1;
		if (IS_ERR(old_ops) || old_ops) {
			if (IS_ERR(old_ops))
				return PTR_ERR(old_ops);
			s = " (WARNING: frontswap_ops overridden)";
		}
		printk(KERN_INFO "frontswap enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && !disable_cleancache) {
		char *s = "";
		struct cleancache_ops *old_ops =
			cleancache_register_ops(&tmem_cleancache_ops);
		if (old_ops)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
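	/*
	 * Selfballooning and frontswap-selfshrinking are opt-out:
	 * the inverted disable_* flags are passed as enables.
	 */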
#ifdef CONFIG_XEN_SELFBALLOONING
	xen_selfballoon_init(!disable_selfballooning,
			     !disable_frontswap_selfshrinking);
#endif
	return 0;
}

module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");