path: root/fs/ncpfs/mmap.c
/*
 *  mmap.c
 *
 *  Copyright (C) 1995, 1996 by Volker Lendecke
 *  Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
 *
 */

#include <linux/stat.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/memcontrol.h>

#include <asm/uaccess.h>

#include "ncp_fs.h"

/*
 * Fill in the supplied page for mmap
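 * The page is allocated here, filled synchronously from the server,
 * and always accounted as a major fault.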
 * XXX: how are we excluding truncate/invalidate here? Maybe need to lock
 * page?
 */
static int ncp_file_mmap_fault(struct vm_area_struct *area,
					struct vm_fault *vmf)
{
	struct file *file = area->vm_file;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	char *pg_addr;
	unsigned int already_read;
	unsigned int count;
	int bufsize;
	int pos; /* XXX: loff_t ? */

	/*
	 * ncpfs has nothing against high pages as long
	 * as recvmsg and memset work on them
	 */
	vmf->page = alloc_page(GFP_HIGHUSER);
	if (!vmf->page)
		return VM_FAULT_OOM;
	pg_addr = kmap(vmf->page);
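	/* Byte offset in the file of the page being faulted in. */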
	pos = vmf->pgoff << PAGE_SHIFT;

	count = PAGE_SIZE;
	/* what we can read in one go */
	bufsize = NCP_SERVER(inode)->buffer_size;

	already_read = 0;
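	/*
	 * Open the file read-only on the server and pull the page in
	 * at most bufsize-sized chunks.
	 */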
	if (ncp_make_open(inode, O_RDONLY) >= 0) {
		while (already_read < count) {
			int read_this_time;
			int to_read;

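			/*
			 * Cap this chunk at the next server buffer boundary
			 * and at what is still missing from the page.
			 */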
			to_read = bufsize - (pos % bufsize);

			to_read = min_t(unsigned int, to_read, count - already_read);

			if (ncp_read_kernel(NCP_SERVER(inode),
				     NCP_FINFO(inode)->file_handle,
				     pos, to_read,
				     pg_addr + already_read,
				     &read_this_time) != 0) {
				read_this_time = 0;
			}
			pos += read_this_time;
			already_read += read_this_time;

			if (read_this_time < to_read) {
				break;
			}
		}
		ncp_inode_close(inode);

	}

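	/*
	 * Zero-fill whatever the server did not return (short read at
	 * end of file, a read error, or a failed open).
	 */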
	if (already_read < PAGE_SIZE)
		memset(pg_addr + already_read, 0, PAGE_SIZE - already_read);
	flush_dcache_page(vmf->page);
	kunmap(vmf->page);

	/*
	 * If I understand ncp_read_kernel() properly, the above always
	 * fetches from the network, here the analogue of disk.
	 * -- nyc
	 */
	count_vm_event(PGMAJFAULT);
	mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT);
	return VM_FAULT_MAJOR;
}

static const struct vm_operations_struct ncp_file_mmap =
{
	.fault = ncp_file_mmap_fault,
};


/* This is used for a general mmap of a ncp file */
int ncp_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	
	DPRINTK("ncp_mmap: called\n");

	if (!ncp_conn_valid(NCP_SERVER(inode)))
		return -EIO;

	/* only PAGE_COW or read-only supported now */
	if (vma->vm_flags & VM_SHARED)
		return -EINVAL;
	/* we do not support files bigger than 4GB... For now we
	   support at most 4GB... */
	if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff 
	   > (1U << (32 - PAGE_SHIFT)))
		return -EFBIG;

	vma->vm_ops = &ncp_file_mmap;
	file_accessed(file);
	return 0;
}
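
A quick user-space illustration of the restriction enforced in ncp_mmap() above:
shared (VM_SHARED) mappings are refused with -EINVAL, while private copy-on-write
or read-only mappings are accepted and served page by page through
ncp_file_mmap_fault(). This is a minimal sketch, not part of the source file; the
ncpfs mount point /mnt/ncp and the file name are hypothetical.

/* demo.c - hypothetical user-space check of ncpfs mmap behaviour */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ncp/file.txt", O_RDWR);	/* hypothetical ncpfs path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Private mapping: handled by ncp_mmap(), faults filled from the server. */
	void *priv = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	if (priv == MAP_FAILED)
		perror("mmap MAP_PRIVATE");

	/* Writable shared mapping: ncp_mmap() rejects VM_SHARED with -EINVAL. */
	void *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (shared == MAP_FAILED)
		perror("mmap MAP_SHARED");	/* expected: Invalid argument */
	else
		munmap(shared, 4096);

	if (priv != MAP_FAILED)
		munmap(priv, 4096);
	close(fd);
	return 0;
}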