path: root/sysutils/fusefs-kmod/files/extra-patch-8-fuse_vnops.c
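This extra patch appears to target FreeBSD 8.x (judging by the "-8-" in its name): it maps the newer VM interfaces used by fuse_vnops.c back to their 8.x equivalents. Per-page locking becomes a no-op and the global page queue lock is taken instead, VM_OBJECT_WLOCK/WUNLOCK become VM_OBJECT_LOCK/UNLOCK, the vrecycle() call gains the curthread argument of the older KPI, cache_lookup() drops the two trailing arguments 8.x does not take, vm_page_set_valid_range() falls back to vm_page_set_valid(), and vm_page_readahead_finish() is open-coded with vm_page_activate()/vm_page_deactivate() plus vm_page_wakeup().
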
--- fs/fuse/fuse_vnops.c.orig
+++ fs/fuse/fuse_vnops.c
@@ -190,10 +190,10 @@
 
 int	fuse_pbuf_freecnt = -1;
 
-#define fuse_vm_page_lock(m)		vm_page_lock((m));
-#define fuse_vm_page_unlock(m)		vm_page_unlock((m));
-#define fuse_vm_page_lock_queues()	((void)0)
-#define fuse_vm_page_unlock_queues()	((void)0)
+#define fuse_vm_page_lock(m)		((void)0)
+#define fuse_vm_page_unlock(m)		((void)0)
+#define fuse_vm_page_lock_queues()	vm_page_lock_queues()
+#define fuse_vm_page_unlock_queues()	vm_page_unlock_queues()
 
 /*
     struct vnop_access_args {
@@ -579,7 +579,7 @@
 	}
 
 	if ((fvdat->flag & FN_REVOKED) != 0 && fuse_reclaim_revoked) {
-		vrecycle(vp);
+		vrecycle(vp, curthread);
 	}
 	return 0;
 }
@@ -706,7 +706,7 @@
 		op = FUSE_GETATTR;
 		goto calldaemon;
 	} else if (fuse_lookup_cache_enable) {
-		err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
+		err = cache_lookup(dvp, vpp, cnp);
 		switch (err) {
 
 		case -1:		/* positive match */
@@ -1758,7 +1758,7 @@
 	 * can only occur at the file EOF.
 	 */
 
-	VM_OBJECT_WLOCK(vp->v_object);
+	VM_OBJECT_LOCK(vp->v_object);
 	fuse_vm_page_lock_queues();
 	if (pages[ap->a_reqpage]->valid != 0) {
 		for (i = 0; i < npages; ++i) {
@@ -1769,11 +1769,11 @@
 			}
 		}
 		fuse_vm_page_unlock_queues();
-		VM_OBJECT_WUNLOCK(vp->v_object);
+		VM_OBJECT_UNLOCK(vp->v_object);
 		return 0;
 	}
 	fuse_vm_page_unlock_queues();
-	VM_OBJECT_WUNLOCK(vp->v_object);
+	VM_OBJECT_UNLOCK(vp->v_object);
 
 	/*
 	 * We use only the kva address for the buffer, but this is extremely
@@ -1803,7 +1803,7 @@
 
 	if (error && (uio.uio_resid == count)) {
 		FS_DEBUG("error %d\n", error);
-		VM_OBJECT_WLOCK(vp->v_object);
+		VM_OBJECT_LOCK(vp->v_object);
 		fuse_vm_page_lock_queues();
 		for (i = 0; i < npages; ++i) {
 			if (i != ap->a_reqpage) {
@@ -1813,7 +1813,7 @@
 			}
 		}
 		fuse_vm_page_unlock_queues();
-		VM_OBJECT_WUNLOCK(vp->v_object);
+		VM_OBJECT_UNLOCK(vp->v_object);
 		return VM_PAGER_ERROR;
 	}
 	/*
@@ -1823,7 +1823,7 @@
 	 */
 
 	size = count - uio.uio_resid;
-	VM_OBJECT_WLOCK(vp->v_object);
+	VM_OBJECT_LOCK(vp->v_object);
 	fuse_vm_page_lock_queues();
 	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
 		vm_page_t m;
@@ -1843,7 +1843,7 @@
 			 * Read operation filled a partial page.
 			 */
 			m->valid = 0;
-			vm_page_set_valid_range(m, 0, size - toff);
+			vm_page_set_valid(m, 0, size - toff);
 			KASSERT(m->dirty == 0,
 			    ("fuse_getpages: page %p is dirty", m));
 		} else {
@@ -1854,11 +1854,36 @@
 			 */
 			;
 		}
-		if (i != ap->a_reqpage)
-			vm_page_readahead_finish(m);
+		if (i != ap->a_reqpage) {
+			/*
+			 * Whether or not to leave the page activated is up in
+			 * the air, but we should put the page on a page queue
+			 * somewhere (it already is in the object). Empirical
+			 * results suggest that deactivating the page works
+			 * best.
+			 */
+
+			/*
+			 * just in case someone was asking for this page we
+			 * now tell them that it is ok to use
+			 */
+			if (!error) {
+#ifdef VPO_WANTED
+				if (m->oflags & VPO_WANTED)
+#else
+				if (m->flags & PG_WANTED)
+#endif
+					vm_page_activate(m);
+				else
+					vm_page_deactivate(m);
+				vm_page_wakeup(m);
+			} else {
+				vm_page_free(m);
+			}
+		}
 	}
 	fuse_vm_page_unlock_queues();
-	VM_OBJECT_WUNLOCK(vp->v_object);
+	VM_OBJECT_UNLOCK(vp->v_object);
 	return 0;
 }
 
@@ -1947,9 +1972,9 @@
 
 		for (i = 0; i < nwritten; i++) {
 			rtvals[i] = VM_PAGER_OK;
-			VM_OBJECT_WLOCK(pages[i]->object);
+			VM_OBJECT_LOCK(pages[i]->object);
 			vm_page_undirty(pages[i]);
-			VM_OBJECT_WUNLOCK(pages[i]->object);
+			VM_OBJECT_UNLOCK(pages[i]->object);
 		}
 	}
 	return rtvals[0];