/* This file contains the routines related to vnodes.
 * The entry points are:
 *
 *  get_free_vnode - get a pointer to a free vnode object
 *  find_vnode - find a vnode by FS endpoint and inode number
 *  init_vnodes - initialize the vnode table
 *  is_vnode_locked - check whether a vnode is locked or has a pending lock
 *  lock_vnode - lock a vnode with the given access type
 *  unlock_vnode - unlock a previously locked vnode
 *  upgrade_vnode_lock - upgrade a vnode lock to exclusive access
 *  dup_vnode - duplicate vnode (i.e. increase counter)
 *  put_vnode - drop vnode (i.e. decrease counter)
 *  vnode_clean_refs - tell the FS to drop all references but one
 */
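
/* Illustrative usage sketch (not part of this file; the real callers live
 * elsewhere in VFS). A caller that knows an inode's FS endpoint and inode
 * number roughly does the following to obtain and later drop a reference:
 *
 *	struct vnode *vp;
 *
 *	if ((vp = find_vnode(fs_e, ino)) != NULL)
 *		dup_vnode(vp);		   slot already in use: bump counter
 *	else if ((vp = get_free_vnode()) == NULL)
 *		return(err_code);	   table full: err_code is ENFILE
 *	else
 *		...			   caller fills in the fresh slot
 *
 *	...				   use the vnode, locking as needed
 *	put_vnode(vp);			   done: drop the reference
 */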

#include "fs.h"
#include "vnode.h"
#include "vmnt.h"
#include "file.h"
#include <minix/vfsif.h>
#include <assert.h>

/* Is vnode pointer reasonable? */
#if NDEBUG
#define SANEVP(v)
#define CHECKVN(v)
#define ASSERTVP(v)
#else
#define SANEVP(v) ((((v) >= &vnode[0] && (v) < &vnode[NR_VNODES])))

#define BADVP(v, f, l) printf("%s:%d: bad vp %p\n", f, l, v)

/* vp check that returns 0 for use in check_vrefs() */
#define CHECKVN(v) if(!SANEVP(v)) {				\
	BADVP(v, __FILE__, __LINE__);	\
	return 0;	\
}

/* vp check that panics */
#define ASSERTVP(v) if(!SANEVP(v)) { \
	BADVP(v, __FILE__, __LINE__); panic("bad vp"); }
#endif

#if LOCK_DEBUG
/*===========================================================================*
 *				check_vnode_locks_by_me			     *
 *===========================================================================*/
void check_vnode_locks_by_me(struct fproc *rfp)
{
/* Check whether this thread still has locks held on vnodes */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++) {
	if (tll_locked_by_me(&vp->v_lock)) {
		panic("Thread %d still holds vnode lock on vp %p call_nr=%d\n",
		      mthread_self(), vp, job_call_nr);
	}
  }

  if (rfp->fp_vp_rdlocks != 0)
	panic("Thread %d still holds read locks on a vnode (%d) call_nr=%d\n",
	      mthread_self(), rfp->fp_vp_rdlocks, job_call_nr);
}
#endif

/*===========================================================================*
 *				check_vnode_locks			     *
 *===========================================================================*/
void
check_vnode_locks(void)
{
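/* Sanity check: verify that no vnode in the table is locked or has a pending
 * lock; panic if any are found. */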
  struct vnode *vp;
  int count = 0;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; vp++)
	if (is_vnode_locked(vp)) {
		count++;
	}

  if (count) panic("%d locked vnodes\n", count);
#if 0
  printf("check_vnode_locks OK\n");
#endif
}

/*===========================================================================*
 *				get_free_vnode				     *
 *===========================================================================*/
struct vnode *
get_free_vnode(void)
{
/* Find a free vnode slot in the vnode table (the slot is not actually
 * claimed by this call). */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
	if (vp->v_ref_count == 0 && !is_vnode_locked(vp)) {
		vp->v_uid  = -1;
		vp->v_gid  = -1;
		vp->v_sdev = NO_DEV;
		vp->v_mapfs_e = NONE;
		vp->v_mapfs_count = 0;
		vp->v_mapinode_nr = 0;
		return(vp);
	}
  }

  err_code = ENFILE;
  return(NULL);
}


/*===========================================================================*
 *				find_vnode				     *
 *===========================================================================*/
struct vnode *find_vnode(int fs_e, ino_t ino)
{
/* Find the vnode specified by FS endpoint and inode number in the vnode
 * table. */
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp)
	if (vp->v_ref_count > 0 && vp->v_inode_nr == ino && vp->v_fs_e == fs_e)
		return(vp);

  return(NULL);
}

/*===========================================================================*
 *				is_vnode_locked				     *
 *===========================================================================*/
int is_vnode_locked(struct vnode *vp)
{
/* Find out whether a thread holds a lock on this vnode or is trying to obtain
 * a lock. */
  ASSERTVP(vp);

  return(tll_islocked(&vp->v_lock) || tll_haspendinglock(&vp->v_lock));
}

/*===========================================================================*
 *				init_vnodes				     *
 *===========================================================================*/
void init_vnodes(void)
{
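/* Initialize the vnode table: mark every slot as unused and set up its lock
 * via tll_init(). */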
  struct vnode *vp;

  for (vp = &vnode[0]; vp < &vnode[NR_VNODES]; ++vp) {
	vp->v_fs_e = NONE;
	vp->v_mapfs_e = NONE;
	vp->v_inode_nr = 0;
	vp->v_ref_count = 0;
	vp->v_fs_count = 0;
	vp->v_mapfs_count = 0;
	tll_init(&vp->v_lock);
  }
}

/*===========================================================================*
 *				lock_vnode				     *
 *===========================================================================*/
int lock_vnode(struct vnode *vp, tll_access_t locktype)
{
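/* Acquire a lock of the given access type on this vnode. Returns OK on
 * success, or EBUSY when tll_lock() could not grant the lock in the normal
 * way (e.g., when the calling thread already holds a lock on this vnode, a
 * case that put_vnode() below relies on detecting). */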
  int r;

  ASSERTVP(vp);

  r = tll_lock(&vp->v_lock, locktype);

#if LOCK_DEBUG
  if (locktype == VNODE_READ) {
	fp->fp_vp_rdlocks++;
  }
#endif

  if (r == EBUSY) return(r);
  return(OK);
}

/*===========================================================================*
 *				unlock_vnode				     *
 *===========================================================================*/
void unlock_vnode(struct vnode *vp)
{
#if LOCK_DEBUG
  int i;
  register struct vnode *rvp;
  struct worker_thread *w;
#endif
  ASSERTVP(vp);

#if LOCK_DEBUG
  /* Decrease read-only lock counter when not locked as VNODE_OPCL or
   * VNODE_WRITE */
  if (!tll_locked_by_me(&vp->v_lock)) {
	fp->fp_vp_rdlocks--;
  }

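  /* Walk the write and serial wait queues of every vnode lock and assert
   * that this worker thread is not still queued on any of them. */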
  for (i = 0; i < NR_VNODES; i++) {
	rvp = &vnode[i];

	w = rvp->v_lock.t_write;
	assert(w != self);
	while (w && w->w_next != NULL) {
		w = w->w_next;
		assert(w != self);
	}

	w = rvp->v_lock.t_serial;
	assert(w != self);
	while (w && w->w_next != NULL) {
		w = w->w_next;
		assert(w != self);
	}
  }
#endif

  tll_unlock(&vp->v_lock);
}

/*===========================================================================*
 *				upgrade_vnode_lock			     *
 *===========================================================================*/
void upgrade_vnode_lock(struct vnode *vp)
{
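/* Upgrade the lock held on this vnode to exclusive (write) access. */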
  ASSERTVP(vp);
  tll_upgrade(&vp->v_lock);
}

/*===========================================================================*
 *				dup_vnode				     *
 *===========================================================================*/
void dup_vnode(struct vnode *vp)
{
/* dup_vnode() is called to increment the vnode's reference counter (and thus
 * the number of references to the underlying inode).
 */
  ASSERTVP(vp);
  vp->v_ref_count++;
}


/*===========================================================================*
 *				put_vnode				     *
 *===========================================================================*/
void put_vnode(struct vnode *vp)
{
/* Decrease the vnode's usage counter and the inode's usage counter in the
 * corresponding FS process. Decreasing fs_count each time we decrease the
 * ref count would lead to poor performance; instead, fs_count is only
 * decreased when the ref count hits zero. However, this could cause fs_count
 * to wrap. To prevent that, fs_count is dropped back to 1 once it exceeds
 * 256. We maintain fs_count as a sanity check to make sure VFS and the FS
 * stay in sync.
 */
  int r, lock_vp;

  ASSERTVP(vp);

  /* Lock vnode. It's quite possible this thread already has a lock on this
   * vnode. That's no problem, because the reference counter will not decrease
   * to zero in that case. However, if the counter does decrease to zero *and*
   * is already locked, we have a consistency problem somewhere. */
  lock_vp = lock_vnode(vp, VNODE_OPCL);

  if (vp->v_ref_count > 1) {
	/* Decrease counter */
	vp->v_ref_count--;
	if (vp->v_fs_count > 256)
		vnode_clean_refs(vp);
	if (lock_vp != EBUSY) unlock_vnode(vp);
	return;
  }

  /* If we already had a lock, there is a consistency problem */
  assert(lock_vp != EBUSY);
  upgrade_vnode_lock(vp); /* Acquire exclusive access */

  /* A vnode that's not in use can't be put back. */
  if (vp->v_ref_count <= 0)
	panic("put_vnode failed: bad v_ref_count %d\n", vp->v_ref_count);

  /* fs_count should indicate that the file is in use. */
  if (vp->v_fs_count <= 0)
	panic("put_vnode failed: bad v_fs_count %d\n", vp->v_fs_count);

  /* Tell FS we don't need this inode to be open anymore. */
  r = req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count);

  if (r != OK) {
	printf("VFS: putnode failed: %d\n", r);
	util_stacktrace();
  }

  /* This inode could've been mapped. If so, tell mapped FS to close it as
   * well. If mapped onto same FS, this putnode is not needed. */
  if (vp->v_mapfs_e != NONE && vp->v_mapfs_e != vp->v_fs_e)
	req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count);

  vp->v_fs_count = 0;
  vp->v_ref_count = 0;
  vp->v_mapfs_count = 0;

  unlock_vnode(vp);
}


/*===========================================================================*
 *				vnode_clean_refs			     *
 *===========================================================================*/
void vnode_clean_refs(struct vnode *vp)
{
/* Tell the underlying FS to drop all references but one. */

  if (vp == NULL) return;
  if (vp->v_fs_count <= 1) return;	/* Nothing to do */

  /* Drop all references except one */
  req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count - 1);
  vp->v_fs_count = 1;
}