blob: 42b4b52644eb999a28009655f5ffb8ccb53212cc [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Nathan Scott7b718762005-11-02 14:58:39 +11002 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 *
Nathan Scott7b718762005-11-02 14:58:39 +11005 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * published by the Free Software Foundation.
8 *
Nathan Scott7b718762005-11-02 14:58:39 +11009 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 *
Nathan Scott7b718762005-11-02 14:58:39 +110014 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Linus Torvalds1da177e2005-04-16 15:20:36 -070017 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include "xfs.h"
Nathan Scotta844f452005-11-02 14:38:42 +110019#include "xfs_fs.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include "xfs_types.h"
Nathan Scotta844f452005-11-02 14:38:42 +110021#include "xfs_bit.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include "xfs_log.h"
Nathan Scotta844f452005-11-02 14:38:42 +110023#include "xfs_inum.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include "xfs_trans.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
Nathan Scotta844f452005-11-02 14:38:42 +110027#include "xfs_dir2.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include "xfs_mount.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include "xfs_bmap_btree.h"
Nathan Scotta844f452005-11-02 14:38:42 +110030#include "xfs_alloc_btree.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include "xfs_ialloc_btree.h"
Nathan Scotta844f452005-11-02 14:38:42 +110032#include "xfs_dir2_sf.h"
33#include "xfs_attr_sf.h"
34#include "xfs_dinode.h"
35#include "xfs_inode.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include "xfs_btree.h"
37#include "xfs_ialloc.h"
38#include "xfs_alloc.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include "xfs_error.h"
Christoph Hellwig0b1b2132009-12-14 23:14:59 +000040#include "xfs_trace.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
42
43#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
44
45#define XFSA_FIXUP_BNO_OK 1
46#define XFSA_FIXUP_CNT_OK 2
47
Dave Chinnered3b4d62010-05-21 12:07:08 +100048static int
49xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
50 xfs_agblock_t bno, xfs_extlen_t len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070051
Linus Torvalds1da177e2005-04-16 15:20:36 -070052/*
53 * Prototypes for per-ag allocation routines
54 */
55
56STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
57STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
58STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
59STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
60 xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
61
62/*
63 * Internal functions.
64 */
65
66/*
Christoph Hellwigfe033cc2008-10-30 16:56:09 +110067 * Lookup the record equal to [bno, len] in the btree given by cur.
68 */
69STATIC int /* error */
70xfs_alloc_lookup_eq(
71 struct xfs_btree_cur *cur, /* btree cursor */
72 xfs_agblock_t bno, /* starting block of extent */
73 xfs_extlen_t len, /* length of extent */
74 int *stat) /* success/failure */
75{
76 cur->bc_rec.a.ar_startblock = bno;
77 cur->bc_rec.a.ar_blockcount = len;
78 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
79}
80
81/*
82 * Lookup the first record greater than or equal to [bno, len]
83 * in the btree given by cur.
84 */
85STATIC int /* error */
86xfs_alloc_lookup_ge(
87 struct xfs_btree_cur *cur, /* btree cursor */
88 xfs_agblock_t bno, /* starting block of extent */
89 xfs_extlen_t len, /* length of extent */
90 int *stat) /* success/failure */
91{
92 cur->bc_rec.a.ar_startblock = bno;
93 cur->bc_rec.a.ar_blockcount = len;
94 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
95}
96
97/*
98 * Lookup the first record less than or equal to [bno, len]
99 * in the btree given by cur.
100 */
101STATIC int /* error */
102xfs_alloc_lookup_le(
103 struct xfs_btree_cur *cur, /* btree cursor */
104 xfs_agblock_t bno, /* starting block of extent */
105 xfs_extlen_t len, /* length of extent */
106 int *stat) /* success/failure */
107{
108 cur->bc_rec.a.ar_startblock = bno;
109 cur->bc_rec.a.ar_blockcount = len;
110 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
111}
112
Christoph Hellwig278d0ca2008-10-30 16:56:32 +1100113/*
114 * Update the record referred to by cur to the value given
115 * by [bno, len].
116 * This either works (return 0) or gets an EFSCORRUPTED error.
117 */
118STATIC int /* error */
119xfs_alloc_update(
120 struct xfs_btree_cur *cur, /* btree cursor */
121 xfs_agblock_t bno, /* starting block of extent */
122 xfs_extlen_t len) /* length of extent */
123{
124 union xfs_btree_rec rec;
125
126 rec.alloc.ar_startblock = cpu_to_be32(bno);
127 rec.alloc.ar_blockcount = cpu_to_be32(len);
128 return xfs_btree_update(cur, &rec);
129}
Christoph Hellwigfe033cc2008-10-30 16:56:09 +1100130
131/*
Christoph Hellwig8cc938f2008-10-30 16:58:11 +1100132 * Get the data from the pointed-to record.
133 */
134STATIC int /* error */
135xfs_alloc_get_rec(
136 struct xfs_btree_cur *cur, /* btree cursor */
137 xfs_agblock_t *bno, /* output: starting block of extent */
138 xfs_extlen_t *len, /* output: length of extent */
139 int *stat) /* output: success/failure */
140{
141 union xfs_btree_rec *rec;
142 int error;
143
144 error = xfs_btree_get_rec(cur, &rec, stat);
145 if (!error && *stat == 1) {
146 *bno = be32_to_cpu(rec->alloc.ar_startblock);
147 *len = be32_to_cpu(rec->alloc.ar_blockcount);
148 }
149 return error;
150}
151
152/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153 * Compute aligned version of the found extent.
154 * Takes alignment and min length into account.
155 */
David Chinner12375c82008-04-10 12:21:32 +1000156STATIC void
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157xfs_alloc_compute_aligned(
158 xfs_agblock_t foundbno, /* starting block in found extent */
159 xfs_extlen_t foundlen, /* length in found extent */
160 xfs_extlen_t alignment, /* alignment for allocation */
161 xfs_extlen_t minlen, /* minimum length for allocation */
162 xfs_agblock_t *resbno, /* result block number */
163 xfs_extlen_t *reslen) /* result length */
164{
165 xfs_agblock_t bno;
166 xfs_extlen_t diff;
167 xfs_extlen_t len;
168
169 if (alignment > 1 && foundlen >= minlen) {
170 bno = roundup(foundbno, alignment);
171 diff = bno - foundbno;
172 len = diff >= foundlen ? 0 : foundlen - diff;
173 } else {
174 bno = foundbno;
175 len = foundlen;
176 }
177 *resbno = bno;
178 *reslen = len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179}
180
181/*
182 * Compute best start block and diff for "near" allocations.
183 * freelen >= wantlen already checked by caller.
184 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	if (freebno >= wantbno) {
		/*
		 * Freespace starts at or after the target: the best we can
		 * do is the aligned start of the freespace.  NULLAGBLOCK
		 * means the round-up pushed us past the end of the extent.
		 */
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		/*
		 * Freespace straddles the target and we must align: try the
		 * aligned block at/above wantbno (newbno1) and the one
		 * alignment step below it (newbno2), then keep whichever
		 * yields the longer allocation, breaking ties by distance
		 * from wantbno.
		 */
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		/* Unaligned and the freespace covers the target: exact hit. */
		newbno1 = wantbno;
	} else if (alignment > 1) {
		/*
		 * Freespace ends before the target ends: take the aligned
		 * start closest to the end that still leaves wantlen blocks,
		 * stepping back one alignment unit if the round-up overshot
		 * and there is room to do so.
		 */
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		/* Unaligned: butt the allocation up against the free end. */
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	/* Return 0 (perfect) when no candidate exists; caller checks bno. */
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
240
241/*
242 * Fix up the length, based on mod and prod.
243 * len should be k * prod + mod for some k.
244 * If len is too small it is returned unchanged.
245 * If len hits maxlen it is left alone.
246 */
247STATIC void
248xfs_alloc_fix_len(
249 xfs_alloc_arg_t *args) /* allocation argument structure */
250{
251 xfs_extlen_t k;
252 xfs_extlen_t rlen;
253
254 ASSERT(args->mod < args->prod);
255 rlen = args->len;
256 ASSERT(rlen >= args->minlen);
257 ASSERT(rlen <= args->maxlen);
258 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
259 (args->mod == 0 && rlen < args->prod))
260 return;
261 k = rlen % args->prod;
262 if (k == args->mod)
263 return;
264 if (k > args->mod) {
265 if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
266 return;
267 } else {
268 if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
269 (int)args->minlen)
270 return;
271 }
272 ASSERT(rlen >= args->minlen);
273 ASSERT(rlen <= args->maxlen);
274 args->len = rlen;
275}
276
277/*
278 * Fix up length if there is too little space left in the a.g.
279 * Return 1 if ok, 0 if too little, should give up.
280 */
281STATIC int
282xfs_alloc_fix_minleft(
283 xfs_alloc_arg_t *args) /* allocation argument structure */
284{
285 xfs_agf_t *agf; /* a.g. freelist header */
286 int diff; /* free space difference */
287
288 if (args->minleft == 0)
289 return 1;
290 agf = XFS_BUF_TO_AGF(args->agbp);
Christoph Hellwig16259e72005-11-02 15:11:25 +1100291 diff = be32_to_cpu(agf->agf_freeblks)
292 + be32_to_cpu(agf->agf_flcount)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 - args->len - args->minleft;
294 if (diff >= 0)
295 return 1;
296 args->len += diff; /* shrink the allocated space */
297 if (args->len >= args->minlen)
298 return 1;
299 args->agbno = NULLAGBLOCK;
300 return 0;
301}
302
303/*
304 * Update the two btrees, logically removing from freespace the extent
305 * starting at rbno, rlen blocks. The extent is contained within the
306 * actual (current) free extent fbno for flen blocks.
307 * Flags are passed in indicating whether the cursors are set to the
308 * relevant records.
309 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */

	/*
	 * Look up the record in the by-size tree if necessary.
	 * XFSA_FIXUP_CNT_OK promises the cursor is already positioned
	 * at [fbno, flen]; under DEBUG we re-read it to verify that.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 * Same contract for XFSA_FIXUP_BNO_OK and bno_cur.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}

#ifdef DEBUG
	/*
	 * When both trees are single-level they index the same records,
	 * so their leaf record counts must agree; mismatch = corruption.
	 */
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		/* Allocated from the front; remainder at the back. */
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		/* Allocated from the back; remainder at the front. */
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		/* Allocated from the middle; remainders on both sides. */
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(i == 1);
	/*
	 * Add new by-size btree entry(s).
	 * The lookup must fail (i == 0) — a hit would mean a duplicate
	 * freespace record — and leaves the cursor at the insert point.
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	return 0;
}
447
448/*
449 * Read in the allocation group free block array.
450 */
451STATIC int /* error */
452xfs_alloc_read_agfl(
453 xfs_mount_t *mp, /* mount point structure */
454 xfs_trans_t *tp, /* transaction pointer */
455 xfs_agnumber_t agno, /* allocation group number */
456 xfs_buf_t **bpp) /* buffer for the ag free block array */
457{
458 xfs_buf_t *bp; /* return value */
459 int error;
460
461 ASSERT(agno != NULLAGNUMBER);
462 error = xfs_trans_read_buf(
463 mp, tp, mp->m_ddev_targp,
464 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
465 XFS_FSS_TO_BB(mp, 1), 0, &bp);
466 if (error)
467 return error;
468 ASSERT(bp);
469 ASSERT(!XFS_BUF_GETERROR(bp));
470 XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF);
471 *bpp = bp;
472 return 0;
473}
474
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475/*
476 * Allocation group level functions.
477 */
478
479/*
480 * Allocate a variable extent in the allocation group agno.
481 * Type and bno are used to determine where in the allocation group the
482 * extent will start.
483 * Extent's length (returned in *len) will be between minlen and maxlen,
484 * and of the form k * prod + mod unless there's nothing that large.
485 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
486 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);
	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (error)
		return error;
	/*
	 * If the allocation worked, need to change the agf structure
	 * (and log it), and the superblock.
	 * agbno == NULLAGBLOCK means the sub-allocator found nothing.
	 */
	if (args->agbno != NULLAGBLOCK) {
		xfs_agf_t	*agf;	/* allocation group freelist header */
		/* signed copy of len for the superblock delta below */
		long		slen = (long)args->len;

		ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
		ASSERT(!(args->wasfromfl) || !args->isfl);
		ASSERT(args->agbno % args->alignment == 0);
		/*
		 * Freelist allocations are accounted elsewhere; only
		 * btree-sourced extents adjust the AGF free counts here.
		 */
		if (!(args->wasfromfl)) {

			agf = XFS_BUF_TO_AGF(args->agbp);
			be32_add_cpu(&agf->agf_freeblks, -(args->len));
			xfs_trans_agblocks_delta(args->tp,
						 -((long)(args->len)));
			args->pag->pagf_freeblks -= args->len;
			ASSERT(be32_to_cpu(agf->agf_freeblks) <=
				be32_to_cpu(agf->agf_length));
			xfs_alloc_log_agf(args->tp, args->agbp,
						XFS_AGF_FREEBLKS);
			/*
			 * Search the busylist for these blocks and mark the
			 * transaction as synchronous if blocks are found. This
			 * avoids the need to block due to a synchronous log
			 * force to ensure correct ordering as the synchronous
			 * transaction will guarantee that for us.
			 */
			if (xfs_alloc_busy_search(args->mp, args->agno,
						args->agbno, args->len))
				xfs_trans_set_sync(args->tp);
		}
		/*
		 * Charge the superblock free-block count; delayed-alloc
		 * extents were already reserved, so use the RES variant.
		 */
		if (!args->isfl)
			xfs_trans_mod_sb(args->tp,
					 args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
					 XFS_TRANS_SB_FDBLOCKS, -slen);
		XFS_STATS_INC(xs_allocx);
		XFS_STATS_ADD(xs_allocb, args->len);
	}
	return 0;
}
560
561/*
562 * Allocate a variable extent at exactly agno/bno.
563 * Extent's length (returned in *len) will be between minlen and maxlen,
564 * and of the form k * prod + mod unless there's nothing that large.
565 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
566 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	xfs_agblock_t	end;	/* end of allocated extent */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_agblock_t	fend;	/* end block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	int		i;	/* success/failure of operation */
	xfs_agblock_t	maxend;	/* end of maximal extent */
	xfs_agblock_t	minend;	/* end of minimal extent */
	xfs_extlen_t	rlen;	/* length of returned extent */

	/* Exact-bno allocations cannot also request alignment. */
	ASSERT(args->alignment == 1);
	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find it, return null.
		 */
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Grab the freespace record.
	 */
	if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);
	minend = args->agbno + args->minlen;
	maxend = args->agbno + args->maxlen;
	fend = fbno + flen;
	/*
	 * Give up if the freespace isn't long enough for the minimum request.
	 */
	if (fend < minend) {
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 */
	end = XFS_AGBLOCK_MIN(fend, maxend);
	/*
	 * Fix the length according to mod and prod if given.
	 */
	args->len = end - args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args)) {
		/* Honoring minleft failed; not an error, just no space. */
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
		return 0;
	}
	rlen = args->len;
	ASSERT(args->agbno + rlen <= fend);
	end = args->agbno + rlen;
	/*
	 * We are allocating agbno for rlen [agbno .. end]
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	/*
	 * Carve [agbno, len] out of [fbno, flen] in both freespace btrees;
	 * bno_cur is already positioned (XFSA_FIXUP_BNO_OK).
	 */
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	trace_xfs_alloc_exact_done(args);
	args->wasfromfl = 0;
	return 0;

error0:
	/* cnt_cur is already torn down by the time we can get here. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}
664
665/*
666 * Allocate a variable extent near bno in the allocation group agno.
667 * Extent's length (returned in len) will be between minlen and maxlen,
668 * and of the form k * prod + mod unless there's nothing that large.
669 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
670 */
671STATIC int /* error */
672xfs_alloc_ag_vextent_near(
673 xfs_alloc_arg_t *args) /* allocation argument structure */
674{
675 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
676 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
677 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678 xfs_agblock_t gtbno; /* start bno of right side entry */
679 xfs_agblock_t gtbnoa; /* aligned ... */
680 xfs_extlen_t gtdiff; /* difference to right side entry */
681 xfs_extlen_t gtlen; /* length of right side entry */
682 xfs_extlen_t gtlena; /* aligned ... */
683 xfs_agblock_t gtnew; /* useful start bno of right side */
684 int error; /* error code */
685 int i; /* result code, temporary */
686 int j; /* result code, temporary */
687 xfs_agblock_t ltbno; /* start bno of left side entry */
688 xfs_agblock_t ltbnoa; /* aligned ... */
689 xfs_extlen_t ltdiff; /* difference to left side entry */
690 /*REFERENCED*/
691 xfs_agblock_t ltend; /* end bno of left side entry */
692 xfs_extlen_t ltlen; /* length of left side entry */
693 xfs_extlen_t ltlena; /* aligned ... */
694 xfs_agblock_t ltnew; /* useful start bno of left side */
695 xfs_extlen_t rlen; /* length of returned extent */
696#if defined(DEBUG) && defined(__KERNEL__)
697 /*
698 * Randomly don't execute the first algorithm.
699 */
700 int dofirst; /* set to do first algorithm */
701
Joe Perchese7a23a92007-05-08 13:49:03 +1000702 dofirst = random32() & 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703#endif
704 /*
705 * Get a cursor for the by-size btree.
706 */
Christoph Hellwig561f7d12008-10-30 16:53:59 +1100707 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
708 args->agno, XFS_BTNUM_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 ltlen = 0;
710 bno_cur_lt = bno_cur_gt = NULL;
711 /*
712 * See if there are any free extents as big as maxlen.
713 */
714 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
715 goto error0;
716 /*
717 * If none, then pick up the last entry in the tree unless the
718 * tree is empty.
719 */
720 if (!i) {
721 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
722 &ltlen, &i)))
723 goto error0;
724 if (i == 0 || ltlen == 0) {
725 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
726 return 0;
727 }
728 ASSERT(i == 1);
729 }
730 args->wasfromfl = 0;
731 /*
732 * First algorithm.
733 * If the requested extent is large wrt the freespaces available
734 * in this a.g., then the cursor will be pointing to a btree entry
735 * near the right edge of the tree. If it's in the last btree leaf
736 * block, then we just examine all the entries in that block
737 * that are big enough, and pick the best one.
738 * This is written as a while loop so we can break out of it,
739 * but we never loop back to the top.
740 */
741 while (xfs_btree_islastblock(cnt_cur, 0)) {
742 xfs_extlen_t bdiff;
743 int besti=0;
744 xfs_extlen_t blen=0;
745 xfs_agblock_t bnew=0;
746
747#if defined(DEBUG) && defined(__KERNEL__)
748 if (!dofirst)
749 break;
750#endif
751 /*
752 * Start from the entry that lookup found, sequence through
753 * all larger free blocks. If we're actually pointing at a
754 * record smaller than maxlen, go to the start of this block,
755 * and skip all those smaller than minlen.
756 */
757 if (ltlen || args->alignment > 1) {
758 cnt_cur->bc_ptrs[0] = 1;
759 do {
760 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
761 &ltlen, &i)))
762 goto error0;
763 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
764 if (ltlen >= args->minlen)
765 break;
Christoph Hellwig637aa502008-10-30 16:55:45 +1100766 if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767 goto error0;
768 } while (i);
769 ASSERT(ltlen >= args->minlen);
770 if (!i)
771 break;
772 }
773 i = cnt_cur->bc_ptrs[0];
774 for (j = 1, blen = 0, bdiff = 0;
775 !error && j && (blen < args->maxlen || bdiff > 0);
Christoph Hellwig637aa502008-10-30 16:55:45 +1100776 error = xfs_btree_increment(cnt_cur, 0, &j)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 /*
778 * For each entry, decide if it's better than
779 * the previous best entry.
780 */
781 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
782 goto error0;
783 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
David Chinner12375c82008-04-10 12:21:32 +1000784 xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment,
785 args->minlen, &ltbnoa, &ltlena);
David Chinnere6430032008-04-17 16:49:49 +1000786 if (ltlena < args->minlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787 continue;
788 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
789 xfs_alloc_fix_len(args);
790 ASSERT(args->len >= args->minlen);
791 if (args->len < blen)
792 continue;
793 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
794 args->alignment, ltbno, ltlen, &ltnew);
795 if (ltnew != NULLAGBLOCK &&
796 (args->len > blen || ltdiff < bdiff)) {
797 bdiff = ltdiff;
798 bnew = ltnew;
799 blen = args->len;
800 besti = cnt_cur->bc_ptrs[0];
801 }
802 }
803 /*
804 * It didn't work. We COULD be in a case where
805 * there's a good record somewhere, so try again.
806 */
807 if (blen == 0)
808 break;
809 /*
810 * Point at the best entry, and retrieve it again.
811 */
812 cnt_cur->bc_ptrs[0] = besti;
813 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
814 goto error0;
815 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
816 ltend = ltbno + ltlen;
Christoph Hellwig16259e72005-11-02 15:11:25 +1100817 ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700818 args->len = blen;
819 if (!xfs_alloc_fix_minleft(args)) {
820 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
Christoph Hellwig0b1b2132009-12-14 23:14:59 +0000821 trace_xfs_alloc_near_nominleft(args);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700822 return 0;
823 }
824 blen = args->len;
825 /*
826 * We are allocating starting at bnew for blen blocks.
827 */
828 args->agbno = bnew;
829 ASSERT(bnew >= ltbno);
830 ASSERT(bnew + blen <= ltend);
831 /*
832 * Set up a cursor for the by-bno tree.
833 */
Christoph Hellwig561f7d12008-10-30 16:53:59 +1100834 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
835 args->agbp, args->agno, XFS_BTNUM_BNO);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836 /*
837 * Fix up the btree entries.
838 */
839 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
840 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
841 goto error0;
842 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
843 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
Christoph Hellwig0b1b2132009-12-14 23:14:59 +0000844
845 trace_xfs_alloc_near_first(args);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700846 return 0;
847 }
848 /*
849 * Second algorithm.
850 * Search in the by-bno tree to the left and to the right
851 * simultaneously, until in each case we find a space big enough,
852 * or run into the edge of the tree. When we run into the edge,
853 * we deallocate that cursor.
854 * If both searches succeed, we compare the two spaces and pick
855 * the better one.
856 * With alignment, it's possible for both to fail; the upper
857 * level algorithm that picks allocation groups for allocations
858 * is not supposed to do this.
859 */
860 /*
861 * Allocate and initialize the cursor for the leftward search.
862 */
Christoph Hellwig561f7d12008-10-30 16:53:59 +1100863 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
864 args->agno, XFS_BTNUM_BNO);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700865 /*
866 * Lookup <= bno to find the leftward search's starting point.
867 */
868 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
869 goto error0;
870 if (!i) {
871 /*
872 * Didn't find anything; use this cursor for the rightward
873 * search.
874 */
875 bno_cur_gt = bno_cur_lt;
876 bno_cur_lt = NULL;
877 }
878 /*
879 * Found something. Duplicate the cursor for the rightward search.
880 */
881 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
882 goto error0;
883 /*
884 * Increment the cursor, so we will point at the entry just right
885 * of the leftward entry if any, or to the leftmost entry.
886 */
Christoph Hellwig637aa502008-10-30 16:55:45 +1100887 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700888 goto error0;
889 if (!i) {
890 /*
891 * It failed, there are no rightward entries.
892 */
893 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
894 bno_cur_gt = NULL;
895 }
896 /*
897 * Loop going left with the leftward cursor, right with the
898 * rightward cursor, until either both directions give up or
899 * we find an entry at least as big as minlen.
900 */
901 do {
902 if (bno_cur_lt) {
903 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
904 goto error0;
905 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
David Chinner12375c82008-04-10 12:21:32 +1000906 xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment,
907 args->minlen, &ltbnoa, &ltlena);
908 if (ltlena >= args->minlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909 break;
Christoph Hellwig8df4da42008-10-30 16:55:58 +1100910 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911 goto error0;
912 if (!i) {
913 xfs_btree_del_cursor(bno_cur_lt,
914 XFS_BTREE_NOERROR);
915 bno_cur_lt = NULL;
916 }
917 }
918 if (bno_cur_gt) {
919 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
920 goto error0;
921 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
David Chinner12375c82008-04-10 12:21:32 +1000922 xfs_alloc_compute_aligned(gtbno, gtlen, args->alignment,
923 args->minlen, &gtbnoa, &gtlena);
924 if (gtlena >= args->minlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700925 break;
Christoph Hellwig637aa502008-10-30 16:55:45 +1100926 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927 goto error0;
928 if (!i) {
929 xfs_btree_del_cursor(bno_cur_gt,
930 XFS_BTREE_NOERROR);
931 bno_cur_gt = NULL;
932 }
933 }
934 } while (bno_cur_lt || bno_cur_gt);
935 /*
936 * Got both cursors still active, need to find better entry.
937 */
938 if (bno_cur_lt && bno_cur_gt) {
939 /*
940 * Left side is long enough, look for a right side entry.
941 */
942 if (ltlena >= args->minlen) {
943 /*
944 * Fix up the length.
945 */
946 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
947 xfs_alloc_fix_len(args);
948 rlen = args->len;
949 ltdiff = xfs_alloc_compute_diff(args->agbno, rlen,
950 args->alignment, ltbno, ltlen, &ltnew);
951 /*
952 * Not perfect.
953 */
954 if (ltdiff) {
955 /*
956 * Look until we find a better one, run out of
957 * space, or run off the end.
958 */
959 while (bno_cur_lt && bno_cur_gt) {
960 if ((error = xfs_alloc_get_rec(
961 bno_cur_gt, &gtbno,
962 &gtlen, &i)))
963 goto error0;
964 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
965 xfs_alloc_compute_aligned(gtbno, gtlen,
966 args->alignment, args->minlen,
967 &gtbnoa, &gtlena);
968 /*
969 * The left one is clearly better.
970 */
971 if (gtbnoa >= args->agbno + ltdiff) {
972 xfs_btree_del_cursor(
973 bno_cur_gt,
974 XFS_BTREE_NOERROR);
975 bno_cur_gt = NULL;
976 break;
977 }
978 /*
979 * If we reach a big enough entry,
980 * compare the two and pick the best.
981 */
982 if (gtlena >= args->minlen) {
983 args->len =
984 XFS_EXTLEN_MIN(gtlena,
985 args->maxlen);
986 xfs_alloc_fix_len(args);
987 rlen = args->len;
988 gtdiff = xfs_alloc_compute_diff(
989 args->agbno, rlen,
990 args->alignment,
991 gtbno, gtlen, &gtnew);
992 /*
993 * Right side is better.
994 */
995 if (gtdiff < ltdiff) {
996 xfs_btree_del_cursor(
997 bno_cur_lt,
998 XFS_BTREE_NOERROR);
999 bno_cur_lt = NULL;
1000 }
1001 /*
1002 * Left side is better.
1003 */
1004 else {
1005 xfs_btree_del_cursor(
1006 bno_cur_gt,
1007 XFS_BTREE_NOERROR);
1008 bno_cur_gt = NULL;
1009 }
1010 break;
1011 }
1012 /*
1013 * Fell off the right end.
1014 */
Christoph Hellwig637aa502008-10-30 16:55:45 +11001015 if ((error = xfs_btree_increment(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016 bno_cur_gt, 0, &i)))
1017 goto error0;
1018 if (!i) {
1019 xfs_btree_del_cursor(
1020 bno_cur_gt,
1021 XFS_BTREE_NOERROR);
1022 bno_cur_gt = NULL;
1023 break;
1024 }
1025 }
1026 }
1027 /*
1028 * The left side is perfect, trash the right side.
1029 */
1030 else {
1031 xfs_btree_del_cursor(bno_cur_gt,
1032 XFS_BTREE_NOERROR);
1033 bno_cur_gt = NULL;
1034 }
1035 }
1036 /*
1037 * It's the right side that was found first, look left.
1038 */
1039 else {
1040 /*
1041 * Fix up the length.
1042 */
1043 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1044 xfs_alloc_fix_len(args);
1045 rlen = args->len;
1046 gtdiff = xfs_alloc_compute_diff(args->agbno, rlen,
1047 args->alignment, gtbno, gtlen, &gtnew);
1048 /*
1049 * Right side entry isn't perfect.
1050 */
1051 if (gtdiff) {
1052 /*
1053 * Look until we find a better one, run out of
1054 * space, or run off the end.
1055 */
1056 while (bno_cur_lt && bno_cur_gt) {
1057 if ((error = xfs_alloc_get_rec(
1058 bno_cur_lt, &ltbno,
1059 &ltlen, &i)))
1060 goto error0;
1061 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1062 xfs_alloc_compute_aligned(ltbno, ltlen,
1063 args->alignment, args->minlen,
1064 &ltbnoa, &ltlena);
1065 /*
1066 * The right one is clearly better.
1067 */
1068 if (ltbnoa <= args->agbno - gtdiff) {
1069 xfs_btree_del_cursor(
1070 bno_cur_lt,
1071 XFS_BTREE_NOERROR);
1072 bno_cur_lt = NULL;
1073 break;
1074 }
1075 /*
1076 * If we reach a big enough entry,
1077 * compare the two and pick the best.
1078 */
1079 if (ltlena >= args->minlen) {
1080 args->len = XFS_EXTLEN_MIN(
1081 ltlena, args->maxlen);
1082 xfs_alloc_fix_len(args);
1083 rlen = args->len;
1084 ltdiff = xfs_alloc_compute_diff(
1085 args->agbno, rlen,
1086 args->alignment,
1087 ltbno, ltlen, &ltnew);
1088 /*
1089 * Left side is better.
1090 */
1091 if (ltdiff < gtdiff) {
1092 xfs_btree_del_cursor(
1093 bno_cur_gt,
1094 XFS_BTREE_NOERROR);
1095 bno_cur_gt = NULL;
1096 }
1097 /*
1098 * Right side is better.
1099 */
1100 else {
1101 xfs_btree_del_cursor(
1102 bno_cur_lt,
1103 XFS_BTREE_NOERROR);
1104 bno_cur_lt = NULL;
1105 }
1106 break;
1107 }
1108 /*
1109 * Fell off the left end.
1110 */
Christoph Hellwig8df4da42008-10-30 16:55:58 +11001111 if ((error = xfs_btree_decrement(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112 bno_cur_lt, 0, &i)))
1113 goto error0;
1114 if (!i) {
1115 xfs_btree_del_cursor(bno_cur_lt,
1116 XFS_BTREE_NOERROR);
1117 bno_cur_lt = NULL;
1118 break;
1119 }
1120 }
1121 }
1122 /*
1123 * The right side is perfect, trash the left side.
1124 */
1125 else {
1126 xfs_btree_del_cursor(bno_cur_lt,
1127 XFS_BTREE_NOERROR);
1128 bno_cur_lt = NULL;
1129 }
1130 }
1131 }
1132 /*
1133 * If we couldn't get anything, give up.
1134 */
1135 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001136 trace_xfs_alloc_size_neither(args);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137 args->agbno = NULLAGBLOCK;
1138 return 0;
1139 }
1140 /*
1141 * At this point we have selected a freespace entry, either to the
1142 * left or to the right. If it's on the right, copy all the
1143 * useful variables to the "left" set so we only have one
1144 * copy of this code.
1145 */
1146 if (bno_cur_gt) {
1147 bno_cur_lt = bno_cur_gt;
1148 bno_cur_gt = NULL;
1149 ltbno = gtbno;
1150 ltbnoa = gtbnoa;
1151 ltlen = gtlen;
1152 ltlena = gtlena;
1153 j = 1;
1154 } else
1155 j = 0;
1156 /*
1157 * Fix up the length and compute the useful address.
1158 */
1159 ltend = ltbno + ltlen;
1160 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1161 xfs_alloc_fix_len(args);
1162 if (!xfs_alloc_fix_minleft(args)) {
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001163 trace_xfs_alloc_near_nominleft(args);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1165 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1166 return 0;
1167 }
1168 rlen = args->len;
1169 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
1170 ltlen, &ltnew);
1171 ASSERT(ltnew >= ltbno);
1172 ASSERT(ltnew + rlen <= ltend);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001173 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174 args->agbno = ltnew;
1175 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1176 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1177 goto error0;
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001178
1179 if (j)
1180 trace_xfs_alloc_near_greater(args);
1181 else
1182 trace_xfs_alloc_near_lesser(args);
1183
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1185 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1186 return 0;
1187
1188 error0:
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00001189 trace_xfs_alloc_near_error(args);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 if (cnt_cur != NULL)
1191 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1192 if (bno_cur_lt != NULL)
1193 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1194 if (bno_cur_gt != NULL)
1195 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1196 return error;
1197}
1198
1199/*
1200 * Allocate a variable extent anywhere in the allocation group agno.
1201 * Extent's length (returned in len) will be between minlen and maxlen,
1202 * and of the form k * prod + mod unless there's nothing that large.
1203 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1204 */
STATIC int			/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */

	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;
	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &fbno,
				&flen, &i)))
			goto error0;
		if (i == 0 || flen == 0) {
			/* Tree and freelist both came up empty: no space. */
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	/*
	 * There's a freespace as big as maxlen+alignment-1, get it.
	 */
	else {
		if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	xfs_alloc_compute_aligned(fbno, flen, args->alignment, args->minlen,
		&rbno, &rlen);
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	/* The aligned extent must lie inside the raw extent we found. */
	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		/*
		 * Walk left through smaller by-size records, keeping the
		 * best aligned length seen so far.
		 */
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			/*
			 * Records only get shorter as we go left; once the
			 * raw length can't beat the best aligned length,
			 * nothing further left can either.
			 */
			if (flen < bestrlen)
				break;
			xfs_alloc_compute_aligned(fbno, flen, args->alignment,
				args->minlen, &rbno, &rlen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		/* Reposition the cursor at the winning record for fixup. */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;	/* space comes from the btrees, not the AGFL */
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	xfs_alloc_fix_len(args);
	if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		trace_xfs_alloc_size_nominleft(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	/* Clear both so a goto error0 below won't free them twice. */
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;
}
1351
1352/*
1353 * Deal with the case where only small freespaces remain.
1354 * Either return the contents of the last freespace record,
1355 * or allocate space from the freelist if there is nothing in the tree.
1356 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;

	/*
	 * Back the by-size cursor up one record; if that succeeds we
	 * have the last (largest) entry left in the tree.
	 */
	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.  Only a single-block, unaligned, non-freelist
	 * request can be satisfied this way.
	 */
	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			if (args->userdata) {
				xfs_buf_t	*bp;

				/*
				 * NOTE(review): the block's buffer is
				 * invalidated for userdata allocations —
				 * presumably to drop any stale metadata
				 * logged against it; confirm.
				 */
				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);
			*stat = 0;	/* allocation came from the freelist */
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;	/* normal return: caller checks *flenp for success */
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}
1438
1439/*
1440 * Free the extent starting at agno/bno for length.
1441 */
STATIC int			/* error */
xfs_free_ag_extent(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer for a.g. freelist header */
	xfs_agnumber_t	agno,	/* allocation group number */
	xfs_agblock_t	bno,	/* starting block number */
	xfs_extlen_t	len,	/* length of extent */
	int		isfl)	/* set if is freelist blocks - no sb acctg */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
	int		error;		/* error return value */
	xfs_agblock_t	gtbno;		/* start of right neighbor block */
	xfs_extlen_t	gtlen;		/* length of right neighbor block */
	int		haveleft;	/* have a left neighbor block */
	int		haveright;	/* have a right neighbor block */
	int		i;		/* temp, result code */
	xfs_agblock_t	ltbno;		/* start of left neighbor block */
	xfs_extlen_t	ltlen;		/* length of left neighbor block */
	xfs_mount_t	*mp;		/* mount point struct for filesystem */
	xfs_agblock_t	nbno;		/* new starting block of freespace */
	xfs_extlen_t	nlen;		/* new length of freespace */

	mp = tp->t_mountp;
	/*
	 * Allocate and initialize a cursor for the by-block btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	cnt_cur = NULL;
	/*
	 * Look for a neighboring block on the left (lower block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {
		/*
		 * There is a block to our left.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
		}
	}
	/*
	 * Look for a neighboring block on the right (higher block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {
		/*
		 * There is a block to our right.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (bno + len < gtbno)
			haveright = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
		}
	}
	/*
	 * Now allocate and initialize a cursor for the by-size tree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
	/*
	 * Have both left and right contiguous neighbors.
	 * Merge all three into a single free block.
	 */
	if (haveleft && haveright) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-block entry for the right block.
		 * (bno_cur still points at it from the increment above.)
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Move the by-block cursor back to the left neighbor.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
#ifdef DEBUG
		/*
		 * Check that this is the right record: delete didn't
		 * mangle the cursor.
		 */
		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(
				i == 1 && xxbno == ltbno && xxlen == ltlen,
				error0);
		}
#endif
		/*
		 * Update remaining by-block entry to the new, joined block.
		 */
		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a left contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveleft) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Back up the by-block cursor to the left neighbor, and
		 * update its length.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a right contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveright) {
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Update the starting block and length of the right
		 * neighbor in the by-block tree.
		 * (bno_cur already points at the right neighbor.)
		 */
		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * No contiguous neighbors.
	 * Insert the new freespace into the by-block tree.
	 */
	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;
	/*
	 * In all cases we need to insert the new freespace in the by-size tree.
	 * The lookup must fail (i == 0): the record can't already exist.
	 */
	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
	if ((error = xfs_btree_insert(cnt_cur, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;
	/*
	 * Update the freespace totals in the ag and superblock.
	 */
	{
		xfs_agf_t	*agf;
		xfs_perag_t	*pag;		/* per allocation group data */

		pag = xfs_perag_get(mp, agno);
		pag->pagf_freeblks += len;
		xfs_perag_put(pag);

		agf = XFS_BUF_TO_AGF(agbp);
		be32_add_cpu(&agf->agf_freeblks, len);
		xfs_trans_agblocks_delta(tp, len);
		XFS_WANT_CORRUPTED_GOTO(
			be32_to_cpu(agf->agf_freeblks) <=
			be32_to_cpu(agf->agf_length),
			error0);
		xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
		/* Freelist blocks are not counted in the superblock. */
		if (!isfl)
			xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
		XFS_STATS_INC(xs_freex);
		XFS_STATS_ADD(xs_freeb, len);
	}

	trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);

	/*
	 * Since blocks move to the free list without the coordination
	 * used in xfs_bmap_finish, we can't allow block to be available
	 * for reallocation and non-transaction writing (user data)
	 * until we know that the transaction that moved it to the free
	 * list is permanently on disk. We track the blocks by declaring
	 * these blocks as "busy"; the busy list is maintained on a per-ag
	 * basis and each transaction records which entries should be removed
	 * when the iclog commits to disk. If a busy block is allocated,
	 * the iclog is pushed up to the LSN that freed the block.
	 */
	xfs_alloc_busy_insert(tp, agno, bno, len);
	return 0;

 error0:
	trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}
1711
1712/*
1713 * Visible (exported) allocation/free functions.
1714 * Some of these are used just by xfs_alloc_btree.c and this file.
1715 */
1716
1717/*
1718 * Compute and fill in value of m_ag_maxlevels.
1719 */
1720void
1721xfs_alloc_compute_maxlevels(
1722 xfs_mount_t *mp) /* file system mount structure */
1723{
1724 int level;
1725 uint maxblocks;
1726 uint maxleafents;
1727 int minleafrecs;
1728 int minnoderecs;
1729
1730 maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
1731 minleafrecs = mp->m_alloc_mnr[0];
1732 minnoderecs = mp->m_alloc_mnr[1];
1733 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
1734 for (level = 1; maxblocks > 1; level++)
1735 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
1736 mp->m_ag_maxlevels = level;
1737}
1738
1739/*
Dave Chinner6cc87642009-03-16 08:29:46 +01001740 * Find the length of the longest extent in an AG.
1741 */
1742xfs_extlen_t
1743xfs_alloc_longest_free_extent(
1744 struct xfs_mount *mp,
1745 struct xfs_perag *pag)
1746{
1747 xfs_extlen_t need, delta = 0;
1748
1749 need = XFS_MIN_FREELIST_PAG(pag, mp);
1750 if (need > pag->pagf_flcount)
1751 delta = need - pag->pagf_flcount;
1752
1753 if (pag->pagf_longest > delta)
1754 return pag->pagf_longest - delta;
1755 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1756}
1757
1758/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 * Decide whether to use this allocation group for this allocation.
1760 * If so, fix up the btree freelist's size.
1761 */
1762STATIC int /* error */
1763xfs_alloc_fix_freelist(
1764 xfs_alloc_arg_t *args, /* allocation argument structure */
1765 int flags) /* XFS_ALLOC_FLAG_... */
1766{
1767 xfs_buf_t *agbp; /* agf buffer pointer */
1768 xfs_agf_t *agf; /* a.g. freespace structure pointer */
1769 xfs_buf_t *agflbp;/* agfl buffer pointer */
1770 xfs_agblock_t bno; /* freelist block */
1771 xfs_extlen_t delta; /* new blocks needed in freelist */
1772 int error; /* error result code */
1773 xfs_extlen_t longest;/* longest extent in allocation group */
1774 xfs_mount_t *mp; /* file system mount point structure */
1775 xfs_extlen_t need; /* total blocks needed in freelist */
1776 xfs_perag_t *pag; /* per-ag information structure */
1777 xfs_alloc_arg_t targs; /* local allocation arguments */
1778 xfs_trans_t *tp; /* transaction pointer */
1779
1780 mp = args->mp;
1781
1782 pag = args->pag;
1783 tp = args->tp;
1784 if (!pag->pagf_init) {
1785 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1786 &agbp)))
1787 return error;
1788 if (!pag->pagf_init) {
Nathan Scott0e1edbd2006-08-10 14:40:41 +10001789 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1790 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 args->agbp = NULL;
1792 return 0;
1793 }
1794 } else
1795 agbp = NULL;
1796
Nathan Scott0e1edbd2006-08-10 14:40:41 +10001797 /*
1798 * If this is a metadata preferred pag and we are user data
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 * then try somewhere else if we are not being asked to
1800 * try harder at this point
1801 */
Nathan Scott0e1edbd2006-08-10 14:40:41 +10001802 if (pag->pagf_metadata && args->userdata &&
1803 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
1804 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 args->agbp = NULL;
1806 return 0;
1807 }
1808
Nathan Scott0e1edbd2006-08-10 14:40:41 +10001809 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
Nathan Scott0e1edbd2006-08-10 14:40:41 +10001810 /*
1811 * If it looks like there isn't a long enough extent, or enough
1812 * total blocks, reject it.
1813 */
Dave Chinner6cc87642009-03-16 08:29:46 +01001814 need = XFS_MIN_FREELIST_PAG(pag, mp);
1815 longest = xfs_alloc_longest_free_extent(mp, pag);
Nathan Scott0e1edbd2006-08-10 14:40:41 +10001816 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1817 longest ||
1818 ((int)(pag->pagf_freeblks + pag->pagf_flcount -
1819 need - args->total) < (int)args->minleft)) {
1820 if (agbp)
1821 xfs_trans_brelse(tp, agbp);
1822 args->agbp = NULL;
1823 return 0;
1824 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 }
Nathan Scott0e1edbd2006-08-10 14:40:41 +10001826
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 /*
1828 * Get the a.g. freespace buffer.
1829 * Can fail if we're not blocking on locks, and it's held.
1830 */
1831 if (agbp == NULL) {
1832 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1833 &agbp)))
1834 return error;
1835 if (agbp == NULL) {
Nathan Scott0e1edbd2006-08-10 14:40:41 +10001836 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1837 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 args->agbp = NULL;
1839 return 0;
1840 }
1841 }
1842 /*
1843 * Figure out how many blocks we should have in the freelist.
1844 */
1845 agf = XFS_BUF_TO_AGF(agbp);
1846 need = XFS_MIN_FREELIST(agf, mp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 /*
1848 * If there isn't enough total or single-extent, reject it.
1849 */
Nathan Scott0e1edbd2006-08-10 14:40:41 +10001850 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1851 delta = need > be32_to_cpu(agf->agf_flcount) ?
1852 (need - be32_to_cpu(agf->agf_flcount)) : 0;
1853 longest = be32_to_cpu(agf->agf_longest);
1854 longest = (longest > delta) ? (longest - delta) :
1855 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
1856 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1857 longest ||
1858 ((int)(be32_to_cpu(agf->agf_freeblks) +
1859 be32_to_cpu(agf->agf_flcount) - need - args->total) <
1860 (int)args->minleft)) {
1861 xfs_trans_brelse(tp, agbp);
1862 args->agbp = NULL;
1863 return 0;
1864 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 }
1866 /*
1867 * Make the freelist shorter if it's too long.
1868 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11001869 while (be32_to_cpu(agf->agf_flcount) > need) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 xfs_buf_t *bp;
1871
David Chinner92821e22007-05-24 15:26:31 +10001872 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
1873 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 return error;
1875 if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
1876 return error;
1877 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
1878 xfs_trans_binval(tp, bp);
1879 }
1880 /*
1881 * Initialize the args structure.
1882 */
1883 targs.tp = tp;
1884 targs.mp = mp;
1885 targs.agbp = agbp;
1886 targs.agno = args->agno;
1887 targs.mod = targs.minleft = targs.wasdel = targs.userdata =
1888 targs.minalignslop = 0;
1889 targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
1890 targs.type = XFS_ALLOCTYPE_THIS_AG;
1891 targs.pag = pag;
1892 if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
1893 return error;
1894 /*
1895 * Make the freelist longer if it's too short.
1896 */
Christoph Hellwig16259e72005-11-02 15:11:25 +11001897 while (be32_to_cpu(agf->agf_flcount) < need) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 targs.agbno = 0;
Christoph Hellwig16259e72005-11-02 15:11:25 +11001899 targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 /*
1901 * Allocate as many blocks as possible at once.
1902 */
Nathan Scotte63a3692006-05-08 19:51:58 +10001903 if ((error = xfs_alloc_ag_vextent(&targs))) {
1904 xfs_trans_brelse(tp, agflbp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 return error;
Nathan Scotte63a3692006-05-08 19:51:58 +10001906 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 /*
1908 * Stop if we run out. Won't happen if callers are obeying
1909 * the restrictions correctly. Can happen for free calls
1910 * on a completely full ag.
1911 */
Yingping Lud210a282006-06-09 14:55:18 +10001912 if (targs.agbno == NULLAGBLOCK) {
Nathan Scott0e1edbd2006-08-10 14:40:41 +10001913 if (flags & XFS_ALLOC_FLAG_FREEING)
1914 break;
1915 xfs_trans_brelse(tp, agflbp);
1916 args->agbp = NULL;
1917 return 0;
Yingping Lud210a282006-06-09 14:55:18 +10001918 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 /*
1920 * Put each allocated block on the list.
1921 */
1922 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
David Chinner92821e22007-05-24 15:26:31 +10001923 error = xfs_alloc_put_freelist(tp, agbp,
1924 agflbp, bno, 0);
1925 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 return error;
1927 }
1928 }
Nathan Scotte63a3692006-05-08 19:51:58 +10001929 xfs_trans_brelse(tp, agflbp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 args->agbp = agbp;
1931 return 0;
1932}
1933
1934/*
1935 * Get a block from the freelist.
1936 * Returns with the buffer for the block gotten.
1937 */
1938int /* error */
1939xfs_alloc_get_freelist(
1940 xfs_trans_t *tp, /* transaction pointer */
1941 xfs_buf_t *agbp, /* buffer containing the agf structure */
David Chinner92821e22007-05-24 15:26:31 +10001942 xfs_agblock_t *bnop, /* block address retrieved from freelist */
1943 int btreeblk) /* destination is a AGF btree */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944{
1945 xfs_agf_t *agf; /* a.g. freespace structure */
1946 xfs_agfl_t *agfl; /* a.g. freelist structure */
1947 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
1948 xfs_agblock_t bno; /* block number returned */
1949 int error;
David Chinner92821e22007-05-24 15:26:31 +10001950 int logflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 xfs_mount_t *mp; /* mount structure */
1952 xfs_perag_t *pag; /* per allocation group data */
1953
1954 agf = XFS_BUF_TO_AGF(agbp);
1955 /*
1956 * Freelist is empty, give up.
1957 */
1958 if (!agf->agf_flcount) {
1959 *bnop = NULLAGBLOCK;
1960 return 0;
1961 }
1962 /*
1963 * Read the array of free blocks.
1964 */
1965 mp = tp->t_mountp;
1966 if ((error = xfs_alloc_read_agfl(mp, tp,
Christoph Hellwig16259e72005-11-02 15:11:25 +11001967 be32_to_cpu(agf->agf_seqno), &agflbp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 return error;
1969 agfl = XFS_BUF_TO_AGFL(agflbp);
1970 /*
1971 * Get the block number and update the data structures.
1972 */
Christoph Hellwige2101002006-09-28 10:56:51 +10001973 bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
Marcin Slusarz413d57c2008-02-13 15:03:29 -08001974 be32_add_cpu(&agf->agf_flfirst, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 xfs_trans_brelse(tp, agflbp);
Christoph Hellwig16259e72005-11-02 15:11:25 +11001976 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 agf->agf_flfirst = 0;
Dave Chinnera862e0f2010-01-11 11:47:41 +00001978
1979 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
Marcin Slusarz413d57c2008-02-13 15:03:29 -08001980 be32_add_cpu(&agf->agf_flcount, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 xfs_trans_agflist_delta(tp, -1);
1982 pag->pagf_flcount--;
Dave Chinnera862e0f2010-01-11 11:47:41 +00001983 xfs_perag_put(pag);
David Chinner92821e22007-05-24 15:26:31 +10001984
1985 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
1986 if (btreeblk) {
Marcin Slusarz413d57c2008-02-13 15:03:29 -08001987 be32_add_cpu(&agf->agf_btreeblks, 1);
David Chinner92821e22007-05-24 15:26:31 +10001988 pag->pagf_btreeblks++;
1989 logflags |= XFS_AGF_BTREEBLKS;
1990 }
1991
David Chinner92821e22007-05-24 15:26:31 +10001992 xfs_alloc_log_agf(tp, agbp, logflags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 *bnop = bno;
1994
1995 /*
Dave Chinnered3b4d62010-05-21 12:07:08 +10001996 * As blocks are freed, they are added to the per-ag busy list and
1997 * remain there until the freeing transaction is committed to disk.
1998 * Now that we have allocated blocks, this list must be searched to see
1999 * if a block is being reused. If one is, then the freeing transaction
2000 * must be pushed to disk before this transaction.
2001 *
2002 * We do this by setting the current transaction to a sync transaction
2003 * which guarantees that the freeing transaction is on disk before this
2004 * transaction. This is done instead of a synchronous log force here so
2005 * that we don't sit and wait with the AGF locked in the transaction
2006 * during the log force.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 */
Dave Chinnered3b4d62010-05-21 12:07:08 +10002008 if (xfs_alloc_busy_search(mp, be32_to_cpu(agf->agf_seqno), bno, 1))
2009 xfs_trans_set_sync(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 return 0;
2011}
2012
/*
 * Log the given fields from the agf structure.
 *
 * @fields is a mask of XFS_AGF_* bits.  xfs_btree_offsets() maps the set
 * bits through the offsets[] table below to a byte range, so the table
 * order must match the XFS_AGF_* bit definitions; the trailing
 * sizeof(xfs_agf_t) entry acts as the end-of-structure sentinel used to
 * size the final field.
 */
void
xfs_alloc_log_agf(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*bp,	/* buffer for a.g. freelist header */
	int		fields)	/* mask of fields to be logged (XFS_AGF_...) */
{
	int	first;		/* first byte offset */
	int	last;		/* last byte offset */
	static const short	offsets[] = {
		offsetof(xfs_agf_t, agf_magicnum),
		offsetof(xfs_agf_t, agf_versionnum),
		offsetof(xfs_agf_t, agf_seqno),
		offsetof(xfs_agf_t, agf_length),
		offsetof(xfs_agf_t, agf_roots[0]),
		offsetof(xfs_agf_t, agf_levels[0]),
		offsetof(xfs_agf_t, agf_flfirst),
		offsetof(xfs_agf_t, agf_fllast),
		offsetof(xfs_agf_t, agf_flcount),
		offsetof(xfs_agf_t, agf_freeblks),
		offsetof(xfs_agf_t, agf_longest),
		offsetof(xfs_agf_t, agf_btreeblks),
		sizeof(xfs_agf_t)
	};

	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);

	/* Convert the field mask into a contiguous [first, last] byte range. */
	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}
2045
2046/*
2047 * Interface for inode allocation to force the pag data to be initialized.
2048 */
2049int /* error */
2050xfs_alloc_pagf_init(
2051 xfs_mount_t *mp, /* file system mount structure */
2052 xfs_trans_t *tp, /* transaction pointer */
2053 xfs_agnumber_t agno, /* allocation group number */
2054 int flags) /* XFS_ALLOC_FLAGS_... */
2055{
2056 xfs_buf_t *bp;
2057 int error;
2058
2059 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2060 return error;
2061 if (bp)
2062 xfs_trans_brelse(tp, bp);
2063 return 0;
2064}
2065
2066/*
2067 * Put the block on the freelist for the allocation group.
2068 */
2069int /* error */
2070xfs_alloc_put_freelist(
2071 xfs_trans_t *tp, /* transaction pointer */
2072 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2073 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
David Chinner92821e22007-05-24 15:26:31 +10002074 xfs_agblock_t bno, /* block being freed */
2075 int btreeblk) /* block came from a AGF btree */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076{
2077 xfs_agf_t *agf; /* a.g. freespace structure */
2078 xfs_agfl_t *agfl; /* a.g. free block array */
Christoph Hellwige2101002006-09-28 10:56:51 +10002079 __be32 *blockp;/* pointer to array entry */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 int error;
David Chinner92821e22007-05-24 15:26:31 +10002081 int logflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 xfs_mount_t *mp; /* mount structure */
2083 xfs_perag_t *pag; /* per allocation group data */
2084
2085 agf = XFS_BUF_TO_AGF(agbp);
2086 mp = tp->t_mountp;
2087
2088 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
Christoph Hellwig16259e72005-11-02 15:11:25 +11002089 be32_to_cpu(agf->agf_seqno), &agflbp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 return error;
2091 agfl = XFS_BUF_TO_AGFL(agflbp);
Marcin Slusarz413d57c2008-02-13 15:03:29 -08002092 be32_add_cpu(&agf->agf_fllast, 1);
Christoph Hellwig16259e72005-11-02 15:11:25 +11002093 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 agf->agf_fllast = 0;
Dave Chinnera862e0f2010-01-11 11:47:41 +00002095
2096 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
Marcin Slusarz413d57c2008-02-13 15:03:29 -08002097 be32_add_cpu(&agf->agf_flcount, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 xfs_trans_agflist_delta(tp, 1);
2099 pag->pagf_flcount++;
David Chinner92821e22007-05-24 15:26:31 +10002100
2101 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2102 if (btreeblk) {
Marcin Slusarz413d57c2008-02-13 15:03:29 -08002103 be32_add_cpu(&agf->agf_btreeblks, -1);
David Chinner92821e22007-05-24 15:26:31 +10002104 pag->pagf_btreeblks--;
2105 logflags |= XFS_AGF_BTREEBLKS;
2106 }
Dave Chinnera862e0f2010-01-11 11:47:41 +00002107 xfs_perag_put(pag);
David Chinner92821e22007-05-24 15:26:31 +10002108
David Chinner92821e22007-05-24 15:26:31 +10002109 xfs_alloc_log_agf(tp, agbp, logflags);
2110
Christoph Hellwig16259e72005-11-02 15:11:25 +11002111 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
2112 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
Christoph Hellwige2101002006-09-28 10:56:51 +10002113 *blockp = cpu_to_be32(bno);
David Chinner92821e22007-05-24 15:26:31 +10002114 xfs_alloc_log_agf(tp, agbp, logflags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 xfs_trans_log_buf(tp, agflbp,
2116 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
2117 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
2118 sizeof(xfs_agblock_t) - 1));
2119 return 0;
2120}
2121
/*
 * Read in the allocation group header (free/alloc section).
 *
 * Reads the AGF block for @agno into *bpp and validates its basic
 * on-disk invariants.  With trylock-style @flags the buffer may be
 * unavailable; then *bpp is NULL and 0 is returned.  Returns
 * EFSCORRUPTED if validation fails (or is forced via error injection).
 */
int					/* error */
xfs_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XFS_BUF_ */
	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
{
	struct xfs_agf	*agf;		/* ag freelist header */
	int		agf_ok;		/* set if agf is consistent */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), flags, bpp);
	if (error)
		return error;
	if (!*bpp)
		return 0;	/* buffer not available (e.g. trylock) */

	ASSERT(!XFS_BUF_GETERROR(*bpp));
	agf = XFS_BUF_TO_AGF(*bpp);

	/*
	 * Validate the magic number of the agf block.
	 * Also sanity-check counters and indices against AG/AGFL bounds,
	 * and that the header's sequence number matches the AG we asked for.
	 */
	agf_ok =
		be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC &&
		XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
		be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
		be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_seqno) == agno;
	/* agf_btreeblks is only validated with lazy superblock counters. */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb))
		agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
				be32_to_cpu(agf->agf_length);
	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
			XFS_RANDOM_ALLOC_READ_AGF))) {
		XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
				     XFS_ERRLEVEL_LOW, mp, agf);
		xfs_trans_brelse(tp, *bpp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGF, XFS_AGF_REF);
	return 0;
}
2174
/*
 * Read in the allocation group header (free/alloc section).
 *
 * Wrapper around xfs_read_agf() that maps XFS_ALLOC_FLAG_TRYLOCK onto a
 * buffer trylock and, on the first successful read of an AG, seeds the
 * in-core per-ag freespace data (xfs_perag) from the on-disk AGF.
 */
int					/* error */
xfs_alloc_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XFS_ALLOC_FLAG_... */
	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
{
	struct xfs_agf		*agf;	/* ag freelist header */
	struct xfs_perag	*pag;	/* per allocation group data */
	int			error;

	ASSERT(agno != NULLAGNUMBER);

	error = xfs_read_agf(mp, tp, agno,
			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
			bpp);
	if (error)
		return error;
	if (!*bpp)
		return 0;	/* trylock didn't get the buffer */
	ASSERT(!XFS_BUF_GETERROR(*bpp));

	agf = XFS_BUF_TO_AGF(*bpp);
	pag = xfs_perag_get(mp, agno);
	if (!pag->pagf_init) {
		/* First read of this AGF: copy counters into the perag. */
		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
		pag->pagf_levels[XFS_BTNUM_BNOi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
		pag->pagf_levels[XFS_BTNUM_CNTi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
		/* Busy-extent tracking state starts empty. */
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
		pag->pagf_init = 1;
	}
#ifdef DEBUG
	else if (!XFS_FORCED_SHUTDOWN(mp)) {
		/* On re-reads the in-core copies must match the AGF. */
		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
	}
#endif
	xfs_perag_put(pag);
	return 0;
}
2232
/*
 * Allocate an extent (variable-size).
 * Depending on the allocation type, we either look in a single allocation
 * group or loop over the allocation groups to find the result.
 *
 * On return args->fsbno is the allocated extent's start block, or
 * NULLFSBLOCK if nothing could be allocated; a nonzero return is an
 * error code.
 */
int				/* error */
xfs_alloc_vextent(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_agblock_t	agsize;	/* allocation group size */
	int		error;
	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
	xfs_extlen_t	minleft;/* minimum left value, temp copy */
	xfs_mount_t	*mp;	/* mount structure pointer */
	xfs_agnumber_t	sagno;	/* starting allocation group number */
	xfs_alloctype_t	type;	/* input allocation type */
	int		bump_rotor = 0;	/* advance m_agfrotor when done */
	int		no_min = 0;	/* final pass: minleft forced to 0 */
	xfs_agnumber_t	rotorstep = xfs_rotorstep; /* inode32 agf stepper */

	mp = args->mp;
	type = args->otype = args->type;
	args->agbno = NULLAGBLOCK;
	/*
	 * Just fix this up, for the case where the last a.g. is shorter
	 * (or there's only one a.g.) and the caller couldn't easily figure
	 * that out (xfs_bmap_alloc).
	 */
	agsize = mp->m_sb.sb_agblocks;
	if (args->maxlen > agsize)
		args->maxlen = agsize;
	if (args->alignment == 0)
		args->alignment = 1;
	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->minlen <= agsize);
	ASSERT(args->mod < args->prod);
	/* Same checks as the ASSERTs above, but fail soft in production. */
	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
	    args->minlen > args->maxlen || args->minlen > agsize ||
	    args->mod >= args->prod) {
		args->fsbno = NULLFSBLOCK;
		trace_xfs_alloc_vextent_badargs(args);
		return 0;
	}
	/* Saved so it can be restored after fix_freelist clobbers it. */
	minleft = args->minleft;

	switch (type) {
	case XFS_ALLOCTYPE_THIS_AG:
	case XFS_ALLOCTYPE_NEAR_BNO:
	case XFS_ALLOCTYPE_THIS_BNO:
		/*
		 * These three force us into a single a.g.
		 */
		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
		args->pag = xfs_perag_get(mp, args->agno);
		args->minleft = 0;
		error = xfs_alloc_fix_freelist(args, 0);
		args->minleft = minleft;
		if (error) {
			trace_xfs_alloc_vextent_nofix(args);
			goto error0;
		}
		if (!args->agbp) {
			/* AG rejected; no allocation is possible. */
			trace_xfs_alloc_vextent_noagbp(args);
			break;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		if ((error = xfs_alloc_ag_vextent(args)))
			goto error0;
		break;
	case XFS_ALLOCTYPE_START_BNO:
		/*
		 * Try near allocation first, then anywhere-in-ag after
		 * the first a.g. fails.
		 */
		if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
			/* inode32: steer initial data at the AG rotor. */
			args->fsbno = XFS_AGB_TO_FSB(mp,
					((mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount), 0);
			bump_rotor = 1;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
		/* FALLTHROUGH */
	case XFS_ALLOCTYPE_ANY_AG:
	case XFS_ALLOCTYPE_START_AG:
	case XFS_ALLOCTYPE_FIRST_AG:
		/*
		 * Rotate through the allocation groups looking for a winner.
		 */
		if (type == XFS_ALLOCTYPE_ANY_AG) {
			/*
			 * Start with the last place we left off.
			 */
			args->agno = sagno = (mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount;
			args->type = XFS_ALLOCTYPE_THIS_AG;
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		} else if (type == XFS_ALLOCTYPE_FIRST_AG) {
			/*
			 * Start with allocation group given by bno.
			 */
			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			args->type = XFS_ALLOCTYPE_THIS_AG;
			sagno = 0;
			flags = 0;
		} else {
			if (type == XFS_ALLOCTYPE_START_AG)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * Start with the given allocation group.
			 */
			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		}
		/*
		 * Loop over allocation groups twice; first time with
		 * trylock set, second time without.
		 */
		for (;;) {
			args->pag = xfs_perag_get(mp, args->agno);
			if (no_min) args->minleft = 0;
			error = xfs_alloc_fix_freelist(args, flags);
			args->minleft = minleft;
			if (error) {
				trace_xfs_alloc_vextent_nofix(args);
				goto error0;
			}
			/*
			 * If we get a buffer back then the allocation will fly.
			 */
			if (args->agbp) {
				if ((error = xfs_alloc_ag_vextent(args)))
					goto error0;
				break;
			}

			trace_xfs_alloc_vextent_loopfailed(args);

			/*
			 * Didn't work, figure out the next iteration.
			 */
			if (args->agno == sagno &&
			    type == XFS_ALLOCTYPE_START_BNO)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * For the first allocation, we can try any AG to get
			 * space.  However, if we already have allocated a
			 * block, we don't want to try AGs whose number is below
			 * sagno. Otherwise, we may end up with out-of-order
			 * locking of AGF, which might cause deadlock.
			 */
			if (++(args->agno) == mp->m_sb.sb_agcount) {
				if (args->firstblock != NULLFSBLOCK)
					args->agno = sagno;
				else
					args->agno = 0;
			}
			/*
			 * Reached the starting a.g., must either be done
			 * or switch to non-trylock mode.
			 */
			if (args->agno == sagno) {
				if (no_min == 1) {
					/* Third pass failed too: give up. */
					args->agbno = NULLAGBLOCK;
					trace_xfs_alloc_vextent_allfailed(args);
					break;
				}
				if (flags == 0) {
					/* Final pass: relax minleft to 0. */
					no_min = 1;
				} else {
					/* Second pass: drop trylock. */
					flags = 0;
					if (type == XFS_ALLOCTYPE_START_BNO) {
						args->agbno = XFS_FSB_TO_AGBNO(mp,
							args->fsbno);
						args->type = XFS_ALLOCTYPE_NEAR_BNO;
					}
				}
			}
			xfs_perag_put(args->pag);
		}
		if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
			/* Remember where to start the next rotor search. */
			if (args->agno == sagno)
				mp->m_agfrotor = (mp->m_agfrotor + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
			else
				mp->m_agfrotor = (args->agno * rotorstep + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
		}
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (args->agbno == NULLAGBLOCK)
		args->fsbno = NULLFSBLOCK;
	else {
		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
		ASSERT(args->len >= args->minlen);
		ASSERT(args->len <= args->maxlen);
		ASSERT(args->agbno % args->alignment == 0);
		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
			args->len);
#endif
	}
	xfs_perag_put(args->pag);
	return 0;
error0:
	xfs_perag_put(args->pag);
	return error;
}
2448
2449/*
2450 * Free an extent.
2451 * Just break up the extent address and hand off to xfs_free_ag_extent
2452 * after fixing up the freelist.
2453 */
2454int /* error */
2455xfs_free_extent(
2456 xfs_trans_t *tp, /* transaction pointer */
2457 xfs_fsblock_t bno, /* starting block number of extent */
2458 xfs_extlen_t len) /* length of extent */
2459{
Nathan Scott0e1edbd2006-08-10 14:40:41 +10002460 xfs_alloc_arg_t args;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 int error;
2462
2463 ASSERT(len != 0);
Nathan Scott0e1edbd2006-08-10 14:40:41 +10002464 memset(&args, 0, sizeof(xfs_alloc_arg_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 args.tp = tp;
2466 args.mp = tp->t_mountp;
2467 args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
2468 ASSERT(args.agno < args.mp->m_sb.sb_agcount);
2469 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
Dave Chinnera862e0f2010-01-11 11:47:41 +00002470 args.pag = xfs_perag_get(args.mp, args.agno);
Yingping Lud210a282006-06-09 14:55:18 +10002471 if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 goto error0;
2473#ifdef DEBUG
2474 ASSERT(args.agbp != NULL);
Nathan Scott0e1edbd2006-08-10 14:40:41 +10002475 ASSERT((args.agbno + len) <=
2476 be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477#endif
Nathan Scott0e1edbd2006-08-10 14:40:41 +10002478 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479error0:
Dave Chinnera862e0f2010-01-11 11:47:41 +00002480 xfs_perag_put(args.pag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 return error;
2482}
2483
2484
2485/*
2486 * AG Busy list management
2487 * The busy list contains block ranges that have been freed but whose
Nathan Scottc41564b2006-03-29 08:55:14 +10002488 * transactions have not yet hit disk. If any block listed in a busy
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489 * list is reused, the transaction that freed it must be forced to disk
2490 * before continuing to use the block.
2491 *
Dave Chinnered3b4d62010-05-21 12:07:08 +10002492 * xfs_alloc_busy_insert - add to the per-ag busy list
2493 * xfs_alloc_busy_clear - remove an item from the per-ag busy list
2494 * xfs_alloc_busy_search - search for a busy extent
2495 */
2496
2497/*
2498 * Insert a new extent into the busy tree.
2499 *
2500 * The busy extent tree is indexed by the start block of the busy extent.
2501 * there can be multiple overlapping ranges in the busy extent tree but only
2502 * ever one entry at a given start block. The reason for this is that
2503 * multi-block extents can be freed, then smaller chunks of that extent
2504 * allocated and freed again before the first transaction commit is on disk.
2505 * If the exact same start block is freed a second time, we have to wait for
2506 * that busy extent to pass out of the tree before the new extent is inserted.
2507 * There are two main cases we have to handle here.
2508 *
2509 * The first case is a transaction that triggers a "free - allocate - free"
2510 * cycle. This can occur during btree manipulations as a btree block is freed
2511 * to the freelist, then allocated from the free list, then freed again. In
 * this case, the second extent free is what triggers the duplicate and as
2513 * such the transaction IDs should match. Because the extent was allocated in
2514 * this transaction, the transaction must be marked as synchronous. This is
2515 * true for all cases where the free/alloc/free occurs in the one transaction,
2516 * hence the addition of the ASSERT(tp->t_flags & XFS_TRANS_SYNC) to this case.
2517 * This serves to catch violations of the second case quite effectively.
2518 *
2519 * The second case is where the free/alloc/free occur in different
2520 * transactions. In this case, the thread freeing the extent the second time
2521 * can't mark the extent busy immediately because it is already tracked in a
2522 * transaction that may be committing. When the log commit for the existing
2523 * busy extent completes, the busy extent will be removed from the tree. If we
2524 * allow the second busy insert to continue using that busy extent structure,
2525 * it can be freed before this transaction is safely in the log. Hence our
2526 * only option in this case is to force the log to remove the existing busy
2527 * extent from the list before we insert the new one with the current
2528 * transaction ID.
2529 *
2530 * The problem we are trying to avoid in the free-alloc-free in separate
2531 * transactions is most easily described with a timeline:
2532 *
2533 * Thread 1 Thread 2 Thread 3 xfslogd
2534 * xact alloc
2535 * free X
2536 * mark busy
2537 * commit xact
2538 * free xact
2539 * xact alloc
2540 * alloc X
2541 * busy search
2542 * mark xact sync
2543 * commit xact
2544 * free xact
2545 * force log
2546 * checkpoint starts
2547 * ....
2548 * xact alloc
2549 * free X
2550 * mark busy
2551 * finds match
2552 * *** KABOOM! ***
2553 * ....
2554 * log IO completes
2555 * unbusy X
2556 * checkpoint completes
2557 *
2558 * By issuing a log force in thread 3 @ "KABOOM", the thread will block until
2559 * the checkpoint completes, and the busy extent it matched will have been
2560 * removed from the tree when it is woken. Hence it can then continue safely.
2561 *
2562 * However, to ensure this matching process is robust, we need to use the
2563 * transaction ID for identifying the transaction, as delayed logging results in
2564 * the busy extent and transaction lifecycles being different. i.e. the busy
2565 * extent is active for a lot longer than the transaction. Hence the
2566 * transaction structure can be freed and reallocated, and then used to mark
2567 * the same extent busy again in the new transaction. In this case the new
2568 * will have a different tid but can have the same address, and hence we need
2569 * to check against the tid.
2570 *
2571 * Future: for delayed logging, we could avoid the log force if the extent was
2572 * first freed in the current checkpoint sequence. This, however, requires the
2573 * ability to pin the current checkpoint in memory until this transaction
2574 * commits to ensure that both the original free and the current one combine
2575 * logically into the one checkpoint. If the checkpoint sequences are
2576 * different, however, we still need to wait on a log force.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 */
2578void
Dave Chinnered3b4d62010-05-21 12:07:08 +10002579xfs_alloc_busy_insert(
2580 struct xfs_trans *tp,
2581 xfs_agnumber_t agno,
2582 xfs_agblock_t bno,
2583 xfs_extlen_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584{
Dave Chinnered3b4d62010-05-21 12:07:08 +10002585 struct xfs_busy_extent *new;
2586 struct xfs_busy_extent *busyp;
Dave Chinnera862e0f2010-01-11 11:47:41 +00002587 struct xfs_perag *pag;
Dave Chinnered3b4d62010-05-21 12:07:08 +10002588 struct rb_node **rbp;
2589 struct rb_node *parent;
2590 int match;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591
Dave Chinnered3b4d62010-05-21 12:07:08 +10002592
2593 new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
2594 if (!new) {
2595 /*
2596 * No Memory! Since it is now not possible to track the free
2597 * block, make this a synchronous transaction to insure that
2598 * the block is not reused before this transaction commits.
2599 */
2600 trace_xfs_alloc_busy(tp, agno, bno, len, 1);
2601 xfs_trans_set_sync(tp);
2602 return;
2603 }
2604
2605 new->agno = agno;
2606 new->bno = bno;
2607 new->length = len;
2608 new->tid = xfs_log_get_trans_ident(tp);
2609
2610 INIT_LIST_HEAD(&new->list);
2611
2612 /* trace before insert to be able to see failed inserts */
2613 trace_xfs_alloc_busy(tp, agno, bno, len, 0);
2614
2615 pag = xfs_perag_get(tp->t_mountp, new->agno);
2616restart:
Dave Chinnera862e0f2010-01-11 11:47:41 +00002617 spin_lock(&pag->pagb_lock);
Dave Chinnered3b4d62010-05-21 12:07:08 +10002618 rbp = &pag->pagb_tree.rb_node;
2619 parent = NULL;
2620 busyp = NULL;
2621 match = 0;
2622 while (*rbp && match >= 0) {
2623 parent = *rbp;
2624 busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625
Dave Chinnered3b4d62010-05-21 12:07:08 +10002626 if (new->bno < busyp->bno) {
2627 /* may overlap, but exact start block is lower */
2628 rbp = &(*rbp)->rb_left;
2629 if (new->bno + new->length > busyp->bno)
2630 match = busyp->tid == new->tid ? 1 : -1;
2631 } else if (new->bno > busyp->bno) {
2632 /* may overlap, but exact start block is higher */
2633 rbp = &(*rbp)->rb_right;
2634 if (bno < busyp->bno + busyp->length)
2635 match = busyp->tid == new->tid ? 1 : -1;
2636 } else {
2637 match = busyp->tid == new->tid ? 1 : -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 break;
2639 }
2640 }
Dave Chinnered3b4d62010-05-21 12:07:08 +10002641 if (match < 0) {
2642 /* overlap marked busy in different transaction */
2643 spin_unlock(&pag->pagb_lock);
2644 xfs_log_force(tp->t_mountp, XFS_LOG_SYNC);
2645 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 }
Dave Chinnered3b4d62010-05-21 12:07:08 +10002647 if (match > 0) {
2648 /*
2649 * overlap marked busy in same transaction. Update if exact
2650 * start block match, otherwise combine the busy extents into
2651 * a single range.
2652 */
2653 if (busyp->bno == new->bno) {
2654 busyp->length = max(busyp->length, new->length);
2655 spin_unlock(&pag->pagb_lock);
2656 ASSERT(tp->t_flags & XFS_TRANS_SYNC);
2657 xfs_perag_put(pag);
2658 kmem_free(new);
2659 return;
2660 }
2661 rb_erase(&busyp->rb_node, &pag->pagb_tree);
2662 new->length = max(busyp->bno + busyp->length,
2663 new->bno + new->length) -
2664 min(busyp->bno, new->bno);
2665 new->bno = min(busyp->bno, new->bno);
2666 } else
2667 busyp = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668
Dave Chinnered3b4d62010-05-21 12:07:08 +10002669 rb_link_node(&new->rb_node, parent, rbp);
2670 rb_insert_color(&new->rb_node, &pag->pagb_tree);
2671
2672 list_add(&new->list, &tp->t_busy);
Dave Chinnera862e0f2010-01-11 11:47:41 +00002673 spin_unlock(&pag->pagb_lock);
2674 xfs_perag_put(pag);
Dave Chinnered3b4d62010-05-21 12:07:08 +10002675 kmem_free(busyp);
2676}
2677
2678/*
2679 * Search for a busy extent within the range of the extent we are about to
2680 * allocate. You need to be holding the busy extent tree lock when calling
2681 * xfs_alloc_busy_search(). This function returns 0 for no overlapping busy
2682 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
2683 * match. This is done so that a non-zero return indicates an overlap that
2684 * will require a synchronous transaction, but it can still be
2685 * used to distinguish between a partial or exact match.
2686 */
2687static int
2688xfs_alloc_busy_search(
2689 struct xfs_mount *mp,
2690 xfs_agnumber_t agno,
2691 xfs_agblock_t bno,
2692 xfs_extlen_t len)
2693{
2694 struct xfs_perag *pag;
2695 struct rb_node *rbp;
2696 struct xfs_busy_extent *busyp;
2697 int match = 0;
2698
2699 pag = xfs_perag_get(mp, agno);
2700 spin_lock(&pag->pagb_lock);
2701
2702 rbp = pag->pagb_tree.rb_node;
2703
2704 /* find closest start bno overlap */
2705 while (rbp) {
2706 busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
2707 if (bno < busyp->bno) {
2708 /* may overlap, but exact start block is lower */
2709 if (bno + len > busyp->bno)
2710 match = -1;
2711 rbp = rbp->rb_left;
2712 } else if (bno > busyp->bno) {
2713 /* may overlap, but exact start block is higher */
2714 if (bno < busyp->bno + busyp->length)
2715 match = -1;
2716 rbp = rbp->rb_right;
2717 } else {
2718 /* bno matches busyp, length determines exact match */
2719 match = (busyp->length == len) ? 1 : -1;
2720 break;
2721 }
2722 }
2723 spin_unlock(&pag->pagb_lock);
2724 trace_xfs_alloc_busysearch(mp, agno, bno, len, !!match);
2725 xfs_perag_put(pag);
2726 return match;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727}
2728
2729void
Dave Chinnered3b4d62010-05-21 12:07:08 +10002730xfs_alloc_busy_clear(
2731 struct xfs_mount *mp,
2732 struct xfs_busy_extent *busyp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733{
Dave Chinnera862e0f2010-01-11 11:47:41 +00002734 struct xfs_perag *pag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735
Dave Chinnered3b4d62010-05-21 12:07:08 +10002736 trace_xfs_alloc_unbusy(mp, busyp->agno, busyp->bno,
2737 busyp->length);
2738
2739 ASSERT(xfs_alloc_busy_search(mp, busyp->agno, busyp->bno,
2740 busyp->length) == 1);
2741
2742 list_del_init(&busyp->list);
2743
2744 pag = xfs_perag_get(mp, busyp->agno);
Dave Chinnera862e0f2010-01-11 11:47:41 +00002745 spin_lock(&pag->pagb_lock);
Dave Chinnered3b4d62010-05-21 12:07:08 +10002746 rb_erase(&busyp->rb_node, &pag->pagb_tree);
Dave Chinnera862e0f2010-01-11 11:47:41 +00002747 spin_unlock(&pag->pagb_lock);
2748 xfs_perag_put(pag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
Dave Chinnered3b4d62010-05-21 12:07:08 +10002750 kmem_free(busyp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751}