// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "xfs_ag.h"
/*
* Set us up to scrub free space btrees.
*/
int
xchk_setup_ag_allocbt(
struct xfs_scrub *sc)
{
2023-10-24 12:59:35 +02:00
if (xchk_need_intent_drain(sc))
xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
2023-08-30 17:31:07 +02:00
return xchk_setup_ag_btree(sc, false);
}
/* Free space btree scrubber. */
/* In-core state carried between bnobt/cntbt records during a scrub pass. */
struct xchk_alloc {
	/* Previous free space extent. */
	struct xfs_alloc_rec_incore prev;
};
/*
 * Ensure there's a corresponding cntbt/bnobt record matching this
 * bnobt/cntbt record, respectively.
 */
STATIC void
xchk_allocbt_xref_other(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	struct xfs_btree_cur	**pcur;
	xfs_agblock_t		found_bno;
	xfs_extlen_t		found_len;
	int			has_rec;
	int			error;

	/* Pick whichever free space btree we are NOT currently scrubbing. */
	pcur = (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT) ?
			&sc->sa.cnt_cur : &sc->sa.bno_cur;
	if (!*pcur || xchk_skip_xref(sc->sm))
		return;

	/* Look for a record at or before (agbno, len) in the other btree. */
	error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_rec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
	if (!has_rec)
		goto corrupt;

	error = xfs_alloc_get_rec(*pcur, &found_bno, &found_len, &has_rec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
	if (!has_rec)
		goto corrupt;

	/* The other btree must contain this exact extent. */
	if (found_bno == agbno && found_len == len)
		return;
corrupt:
	xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
/* Cross-reference with the other btrees. */
STATIC void
xchk_allocbt_xref(
struct xfs_scrub *sc,
2023-10-24 12:59:35 +02:00
const struct xfs_alloc_rec_incore *irec)
2023-08-30 17:31:07 +02:00
{
2023-10-24 12:59:35 +02:00
xfs_agblock_t agbno = irec->ar_startblock;
xfs_extlen_t len = irec->ar_blockcount;
2023-08-30 17:31:07 +02:00
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
xchk_allocbt_xref_other(sc, agbno, len);
xchk_xref_is_not_inode_chunk(sc, agbno, len);
xchk_xref_has_no_owner(sc, agbno, len);
xchk_xref_is_not_shared(sc, agbno, len);
2023-10-24 12:59:35 +02:00
xchk_xref_is_not_cow_staging(sc, agbno, len);
}
/* Flag failures for records that could be merged. */
STATIC void
xchk_allocbt_mergeable(
	struct xchk_btree	*bs,
	struct xchk_alloc	*ca,
	const struct xfs_alloc_rec_incore *irec)
{
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/*
	 * Two adjacent records should have been merged into one unless the
	 * combined length would overflow the 32-bit block count field.
	 */
	if (ca->prev.ar_blockcount > 0 &&
	    ca->prev.ar_startblock + ca->prev.ar_blockcount == irec->ar_startblock &&
	    ca->prev.ar_blockcount + irec->ar_blockcount < (uint32_t)~0U)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	/* Remember this record so we can compare it to the next one. */
	memcpy(&ca->prev, irec, sizeof(*irec));
}
/* Scrub a bnobt/cntbt record. */
STATIC int
xchk_allocbt_rec(
2023-10-24 12:59:35 +02:00
struct xchk_btree *bs,
const union xfs_btree_rec *rec)
2023-08-30 17:31:07 +02:00
{
2023-10-24 12:59:35 +02:00
struct xfs_alloc_rec_incore irec;
struct xchk_alloc *ca = bs->private;
2023-08-30 17:31:07 +02:00
2023-10-24 12:59:35 +02:00
xfs_alloc_btrec_to_irec(rec, &irec);
if (xfs_alloc_check_irec(bs->cur, &irec) != NULL) {
2023-08-30 17:31:07 +02:00
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
2023-10-24 12:59:35 +02:00
return 0;
}
2023-08-30 17:31:07 +02:00
2023-10-24 12:59:35 +02:00
xchk_allocbt_mergeable(bs, ca, &irec);
xchk_allocbt_xref(bs->sc, &irec);
2023-08-30 17:31:07 +02:00
return 0;
}
/* Scrub the freespace btrees for some AG. */
STATIC int
xchk_allocbt(
	struct xfs_scrub	*sc,
	xfs_btnum_t		which)
{
	struct xchk_alloc	ca = { };
	struct xfs_btree_cur	*cur;

	/* Both btrees record free space; pick the cursor for @which. */
	cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur;
	return xchk_btree(sc, cur, xchk_allocbt_rec, &XFS_RMAP_OINFO_AG, &ca);
}
int
xchk_bnobt(
struct xfs_scrub *sc)
{
return xchk_allocbt(sc, XFS_BTNUM_BNO);
}
int
xchk_cntbt(
struct xfs_scrub *sc)
{
return xchk_allocbt(sc, XFS_BTNUM_CNT);
}
/* xref check that the extent is not free */
void
xchk_xref_is_used_space(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
		return;

	/* An in-use extent must have no bnobt records at all. */
	error = xfs_alloc_has_records(sc->sa.bno_cur, agbno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
}