author    | Alexander Motin <mav@FreeBSD.org> | 2023-12-05 18:58:11 +0000
committer | GitHub <noreply@github.com>       | 2023-12-05 18:58:11 +0000
commit    | 55b764e062b171874655c7ed51d6a45c2c243c0a (patch)
tree      | 151043ce94575a2ad42b861f2c5061fbd1a19f12
parent    | 014265f4e6fa32b0a990525a0967b52164a94752 (diff)
-rw-r--r-- | module/zfs/dmu.c | 15
-rw-r--r-- | module/zfs/zil.c | 38
2 files changed, 43 insertions(+), 10 deletions(-)
```diff
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index e0cdd9e3f33e..6cf7f3c3b12e 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -2274,6 +2274,21 @@ dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
 			goto out;
 		}
 
+		/*
+		 * If the block was allocated in transaction group that is not
+		 * yet synced, we could clone it, but we couldn't write this
+		 * operation into ZIL, or it may be impossible to replay, since
+		 * the block may appear not yet allocated at that point.
+		 */
+		if (BP_PHYSICAL_BIRTH(bp) > spa_freeze_txg(os->os_spa)) {
+			error = SET_ERROR(EINVAL);
+			goto out;
+		}
+		if (BP_PHYSICAL_BIRTH(bp) > spa_last_synced_txg(os->os_spa)) {
+			error = SET_ERROR(EAGAIN);
+			goto out;
+		}
+
 		bps[i] = *bp;
 	}
 
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index 7dfdaa081e8a..252d9a0493de 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -617,11 +617,12 @@ zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg)
 }
 
 static int
-zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
+zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx,
+    uint64_t first_txg)
 {
 	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
 	const blkptr_t *bp;
-	spa_t *spa;
+	spa_t *spa = zilog->zl_spa;
 	uint_t ii;
 
 	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
@@ -636,19 +637,36 @@ zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
 	 * XXX: Do we need to byteswap lr?
 	 */
 
-	spa = zilog->zl_spa;
-
 	for (ii = 0; ii < lr->lr_nbps; ii++) {
 		bp = &lr->lr_bps[ii];
 
 		/*
-		 * When data in embedded into BP there is no need to create
-		 * BRT entry as there is no data block. Just copy the BP as
-		 * it contains the data.
+		 * When data is embedded into the BP there is no need to create
+		 * BRT entry as there is no data block. Just copy the BP as it
+		 * contains the data.
+		 */
+		if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
+			continue;
+
+		/*
+		 * We can not handle block pointers from the future, since they
+		 * are not yet allocated. It should not normally happen, but
+		 * just in case lets be safe and just stop here now instead of
+		 * corrupting the pool.
 		 */
-		if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
+		if (BP_PHYSICAL_BIRTH(bp) >= first_txg)
+			return (SET_ERROR(ENOENT));
+
+		/*
+		 * Assert the block is really allocated before we reference it.
+		 */
+		metaslab_check_free(spa, bp);
+	}
+
+	for (ii = 0; ii < lr->lr_nbps; ii++) {
+		bp = &lr->lr_bps[ii];
+		if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp))
 			brt_pending_add(spa, bp, tx);
-		}
 	}
 
 	return (0);
@@ -663,7 +681,7 @@ zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
 	case TX_WRITE:
 		return (zil_claim_write(zilog, lrc, tx, first_txg));
 	case TX_CLONE_RANGE:
-		return (zil_claim_clone_range(zilog, lrc, tx));
+		return (zil_claim_clone_range(zilog, lrc, tx, first_txg));
 	default:
 		return (0);
 	}
```
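The dmu.c hunk splits "source block not yet on stable storage" into two outcomes: a birth txg beyond spa_freeze_txg() (normally UINT64_MAX unless the pool has been frozen for testing) makes the clone invalid outright (EINVAL), while a birth txg beyond spa_last_synced_txg() only means "not yet", so the caller gets EAGAIN and can retry once the txg has synced. Below is a minimal, self-contained sketch of that decision logic; clone_src_check() and its plain uint64_t arguments are hypothetical stand-ins for the real BP_PHYSICAL_BIRTH()/spa_*_txg() calls, and the errno values are returned bare rather than through SET_ERROR().

```c
/*
 * Sketch of the birth-txg checks added to dmu_read_l0_bps().
 * clone_src_check() is a hypothetical name, not a ZFS function.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int
clone_src_check(uint64_t birth_txg, uint64_t freeze_txg,
    uint64_t last_synced_txg)
{
	/*
	 * Born after the freeze txg: the block can never become
	 * replayable, so the clone request is simply invalid.
	 */
	if (birth_txg > freeze_txg)
		return (EINVAL);
	/*
	 * Born after the last synced txg: cloneable in principle, but a
	 * ZIL record written now might not be replayable because the
	 * block could still appear unallocated; ask the caller to retry.
	 */
	if (birth_txg > last_synced_txg)
		return (EAGAIN);
	return (0);
}

int
main(void)
{
	/* Block born in txg 105 while the pool has synced through 100. */
	printf("%d\n", clone_src_check(105, UINT64_MAX, 100)); /* EAGAIN */
	/* Block born in txg 90 is already on stable storage. */
	printf("%d\n", clone_src_check(90, UINT64_MAX, 100));  /* 0 */
	return (0);
}
```

A caller that sees EAGAIN can wait for the currently open txg to reach disk (for example with txg_wait_synced()) and reissue the clone, at which point the source block is safe both to reference from a BRT entry and to encounter during ZIL replay.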
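On the claim side, zil_claim_clone_range() now receives first_txg and rejects any block pointer whose physical birth is at or after it: such a block "from the future" cannot have been allocated yet from the claiming pool's point of view, so the record fails with ENOENT instead of corrupting the pool. Just as important is the restructuring into two loops: every BP is validated (and metaslab_check_free() asserts it is really allocated) before a single brt_pending_add() call is made, so a rejected record leaves no partial BRT state behind. The sketch below models that validate-then-commit shape; claim_bp_t, add_brt_entry(), and claim_clone_range() are stand-ins, not the ZFS API.

```c
/*
 * Model of the two-pass pattern in the new zil_claim_clone_range():
 * validate every entry first, only then perform the side effects.
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uint64_t	birth_txg;		/* BP_PHYSICAL_BIRTH() stand-in */
	bool		hole_or_embedded;	/* no separate data block */
} claim_bp_t;

/* Side effect (BRT entry) performed only after all entries pass. */
static void
add_brt_entry(const claim_bp_t *bp)
{
	(void) bp;
}

static int
claim_clone_range(const claim_bp_t *bps, size_t nbps, uint64_t first_txg)
{
	size_t i;

	/* Pass 1: validate; bailing out here touches no state. */
	for (i = 0; i < nbps; i++) {
		if (bps[i].hole_or_embedded)
			continue;	/* nothing to claim for this BP */
		if (bps[i].birth_txg >= first_txg)
			return (ENOENT); /* block "from the future" */
	}

	/* Pass 2: all entries are known-good; apply the side effects. */
	for (i = 0; i < nbps; i++) {
		if (!bps[i].hole_or_embedded)
			add_brt_entry(&bps[i]);
	}
	return (0);
}
```

Keeping the mutation pass separate means an error return is always side-effect free, which is what makes it safe to fail the claim partway through the BP array.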