xfs: don't use xlog_in_core_2_t in struct xlog_in_core

Most accesses to the on-disk log record header are for the original
xlog_rec_header.  Make that the main structure, and cast for the
single remaining place using the other union leg.

This prepares for removing xlog_in_core_2_t entirely.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
This commit is contained in:
Christoph Hellwig 2025-10-27 08:05:50 +01:00 committed by Carlos Maiolino
parent 899b7ee44b
commit be665a4e27
4 changed files with 44 additions and 47 deletions

View File

@ -534,8 +534,8 @@ xlog_state_release_iclog(
*/ */
if ((iclog->ic_state == XLOG_STATE_WANT_SYNC || if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
(iclog->ic_flags & XLOG_ICL_NEED_FUA)) && (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
!iclog->ic_header.h_tail_lsn) { !iclog->ic_header->h_tail_lsn) {
iclog->ic_header.h_tail_lsn = iclog->ic_header->h_tail_lsn =
cpu_to_be64(atomic64_read(&log->l_tail_lsn)); cpu_to_be64(atomic64_read(&log->l_tail_lsn));
} }
@ -1457,11 +1457,11 @@ xlog_alloc_log(
iclog->ic_prev = prev_iclog; iclog->ic_prev = prev_iclog;
prev_iclog = iclog; prev_iclog = iclog;
iclog->ic_data = kvzalloc(log->l_iclog_size, iclog->ic_header = kvzalloc(log->l_iclog_size,
GFP_KERNEL | __GFP_RETRY_MAYFAIL); GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!iclog->ic_data) if (!iclog->ic_header)
goto out_free_iclog; goto out_free_iclog;
head = &iclog->ic_header; head = iclog->ic_header;
memset(head, 0, sizeof(xlog_rec_header_t)); memset(head, 0, sizeof(xlog_rec_header_t));
head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
head->h_version = cpu_to_be32( head->h_version = cpu_to_be32(
@ -1476,7 +1476,7 @@ xlog_alloc_log(
iclog->ic_log = log; iclog->ic_log = log;
atomic_set(&iclog->ic_refcnt, 0); atomic_set(&iclog->ic_refcnt, 0);
INIT_LIST_HEAD(&iclog->ic_callbacks); INIT_LIST_HEAD(&iclog->ic_callbacks);
iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize; iclog->ic_datap = (void *)iclog->ic_header + log->l_iclog_hsize;
init_waitqueue_head(&iclog->ic_force_wait); init_waitqueue_head(&iclog->ic_force_wait);
init_waitqueue_head(&iclog->ic_write_wait); init_waitqueue_head(&iclog->ic_write_wait);
@ -1504,7 +1504,7 @@ xlog_alloc_log(
out_free_iclog: out_free_iclog:
for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
prev_iclog = iclog->ic_next; prev_iclog = iclog->ic_next;
kvfree(iclog->ic_data); kvfree(iclog->ic_header);
kfree(iclog); kfree(iclog);
if (prev_iclog == log->l_iclog) if (prev_iclog == log->l_iclog)
break; break;
@ -1524,7 +1524,7 @@ xlog_pack_data(
struct xlog_in_core *iclog, struct xlog_in_core *iclog,
int roundoff) int roundoff)
{ {
struct xlog_rec_header *rhead = &iclog->ic_header; struct xlog_rec_header *rhead = iclog->ic_header;
__be32 cycle_lsn = CYCLE_LSN_DISK(rhead->h_lsn); __be32 cycle_lsn = CYCLE_LSN_DISK(rhead->h_lsn);
char *dp = iclog->ic_datap; char *dp = iclog->ic_datap;
int i; int i;
@ -1536,7 +1536,7 @@ xlog_pack_data(
} }
if (xfs_has_logv2(log->l_mp)) { if (xfs_has_logv2(log->l_mp)) {
xlog_in_core_2_t *xhdr = iclog->ic_data; xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)iclog->ic_header;
for (i = 1; i < log->l_iclog_heads; i++) for (i = 1; i < log->l_iclog_heads; i++)
xhdr[i].hic_xheader.xh_cycle = cycle_lsn; xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
@ -1658,11 +1658,11 @@ xlog_write_iclog(
iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
if (is_vmalloc_addr(iclog->ic_data)) { if (is_vmalloc_addr(iclog->ic_header)) {
if (!bio_add_vmalloc(&iclog->ic_bio, iclog->ic_data, count)) if (!bio_add_vmalloc(&iclog->ic_bio, iclog->ic_header, count))
goto shutdown; goto shutdown;
} else { } else {
bio_add_virt_nofail(&iclog->ic_bio, iclog->ic_data, count); bio_add_virt_nofail(&iclog->ic_bio, iclog->ic_header, count);
} }
/* /*
@ -1791,19 +1791,19 @@ xlog_sync(
size = iclog->ic_offset; size = iclog->ic_offset;
if (xfs_has_logv2(log->l_mp)) if (xfs_has_logv2(log->l_mp))
size += roundoff; size += roundoff;
iclog->ic_header.h_len = cpu_to_be32(size); iclog->ic_header->h_len = cpu_to_be32(size);
XFS_STATS_INC(log->l_mp, xs_log_writes); XFS_STATS_INC(log->l_mp, xs_log_writes);
XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)); bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header->h_lsn));
/* Do we need to split this write into 2 parts? */ /* Do we need to split this write into 2 parts? */
if (bno + BTOBB(count) > log->l_logBBsize) if (bno + BTOBB(count) > log->l_logBBsize)
xlog_split_iclog(log, &iclog->ic_header, bno, count); xlog_split_iclog(log, iclog->ic_header, bno, count);
/* calculcate the checksum */ /* calculcate the checksum */
iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, iclog->ic_header->h_crc = xlog_cksum(log, iclog->ic_header,
iclog->ic_datap, XLOG_REC_SIZE, size); iclog->ic_datap, XLOG_REC_SIZE, size);
/* /*
* Intentionally corrupt the log record CRC based on the error injection * Intentionally corrupt the log record CRC based on the error injection
@ -1814,11 +1814,11 @@ xlog_sync(
*/ */
#ifdef DEBUG #ifdef DEBUG
if (XFS_TEST_ERROR(log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { if (XFS_TEST_ERROR(log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA); iclog->ic_header->h_crc &= cpu_to_le32(0xAAAAAAAA);
iclog->ic_fail_crc = true; iclog->ic_fail_crc = true;
xfs_warn(log->l_mp, xfs_warn(log->l_mp,
"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.", "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
be64_to_cpu(iclog->ic_header.h_lsn)); be64_to_cpu(iclog->ic_header->h_lsn));
} }
#endif #endif
xlog_verify_iclog(log, iclog, count); xlog_verify_iclog(log, iclog, count);
@ -1845,7 +1845,7 @@ xlog_dealloc_log(
iclog = log->l_iclog; iclog = log->l_iclog;
for (i = 0; i < log->l_iclog_bufs; i++) { for (i = 0; i < log->l_iclog_bufs; i++) {
next_iclog = iclog->ic_next; next_iclog = iclog->ic_next;
kvfree(iclog->ic_data); kvfree(iclog->ic_header);
kfree(iclog); kfree(iclog);
iclog = next_iclog; iclog = next_iclog;
} }
@ -1867,7 +1867,7 @@ xlog_state_finish_copy(
{ {
lockdep_assert_held(&log->l_icloglock); lockdep_assert_held(&log->l_icloglock);
be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); be32_add_cpu(&iclog->ic_header->h_num_logops, record_cnt);
iclog->ic_offset += copy_bytes; iclog->ic_offset += copy_bytes;
} }
@ -2290,7 +2290,7 @@ xlog_state_activate_iclog(
* We don't need to cover the dummy. * We don't need to cover the dummy.
*/ */
if (*iclogs_changed == 0 && if (*iclogs_changed == 0 &&
iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) { iclog->ic_header->h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
*iclogs_changed = 1; *iclogs_changed = 1;
} else { } else {
/* /*
@ -2302,11 +2302,11 @@ xlog_state_activate_iclog(
iclog->ic_state = XLOG_STATE_ACTIVE; iclog->ic_state = XLOG_STATE_ACTIVE;
iclog->ic_offset = 0; iclog->ic_offset = 0;
iclog->ic_header.h_num_logops = 0; iclog->ic_header->h_num_logops = 0;
memset(iclog->ic_header.h_cycle_data, 0, memset(iclog->ic_header->h_cycle_data, 0,
sizeof(iclog->ic_header.h_cycle_data)); sizeof(iclog->ic_header->h_cycle_data));
iclog->ic_header.h_lsn = 0; iclog->ic_header->h_lsn = 0;
iclog->ic_header.h_tail_lsn = 0; iclog->ic_header->h_tail_lsn = 0;
} }
/* /*
@ -2398,7 +2398,7 @@ xlog_get_lowest_lsn(
iclog->ic_state == XLOG_STATE_DIRTY) iclog->ic_state == XLOG_STATE_DIRTY)
continue; continue;
lsn = be64_to_cpu(iclog->ic_header.h_lsn); lsn = be64_to_cpu(iclog->ic_header->h_lsn);
if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0) if ((lsn && !lowest_lsn) || XFS_LSN_CMP(lsn, lowest_lsn) < 0)
lowest_lsn = lsn; lowest_lsn = lsn;
} while ((iclog = iclog->ic_next) != log->l_iclog); } while ((iclog = iclog->ic_next) != log->l_iclog);
@ -2433,7 +2433,7 @@ xlog_state_iodone_process_iclog(
* If this is not the lowest lsn iclog, then we will leave it * If this is not the lowest lsn iclog, then we will leave it
* for another completion to process. * for another completion to process.
*/ */
header_lsn = be64_to_cpu(iclog->ic_header.h_lsn); header_lsn = be64_to_cpu(iclog->ic_header->h_lsn);
lowest_lsn = xlog_get_lowest_lsn(log); lowest_lsn = xlog_get_lowest_lsn(log);
if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0) if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
return false; return false;
@ -2616,7 +2616,7 @@ xlog_state_get_iclog_space(
goto restart; goto restart;
} }
head = &iclog->ic_header; head = iclog->ic_header;
atomic_inc(&iclog->ic_refcnt); /* prevents sync */ atomic_inc(&iclog->ic_refcnt); /* prevents sync */
log_offset = iclog->ic_offset; log_offset = iclog->ic_offset;
@ -2781,7 +2781,7 @@ xlog_state_switch_iclogs(
if (!eventual_size) if (!eventual_size)
eventual_size = iclog->ic_offset; eventual_size = iclog->ic_offset;
iclog->ic_state = XLOG_STATE_WANT_SYNC; iclog->ic_state = XLOG_STATE_WANT_SYNC;
iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); iclog->ic_header->h_prev_block = cpu_to_be32(log->l_prev_block);
log->l_prev_block = log->l_curr_block; log->l_prev_block = log->l_curr_block;
log->l_prev_cycle = log->l_curr_cycle; log->l_prev_cycle = log->l_curr_cycle;
@ -2825,7 +2825,7 @@ xlog_force_and_check_iclog(
struct xlog_in_core *iclog, struct xlog_in_core *iclog,
bool *completed) bool *completed)
{ {
xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header->h_lsn);
int error; int error;
*completed = false; *completed = false;
@ -2837,7 +2837,7 @@ xlog_force_and_check_iclog(
* If the iclog has already been completed and reused the header LSN * If the iclog has already been completed and reused the header LSN
* will have been rewritten by completion * will have been rewritten by completion
*/ */
if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) if (be64_to_cpu(iclog->ic_header->h_lsn) != lsn)
*completed = true; *completed = true;
return 0; return 0;
} }
@ -2970,7 +2970,7 @@ xlog_force_lsn(
goto out_error; goto out_error;
iclog = log->l_iclog; iclog = log->l_iclog;
while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { while (be64_to_cpu(iclog->ic_header->h_lsn) != lsn) {
trace_xlog_iclog_force_lsn(iclog, _RET_IP_); trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
iclog = iclog->ic_next; iclog = iclog->ic_next;
if (iclog == log->l_iclog) if (iclog == log->l_iclog)
@ -3236,7 +3236,7 @@ xlog_verify_dump_tail(
{ {
xfs_alert(log->l_mp, xfs_alert(log->l_mp,
"ran out of log space tail 0x%llx/0x%llx, head lsn 0x%llx, head 0x%x/0x%x, prev head 0x%x/0x%x", "ran out of log space tail 0x%llx/0x%llx, head lsn 0x%llx, head 0x%x/0x%x, prev head 0x%x/0x%x",
iclog ? be64_to_cpu(iclog->ic_header.h_tail_lsn) : -1, iclog ? be64_to_cpu(iclog->ic_header->h_tail_lsn) : -1,
atomic64_read(&log->l_tail_lsn), atomic64_read(&log->l_tail_lsn),
log->l_ailp->ail_head_lsn, log->l_ailp->ail_head_lsn,
log->l_curr_cycle, log->l_curr_block, log->l_curr_cycle, log->l_curr_block,
@ -3255,7 +3255,7 @@ xlog_verify_tail_lsn(
struct xlog *log, struct xlog *log,
struct xlog_in_core *iclog) struct xlog_in_core *iclog)
{ {
xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn); xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header->h_tail_lsn);
int blocks; int blocks;
if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
@ -3309,7 +3309,7 @@ xlog_verify_iclog(
struct xlog_in_core *iclog, struct xlog_in_core *iclog,
int count) int count)
{ {
struct xlog_rec_header *rhead = &iclog->ic_header; struct xlog_rec_header *rhead = iclog->ic_header;
xlog_in_core_t *icptr; xlog_in_core_t *icptr;
void *base_ptr, *ptr; void *base_ptr, *ptr;
ptrdiff_t field_offset; ptrdiff_t field_offset;
@ -3507,7 +3507,7 @@ xlog_iclogs_empty(
/* endianness does not matter here, zero is zero in /* endianness does not matter here, zero is zero in
* any language. * any language.
*/ */
if (iclog->ic_header.h_num_logops) if (iclog->ic_header->h_num_logops)
return 0; return 0;
iclog = iclog->ic_next; iclog = iclog->ic_next;
} while (iclog != log->l_iclog); } while (iclog != log->l_iclog);

View File

@ -940,7 +940,7 @@ xlog_cil_set_ctx_write_state(
struct xlog_in_core *iclog) struct xlog_in_core *iclog)
{ {
struct xfs_cil *cil = ctx->cil; struct xfs_cil *cil = ctx->cil;
xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header->h_lsn);
ASSERT(!ctx->commit_lsn); ASSERT(!ctx->commit_lsn);
if (!ctx->start_lsn) { if (!ctx->start_lsn) {
@ -1458,9 +1458,9 @@ xlog_cil_push_work(
*/ */
spin_lock(&log->l_icloglock); spin_lock(&log->l_icloglock);
if (ctx->start_lsn != ctx->commit_lsn) { if (ctx->start_lsn != ctx->commit_lsn) {
xfs_lsn_t plsn; xfs_lsn_t plsn = be64_to_cpu(
ctx->commit_iclog->ic_prev->ic_header->h_lsn);
plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) { if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
/* /*
* Waiting on ic_force_wait orders the completion of * Waiting on ic_force_wait orders the completion of

View File

@ -158,10 +158,8 @@ struct xlog_ticket {
}; };
/* /*
* - A log record header is 512 bytes. There is plenty of room to grow the * In-core log structure.
* xlog_rec_header_t into the reserved space. *
* - ic_data follows, so a write to disk can start at the beginning of
* the iclog.
* - ic_forcewait is used to implement synchronous forcing of the iclog to disk. * - ic_forcewait is used to implement synchronous forcing of the iclog to disk.
* - ic_next is the pointer to the next iclog in the ring. * - ic_next is the pointer to the next iclog in the ring.
* - ic_log is a pointer back to the global log structure. * - ic_log is a pointer back to the global log structure.
@ -198,8 +196,7 @@ typedef struct xlog_in_core {
/* reference counts need their own cacheline */ /* reference counts need their own cacheline */
atomic_t ic_refcnt ____cacheline_aligned_in_smp; atomic_t ic_refcnt ____cacheline_aligned_in_smp;
xlog_in_core_2_t *ic_data; struct xlog_rec_header *ic_header;
#define ic_header ic_data->hic_header
#ifdef DEBUG #ifdef DEBUG
bool ic_fail_crc : 1; bool ic_fail_crc : 1;
#endif #endif

View File

@ -4932,7 +4932,7 @@ DECLARE_EVENT_CLASS(xlog_iclog_class,
__entry->refcount = atomic_read(&iclog->ic_refcnt); __entry->refcount = atomic_read(&iclog->ic_refcnt);
__entry->offset = iclog->ic_offset; __entry->offset = iclog->ic_offset;
__entry->flags = iclog->ic_flags; __entry->flags = iclog->ic_flags;
__entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn); __entry->lsn = be64_to_cpu(iclog->ic_header->h_lsn);
__entry->caller_ip = caller_ip; __entry->caller_ip = caller_ip;
), ),
TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS", TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS",