cxl/region: Split commit_store() into __commit() and queue_reset() helpers

The complexity of dropping the lock is removed in favor of splitting commit
operations to a helper, and leaving all the complexities of "decommit" for
commit_store() to coordinate the different locking contexts.

The CPU cache-invalidation in the decommit path is solely handled now by
cxl_region_decode_reset(). Previously the CPU caches were being needlessly
flushed twice in the decommit path where the first flush had no guarantee
that the memory would not be immediately re-dirtied.

Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Jonathan Cameron <jonathan.cameron@huawei.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Alison Schofield <alison.schofield@intel.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Fabio M. De Francesco <fabio.m.de.francesco@linux.intel.com>
Link: https://patch.msgid.link/20250711234932.671292-6-dan.j.williams@intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
This commit is contained in:
Dan Williams 2025-07-11 16:49:29 -07:00 committed by Dave Jiang
parent 55a89d9c99
commit a235d7d963
1 changed file with 72 additions and 31 deletions

View File

@@ -349,30 +349,42 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
 	return rc;
 }
 
-static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
-			    const char *buf, size_t len)
+static int queue_reset(struct cxl_region *cxlr)
 {
-	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
-	bool commit;
-	ssize_t rc;
-
-	rc = kstrtobool(buf, &commit);
-	if (rc)
-		return rc;
+	int rc;
 
 	rc = down_write_killable(&cxl_region_rwsem);
 	if (rc)
 		return rc;
 
 	/* Already in the requested state? */
-	if (commit && p->state >= CXL_CONFIG_COMMIT)
+	if (p->state < CXL_CONFIG_COMMIT)
 		goto out;
-	if (!commit && p->state < CXL_CONFIG_COMMIT)
+
+	p->state = CXL_CONFIG_RESET_PENDING;
+
+out:
+	up_write(&cxl_region_rwsem);
+
+	return rc;
+}
+
+static int __commit(struct cxl_region *cxlr)
+{
+	struct cxl_region_params *p = &cxlr->params;
+	int rc;
+
+	rc = down_write_killable(&cxl_region_rwsem);
+	if (rc)
+		return rc;
+
+	/* Already in the requested state? */
+	if (p->state >= CXL_CONFIG_COMMIT)
 		goto out;
 
 	/* Not ready to commit? */
-	if (commit && p->state < CXL_CONFIG_ACTIVE) {
+	if (p->state < CXL_CONFIG_ACTIVE) {
 		rc = -ENXIO;
 		goto out;
 	}
@@ -385,31 +397,60 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
 	if (rc)
 		goto out;
 
-	if (commit) {
-		rc = cxl_region_decode_commit(cxlr);
-		if (rc == 0)
-			p->state = CXL_CONFIG_COMMIT;
-	} else {
-		p->state = CXL_CONFIG_RESET_PENDING;
-		up_write(&cxl_region_rwsem);
-		device_release_driver(&cxlr->dev);
-		down_write(&cxl_region_rwsem);
+	rc = cxl_region_decode_commit(cxlr);
+	if (rc == 0)
+		p->state = CXL_CONFIG_COMMIT;
 
-		/*
-		 * The lock was dropped, so need to revalidate that the reset is
-		 * still pending.
-		 */
-		if (p->state == CXL_CONFIG_RESET_PENDING) {
-			cxl_region_decode_reset(cxlr, p->interleave_ways);
-			p->state = CXL_CONFIG_ACTIVE;
-		}
-	}
-
 out:
 	up_write(&cxl_region_rwsem);
 
-	if (rc)
-		return rc;
+	return rc;
+}
+
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t len)
+{
+	struct cxl_region *cxlr = to_cxl_region(dev);
+	struct cxl_region_params *p = &cxlr->params;
+	bool commit;
+	ssize_t rc;
+
+	rc = kstrtobool(buf, &commit);
+	if (rc)
+		return rc;
+
+	if (commit) {
+		rc = __commit(cxlr);
+		if (rc)
+			return rc;
+		return len;
+	}
+
+	rc = queue_reset(cxlr);
+	if (rc)
+		return rc;
+
+	/*
+	 * Unmap the region and depend the reset-pending state to ensure
+	 * it does not go active again until post reset
+	 */
+	device_release_driver(&cxlr->dev);
+
+	/*
+	 * With the reset pending take cxl_region_rwsem unconditionally
+	 * to ensure the reset gets handled before returning.
+	 */
+	guard(rwsem_write)(&cxl_region_rwsem);
+
+	/*
+	 * Revalidate that the reset is still pending in case another
+	 * thread already handled this reset.
+	 */
+	if (p->state == CXL_CONFIG_RESET_PENDING) {
+		cxl_region_decode_reset(cxlr, p->interleave_ways);
+		p->state = CXL_CONFIG_ACTIVE;
+	}
+
 	return len;
 }