mirror of https://github.com/torvalds/linux.git
Merge tag 'drm-msm-fixes-2025-10-29' of https://gitlab.freedesktop.org/drm/msm into drm-fixes
Fixes for v6.18-rc4

CI:
- Disable broken sanity job

GEM:
- Fix vm_bind prealloc error path
- Fix dma-buf import free
- Fix last-fence update
- Reject MAP_NULL if PRR is unsupported
- Ensure vm is created in VM_BIND ioctl

GPU:
- GMU fw parsing fix

DPU:
- Fix mode_valid callback
- Fix planes on DPU 1.x devices

Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch>
From: Rob Clark <rob.clark@oss.qualcomm.com>
Link: https://patch.msgid.link/CACSVV03kUm1ms7FBg0m9U4ZcyickSWbnayAWqYqs0XH4UjWf+A@mail.gmail.com
commit 3d8d35bf8d
@@ -280,7 +280,7 @@ sanity:
     GIT_STRATEGY: none
   script:
     # ci-fairy check-commits --junit-xml=check-commits.xml
-    - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+    # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
     - |
       set -eu
       image_tags=(

@@ -780,6 +780,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
 	return true;
 }
 
+#define NEXT_BLK(blk) \
+	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))
+
 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);

@@ -811,7 +814,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 
 	for (blk = (const struct block_header *) fw_image->data;
 	     (const u8*) blk < fw_image->data + fw_image->size;
-	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+	     blk = NEXT_BLK(blk)) {
 		if (blk->size == 0)
 			continue;

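A note on the NEXT_BLK() change above: the macro advances through the firmware image in bytes (header size plus `size` payload bytes) rather than indexing into the block's data array. Below is a minimal standalone sketch of the resulting block walk; the struct layout is assumed from this hunk only (the addr field and the demo values are placeholders, not taken from the full source file):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed layout, inferred from the hunk above; only size/data matter here. */
struct block_header {
	uint32_t addr;   /* placeholder field */
	uint32_t size;   /* payload size in bytes */
	uint8_t data[];
};

/* Advance in bytes: the header, then `size` bytes of payload. */
#define NEXT_BLK(blk) \
	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))

static void walk_blocks(const uint8_t *fw, size_t len)
{
	const struct block_header *blk;

	for (blk = (const struct block_header *)fw;
	     (const uint8_t *)blk < fw + len;
	     blk = NEXT_BLK(blk)) {
		if (blk->size == 0)
			continue;
		printf("block @ %#x: %u payload bytes\n", blk->addr, blk->size);
	}
}

int main(void)
{
	/* Two blocks: 4 payload bytes, then an empty block (skipped). */
	uint8_t fw[8 + 4 + 8] = { 0 };
	struct block_header b1 = { .addr = 0x100, .size = 4 };
	struct block_header b2 = { .addr = 0x200, .size = 0 };

	memcpy(fw, &b1, sizeof(b1));
	memcpy(fw + sizeof(b1) + 4, &b2, sizeof(b2));
	walk_blocks(fw, sizeof(fw));
	return 0;
}
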
@@ -348,13 +348,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
 	return 0;
 }
 
-static bool
-adreno_smmu_has_prr(struct msm_gpu *gpu)
-{
-	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
-	return adreno_smmu && adreno_smmu->set_prr_addr;
-}
-
 int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
 		     uint32_t param, uint64_t *value, uint32_t *len)
 {

@@ -1545,6 +1545,9 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
 	adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
 							    dpu_kms->perf.perf_cfg);
 
+	if (dpu_kms->catalog->caps->has_3d_merge)
+		adjusted_mode_clk /= 2;
+
 	/*
 	 * The given mode, adjusted for the perf clock factor, should not exceed
 	 * the max core clock rate

@@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = {
 	.base = 0x200, .len = 0xa0,}, \
 	.csc_blk = {.name = "csc", \
 	.base = 0x320, .len = 0x100,}, \
-	.format_list = plane_formats_yuv, \
-	.num_formats = ARRAY_SIZE(plane_formats_yuv), \
+	.format_list = plane_formats, \
+	.num_formats = ARRAY_SIZE(plane_formats), \
 	.rotation_cfg = NULL, \
 	}

@@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
 	int i;
 
 	for (i = 0; i < DPU_MAX_PLANES; i++) {
+		uint32_t w = src_w, h = src_h;
+
 		if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
-			src_w /= chroma_subsmpl_h;
-			src_h /= chroma_subsmpl_v;
+			w /= chroma_subsmpl_h;
+			h /= chroma_subsmpl_v;
 		}
 
-		pixel_ext->num_ext_pxls_top[i] = src_h;
-		pixel_ext->num_ext_pxls_left[i] = src_w;
+		pixel_ext->num_ext_pxls_top[i] = h;
+		pixel_ext->num_ext_pxls_left[i] = w;
 	}
 }

@@ -740,7 +742,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
 	 * We already have verified scaling against platform limitations.
 	 * Now check if the SSPP supports scaling at all.
 	 */
-	if (!sblk->scaler_blk.len &&
+	if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&
 	    ((drm_rect_width(&new_plane_state->src) >> 16 !=
 	      drm_rect_width(&new_plane_state->dst)) ||
 	     (drm_rect_height(&new_plane_state->src) >> 16 !=

@@ -1278,7 +1280,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
 						     state, plane_state,
 						     prev_adjacent_plane_state);
 		if (ret)
-			break;
+			return ret;
 
 		prev_adjacent_plane_state = plane_state;
 	}

@@ -842,7 +842,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
 
 	if (!reqs->scale && !reqs->yuv)
 		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
-	if (!hw_sspp && reqs->scale)
+	if (!hw_sspp && !reqs->yuv)
 		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
 	if (!hw_sspp)
 		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);

@@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
 		DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
 			  fb->width, dpu_wb_conn->maxlinewidth);
 		return -EINVAL;
+	} else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+		DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier);
+		return -EINVAL;
 	}
 
 	return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);

@@ -109,7 +109,6 @@ struct msm_dsi_phy {
 	struct msm_dsi_dphy_timing timing;
 	const struct msm_dsi_phy_cfg *cfg;
 	void *tuning_cfg;
-	void *pll_data;
 
 	enum msm_dsi_phy_usecase usecase;
 	bool regulator_ldo_mode;

@@ -426,11 +426,8 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
 	u32 data;
 
 	spin_lock_irqsave(&pll->pll_enable_lock, flags);
-	if (pll->pll_enable_cnt++) {
-		spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
-		WARN_ON(pll->pll_enable_cnt == INT_MAX);
-		return;
-	}
+	pll->pll_enable_cnt++;
+	WARN_ON(pll->pll_enable_cnt == INT_MAX);
 
 	data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
 	data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;

@@ -876,7 +873,6 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
 	spin_lock_init(&pll_7nm->pll_enable_lock);
 
 	pll_7nm->phy = phy;
-	phy->pll_data = pll_7nm;
 
 	ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
 	if (ret) {

@@ -965,10 +961,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
 	u32 const delay_us = 5;
 	u32 const timeout_us = 1000;
 	struct msm_dsi_dphy_timing *timing = &phy->timing;
-	struct dsi_pll_7nm *pll = phy->pll_data;
 	void __iomem *base = phy->base;
 	bool less_than_1500_mhz;
-	unsigned long flags;
 	u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
 	u32 glbl_pemph_ctrl_0;
 	u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;

@@ -1090,13 +1084,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
 		glbl_rescode_bot_ctrl = 0x3c;
 	}
 
-	spin_lock_irqsave(&pll->pll_enable_lock, flags);
-	pll->pll_enable_cnt = 1;
 	/* de-assert digital and pll power down */
 	data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
 	       DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
 	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
-	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
 
 	/* Assert PLL core reset */
 	writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);

@@ -1209,9 +1200,7 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
 
 static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
 {
-	struct dsi_pll_7nm *pll = phy->pll_data;
 	void __iomem *base = phy->base;
-	unsigned long flags;
 	u32 data;
 
 	DBG("");

@@ -1238,11 +1227,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
 	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
 	writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
 
-	spin_lock_irqsave(&pll->pll_enable_lock, flags);
-	pll->pll_enable_cnt = 0;
 	/* Turn off all PHY blocks */
 	writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
-	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
 
 	/* make sure phy is turned off */
 	wmb();

@@ -1120,12 +1120,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
 		put_pages(obj);
 	}
 
-	if (obj->resv != &obj->_resv) {
+	/*
+	 * In error paths, we could end up here before msm_gem_new_handle()
+	 * has changed obj->resv to point to the shared resv.  In this case,
+	 * we don't want to drop a ref to the shared r_obj that we haven't
+	 * taken yet.
+	 */
+	if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
 		struct drm_gem_object *r_obj =
 			container_of(obj->resv, struct drm_gem_object, _resv);
 
-		WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
-
 		/* Drop reference we hold to shared resv obj: */
 		drm_gem_object_put(r_obj);
 	}

@@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
 					   submit->user_fence,
 					   DMA_RESV_USAGE_BOOKKEEP,
 					   DMA_RESV_USAGE_BOOKKEEP);
+
+		last_fence = vm->last_fence;
+		vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+		dma_fence_put(last_fence);
+
 		return;
 	}

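One detail worth noting in the lines added above: dma_fence_unwrap_merge() builds a new merged fence reference from its arguments, so the old vm->last_fence reference must be merged before it is dropped. A sketch of the idiom with the ordering made explicit (kernel-style fragment annotating the hunk above, not standalone code):

	struct dma_fence *last_fence;

	/* Take the pointer first; the vm still holds its reference. */
	last_fence = vm->last_fence;

	/* Merge old and new into a fresh reference before releasing anything,
	 * so the merge never operates on a fence that was already put. */
	vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);

	/* Now drop the reference vm->last_fence previously held. */
	dma_fence_put(last_fence);
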
@@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
 		dma_resv_add_fence(obj->resv, submit->user_fence,
 				   DMA_RESV_USAGE_READ);
 	}
-
-	last_fence = vm->last_fence;
-	vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
-	dma_fence_put(last_fence);
 }
 
 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,

@@ -971,6 +971,7 @@ static int
 lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
 {
 	struct drm_device *dev = job->vm->drm;
+	struct msm_drm_private *priv = dev->dev_private;
 	int i = job->nr_ops++;
 	int ret = 0;
 

@@ -1017,6 +1018,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
 		break;
 	}
 
+	if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
+	    !adreno_smmu_has_prr(priv->gpu)) {
+		ret = UERR(EINVAL, dev, "PRR not supported\n");
+	}
+
 	return ret;
 }

@@ -1421,7 +1427,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
 	 * Maybe we could allow just UNMAP ops?  OTOH userspace should just
 	 * immediately close the device file and all will be torn down.
 	 */
-	if (to_msm_vm(ctx->vm)->unusable)
+	if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
 		return UERR(EPIPE, dev, "context is unusable");
 
 	/*

@@ -299,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev)
 	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
 }
 
+static inline bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+
+	if (!adreno_smmu)
+		return false;
+
+	return adreno_smmu && adreno_smmu->set_prr_addr;
+}
+
 /* It turns out that all targets use the same ringbuffer size */
 #define MSM_GPU_RINGBUFFER_SZ SZ_32K
 #define MSM_GPU_RINGBUFFER_BLKSIZE 32

@@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
 
 	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
 	if (ret != p->count) {
+		kfree(p->pages);
+		p->pages = NULL;
 		p->count = ret;
 		return -ENOMEM;
 	}

@@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
 	struct kmem_cache *pt_cache = get_pt_cache(mmu);
 	uint32_t remaining_pt_count = p->count - p->ptr;
 
+	if (!p->pages)
+		return;
+
 	if (p->count > 0)
 		trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);

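The two msm_iommu hunks above are a matched pair: the allocate path now frees and NULLs p->pages when the bulk allocation comes up short, and the cleanup path bails out when p->pages is NULL, so the error path can no longer free or walk a stale array. Below is a standalone sketch of that contract, with stand-in types and plain malloc/free in place of the kmem_cache bulk API (names follow the hunks but everything else is illustrative):

#include <stdint.h>
#include <stdlib.h>

/* Stand-in for struct msm_mmu_prealloc; field names follow the hunks. */
struct prealloc {
	void **pages;
	uint32_t count;
	uint32_t ptr;	/* index of the first still-unconsumed page */
};

static int prealloc_allocate(struct prealloc *p, uint32_t count)
{
	uint32_t i;

	p->count = count;
	p->pages = calloc(count, sizeof(*p->pages));
	if (!p->pages)
		return -1;

	for (i = 0; i < count; i++) {
		p->pages[i] = malloc(4096);	/* stand-in for a page-table page */
		if (!p->pages[i]) {
			/* Unwind, then leave pages NULL so cleanup() bails. */
			while (i--)
				free(p->pages[i]);
			free(p->pages);
			p->pages = NULL;
			return -1;
		}
	}
	return 0;
}

static void prealloc_cleanup(struct prealloc *p)
{
	uint32_t i;

	if (!p->pages)	/* allocation failed or never happened: nothing to do */
		return;

	for (i = p->ptr; i < p->count; i++)
		free(p->pages[i]);
	free(p->pages);
	p->pages = NULL;
}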