Merge tag 'mailbox-fixes-v6.18-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/jassibrar/mailbox

Pull mailbox fixes from Jassi Brar:

 - omap: check for pending msgs only when mbox is exclusive
 - mailbox-test: debugfs_create_dir error checking
 - mtk:
     - cmdq: fix DMA address handling
     - gpueb: Add missing 'static' to mailbox ops struct
 - pcc: don't zero error register
 - th1520: fix clock imbalance on probe failure

* tag 'mailbox-fixes-v6.18-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/jassibrar/mailbox:
  mailbox: th1520: fix clock imbalance on probe failure
  mailbox: pcc: don't zero error register
  mailbox: mtk-gpueb: Add missing 'static' to mailbox ops struct
  mailbox: mtk-cmdq: Refine DMA address handling for the command buffer
  mailbox: mailbox-test: Fix debugfs_create_dir error checking
  mailbox: omap-mailbox: Check for pending msgs only when mbox is exclusive
commit 24a84ea4ee

mailbox: mailbox-test: Fix debugfs_create_dir error checking

@@ -268,7 +268,7 @@ static int mbox_test_add_debugfs(struct platform_device *pdev,
                 return 0;

         tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL);
-        if (!tdev->root_debugfs_dir) {
+        if (IS_ERR(tdev->root_debugfs_dir)) {
                 dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n");
                 return -EINVAL;
         }
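
For context, debugfs_create_dir() reports failure with an ERR_PTR-encoded pointer rather than NULL, so the NULL test removed above could never fire. A minimal sketch of the corrected pattern; my_debugfs_init() and my_dir are assumptions for the sketch, not part of the driver:

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>

static struct dentry *my_dir;   /* hypothetical dentry for the sketch */

static int my_debugfs_init(struct device *dev)
{
        my_dir = debugfs_create_dir(dev_name(dev), NULL);
        if (IS_ERR(my_dir))     /* a !my_dir check would never trigger */
                return PTR_ERR(my_dir);

        return 0;
}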

mailbox: th1520: fix clock imbalance on probe failure

@@ -435,10 +435,8 @@ static int th1520_mbox_probe(struct platform_device *pdev)
         }

         ret = devm_add_action_or_reset(dev, th1520_disable_clk, priv);
-        if (ret) {
-                clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
+        if (ret)
                 return ret;
-        }

         /*
          * The address mappings in the device tree align precisely with those
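
The point of the change: devm_add_action_or_reset() already invokes the action when it fails to register it, so the removed clk_bulk_disable_unprepare() call disabled the clocks a second time. A hedged sketch of the intended pattern, where struct my_priv, my_disable_clks() and my_enable_clks() are illustrative assumptions:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

struct my_priv {
        struct clk_bulk_data clocks[2]; /* assume these were acquired earlier */
};

static void my_disable_clks(void *data)
{
        struct my_priv *priv = data;

        clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
}

static int my_enable_clks(struct device *dev, struct my_priv *priv)
{
        int ret;

        ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clocks), priv->clocks);
        if (ret)
                return ret;

        /* On failure this has already run my_disable_clks(priv) for us. */
        return devm_add_action_or_reset(dev, my_disable_clks, priv);
}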

mailbox: mtk-cmdq: Refine DMA address handling for the command buffer

@@ -92,6 +92,18 @@ struct gce_plat {
         u32 gce_num;
 };

+static inline u32 cmdq_convert_gce_addr(dma_addr_t addr, const struct gce_plat *pdata)
+{
+        /* Convert DMA addr (PA or IOVA) to GCE readable addr */
+        return addr >> pdata->shift;
+}
+
+static inline dma_addr_t cmdq_revert_gce_addr(u32 addr, const struct gce_plat *pdata)
+{
+        /* Revert GCE readable addr to DMA addr (PA or IOVA) */
+        return (dma_addr_t)addr << pdata->shift;
+}
+
 u8 cmdq_get_shift_pa(struct mbox_chan *chan)
 {
         struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
@@ -188,13 +200,12 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
         struct cmdq_task *prev_task = list_last_entry(
                         &thread->task_busy_list, typeof(*task), list_entry);
         u64 *prev_task_base = prev_task->pkt->va_base;
+        u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata);

         /* let previous task jump to this task */
         dma_sync_single_for_cpu(dev, prev_task->pa_base,
                                 prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
-        prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
-                (u64)CMDQ_JUMP_BY_PA << 32 |
-                (task->pa_base >> task->cmdq->pdata->shift);
+        prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = (u64)CMDQ_JUMP_BY_PA << 32 | gce_addr;
         dma_sync_single_for_device(dev, prev_task->pa_base,
                                    prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

@@ -237,7 +248,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
                                     struct cmdq_thread *thread)
 {
         struct cmdq_task *task, *tmp, *curr_task = NULL;
-        u32 curr_pa, irq_flag, task_end_pa;
+        u32 irq_flag, gce_addr;
+        dma_addr_t curr_pa, task_end_pa;
         bool err;

         irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
@@ -259,7 +271,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
         else
                 return;

-        curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;
+        gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
+        curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);

         list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
                                  list_entry) {
@@ -378,7 +391,8 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
         struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
         struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
         struct cmdq_task *task;
-        unsigned long curr_pa, end_pa;
+        u32 gce_addr;
+        dma_addr_t curr_pa, end_pa;

         /* Client should not flush new tasks if suspended. */
         WARN_ON(cmdq->suspended);
@@ -402,20 +416,20 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
                  */
                 WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

-                writel(task->pa_base >> cmdq->pdata->shift,
-                       thread->base + CMDQ_THR_CURR_ADDR);
-                writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
-                       thread->base + CMDQ_THR_END_ADDR);
+                gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata);
+                writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR);
+                gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata);
+                writel(gce_addr, thread->base + CMDQ_THR_END_ADDR);

                 writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
                 writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
                 writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
         } else {
                 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
-                curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
-                        cmdq->pdata->shift;
-                end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
-                        cmdq->pdata->shift;
+                gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
+                curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
+                gce_addr = readl(thread->base + CMDQ_THR_END_ADDR);
+                end_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
                 /* check boundary */
                 if (curr_pa == end_pa - CMDQ_INST_SIZE ||
                     curr_pa == end_pa) {
@@ -646,6 +660,9 @@ static int cmdq_probe(struct platform_device *pdev)
         if (err)
                 return err;

+        dma_set_coherent_mask(dev,
+                              DMA_BIT_MASK(sizeof(u32) * BITS_PER_BYTE + cmdq->pdata->shift));
+
         cmdq->mbox.dev = dev;
         cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
                                         sizeof(*cmdq->mbox.chans), GFP_KERNEL);
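
The helpers above exist because the GCE address registers are only 32 bits wide while the command buffer's DMA address may be wider: the address is stored right-shifted by a SoC-specific amount and must be widened back to dma_addr_t before shifting left, which is also why curr_pa/end_pa become dma_addr_t instead of u32 or unsigned long. A standalone sketch of the scheme (pack_gce_addr() and unpack_gce_addr() are illustrative names, not the driver's API):

#include <linux/types.h>

static inline u32 pack_gce_addr(dma_addr_t addr, u8 shift)
{
        return addr >> shift;                   /* now fits a 32-bit register */
}

static inline dma_addr_t unpack_gce_addr(u32 reg, u8 shift)
{
        return (dma_addr_t)reg << shift;        /* widen first, then shift */
}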

mailbox: mtk-gpueb: Add missing 'static' to mailbox ops struct

@@ -200,7 +200,7 @@ static bool mtk_gpueb_mbox_last_tx_done(struct mbox_chan *chan)
         return !(readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_TX_STS) & BIT(ch->num));
 }

-const struct mbox_chan_ops mtk_gpueb_mbox_ops = {
+static const struct mbox_chan_ops mtk_gpueb_mbox_ops = {
         .send_data = mtk_gpueb_mbox_send_data,
         .startup = mtk_gpueb_mbox_startup,
         .shutdown = mtk_gpueb_mbox_shutdown,

mailbox: omap-mailbox: Check for pending msgs only when mbox is exclusive

@@ -68,6 +68,7 @@ struct omap_mbox_fifo {

 struct omap_mbox_match_data {
         u32 intr_type;
+        bool is_exclusive;
 };

 struct omap_mbox_device {
@@ -78,6 +79,7 @@ struct omap_mbox_device {
         u32 num_users;
         u32 num_fifos;
         u32 intr_type;
+        const struct omap_mbox_match_data *mbox_data;
 };

 struct omap_mbox {
@@ -341,6 +343,7 @@ static int omap_mbox_suspend(struct device *dev)
         if (pm_runtime_status_suspended(dev))
                 return 0;

+        if (mdev->mbox_data->is_exclusive) {
         for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
                 if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
                         dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
@@ -348,6 +351,7 @@ static int omap_mbox_suspend(struct device *dev)
                         return -EBUSY;
                 }
         }
+        }

         for (usr = 0; usr < mdev->num_users; usr++) {
                 reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
@@ -378,8 +382,9 @@ static const struct dev_pm_ops omap_mbox_pm_ops = {
         SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
 };

-static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
-static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };
+static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1, true };
+static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2, true };
+static const struct omap_mbox_match_data am654_data = { MBOX_INTR_CFG_TYPE2, false };

 static const struct of_device_id omap_mailbox_of_match[] = {
         {
@@ -396,11 +401,11 @@ static const struct of_device_id omap_mailbox_of_match[] = {
         },
         {
                 .compatible = "ti,am654-mailbox",
-                .data = &omap4_data,
+                .data = &am654_data,
         },
         {
                 .compatible = "ti,am64-mailbox",
-                .data = &omap4_data,
+                .data = &am654_data,
         },
         {
                 /* end */
@@ -449,7 +454,6 @@ static int omap_mbox_probe(struct platform_device *pdev)
         struct omap_mbox_fifo *fifo;
         struct device_node *node = pdev->dev.of_node;
         struct device_node *child;
-        const struct omap_mbox_match_data *match_data;
         struct mbox_controller *controller;
         u32 intr_type, info_count;
         u32 num_users, num_fifos;
@@ -462,11 +466,6 @@ static int omap_mbox_probe(struct platform_device *pdev)
                 return -ENODEV;
         }

-        match_data = of_device_get_match_data(&pdev->dev);
-        if (!match_data)
-                return -ENODEV;
-        intr_type = match_data->intr_type;
-
         if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
                 return -ENODEV;

@@ -483,6 +482,12 @@ static int omap_mbox_probe(struct platform_device *pdev)
         if (!mdev)
                 return -ENOMEM;

+        mdev->mbox_data = device_get_match_data(&pdev->dev);
+        if (!mdev->mbox_data)
+                return -ENODEV;
+
+        intr_type = mdev->mbox_data->intr_type;
+
         mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
         if (IS_ERR(mdev->mbox_base))
                 return PTR_ERR(mdev->mbox_base);
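
The new is_exclusive flag rides on the standard OF match-data mechanism: each compatible entry points at per-SoC data, and probe fetches it with device_get_match_data() so the suspend path can skip the pending-message check on non-exclusive mailboxes. A minimal sketch of the pattern, where my_match_data, my_probe() and the initializer values are assumptions for illustration:

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/types.h>

struct my_match_data {
        u32 intr_type;
        bool is_exclusive;
};

static const struct my_match_data my_omap4_data = { .intr_type = 2, .is_exclusive = true };
static const struct my_match_data my_am654_data = { .intr_type = 2, .is_exclusive = false };

static const struct of_device_id my_of_match[] = {
        { .compatible = "ti,omap4-mailbox", .data = &my_omap4_data },
        { .compatible = "ti,am654-mailbox", .data = &my_am654_data },
        { /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
        const struct my_match_data *md = device_get_match_data(&pdev->dev);

        if (!md)
                return -ENODEV;

        /* md->is_exclusive would now gate the suspend-time check */
        return 0;
}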

mailbox: pcc: don't zero error register

@@ -276,9 +276,8 @@ static int pcc_mbox_error_check_and_clear(struct pcc_chan_info *pchan)
         if (ret)
                 return ret;

-        val &= pchan->error.status_mask;
-        if (val) {
-                val &= ~pchan->error.status_mask;
+        if (val & pchan->error.status_mask) {
+                val &= pchan->error.preserve_mask;
                 pcc_chan_reg_write(&pchan->error, val);
                 return -EIO;
         }
@@ -745,7 +744,8 @@ static int pcc_parse_subspace_db_reg(struct pcc_chan_info *pchan,

                 ret = pcc_chan_reg_init(&pchan->error,
                                         &pcct_ext->error_status_register,
-                                        0, 0, pcct_ext->error_status_mask,
+                                        ~pcct_ext->error_status_mask, 0,
+                                        pcct_ext->error_status_mask,
                                         "Error Status");
         }
         return ret;
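
Before this change the handler masked val down to the status bits and then cleared them, so the value written back was always zero and every other bit in the error register was wiped; with the preserve mask set to the complement of the status mask, only the status bits are cleared. A small illustrative helper (not the driver's code) showing the masking:

#include <linux/types.h>

/*
 * Clear only the latched status bits of an error-register value, leaving
 * all other bits untouched. preserve_mask is assumed to be the complement
 * of status_mask, mirroring the pcc_chan_reg_init() change above.
 */
static inline u64 clear_error_status(u64 val, u64 status_mask, u64 preserve_mask)
{
        if (!(val & status_mask))
                return val;             /* nothing latched, nothing to clear */

        return val & preserve_mask;     /* drop status bits, keep the rest */
}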

mailbox: mtk-cmdq: Refine DMA address handling for the command buffer (mtk-cmdq-mailbox.h)

@@ -77,6 +77,16 @@ struct cmdq_pkt {
         size_t buf_size; /* real buffer size */
 };

+/**
+ * cmdq_get_shift_pa() - get the shift bits of physical address
+ * @chan: mailbox channel
+ *
+ * GCE can only fetch the command buffer address from a 32-bit register.
+ * Some SOCs support more than 32-bit command buffer address for GCE, which
+ * requires some shift bits to make the address fit into the 32-bit register.
+ *
+ * Return: the shift bits of physical address
+ */
 u8 cmdq_get_shift_pa(struct mbox_chan *chan);

 #endif /* __MTK_CMDQ_MAILBOX_H__ */
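
As the new kernel-doc explains, a mailbox client uses the returned shift to fit a wide command-buffer DMA address into GCE's 32-bit registers. A brief usage sketch (client_pack_pa() is an assumed helper, not an existing API):

#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/types.h>

/* Pack a command-buffer DMA address the way the 32-bit GCE register expects. */
static u32 client_pack_pa(struct mbox_chan *chan, dma_addr_t pa)
{
        u8 shift = cmdq_get_shift_pa(chan);

        return pa >> shift;
}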