mirror of https://github.com/torvalds/linux.git
firewire: ohci: use workqueue to handle events of AR request/response contexts
This commit adds a work item to handle events of 1394 OHCI AR request/response contexts, and queues the item to the card's dedicated workqueue. The call of struct fw_address_handler.address_callback() is done in the workqueue when receiving any requests from the remote nodes. Additionally, the call of struct fw_packet.callback() is also done in the workqueue when receiving the acknowledgement of the asynchronous packet sent for the response subaction of a split transaction to the remote nodes.

Link: https://lore.kernel.org/r/20250615133253.433057-3-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
parent 72bf144123
commit 57e6d9f85f
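The change follows the usual tasklet-to-work-item conversion pattern: embed a struct work_struct in the per-context structure, initialize it with INIT_WORK(), recover the containing structure in the handler with from_work(), and defer processing from the interrupt handler with queue_work(). Unlike a tasklet, which runs in softirq (atomic) context, the work item runs in process context on a kworker thread. A minimal sketch of that pattern, using hypothetical names rather than the driver's own:

#include <linux/workqueue.h>

struct my_context {
	void *buffer;
	struct work_struct work;	// replaces struct tasklet_struct
};

// Runs in process context on a kworker thread instead of in softirq context.
static void my_context_work(struct work_struct *work)
{
	struct my_context *ctx = from_work(ctx, work, work);

	// ... parse the packets accumulated in ctx->buffer ...
}

static void my_context_init(struct my_context *ctx)
{
	INIT_WORK(&ctx->work, my_context_work);
}

// Called from the interrupt handler in place of tasklet_schedule().
static void my_context_schedule(struct workqueue_struct *wq, struct my_context *ctx)
{
	queue_work(wq, &ctx->work);
}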
drivers/firewire/core-transaction.c
@@ -557,9 +557,10 @@ const struct fw_address_region fw_unit_space_region =
  *
  * region->start, ->end, and handler->length have to be quadlet-aligned.
  *
- * When a request is received that falls within the specified address range,
- * the specified callback is invoked. The parameters passed to the callback
- * give the details of the particular request.
+ * When a request is received that falls within the specified address range, the specified callback
+ * is invoked. The parameters passed to the callback give the details of the particular request.
+ * The callback is invoked in the workqueue context in most cases. However, if the request is
+ * initiated by the local node, the callback is invoked in the initiator's context.
  *
  * To be called in process context.
  * Return value: 0 on success, non-zero otherwise.
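For users of the transaction core, the visible change is in fw_core_add_address_handler(): the registered address_callback now normally runs in workqueue context, unless the request comes from the local node. A registration sketch under that documentation; the handler name, region choice and length are illustrative, not part of this commit:

#include <linux/firewire.h>
#include <linux/firewire-constants.h>

static void handle_my_request(struct fw_card *card, struct fw_request *request,
			      int tcode, int destination, int source, int generation,
			      unsigned long long offset, void *payload, size_t length,
			      void *callback_data)
{
	// With this change, called in workqueue context for requests from remote
	// nodes, or in the initiator's context for requests from the local node.
	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler my_handler = {
	.length			= 0x100,	// quadlet-aligned, as required above
	.address_callback	= handle_my_request,
};

static int register_my_handler(void)
{
	// Returns 0 on success, non-zero otherwise (see the kernel-doc above).
	return fw_core_add_address_handler(&my_handler, &fw_unit_space_region);
}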
drivers/firewire/ohci.c
@@ -101,7 +101,7 @@ struct ar_context {
 	void *pointer;
 	unsigned int last_buffer_index;
 	u32 regs;
-	struct tasklet_struct tasklet;
+	struct work_struct work;
 };

 struct context;
@@ -1016,9 +1016,9 @@ static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
 	}
 }

-static void ar_context_tasklet(unsigned long data)
+static void ohci_ar_context_work(struct work_struct *work)
 {
-	struct ar_context *ctx = (struct ar_context *)data;
+	struct ar_context *ctx = from_work(ctx, work, work);
 	unsigned int end_buffer_index, end_buffer_offset;
 	void *p, *end;

@@ -1026,23 +1026,19 @@ static void ar_context_tasklet(unsigned long data)
 	if (!p)
 		return;

-	end_buffer_index = ar_search_last_active_buffer(ctx,
-							&end_buffer_offset);
+	end_buffer_index = ar_search_last_active_buffer(ctx, &end_buffer_offset);
 	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
 	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

 	if (end_buffer_index < ar_first_buffer_index(ctx)) {
-		/*
-		 * The filled part of the overall buffer wraps around; handle
-		 * all packets up to the buffer end here. If the last packet
-		 * wraps around, its tail will be visible after the buffer end
-		 * because the buffer start pages are mapped there again.
-		 */
+		// The filled part of the overall buffer wraps around; handle all packets up to the
+		// buffer end here. If the last packet wraps around, its tail will be visible after
+		// the buffer end because the buffer start pages are mapped there again.
 		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
 		p = handle_ar_packets(ctx, p, buffer_end);
 		if (p < buffer_end)
 			goto error;
-		/* adjust p to point back into the actual buffer */
+		// adjust p to point back into the actual buffer
 		p -= AR_BUFFERS * PAGE_SIZE;
 	}

@@ -1057,7 +1053,6 @@ static void ar_context_tasklet(unsigned long data)
 	ar_recycle_buffers(ctx, end_buffer_index);

 	return;
-
 error:
 	ctx->pointer = NULL;
 }
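ohci_ar_context_work() recovers its struct ar_context with from_work(), the container_of() wrapper for work items provided by <linux/workqueue.h>; roughly (paraphrased, not the exact kernel definition):

#define from_work(var, callback_work, work_fieldname) \
	container_of(callback_work, typeof(*var), work_fieldname)

// so "ctx = from_work(ctx, work, work)" resolves to
// "ctx = container_of(work, struct ar_context, work)".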
@@ -1073,7 +1068,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,

 	ctx->regs = regs;
 	ctx->ohci = ohci;
-	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
+	INIT_WORK(&ctx->work, ohci_ar_context_work);

 	for (i = 0; i < AR_BUFFERS; i++) {
 		ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
@@ -2238,10 +2233,10 @@ static irqreturn_t irq_handler(int irq, void *data)
 	}

 	if (event & OHCI1394_RQPkt)
-		tasklet_schedule(&ohci->ar_request_ctx.tasklet);
+		queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);

 	if (event & OHCI1394_RSPkt)
-		tasklet_schedule(&ohci->ar_response_ctx.tasklet);
+		queue_work(ohci->card.async_wq, &ohci->ar_response_ctx.work);

 	if (event & OHCI1394_reqTxComplete)
 		tasklet_schedule(&ohci->at_request_ctx.tasklet);