From 872e330e38806d835bd6c311c93ab998e2fb9058 Mon Sep 17 00:00:00 2001
From: Stefan Richter
Date: Thu, 29 Jul 2010 18:19:22 +0200
Subject: firewire: add isochronous multichannel reception

This adds the DMA context programming and userspace ABI for multichannel
reception, i.e. for listening on multiple channel numbers by means of a
single DMA context.

The use case is reception of more streams than there are IR DMA units
offered by the link layer.  This is already implemented by the older
ohci1394 + ieee1394 + raw1394 stack and, as discussed recently on
linux1394-devel, the feature is occasionally used in practice.

The big drawbacks of this mode are that buffer layout and interrupt
generation necessarily differ from single-channel reception:  headers
and trailers are not stripped from packets, packets are not aligned with
buffer chunks, and interrupts are generated per buffer chunk, not per
packet.  These drawbacks also cause a rather hefty code footprint to
support this rarely used OHCI-1394 feature.  (367 lines added, among
them 94 lines of added userspace ABI documentation.)

This implementation enforces that a multichannel reception context may
only listen to channels to which no single-channel context on the same
link layer is presently listening.  OHCI-1394 would allow single-channel
contexts to be overlaid by the multichannel context, but this would be a
departure from the present first-come-first-served policy of IR context
creation.

The implementation is heavily based on an earlier one by Jay Fenlason.
Thanks Jay.

Signed-off-by: Stefan Richter
---
 drivers/firewire/core-cdev.c |  93 ++++++++++---
 drivers/firewire/core-iso.c  |  32 ++++-
 drivers/firewire/core.h      |   2 +
 drivers/firewire/ohci.c      | 316 +++++++++++++++++++++++++++++++++----------
 4 files changed, 346 insertions(+), 97 deletions(-)

(limited to 'drivers/firewire')

diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index cf989e1635e..ba23646bb10 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -193,6 +193,11 @@ struct iso_interrupt_event {
 	struct fw_cdev_event_iso_interrupt interrupt;
 };
 
+struct iso_interrupt_mc_event {
+	struct event event;
+	struct fw_cdev_event_iso_interrupt_mc interrupt;
+};
+
 struct iso_resource_event {
 	struct event event;
 	struct fw_cdev_event_iso_resource iso_resource;
@@ -415,6 +420,7 @@ union ioctl_arg {
 	struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
 	struct fw_cdev_send_phy_packet send_phy_packet;
 	struct fw_cdev_receive_phy_packets receive_phy_packets;
+	struct fw_cdev_set_iso_channels set_iso_channels;
 };
 
 static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
@@ -932,26 +938,54 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
 		    sizeof(e->interrupt) + header_length, NULL, 0);
 }
 
+static void iso_mc_callback(struct fw_iso_context *context,
+			    dma_addr_t completed, void *data)
+{
+	struct client *client = data;
+	struct iso_interrupt_mc_event *e;
+
+	e = kmalloc(sizeof(*e), GFP_ATOMIC);
+	if (e == NULL) {
+		fw_notify("Out of memory when allocating event\n");
+		return;
+	}
+	e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
+	e->interrupt.closure = client->iso_closure;
+	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
+						      completed);
+	queue_event(client, &e->event, &e->interrupt,
+		    sizeof(e->interrupt), NULL, 0);
+}
+
 static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 {
 	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
 	struct fw_iso_context *context;
+	fw_iso_callback_t cb;
 
 	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
-		     FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE);
-
-	if (a->channel > 63)
-		return -EINVAL;
+		     FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
+		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
+					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
 
 	switch (a->type) {
-	case FW_ISO_CONTEXT_RECEIVE:
-		if (a->header_size < 4 || (a->header_size & 3))
+	case FW_ISO_CONTEXT_TRANSMIT:
+		if (a->speed > SCODE_3200 || a->channel > 63)
 			return -EINVAL;
+
+		cb = iso_callback;
 		break;
 
-	case FW_ISO_CONTEXT_TRANSMIT:
-		if (a->speed > SCODE_3200)
+	case FW_ISO_CONTEXT_RECEIVE:
+		if (a->header_size < 4 || (a->header_size & 3) ||
+		    a->channel > 63)
 			return -EINVAL;
+
+		cb = iso_callback;
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		cb = (fw_iso_callback_t)iso_mc_callback;
 		break;
 
 	default:
@@ -959,8 +993,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 	}
 
 	context = fw_iso_context_create(client->device->card, a->type,
-					a->channel, a->speed, a->header_size,
-					iso_callback, client);
+			a->channel, a->speed, a->header_size, cb, client);
 	if (IS_ERR(context))
 		return PTR_ERR(context);
 
@@ -980,6 +1013,17 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
 	return 0;
 }
 
+static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
+{
+	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
+	struct fw_iso_context *ctx = client->iso_context;
+
+	if (ctx == NULL || a->handle != 0)
+		return -EINVAL;
+
+	return fw_iso_context_set_channels(ctx, &a->channels);
+}
+
 /* Macros for decoding the iso packet control header. */
 #define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
 #define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
@@ -993,7 +1037,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 	struct fw_cdev_queue_iso *a = &arg->queue_iso;
 	struct fw_cdev_iso_packet __user *p, *end, *next;
 	struct fw_iso_context *ctx = client->iso_context;
-	unsigned long payload, buffer_end, transmit_header_bytes;
+	unsigned long payload, buffer_end, transmit_header_bytes = 0;
 	u32 control;
 	int count;
 	struct {
@@ -1013,7 +1057,6 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 	 * use the indirect payload, the iso buffer need not be mapped
 	 * and the a->data pointer is ignored.
 	 */
-	payload = (unsigned long)a->data - client->vm_start;
 	buffer_end = client->buffer.page_count << PAGE_SHIFT;
 	if (a->data == 0 || client->buffer.pages == NULL ||
@@ -1022,8 +1065,10 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 		buffer_end = 0;
 	}
 
-	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
+	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
+		return -EINVAL;
 
+	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
 	if (!access_ok(VERIFY_READ, p, a->size))
 		return -EFAULT;
 
@@ -1039,19 +1084,24 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
 		u.packet.sy = GET_SY(control);
 		u.packet.header_length = GET_HEADER_LENGTH(control);
 
-		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
-			if (u.packet.header_length % 4 != 0)
+		switch (ctx->type) {
+		case FW_ISO_CONTEXT_TRANSMIT:
+			if (u.packet.header_length & 3)
 				return -EINVAL;
 			transmit_header_bytes = u.packet.header_length;
-		} else {
-			/*
-			 * We require that header_length is a multiple of
-			 * the fixed header size, ctx->header_size.
-			 */
+			break;
+
+		case FW_ISO_CONTEXT_RECEIVE:
 			if (u.packet.header_length == 0 ||
 			    u.packet.header_length % ctx->header_size != 0)
 				return -EINVAL;
-			transmit_header_bytes = 0;
+			break;
+
+		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+			if (u.packet.payload_length == 0 ||
+			    u.packet.payload_length & 3)
+				return -EINVAL;
+			break;
 		}
 
 		next = (struct fw_cdev_iso_packet __user *)
@@ -1534,6 +1584,7 @@ static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
 	[0x14] = ioctl_get_cycle_timer2,
 	[0x15] = ioctl_send_phy_packet,
 	[0x16] = ioctl_receive_phy_packets,
+	[0x17] = ioctl_set_iso_channels,
 };
 
 static int dispatch_ioctl(struct client *client,
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 4fe932e60fb..0c8e662a5da 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -117,6 +117,23 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
 }
 EXPORT_SYMBOL(fw_iso_buffer_destroy);
 
+/* Convert DMA address to offset into virtually contiguous buffer. */
+size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
+{
+	int i;
+	dma_addr_t address;
+	ssize_t offset;
+
+	for (i = 0; i < buffer->page_count; i++) {
+		address = page_private(buffer->pages[i]);
+		offset = (ssize_t)completed - (ssize_t)address;
+		if (offset > 0 && offset <= PAGE_SIZE)
+			return (i << PAGE_SHIFT) + offset;
+	}
+
+	return 0;
+}
+
 struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
 		int type, int channel, int speed, size_t header_size,
 		fw_iso_callback_t callback, void *callback_data)
@@ -133,7 +150,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
 	ctx->channel = channel;
 	ctx->speed = speed;
 	ctx->header_size = header_size;
-	ctx->callback = callback;
+	ctx->callback.sc = callback;
 	ctx->callback_data = callback_data;
 
 	return ctx;
@@ -142,9 +159,7 @@ EXPORT_SYMBOL(fw_iso_context_create);
 
 void fw_iso_context_destroy(struct fw_iso_context *ctx)
 {
-	struct fw_card *card = ctx->card;
-
-	card->driver->free_iso_context(ctx);
+	ctx->card->driver->free_iso_context(ctx);
 }
 EXPORT_SYMBOL(fw_iso_context_destroy);
 
@@ -155,14 +170,17 @@ int fw_iso_context_start(struct fw_iso_context *ctx,
 }
 EXPORT_SYMBOL(fw_iso_context_start);
 
+int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
+{
+	return ctx->card->driver->set_iso_channels(ctx, channels);
+}
+
 int fw_iso_context_queue(struct fw_iso_context *ctx,
 			 struct fw_iso_packet *packet,
 			 struct fw_iso_buffer *buffer,
 			 unsigned long payload)
 {
-	struct fw_card *card = ctx->card;
-
-	return card->driver->queue_iso(ctx, packet, buffer, payload);
+	return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
 }
 EXPORT_SYMBOL(fw_iso_context_queue);
 
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 28621e44b11..e6239f971be 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -90,6 +90,8 @@ struct fw_card_driver {
 	int (*start_iso)(struct fw_iso_context *ctx,
 			 s32 cycle, u32 sync, u32 tags);
 
+	int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);
+
 	int (*queue_iso)(struct fw_iso_context *ctx,
 			 struct fw_iso_packet *packet,
 			 struct fw_iso_buffer *buffer,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 2e4b425847a..4bda1c1b74b 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -190,11 +190,13 @@ struct fw_ohci {
 	struct context at_request_ctx;
 	struct context at_response_ctx;
 
-	u32 it_context_mask;
+	u32 it_context_mask;     /* unoccupied IT contexts */
 	struct iso_context *it_context_list;
-	u64 ir_context_channels;
-	u32 ir_context_mask;
+	u64 ir_context_channels; /* unoccupied channels */
+	u32 ir_context_mask;     /* unoccupied IR contexts */
 	struct iso_context *ir_context_list;
+	u64 mc_channels; /* channels in use by the multichannel IR context */
+	bool mc_allocated;
 
 	__be32 *config_rom;
 	dma_addr_t config_rom_bus;
@@ -2197,10 +2199,9 @@ static int handle_ir_packet_per_buffer(struct context *context,
 	__le32 *ir_header;
 	void *p;
 
-	for (pd = d; pd <= last; pd++) {
+	for (pd = d; pd <= last; pd++)
 		if (pd->transfer_status)
 			break;
-	}
 	if (pd > last)
 		/* Descriptor(s) not done yet, stop iteration */
 		return 0;
@@ -2210,16 +2211,38 @@ static int handle_ir_packet_per_buffer(struct context *context,
 	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
 		ir_header = (__le32 *) p;
-		ctx->base.callback(&ctx->base,
-				   le32_to_cpu(ir_header[0]) & 0xffff,
-				   ctx->header_length, ctx->header,
-				   ctx->base.callback_data);
+		ctx->base.callback.sc(&ctx->base,
+				      le32_to_cpu(ir_header[0]) & 0xffff,
+				      ctx->header_length, ctx->header,
+				      ctx->base.callback_data);
 		ctx->header_length = 0;
 	}
 
 	return 1;
 }
 
+/* d == last because each descriptor block is only a single descriptor. */
+static int handle_ir_buffer_fill(struct context *context,
+				 struct descriptor *d,
+				 struct descriptor *last)
+{
+	struct iso_context *ctx =
+		container_of(context, struct iso_context, context);
+
+	if (!last->transfer_status)
+		/* Descriptor(s) not done yet, stop iteration */
+		return 0;
+
+	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+		ctx->base.callback.mc(&ctx->base,
+				      le32_to_cpu(last->data_address) +
+				      le16_to_cpu(last->req_count) -
+				      le16_to_cpu(last->res_count),
+				      ctx->base.callback_data);
+
+	return 1;
+}
+
 static int handle_it_packet(struct context *context,
 			    struct descriptor *d,
 			    struct descriptor *last)
@@ -2245,72 +2268,118 @@ static int handle_it_packet(struct context *context,
 		ctx->header_length += 4;
 	}
 	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
-		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
-				   ctx->header_length, ctx->header,
-				   ctx->base.callback_data);
+		ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
+				      ctx->header_length, ctx->header,
+				      ctx->base.callback_data);
 		ctx->header_length = 0;
 	}
 
 	return 1;
 }
 
+static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
+{
+	u32 hi = channels >> 32, lo = channels;
+
+	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
+	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
+	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
+	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
+	mmiowb();
+	ohci->mc_channels = channels;
+}
+
 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
 				int type, int channel, size_t header_size)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
-	struct iso_context *ctx, *list;
-	descriptor_callback_t callback;
-	u64 *channels, dont_care = ~0ULL;
-	u32 *mask, regs;
+	struct iso_context *uninitialized_var(ctx);
+	descriptor_callback_t uninitialized_var(callback);
+	u64 *uninitialized_var(channels);
+	u32 *uninitialized_var(mask), uninitialized_var(regs);
 	unsigned long flags;
-	int index, ret = -ENOMEM;
+	int index, ret = -EBUSY;
 
-	if (type == FW_ISO_CONTEXT_TRANSMIT) {
-		channels = &dont_care;
-		mask = &ohci->it_context_mask;
-		list = ohci->it_context_list;
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	switch (type) {
+	case FW_ISO_CONTEXT_TRANSMIT:
+		mask = &ohci->it_context_mask;
 		callback = handle_it_packet;
+		index = ffs(*mask) - 1;
+		if (index >= 0) {
+			*mask &= ~(1 << index);
+			regs = OHCI1394_IsoXmitContextBase(index);
+			ctx = &ohci->it_context_list[index];
+		}
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE:
 		channels = &ohci->ir_context_channels;
-		mask = &ohci->ir_context_mask;
-		list = ohci->ir_context_list;
+		mask = &ohci->ir_context_mask;
 		callback = handle_ir_packet_per_buffer;
-	}
+		index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+		if (index >= 0) {
+			*channels &= ~(1ULL << channel);
+			*mask &= ~(1 << index);
+			regs = OHCI1394_IsoRcvContextBase(index);
+			ctx = &ohci->ir_context_list[index];
+		}
+		break;
 
-	spin_lock_irqsave(&ohci->lock, flags);
-	index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
-	if (index >= 0) {
-		*channels &= ~(1ULL << channel);
-		*mask &= ~(1 << index);
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		mask = &ohci->ir_context_mask;
+		callback = handle_ir_buffer_fill;
+		index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
+		if (index >= 0) {
+			ohci->mc_allocated = true;
+			*mask &= ~(1 << index);
+			regs = OHCI1394_IsoRcvContextBase(index);
+			ctx = &ohci->ir_context_list[index];
+		}
+		break;
+
+	default:
+		index = -1;
+		ret = -ENOSYS;
 	}
+
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
 	if (index < 0)
-		return ERR_PTR(-EBUSY);
-
-	if (type == FW_ISO_CONTEXT_TRANSMIT)
-		regs = OHCI1394_IsoXmitContextBase(index);
-	else
-		regs = OHCI1394_IsoRcvContextBase(index);
+		return ERR_PTR(ret);
 
-	ctx = &list[index];
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->header_length = 0;
 	ctx->header = (void *) __get_free_page(GFP_KERNEL);
-	if (ctx->header == NULL)
+	if (ctx->header == NULL) {
+		ret = -ENOMEM;
 		goto out;
-
+	}
 	ret = context_init(&ctx->context, ohci, regs, callback);
 	if (ret < 0)
 		goto out_with_header;
 
+	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+		set_multichannel_mask(ohci, 0);
+
 	return &ctx->base;
 
  out_with_header:
 	free_page((unsigned long)ctx->header);
  out:
 	spin_lock_irqsave(&ohci->lock, flags);
-	*channels |= 1ULL << channel;
+
+	switch (type) {
+	case FW_ISO_CONTEXT_RECEIVE:
+		*channels |= 1ULL << channel;
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		ohci->mc_allocated = false;
+		break;
+	}
 	*mask |= 1 << index;
+
 	spin_unlock_irqrestore(&ohci->lock, flags);
 
 	return ERR_PTR(ret);
@@ -2321,10 +2390,11 @@ static int ohci_start_iso(struct fw_iso_context *base,
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct fw_ohci *ohci = ctx->context.ohci;
-	u32 control, match;
+	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
 	int index;
 
-	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+	switch (ctx->base.type) {
+	case FW_ISO_CONTEXT_TRANSMIT:
 		index = ctx - ohci->it_context_list;
 		match = 0;
 		if (cycle >= 0)
@@ -2334,9 +2404,13 @@ static int ohci_start_iso(struct fw_iso_context *base,
 		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
 		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
 		context_run(&ctx->context, match);
-	} else {
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
+		/* fall through */
+	case FW_ISO_CONTEXT_RECEIVE:
 		index = ctx - ohci->ir_context_list;
-		control = IR_CONTEXT_ISOCH_HEADER;
 		match = (tags << 28) | (sync << 8) | ctx->base.channel;
 		if (cycle >= 0) {
 			match |= (cycle & 0x07fff) << 12;
@@ -2347,6 +2421,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
 		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
 		context_run(&ctx->context, control);
+		break;
 	}
 
 	return 0;
@@ -2358,12 +2433,17 @@ static int ohci_stop_iso(struct fw_iso_context *base)
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	int index;
 
-	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+	switch (ctx->base.type) {
+	case FW_ISO_CONTEXT_TRANSMIT:
 		index = ctx - ohci->it_context_list;
 		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
-	} else {
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE:
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
 		index = ctx - ohci->ir_context_list;
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
+		break;
 	}
 	flush_writes(ohci);
 	context_stop(&ctx->context);
@@ -2384,24 +2464,65 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
 
 	spin_lock_irqsave(&ohci->lock, flags);
 
-	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+	switch (base->type) {
+	case FW_ISO_CONTEXT_TRANSMIT:
 		index = ctx - ohci->it_context_list;
 		ohci->it_context_mask |= 1 << index;
-	} else {
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE:
 		index = ctx - ohci->ir_context_list;
 		ohci->ir_context_mask |= 1 << index;
 		ohci->ir_context_channels |= 1ULL << base->channel;
+		break;
+
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		index = ctx - ohci->ir_context_list;
+		ohci->ir_context_mask |= 1 << index;
+		ohci->ir_context_channels |= ohci->mc_channels;
+		ohci->mc_channels = 0;
+		ohci->mc_allocated = false;
+		break;
 	}
 
 	spin_unlock_irqrestore(&ohci->lock, flags);
 }
 
-static int ohci_queue_iso_transmit(struct fw_iso_context *base,
-				   struct fw_iso_packet *packet,
-				   struct fw_iso_buffer *buffer,
-				   unsigned long payload)
+static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
+{
+	struct fw_ohci *ohci = fw_ohci(base->card);
+	unsigned long flags;
+	int ret;
+
+	switch (base->type) {
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+
+		spin_lock_irqsave(&ohci->lock, flags);
+
+		/* Don't allow multichannel to grab other contexts' channels. */
+		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
+			*channels = ohci->ir_context_channels;
+			ret = -EBUSY;
+		} else {
+			set_multichannel_mask(ohci, *channels);
+			ret = 0;
+		}
+
+		spin_unlock_irqrestore(&ohci->lock, flags);
+
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int queue_iso_transmit(struct iso_context *ctx,
+			      struct fw_iso_packet *packet,
+			      struct fw_iso_buffer *buffer,
+			      unsigned long payload)
 {
-	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d, *last, *pd;
 	struct fw_iso_packet *p;
 	__le32 *header;
@@ -2497,14 +2618,12 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
 	return 0;
 }
 
-static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
-					struct fw_iso_packet *packet,
-					struct fw_iso_buffer *buffer,
-					unsigned long payload)
+static int queue_iso_packet_per_buffer(struct iso_context *ctx,
+				       struct fw_iso_packet *packet,
+				       struct fw_iso_buffer *buffer,
+				       unsigned long payload)
 {
-	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	struct descriptor *d, *pd;
-	struct fw_iso_packet *p = packet;
 	dma_addr_t d_bus, page_bus;
 	u32 z, header_z, rest;
 	int i, j, length;
@@ -2514,14 +2633,14 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 	 * The OHCI controller puts the isochronous header and trailer in the
 	 * buffer, so we need at least 8 bytes.
 	 */
-	packet_count = p->header_length / ctx->base.header_size;
+	packet_count = packet->header_length / ctx->base.header_size;
 	header_size = max(ctx->base.header_size, (size_t)8);
 
 	/* Get header size in number of descriptors. */
 	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
 	page = payload >> PAGE_SHIFT;
 	offset = payload & ~PAGE_MASK;
-	payload_per_buffer = p->payload_length / packet_count;
+	payload_per_buffer = packet->payload_length / packet_count;
 
 	for (i = 0; i < packet_count; i++) {
 		/* d points to the header descriptor */
@@ -2533,7 +2652,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 
 		d->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					 DESCRIPTOR_INPUT_MORE);
-		if (p->skip && i == 0)
+		if (packet->skip && i == 0)
 			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
 		d->req_count = cpu_to_le16(header_size);
 		d->res_count = d->req_count;
@@ -2566,7 +2685,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
 					  DESCRIPTOR_INPUT_LAST |
 					  DESCRIPTOR_BRANCH_ALWAYS);
-		if (p->interrupt && i == packet_count - 1)
+		if (packet->interrupt && i == packet_count - 1)
 			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
 
 		context_append(&ctx->context, d, z, header_z);
@@ -2575,6 +2694,58 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
 	return 0;
 }
 
+static int queue_iso_buffer_fill(struct iso_context *ctx,
+				 struct fw_iso_packet *packet,
+				 struct fw_iso_buffer *buffer,
+				 unsigned long payload)
+{
+	struct descriptor *d;
+	dma_addr_t d_bus, page_bus;
+	int page, offset, rest, z, i, length;
+
+	page   = payload >> PAGE_SHIFT;
+	offset = payload & ~PAGE_MASK;
+	rest   = packet->payload_length;
+
+	/* We need one descriptor for each page in the buffer. */
+	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
+
+	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
+		return -EFAULT;
+
+	for (i = 0; i < z; i++) {
+		d = context_get_descriptors(&ctx->context, 1, &d_bus);
+		if (d == NULL)
+			return -ENOMEM;
+
+		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+					 DESCRIPTOR_BRANCH_ALWAYS);
+		if (packet->skip && i == 0)
+			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+		if (packet->interrupt && i == z - 1)
+			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+		if (offset + rest < PAGE_SIZE)
+			length = rest;
+		else
+			length = PAGE_SIZE - offset;
+		d->req_count = cpu_to_le16(length);
+		d->res_count = d->req_count;
+		d->transfer_status = 0;
+
+		page_bus = page_private(buffer->pages[page]);
+		d->data_address = cpu_to_le32(page_bus + offset);
+
+		rest -= length;
+		offset = 0;
+		page++;
+
+		context_append(&ctx->context, d, 1, 0);
+	}
+
+	return 0;
+}
+
 static int ohci_queue_iso(struct fw_iso_context *base,
 			  struct fw_iso_packet *packet,
 			  struct fw_iso_buffer *buffer,
@@ -2582,14 +2753,20 @@ static int ohci_queue_iso(struct fw_iso_context *base,
 {
 	struct iso_context *ctx = container_of(base, struct iso_context, base);
 	unsigned long flags;
-	int ret;
+	int ret = -ENOSYS;
 
 	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
-	if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-		ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
-	else
-		ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
-							buffer, payload);
+	switch (base->type) {
+	case FW_ISO_CONTEXT_TRANSMIT:
+		ret = queue_iso_transmit(ctx, packet, buffer, payload);
+		break;
+	case FW_ISO_CONTEXT_RECEIVE:
+		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
+		break;
+	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
+		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
+		break;
+	}
 	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
 
 	return ret;
@@ -2609,6 +2786,7 @@ static const struct fw_card_driver ohci_driver = {
 	.allocate_iso_context	= ohci_allocate_iso_context,
 	.free_iso_context	= ohci_free_iso_context,
+	.set_iso_channels	= ohci_set_iso_channels,
 	.queue_iso		= ohci_queue_iso,
 	.start_iso		= ohci_start_iso,
 	.stop_iso		= ohci_stop_iso,
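
For illustration, a userspace client of the new multichannel ABI could be driven
roughly as in the sketch below.  This is not part of the patch: it assumes the
<linux/firewire-cdev.h> additions that accompany this series (the
FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL context type, an ioctl named
FW_CDEV_IOC_SET_ISO_CHANNELS taking struct fw_cdev_set_iso_channels, and the
FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL event), omits all error handling, and
uses an arbitrary device path and buffer geometry.

/* Sketch only: listen on channels 1, 2 and 5 with one multichannel context. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

#define BUFFER_SIZE	(4 * 1024 * 1024)
#define CHUNK_SIZE	(32 * 1024)	/* <= 64 KiB - 4, multiple of 4 */
#define N_CHUNKS	(BUFFER_SIZE / CHUNK_SIZE)

int main(void)
{
	struct fw_cdev_create_iso_context create = {0};
	struct fw_cdev_set_iso_channels channels = {0};
	struct fw_cdev_iso_packet packets[N_CHUNKS];
	struct fw_cdev_queue_iso queue = {0};
	struct fw_cdev_start_iso start = {0};
	void *buffer;
	int fd, i;

	fd = open("/dev/fw0", O_RDWR);	/* device file chosen arbitrarily */

	/* One IR DMA context that is not bound to a single channel. */
	create.type = FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL;
	create.header_size = 0;		/* headers stay inline in the buffer */
	ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &create);

	/* Select the channels; fails with EBUSY if a single-channel context
	 * on the same controller already listens to one of them. */
	channels.handle = create.handle;
	channels.channels = (1ULL << 1) | (1ULL << 2) | (1ULL << 5);
	ioctl(fd, FW_CDEV_IOC_SET_ISO_CHANNELS, &channels);

	/* A read-only mapping is sufficient for reception. */
	buffer = mmap(NULL, BUFFER_SIZE, PROT_READ, MAP_SHARED, fd, 0);

	/* Buffer-fill mode: a queue entry is just a chunk length (a multiple
	 * of 4) plus an interrupt request; packets are not aligned with these
	 * chunks and keep their headers and trailers. */
	memset(packets, 0, sizeof(packets));
	for (i = 0; i < N_CHUNKS; i++)
		packets[i].control = FW_CDEV_ISO_PAYLOAD_LENGTH(CHUNK_SIZE) |
				     FW_CDEV_ISO_INTERRUPT;

	queue.packets = (unsigned long)packets;
	queue.data    = (unsigned long)buffer;
	queue.size    = sizeof(packets);
	queue.handle  = create.handle;
	ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &queue);

	start.cycle  = -1;		/* start as soon as possible */
	start.tags   = FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS;
	start.handle = create.handle;
	ioctl(fd, FW_CDEV_IOC_START_ISO, &start);

	/* read(fd, ...) now delivers fw_cdev_event_iso_interrupt_mc events;
	 * their 'completed' field is the buffer offset up to which data has
	 * been written, as computed by fw_iso_buffer_lookup() above. */
	pause();
	return 0;
}

Because packets of all selected channels land back to back in the chunks, the
client has to parse the per-packet isochronous headers (which carry channel
number and data length) to demultiplex the streams - this is the extra work
that the commit message's remarks about buffer layout refer to.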