aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatias Elo <matias.elo@nokia.com>2018-07-26 13:34:45 +0300
committerMatias Elo <matias.elo@nokia.com>2018-07-26 14:41:38 +0300
commit9ea562663e1b828d65b04210db8454c855afc1d7 (patch)
tree09c724303012a12ccc2581a4ce726fe68b5ce8e6
parenta572dab528d1e27b8673b29abbb03eabd54c1da9 (diff)
Port 0ee7d42a "linux-gen: queue: separate plain and sched dequeues"
Signed-off-by: Matias Elo <matias.elo@nokia.com>
-rw-r--r--platform/linux-dpdk/odp_queue_basic.c72
1 file changed, 44 insertions, 28 deletions
diff --git a/platform/linux-dpdk/odp_queue_basic.c b/platform/linux-dpdk/odp_queue_basic.c
index 1d20d5af5..fb66c7f6c 100644
--- a/platform/linux-dpdk/odp_queue_basic.c
+++ b/platform/linux-dpdk/odp_queue_basic.c
@@ -538,40 +538,21 @@ static int queue_enq(odp_queue_t handle, odp_event_t ev)
return queue->s.enqueue(queue, (odp_buffer_hdr_t *)(uintptr_t)ev);
}
-static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
- int num, int update_status)
+static inline int plain_queue_deq(queue_entry_t *queue,
+ odp_buffer_hdr_t *buf_hdr[], int num)
{
- int status_sync = sched_fn->status_sync;
int num_deq;
LOCK(queue);
if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
- /* Bad queue, or queue has been destroyed.
- * Scheduler finalizes queue destroy after this. */
+ /* Bad queue, or queue has been destroyed. */
UNLOCK(queue);
return -1;
}
num_deq = ring_st_deq_multi(queue->s.ring_st, (void **)buf_hdr, num);
- if (num_deq == 0) {
- /* Already empty queue */
- if (update_status && queue->s.status == QUEUE_STATUS_SCHED) {
- queue->s.status = QUEUE_STATUS_NOTSCHED;
-
- if (status_sync)
- sched_fn->unsched_queue(queue->s.index);
- }
-
- UNLOCK(queue);
-
- return 0;
- }
-
- if (status_sync && queue->s.type == ODP_QUEUE_TYPE_SCHED)
- sched_fn->save_context(queue->s.index);
-
UNLOCK(queue);
return num_deq;
@@ -582,7 +563,7 @@ static int queue_int_deq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
{
queue_entry_t *queue = q_int;
- return deq_multi(queue, buf_hdr, num, 0);
+ return plain_queue_deq(queue, buf_hdr, num);
}
static odp_buffer_hdr_t *queue_int_deq(void *q_int)
@@ -591,7 +572,7 @@ static odp_buffer_hdr_t *queue_int_deq(void *q_int)
odp_buffer_hdr_t *buf_hdr = NULL;
int ret;
- ret = deq_multi(queue, &buf_hdr, 1, 0);
+ ret = plain_queue_deq(queue, &buf_hdr, 1);
if (ret == 1)
return buf_hdr;
@@ -760,12 +741,47 @@ static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
return 0;
}
-int sched_cb_queue_deq_multi(uint32_t queue_index, odp_event_t ev[], int num,
- int update_status)
+int sched_cb_queue_deq_multi(uint32_t queue_index, odp_event_t ev[],
+ int max_num, int update_status)
{
- queue_entry_t *qe = qentry_from_index(queue_index);
+ int num_deq;
+ ring_st_t ring_st;
+ queue_entry_t *queue = qentry_from_index(queue_index);
+ int status_sync = sched_fn->status_sync;
+
+ ring_st = queue->s.ring_st;
- return deq_multi(qe, (odp_buffer_hdr_t **)ev, num, update_status);
+ LOCK(queue);
+
+ if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
+ /* Bad queue, or queue has been destroyed.
+ * Scheduler finalizes queue destroy after this. */
+ UNLOCK(queue);
+ return -1;
+ }
+
+ num_deq = ring_st_deq_multi(ring_st, (void **)ev, max_num);
+
+ if (num_deq == 0) {
+ /* Already empty queue */
+ if (update_status && queue->s.status == QUEUE_STATUS_SCHED) {
+ queue->s.status = QUEUE_STATUS_NOTSCHED;
+
+ if (status_sync)
+ sched_fn->unsched_queue(queue->s.index);
+ }
+
+ UNLOCK(queue);
+
+ return 0;
+ }
+
+ if (status_sync && queue->s.type == ODP_QUEUE_TYPE_SCHED)
+ sched_fn->save_context(queue->s.index);
+
+ UNLOCK(queue);
+
+ return num_deq;
}
int sched_cb_queue_empty(uint32_t queue_index)