DPDK 20.11.7
rte_ring_elem.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2019 Arm Limited
4  * Copyright (c) 2010-2017 Intel Corporation
5  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
6  * All rights reserved.
7  * Derived from FreeBSD's bufring.h
8  * Used as BSD-3 Licensed with permission from Kip Macy.
9  */
10 
11 #ifndef _RTE_RING_ELEM_H_
12 #define _RTE_RING_ELEM_H_
13 
19 #ifdef __cplusplus
20 extern "C" {
21 #endif
22 
23 #include <rte_ring_core.h>
24 
43 ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
44 
104 struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
105  unsigned int count, int socket_id, unsigned int flags);
106 
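A minimal creation sketch for the two entry points above (not part of this header). The element struct, ring name, count, and flags are illustrative assumptions; esize must be a multiple of 4 and count a power of two unless RING_F_EXACT_SZ is used.

#include <rte_lcore.h>
#include <rte_ring_elem.h>

/* Hypothetical 16-byte element type, for illustration only. */
struct pkt_meta {
	uint64_t addr;
	uint32_t len;
	uint32_t flags;
};

static struct rte_ring *
create_meta_ring(void)
{
	/* Optional: query the memory footprint such a ring would need. */
	ssize_t sz = rte_ring_get_memsize_elem(sizeof(struct pkt_meta), 1024);
	if (sz < 0)
		return NULL;

	/* Single-producer/single-consumer ring of 1024 16-byte elements. */
	return rte_ring_create_elem("meta_ring", sizeof(struct pkt_meta),
			1024, rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}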
107 static __rte_always_inline void
108 __rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
109  uint32_t idx, const void *obj_table, uint32_t n)
110 {
111  unsigned int i;
112  uint32_t *ring = (uint32_t *)&r[1];
113  const uint32_t *obj = (const uint32_t *)obj_table;
114  if (likely(idx + n <= size)) {
115  for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
116  ring[idx] = obj[i];
117  ring[idx + 1] = obj[i + 1];
118  ring[idx + 2] = obj[i + 2];
119  ring[idx + 3] = obj[i + 3];
120  ring[idx + 4] = obj[i + 4];
121  ring[idx + 5] = obj[i + 5];
122  ring[idx + 6] = obj[i + 6];
123  ring[idx + 7] = obj[i + 7];
124  }
125  switch (n & 0x7) {
126  case 7:
127  ring[idx++] = obj[i++]; /* fallthrough */
128  case 6:
129  ring[idx++] = obj[i++]; /* fallthrough */
130  case 5:
131  ring[idx++] = obj[i++]; /* fallthrough */
132  case 4:
133  ring[idx++] = obj[i++]; /* fallthrough */
134  case 3:
135  ring[idx++] = obj[i++]; /* fallthrough */
136  case 2:
137  ring[idx++] = obj[i++]; /* fallthrough */
138  case 1:
139  ring[idx++] = obj[i++]; /* fallthrough */
140  }
141  } else {
142  for (i = 0; idx < size; i++, idx++)
143  ring[idx] = obj[i];
144  /* Start at the beginning */
145  for (idx = 0; i < n; i++, idx++)
146  ring[idx] = obj[i];
147  }
148 }
149 
150 static __rte_always_inline void
151 __rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
152  const void *obj_table, uint32_t n)
153 {
154  unsigned int i;
155  const uint32_t size = r->size;
156  uint32_t idx = prod_head & r->mask;
157  uint64_t *ring = (uint64_t *)&r[1];
158  const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
159  if (likely(idx + n <= size)) {
160  for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
161  ring[idx] = obj[i];
162  ring[idx + 1] = obj[i + 1];
163  ring[idx + 2] = obj[i + 2];
164  ring[idx + 3] = obj[i + 3];
165  }
166  switch (n & 0x3) {
167  case 3:
168  ring[idx++] = obj[i++]; /* fallthrough */
169  case 2:
170  ring[idx++] = obj[i++]; /* fallthrough */
171  case 1:
172  ring[idx++] = obj[i++];
173  }
174  } else {
175  for (i = 0; idx < size; i++, idx++)
176  ring[idx] = obj[i];
177  /* Start at the beginning */
178  for (idx = 0; i < n; i++, idx++)
179  ring[idx] = obj[i];
180  }
181 }
182 
183 static __rte_always_inline void
184 __rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
185  const void *obj_table, uint32_t n)
186 {
187  unsigned int i;
188  const uint32_t size = r->size;
189  uint32_t idx = prod_head & r->mask;
190  rte_int128_t *ring = (rte_int128_t *)&r[1];
191  const rte_int128_t *obj = (const rte_int128_t *)obj_table;
192  if (likely(idx + n <= size)) {
193  for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
194  memcpy((void *)(ring + idx),
195  (const void *)(obj + i), 32);
196  switch (n & 0x1) {
197  case 1:
198  memcpy((void *)(ring + idx),
199  (const void *)(obj + i), 16);
200  }
201  } else {
202  for (i = 0; idx < size; i++, idx++)
203  memcpy((void *)(ring + idx),
204  (const void *)(obj + i), 16);
205  /* Start at the beginning */
206  for (idx = 0; i < n; i++, idx++)
207  memcpy((void *)(ring + idx),
208  (const void *)(obj + i), 16);
209  }
210 }
211 
212 /* the actual enqueue of elements on the ring.
213  * Placed here since identical code is needed in both
214  * single and multi producer enqueue functions.
215  */
216 static __rte_always_inline void
217 __rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
218  const void *obj_table, uint32_t esize, uint32_t num)
219 {
220  /* 8B and 16B copies implemented individually to retain
221  * the current performance.
222  */
223  if (esize == 8)
224  __rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
225  else if (esize == 16)
226  __rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
227  else {
228  uint32_t idx, scale, nr_idx, nr_num, nr_size;
229 
230  /* Normalize to uint32_t */
231  scale = esize / sizeof(uint32_t);
232  nr_num = num * scale;
233  idx = prod_head & r->mask;
234  nr_idx = idx * scale;
235  nr_size = r->size * scale;
236  __rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
237  obj_table, nr_num);
238  }
239 }
240 
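For element sizes other than 8 or 16 bytes, the dispatcher above normalizes everything to 32-bit words before calling __rte_ring_enqueue_elems_32. A worked illustration with hypothetical values (a 12-byte element on a ring of 1024 entries), not code from this header:

#include <stdint.h>

static void
normalization_example(void)
{
	uint32_t esize = 12;                        /* hypothetical element size in bytes */
	uint32_t num = 5;                           /* elements to enqueue                */
	uint32_t scale = esize / sizeof(uint32_t);  /* 3 words per element                */
	uint32_t nr_num = num * scale;              /* 15 words copied in total           */
	uint32_t nr_size = 1024 * scale;            /* 3072: ring size measured in words  */

	(void)nr_num;
	(void)nr_size;
}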
241 static __rte_always_inline void
242 __rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
243  uint32_t idx, void *obj_table, uint32_t n)
244 {
245  unsigned int i;
246  uint32_t *ring = (uint32_t *)&r[1];
247  uint32_t *obj = (uint32_t *)obj_table;
248  if (likely(idx + n <= size)) {
249  for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
250  obj[i] = ring[idx];
251  obj[i + 1] = ring[idx + 1];
252  obj[i + 2] = ring[idx + 2];
253  obj[i + 3] = ring[idx + 3];
254  obj[i + 4] = ring[idx + 4];
255  obj[i + 5] = ring[idx + 5];
256  obj[i + 6] = ring[idx + 6];
257  obj[i + 7] = ring[idx + 7];
258  }
259  switch (n & 0x7) {
260  case 7:
261  obj[i++] = ring[idx++]; /* fallthrough */
262  case 6:
263  obj[i++] = ring[idx++]; /* fallthrough */
264  case 5:
265  obj[i++] = ring[idx++]; /* fallthrough */
266  case 4:
267  obj[i++] = ring[idx++]; /* fallthrough */
268  case 3:
269  obj[i++] = ring[idx++]; /* fallthrough */
270  case 2:
271  obj[i++] = ring[idx++]; /* fallthrough */
272  case 1:
273  obj[i++] = ring[idx++]; /* fallthrough */
274  }
275  } else {
276  for (i = 0; idx < size; i++, idx++)
277  obj[i] = ring[idx];
278  /* Start at the beginning */
279  for (idx = 0; i < n; i++, idx++)
280  obj[i] = ring[idx];
281  }
282 }
283 
284 static __rte_always_inline void
285 __rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t cons_head,
286  void *obj_table, uint32_t n)
287 {
288  unsigned int i;
289  const uint32_t size = r->size;
290  uint32_t idx = cons_head & r->mask;
291  uint64_t *ring = (uint64_t *)&r[1];
292  unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
293  if (likely(idx + n <= size)) {
294  for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
295  obj[i] = ring[idx];
296  obj[i + 1] = ring[idx + 1];
297  obj[i + 2] = ring[idx + 2];
298  obj[i + 3] = ring[idx + 3];
299  }
300  switch (n & 0x3) {
301  case 3:
302  obj[i++] = ring[idx++]; /* fallthrough */
303  case 2:
304  obj[i++] = ring[idx++]; /* fallthrough */
305  case 1:
306  obj[i++] = ring[idx++]; /* fallthrough */
307  }
308  } else {
309  for (i = 0; idx < size; i++, idx++)
310  obj[i] = ring[idx];
311  /* Start at the beginning */
312  for (idx = 0; i < n; i++, idx++)
313  obj[i] = ring[idx];
314  }
315 }
316 
317 static __rte_always_inline void
318 __rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t cons_head,
319  void *obj_table, uint32_t n)
320 {
321  unsigned int i;
322  const uint32_t size = r->size;
323  uint32_t idx = cons_head & r->mask;
324  rte_int128_t *ring = (rte_int128_t *)&r[1];
325  rte_int128_t *obj = (rte_int128_t *)obj_table;
326  if (likely(idx + n <= size)) {
327  for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
328  memcpy((void *)(obj + i), (void *)(ring + idx), 32);
329  switch (n & 0x1) {
330  case 1:
331  memcpy((void *)(obj + i), (void *)(ring + idx), 16);
332  }
333  } else {
334  for (i = 0; idx < size; i++, idx++)
335  memcpy((void *)(obj + i), (void *)(ring + idx), 16);
336  /* Start at the beginning */
337  for (idx = 0; i < n; i++, idx++)
338  memcpy((void *)(obj + i), (void *)(ring + idx), 16);
339  }
340 }
341 
342 /* the actual dequeue of elements from the ring.
343  * Placed here since identical code is needed in both
344  * single and multi consumer dequeue functions.
345  */
346 static __rte_always_inline void
347 __rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
348  void *obj_table, uint32_t esize, uint32_t num)
349 {
350  /* 8B and 16B copies implemented individually to retain
351  * the current performance.
352  */
353  if (esize == 8)
354  __rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
355  else if (esize == 16)
356  __rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
357  else {
358  uint32_t idx, scale, nr_idx, nr_num, nr_size;
359 
360  /* Normalize to uint32_t */
361  scale = esize / sizeof(uint32_t);
362  nr_num = num * scale;
363  idx = cons_head & r->mask;
364  nr_idx = idx * scale;
365  nr_size = r->size * scale;
366  __rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
367  obj_table, nr_num);
368  }
369 }
370 
371 /* Between one load and the next, the CPU may reorder accesses on
372  * weakly ordered memory models (PowerPC/Arm).
373  * Users have two choices:
374  * 1. use an rmb() memory barrier
375  * 2. use one-direction load_acquire/store_release barriers
376  * Which is faster depends on performance test results.
377  * By default, the common functions are kept in rte_ring_generic.h.
378  */
379 #ifdef RTE_USE_C11_MEM_MODEL
380 #include "rte_ring_c11_mem.h"
381 #else
382 #include "rte_ring_generic.h"
383 #endif
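The two choices mentioned in the comment above can be sketched as follows. These helpers are illustrations of the ordering options (plain load plus read barrier versus acquire load), not the code contained in either included header.

#include <rte_atomic.h>
#include <rte_ring_core.h>

/* Option 1: plain load of the opposite tail followed by an explicit
 * read barrier (the style of the generic header).
 */
static inline uint32_t
load_tail_rmb(const struct rte_ring *r)
{
	uint32_t tail = r->cons.tail;
	rte_smp_rmb();
	return tail;
}

/* Option 2: one-direction acquire load (the C11 memory-model style). */
static inline uint32_t
load_tail_acquire(const struct rte_ring *r)
{
	return __atomic_load_n(&r->cons.tail, __ATOMIC_ACQUIRE);
}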
384 
409 static __rte_always_inline unsigned int
410 __rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
411  unsigned int esize, unsigned int n,
412  enum rte_ring_queue_behavior behavior, unsigned int is_sp,
413  unsigned int *free_space)
414 {
415  uint32_t prod_head, prod_next;
416  uint32_t free_entries;
417 
418  n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
419  &prod_head, &prod_next, &free_entries);
420  if (n == 0)
421  goto end;
422 
423  __rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);
424 
425  update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
426 end:
427  if (free_space != NULL)
428  *free_space = free_entries - n;
429  return n;
430 }
431 
456 static __rte_always_inline unsigned int
457 __rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
458  unsigned int esize, unsigned int n,
459  enum rte_ring_queue_behavior behavior, unsigned int is_sc,
460  unsigned int *available)
461 {
462  uint32_t cons_head, cons_next;
463  uint32_t entries;
464 
465  n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
466  &cons_head, &cons_next, &entries);
467  if (n == 0)
468  goto end;
469 
470  __rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);
471 
472  update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
473 
474 end:
475  if (available != NULL)
476  *available = entries - n;
477  return n;
478 }
479 
502 static __rte_always_inline unsigned int
503 rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
504  unsigned int esize, unsigned int n, unsigned int *free_space)
505 {
506  return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
507  RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
508 }
509 
531 static __rte_always_inline unsigned int
532 rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
533  unsigned int esize, unsigned int n, unsigned int *free_space)
534 {
535  return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
536  RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
537 }
538 
539 #ifdef ALLOW_EXPERIMENTAL_API
540 #include <rte_ring_hts.h>
541 #include <rte_ring_rts.h>
542 #endif
543 
567 static __rte_always_inline unsigned int
568 rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
569  unsigned int esize, unsigned int n, unsigned int *free_space)
570 {
571  switch (r->prod.sync_type) {
572  case RTE_RING_SYNC_MT:
573  return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
574  free_space);
575  case RTE_RING_SYNC_ST:
576  return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
577  free_space);
578 #ifdef ALLOW_EXPERIMENTAL_API
579  case RTE_RING_SYNC_MT_RTS:
580  return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
581  free_space);
582  case RTE_RING_SYNC_MT_HTS:
583  return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
584  free_space);
585 #endif
586  }
587 
588  /* valid ring should never reach this point */
589  RTE_ASSERT(0);
590  if (free_space != NULL)
591  *free_space = 0;
592  return 0;
593 }
594 
613 static __rte_always_inline int
614 rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
615 {
616  return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
617  -ENOBUFS;
618 }
619 
637 static __rte_always_inline int
638 rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
639 {
640  return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
641  -ENOBUFS;
642 }
643 
663 static __rte_always_inline int
664 rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
665 {
666  return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
667  -ENOBUFS;
668 }
669 
692 static __rte_always_inline unsigned int
693 rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
694  unsigned int esize, unsigned int n, unsigned int *available)
695 {
696  return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
697  RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
698 }
699 
720 static __rte_always_inline unsigned int
721 rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
722  unsigned int esize, unsigned int n, unsigned int *available)
723 {
724  return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
725  RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
726 }
727 
751 static __rte_always_inline unsigned int
752 rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
753  unsigned int esize, unsigned int n, unsigned int *available)
754 {
755  switch (r->cons.sync_type) {
756  case RTE_RING_SYNC_MT:
757  return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
758  available);
759  case RTE_RING_SYNC_ST:
760  return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
761  available);
762 #ifdef ALLOW_EXPERIMENTAL_API
763  case RTE_RING_SYNC_MT_RTS:
764  return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
765  n, available);
766  case RTE_RING_SYNC_MT_HTS:
767  return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
768  n, available);
769 #endif
770  }
771 
772  /* valid ring should never reach this point */
773  RTE_ASSERT(0);
774  if (available != NULL)
775  *available = 0;
776  return 0;
777 }
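Bulk calls are all-or-nothing: they move either the full count of elements or none, and return the number actually processed. A usage sketch with an illustrative element type and batch size:

#include <rte_ring_elem.h>

static int
move_batch(struct rte_ring *r)
{
	uint64_t src[8] = { 0 }, dst[8];

	/* Either all 8 elements are enqueued or none are. */
	if (rte_ring_enqueue_bulk_elem(r, src, sizeof(src[0]), 8, NULL) == 0)
		return -1;            /* not enough free space */

	/* Either all 8 elements are dequeued or none are. */
	if (rte_ring_dequeue_bulk_elem(r, dst, sizeof(dst[0]), 8, NULL) == 0)
		return -1;            /* fewer than 8 entries available */

	return 0;
}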
778 
798 static __rte_always_inline int
799 rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
800  unsigned int esize)
801 {
802  return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
803  -ENOENT;
804 }
805 
822 static __rte_always_inline int
823 rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
824  unsigned int esize)
825 {
826  return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
827  -ENOENT;
828 }
829 
850 static __rte_always_inline int
851 rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
852 {
853  return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
854  -ENOENT;
855 }
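A sketch of the single-element variants above; the element type, values, and error handling are illustrative.

#include <errno.h>
#include <rte_ring_elem.h>

/* Hypothetical 8-byte element. */
struct token {
	uint32_t id;
	uint32_t seq;
};

static int
pass_one_token(struct rte_ring *r)
{
	struct token in = { .id = 1, .seq = 42 }, out;

	if (rte_ring_enqueue_elem(r, &in, sizeof(in)) != 0)
		return -ENOBUFS;      /* ring full */
	if (rte_ring_dequeue_elem(r, &out, sizeof(out)) != 0)
		return -ENOENT;       /* ring empty */
	return 0;
}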
856 
879 static __rte_always_inline unsigned int
880 rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
881  unsigned int esize, unsigned int n, unsigned int *free_space)
882 {
883  return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
884  RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
885 }
886 
908 static __rte_always_inline unsigned int
909 rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
910  unsigned int esize, unsigned int n, unsigned int *free_space)
911 {
912  return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
913  RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
914 }
915 
939 static __rte_always_inline unsigned int
940 rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
941  unsigned int esize, unsigned int n, unsigned int *free_space)
942 {
943  switch (r->prod.sync_type) {
944  case RTE_RING_SYNC_MT:
945  return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
946  free_space);
947  case RTE_RING_SYNC_ST:
948  return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
949  free_space);
950 #ifdef ALLOW_EXPERIMENTAL_API
951  case RTE_RING_SYNC_MT_RTS:
952  return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
953  n, free_space);
954  case RTE_RING_SYNC_MT_HTS:
955  return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
956  n, free_space);
957 #endif
958  }
959 
960  /* valid ring should never reach this point */
961  RTE_ASSERT(0);
962  if (free_space != NULL)
963  *free_space = 0;
964  return 0;
965 }
966 
991 static __rte_always_inline unsigned int
992 rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
993  unsigned int esize, unsigned int n, unsigned int *available)
994 {
995  return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
996  RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
997 }
998 
1020 static __rte_always_inline unsigned int
1021 rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1022  unsigned int esize, unsigned int n, unsigned int *available)
1023 {
1024  return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
1025  RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
1026 }
1027 
1051 static __rte_always_inline unsigned int
1052 rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1053  unsigned int esize, unsigned int n, unsigned int *available)
1054 {
1055  switch (r->cons.sync_type) {
1056  case RTE_RING_SYNC_MT:
1057  return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
1058  available);
1059  case RTE_RING_SYNC_ST:
1060  return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
1061  available);
1062 #ifdef ALLOW_EXPERIMENTAL_API
1063  case RTE_RING_SYNC_MT_RTS:
1064  return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
1065  n, available);
1066  case RTE_RING_SYNC_MT_HTS:
1067  return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
1068  n, available);
1069 #endif
1070  }
1071 
1072  /* valid ring should never reach this point */
1073  RTE_ASSERT(0);
1074  if (available != NULL)
1075  *available = 0;
1076  return 0;
1077 }
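Burst calls may return fewer elements than requested. A typical drain loop, with an illustrative burst size and element type:

#include <rte_ring_elem.h>

#define BURST 32

static void
drain_ring(struct rte_ring *r)
{
	uint64_t objs[BURST];
	unsigned int n;

	/* Dequeue up to BURST elements per call until the ring is empty. */
	do {
		n = rte_ring_dequeue_burst_elem(r, objs, sizeof(objs[0]),
				BURST, NULL);
		/* ... process n elements from objs[] ... */
	} while (n != 0);
}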
1078 
1079 #ifdef ALLOW_EXPERIMENTAL_API
1080 #include <rte_ring_peek.h>
1081 #include <rte_ring_peek_zc.h>
1082 #endif
1083 
1084 #include <rte_ring.h>
1085 
1086 #ifdef __cplusplus
1087 }
1088 #endif
1089 
1090 #endif /* _RTE_RING_ELEM_H_ */