DPDK 23.11.0
rte_eventdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
/**< Event scheduling prioritization is based on the priority and weight of each event queue. */
#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
/**< Event scheduling prioritization is based on the priority of each event. */
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
/**< Scheduling is distributed across the dequeueing cores rather than done by a dedicated entity. */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
/**< An event queue can accept events of all schedule types. */
#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
/**< The device natively supports burst enqueue/dequeue operations. */
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
/**< Ports can be configured to disable the implicit release on dequeue. */
#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
/**< The device works in non-sequential mode; event ordering is not maintained across queues. */
#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
/**< Ports can be linked to/unlinked from queues while the device is running. */
#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
/**< Multiple queues can be linked to a single event port. */
#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
/**< The flow ID of an enqueued event is preserved in the dequeued event. */
#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
/**< Ports do not require rte_event_maintain() calls to make forward progress. */
#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
/**< Event queue attributes can be changed while the device is running. */
#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
/**< The device supports multiple link profiles per event port. */
/* Event device priority levels: lower numeric values indicate higher priority */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
#define RTE_EVENT_DEV_PRIORITY_NORMAL 128
#define RTE_EVENT_DEV_PRIORITY_LOWEST 255

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0

/* Get the total number of event devices available to this process. */
uint8_t
rte_event_dev_count(void);

/* Get the device identifier for the named event device. */
int
rte_event_dev_get_dev_id(const char *name);

/* Return the NUMA socket to which a device is connected. */
int
rte_event_dev_socket_id(uint8_t dev_id);

/* Event device information */
struct rte_event_dev_info {
	const char *driver_name;	/**< Event driver name. */
	struct rte_device *dev;	/**< Device information. */
	uint32_t min_dequeue_timeout_ns;	/**< Minimum global dequeue timeout (ns). */
	uint32_t max_dequeue_timeout_ns;	/**< Maximum global dequeue timeout (ns). */
	uint32_t dequeue_timeout_ns;	/**< Configured global dequeue timeout (ns). */
	uint8_t max_event_queues;	/**< Maximum number of event queues. */
	uint32_t max_event_queue_flows;	/**< Maximum flows per event queue. */
	uint8_t max_event_queue_priority_levels;	/**< Valid with RTE_EVENT_DEV_CAP_QUEUE_QOS. */
	uint8_t max_event_priority_levels;	/**< Valid with RTE_EVENT_DEV_CAP_EVENT_QOS. */
	uint8_t max_event_ports;	/**< Maximum number of event ports. */
	uint8_t max_event_port_dequeue_depth;	/**< Maximum dequeue burst size; 1 if bursts are unsupported. */
	uint32_t max_event_port_enqueue_depth;	/**< Maximum enqueue burst size; 1 if bursts are unsupported. */
	uint8_t max_event_port_links;	/**< Maximum queues that can be linked to one port. */
	int32_t max_num_events;	/**< In-flight event limit of a *closed system*; -1 for an *open system*. */
	uint32_t event_dev_cap;	/**< Event device capabilities (RTE_EVENT_DEV_CAP_*). */
	uint8_t max_single_link_event_port_queue_pairs;	/**< Port-queue pairs optimized for single-link use. */
	uint8_t max_profiles_per_port;	/**< Maximum link profiles per event port. */
};

/* Retrieve the capabilities and configuration limits of an event device. */
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
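
/*
 * Illustrative usage sketch (not part of the original header): probe for an
 * event device and read its capability limits. example_query_first_eventdev()
 * is a hypothetical helper; assumes <errno.h> is available for ENODEV.
 */
static inline int
example_query_first_eventdev(struct rte_event_dev_info *info)
{
	if (rte_event_dev_count() == 0)
		return -ENODEV; /* no event device probed */
	return rte_event_dev_info_get(0 /* dev_id */, info);
}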

/* Event device attribute identifiers */
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
/**< The number of ports configured on the device. */
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
/**< The number of queues configured on the device. */
#define RTE_EVENT_DEV_ATTR_STARTED 2
/**< Whether the device has been started (1) or not (0). */

/* Get a device attribute value. */
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		uint32_t *attr_value);


/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global dequeue timeout with a per-dequeue timeout argument. */

/* Event device configuration structure */
struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;	/**< Global dequeue timeout (ns). */
	int32_t nb_events_limit;	/**< In-flight event limit; -1 for an *open system*. */
	uint8_t nb_event_queues;	/**< Number of event queues to configure. */
	uint8_t nb_event_ports;	/**< Number of event ports to configure. */
	uint32_t nb_event_queue_flows;	/**< Number of flows per event queue. */
	uint32_t nb_event_port_dequeue_depth;	/**< Dequeue burst size per port. */
	uint32_t nb_event_port_enqueue_depth;	/**< Enqueue burst size per port. */
	uint32_t event_dev_cfg;	/**< Device configuration flags (RTE_EVENT_DEV_CFG_*). */
	uint8_t nb_single_link_event_port_queues;	/**< Ports/queues reserved for single-link use. */
};

/* Configure an event device; must be called before queue and port setup. */
int
rte_event_dev_configure(uint8_t dev_id,
		const struct rte_event_dev_config *dev_conf);

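/*
 * Illustrative configuration sketch (not part of the original header): derive
 * a configuration from the limits reported by rte_event_dev_info_get(). The
 * queue/port counts are example choices, not recommendations.
 */
static inline int
example_configure_eventdev(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};
	int ret = rte_event_dev_info_get(dev_id, &info);

	if (ret < 0)
		return ret;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queues = 2;	/* example choice */
	cfg.nb_event_ports = 2;		/* example choice */
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	return rte_event_dev_configure(dev_id, &cfg);
}
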
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
/**< Allow events of all schedule types on this queue; valid only on devices
 * with RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES.
 */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
/**< This queue links to exactly one event port. */

/* Event queue configuration structure */
struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;	/**< Atomic flows for RTE_SCHED_TYPE_ATOMIC queues. */
	uint32_t nb_atomic_order_sequences;	/**< Sequence slots for RTE_SCHED_TYPE_ORDERED queues. */
	uint32_t event_queue_cfg;	/**< Queue configuration flags (RTE_EVENT_QUEUE_CFG_*). */
	uint8_t schedule_type;	/**< Schedule type (RTE_SCHED_TYPE_*); ignored with CFG_ALL_TYPES. */
	uint8_t priority;	/**< Queue priority; valid with RTE_EVENT_DEV_CAP_QUEUE_QOS. */
	uint8_t weight;	/**< Queue weight; valid with RTE_EVENT_DEV_CAP_QUEUE_QOS. */
	uint8_t affinity;	/**< Queue affinity; valid with RTE_EVENT_DEV_CAP_QUEUE_QOS. */
};

/* Fill queue_conf with the default configuration for the given queue. */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
		struct rte_event_queue_conf *queue_conf);

/* Set up an event queue; must be called before rte_event_dev_start(). */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf);

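/*
 * Illustrative queue setup sketch (not part of the original header): take the
 * driver defaults and override the scheduling type before setup.
 */
static inline int
example_setup_atomic_queue(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf qconf;
	int ret = rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);

	if (ret < 0)
		return ret;
	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	return rte_event_queue_setup(dev_id, queue_id, &qconf);
}
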
/* Event queue attribute identifiers */
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

/* Get a queue attribute value. */
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint32_t *attr_value);

/* Set a queue attribute at runtime; requires RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR. */
int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint64_t attr_value);

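/*
 * Illustrative sketch (not part of the original header): raise a queue's
 * priority while the device is running; valid only on devices advertising
 * RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR.
 */
static inline int
example_boost_queue_priority(uint8_t dev_id, uint8_t queue_id)
{
	return rte_event_queue_attr_set(dev_id, queue_id,
			RTE_EVENT_QUEUE_ATTR_PRIORITY,
			RTE_EVENT_DEV_PRIORITY_HIGHEST);
}
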
/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)
/**< Disable implicit release of previously dequeued events. */
#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)
/**< This port links to exactly one event queue. */
#define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2)
/**< Hint that this port will primarily enqueue new events. */
#define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3)
/**< Hint that this port will primarily dequeue events. */
#define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4)
/**< Hint that this port will both dequeue and enqueue (forward) events. */

/* Event port configuration structure */
struct rte_event_port_conf {
	int32_t new_event_threshold;	/**< Backpressure threshold for RTE_EVENT_OP_NEW enqueues. */
	uint16_t dequeue_depth;	/**< Maximum dequeue burst for this port. */
	uint16_t enqueue_depth;	/**< Maximum enqueue burst for this port. */
	uint32_t event_port_cfg;	/**< Port configuration flags (RTE_EVENT_PORT_CFG_*). */
};

/* Fill port_conf with the default configuration for the given port. */
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
		struct rte_event_port_conf *port_conf);

/* Set up an event port; must be called before rte_event_dev_start(). */
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		const struct rte_event_port_conf *port_conf);

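/*
 * Illustrative port setup sketch (not part of the original header): start
 * from the driver defaults and add a worker hint.
 */
static inline int
example_setup_worker_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf pconf;
	int ret = rte_event_port_default_conf_get(dev_id, port_id, &pconf);

	if (ret < 0)
		return ret;
	pconf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;
	return rte_event_port_setup(dev_id, port_id, &pconf);
}
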
/* Callback invoked for each event flushed from a port being quiesced. */
typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);

/* Release all unprocessed and in-flight events from the given port. */
void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		rte_eventdev_port_flush_t release_cb, void *args);

/* Event port attribute identifiers */
#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

/* Get a port attribute value. */
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
		uint32_t *attr_value);

/* Start an event device; all queues and ports must be set up first. */
int
rte_event_dev_start(uint8_t dev_id);

/* Stop an event device; queued events are flushed via the registered callback, if any. */
void
rte_event_dev_stop(uint8_t dev_id);

/* Callback invoked for each event flushed from the device on stop. */
typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);

/* Register (or, with a NULL callback, unregister) a device stop flush callback. */
int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		rte_eventdev_stop_flush_t callback, void *userdata);

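/*
 * Illustrative sketch (not part of the original header): a stop-flush callback
 * that frees mbuf-carrying events left inside the device when it is stopped.
 * Assumes the application also includes <rte_mbuf.h> for rte_pktmbuf_free().
 */
static inline void
example_stop_flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(arg);
	if (event.event_type == RTE_EVENT_TYPE_ETHDEV)
		rte_pktmbuf_free(event.mbuf);
}

static inline int
example_register_stop_flush(uint8_t dev_id)
{
	return rte_event_dev_stop_flush_callback_register(dev_id,
			example_stop_flush_cb, NULL);
}
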
/* Close a stopped event device and release its resources. */
int
rte_event_dev_close(uint8_t dev_id);

/* Event vector structure */
struct rte_event_vector {
	uint16_t nb_elem;	/**< Number of valid elements in this vector. */
	uint16_t elem_offset : 12;	/**< Offset into the vector array where valid elements begin. */
	uint16_t rsvd : 3;	/**< Reserved for future use. */
	uint16_t attr_valid : 1;	/**< Indicates that the union attributes below hold valid values. */
	union {
		/* Used by the Rx/Tx adapters.
		 * When originating from the Rx adapter, indicates that all the
		 * elements in this vector belong to the same port and queue
		 * pair; valid only when the event type is ETHDEV_VECTOR or
		 * ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to tell the Tx adapter the destination port
		 * and queue of the mbufs in the vector.
		 */
		struct {
			uint16_t port;	/* Ethernet device port id. */
			uint16_t queue;	/* Ethernet device queue id. */
		};
	};
	uint64_t impl_opaque;	/**< Implementation-specific opaque value. */

/* Empty structures do not have zero size in C++, leading to compilation errors
 * with clang about the structure having different sizes in C and C++.
 * Since these are all zero-sized arrays, we can omit the "union" wrapper for
 * C++ builds, removing the warning.
 */
#ifndef __cplusplus
	union {
#endif
		struct rte_mbuf *mbufs[0];
		void *ptrs[0];
		uint64_t u64s[0];
#ifndef __cplusplus
	} __rte_aligned(16);
#endif
} __rte_aligned(16);

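/*
 * Illustrative sketch (not part of the original header): walk the valid mbufs
 * of an event vector, honouring elem_offset and nb_elem. Assumes the
 * application also includes <rte_mbuf.h> for rte_pktmbuf_free().
 */
static inline void
example_free_vector_mbufs(struct rte_event_vector *vec)
{
	uint16_t i;

	for (i = 0; i < vec->nb_elem; i++)
		rte_pktmbuf_free(vec->mbufs[vec->elem_offset + i]);
}
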
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0
/**< Flows are processed in parallel; the original enqueue order is restored
 * when the events are forwarded to the next queue.
 */
#define RTE_SCHED_TYPE_ATOMIC 1
/**< Events of a given flow are scheduled to only one port at a time. */
#define RTE_SCHED_TYPE_PARALLEL 2
/**< No ordering or atomicity guarantees; maximum parallelism. */

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0
#define RTE_EVENT_TYPE_CRYPTODEV 0x1
#define RTE_EVENT_TYPE_TIMER 0x2
#define RTE_EVENT_TYPE_CPU 0x3
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
#define RTE_EVENT_TYPE_DMADEV 0x5
#define RTE_EVENT_TYPE_VECTOR 0x8
/**< Flag marking vector event types; combined with the base types below. */
#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
#define RTE_EVENT_TYPE_MAX 0x10

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0
/**< Inject a new event into the device. */
#define RTE_EVENT_OP_FORWARD 1
/**< Re-enqueue a previously dequeued event. */
#define RTE_EVENT_OP_RELEASE 2
/**< Release the scheduling context of the last dequeued event. */

struct rte_event {
	/* WORD0 */
	union {
		uint64_t event;	/**< All event metadata as a single 64-bit word. */
		struct {
			uint32_t flow_id:20;	/**< Target flow identifier. */
			uint32_t sub_event_type:8;	/**< Application-defined sub event type. */
			uint32_t event_type:4;	/**< Event source type (RTE_EVENT_TYPE_*). */
			uint8_t op:2;	/**< Enqueue operation (RTE_EVENT_OP_*). */
			uint8_t rsvd:4;	/**< Reserved for future use. */
			uint8_t sched_type:2;	/**< Schedule type (RTE_SCHED_TYPE_*). */
			uint8_t queue_id;	/**< Target event queue. */
			uint8_t priority;	/**< Event priority (RTE_EVENT_DEV_PRIORITY_*). */
			uint8_t impl_opaque;	/**< Implementation-specific opaque value. */
		};
	};
	/* WORD1 */
	union {
		uint64_t u64;	/**< Opaque 64-bit payload. */
		void *event_ptr;	/**< Opaque event pointer. */
		struct rte_mbuf *mbuf;	/**< mbuf payload. */
		struct rte_event_vector *vec;	/**< Event vector payload. */
	};
};

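/*
 * Illustrative sketch (not part of the original header): populate a new
 * CPU-originated event carrying an mbuf, ready for rte_event_enqueue_burst().
 */
static inline void
example_make_event(struct rte_event *ev, struct rte_mbuf *m,
		uint8_t queue_id, uint32_t flow_id)
{
	ev->event = 0;	/* clear all WORD0 metadata, including impl_opaque */
	ev->flow_id = flow_id;
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->queue_id = queue_id;
	ev->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev->mbuf = m;
}
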
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
/**< Packets reach the eventdev via an internal port, without a service core. */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
/**< Each ethdev Rx queue can be connected to a distinct event queue. */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
/**< The adapter can override the event flow ID with a configured value. */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8
/**< The adapter can aggregate packets into event vectors. */

/* Query Rx adapter capabilities for an eventdev/ethdev pairing. */
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

/* Timer adapter capability bitmap flags */
#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
/**< Timer expiry events reach the eventdev without a service core. */
#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)
/**< The adapter supports periodic timer mode. */

/* Query timer adapter capabilities of an event device. */
int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flags */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
/**< The cryptodev itself enqueues completions as RTE_EVENT_OP_NEW events. */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
/**< The cryptodev itself enqueues completions as RTE_EVENT_OP_FORWARD events. */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
/**< Event metadata can be bound per cryptodev queue pair. */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
/**< Event metadata is carried in crypto session private data. */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10
/**< The adapter can aggregate crypto completions into event vectors. */

/* Query crypto adapter capabilities for an eventdev/cryptodev pairing. */
int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
		uint32_t *caps);

/* DMA adapter capability bitmap flags */
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
/**< The dmadev itself enqueues completions as RTE_EVENT_OP_NEW events. */
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
/**< The dmadev itself enqueues completions as RTE_EVENT_OP_FORWARD events. */
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
/**< Event metadata can be bound per dmadev virtual channel. */

/* Query DMA adapter capabilities for an eventdev/dmadev pairing. */
__rte_experimental
int
rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
/**< The adapter transmits packets without a service core. */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2
/**< The adapter can handle event vectors of mbufs. */

/* Query Tx adapter capabilities for an eventdev/ethdev pairing. */
int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

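/*
 * Illustrative sketch (not part of the original header): check whether an
 * eventdev/ethdev Rx pairing offers an internal port, i.e. needs no service
 * core to move packets into the event device.
 */
static inline int
example_rx_adapter_has_internal_port(uint8_t dev_id, uint16_t eth_port_id)
{
	uint32_t caps = 0;
	int ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps);

	if (ret < 0)
		return ret;
	return !!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
}
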
/* Convert a dequeue wait time in nanoseconds to device-specific timeout ticks. */
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
		uint64_t *timeout_ticks);

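/*
 * Illustrative sketch (not part of the original header): convert a 100 us
 * wait into device ticks for use as the timeout of rte_event_dequeue_burst().
 */
static inline int
example_dequeue_timeout_100us(uint8_t dev_id, uint64_t *ticks)
{
	return rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000 /* ns */, ticks);
}
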
/* Link queues to a port with per-link service priorities; returns the number
 * of links actually established.
 */
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		const uint8_t queues[], const uint8_t priorities[],
		uint16_t nb_links);

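/*
 * Illustrative sketch (not part of the original header): link one worker port
 * to two queues at normal service priority. The queue IDs are example choices.
 */
static inline int
example_link_worker_port(uint8_t dev_id, uint8_t port_id)
{
	const uint8_t queues[] = {0, 1};
	const uint8_t priorities[] = {RTE_EVENT_DEV_PRIORITY_NORMAL,
			RTE_EVENT_DEV_PRIORITY_NORMAL};

	return rte_event_port_link(dev_id, port_id, queues, priorities, 2);
}
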
/* Unlink queues from a port; returns the number of unlinks performed. */
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint16_t nb_unlinks);

/* Link queues to a port on a specific link profile. */
__rte_experimental
int
rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);

/* Unlink queues from a port on a specific link profile. */
__rte_experimental
int
rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint16_t nb_unlinks, uint8_t profile_id);

/* Return the number of unlink operations still in progress on a port. */
int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);

/* Retrieve the current links of a port; returns the number of links. */
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint8_t priorities[]);

/* Retrieve the links of a port for a specific link profile. */
__rte_experimental
int
rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint8_t priorities[], uint8_t profile_id);

/* Get the service ID of the device, if it uses a software service for scheduling. */
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

/* Dump internal device state to the given file for debugging. */
int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

/* Maximum length of an extended statistic name. */
#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

/* Selects the component of the device to retrieve statistics from. */
enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,
	RTE_EVENT_DEV_XSTATS_PORT,
	RTE_EVENT_DEV_XSTATS_QUEUE,
};

/* A name-bearing handle for a single extended statistic. */
struct rte_event_dev_xstats_name {
	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

/* Retrieve the names and IDs of the available extended statistics. */
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		uint64_t *ids,
		unsigned int size);

/* Retrieve the values of the given extended statistics. */
int
rte_event_dev_xstats_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		const uint64_t ids[],
		uint64_t values[], unsigned int n);

/* Look up a single extended statistic by name; optionally returns its ID. */
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		uint64_t *id);

/* Reset the given extended statistics, or all of them if ids is NULL. */
int
rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint64_t ids[],
		uint32_t nb_ids);

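/*
 * Illustrative sketch (not part of the original header): read one extended
 * statistic by name. Statistic names are driver-specific; "dev_rx" here is
 * only a placeholder, and an unknown name reads back as 0.
 */
static inline uint64_t
example_read_xstat(uint8_t dev_id)
{
	uint64_t id;

	return rte_event_dev_xstats_by_name_get(dev_id, "dev_rx", &id);
}
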
/* Trigger the eventdev driver self-test. */
int rte_event_dev_selftest(uint8_t dev_id);

/* Create a mempool of rte_event_vector objects, each holding up to nb_elem entries. */
struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
		unsigned int cache_size, uint16_t nb_elem,
		int socket_id);

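/*
 * Illustrative sketch (not part of the original header): create a pool of
 * event vectors, each able to carry up to 32 mbuf pointers. The pool name and
 * sizes are example choices.
 */
static inline struct rte_mempool *
example_create_vector_pool(int socket_id)
{
	return rte_event_vector_pool_create("example_vec_pool", 16 * 1024,
			128 /* cache_size */, 32 /* nb_elem */, socket_id);
}
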
#include <rte_eventdev_core.h>

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events,
		const event_enqueue_burst_t fn)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
	/*
	 * Allow zero-cost dispatch to the non-burst routine when the
	 * application passes nb_events as the compile-time constant 1.
	 */
	if (nb_events == 1)
		return (fp_ops->enqueue)(port, ev);
	else
		return fn(port, ev, nb_events);
}

/* Enqueue a burst of events of any op type on an event port. */
static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_burst);
}

/* Enqueue a burst of events, all of op type RTE_EVENT_OP_NEW. */
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_new_burst);
}

/* Enqueue a burst of events, all of op type RTE_EVENT_OP_FORWARD. */
static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_forward_burst);
}

/* Dequeue a burst of events from an event port, waiting up to timeout_ticks. */
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
	/*
	 * Allow zero-cost dispatch to the non-burst routine when the
	 * application passes nb_events as the compile-time constant 1.
	 */
	if (nb_events == 1)
		return (fp_ops->dequeue)(port, ev, timeout_ticks);
	else
		return (fp_ops->dequeue_burst)(port, ev, nb_events,
				timeout_ticks);
}

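/*
 * Illustrative worker-loop body sketch (not part of the original header):
 * dequeue a burst, let a caller-supplied example_process() handle each event,
 * then forward the burst to next_queue. A real application would retry any
 * events the enqueue did not accept. rte_event_maintain() is a no-op on
 * RTE_EVENT_DEV_CAP_MAINTENANCE_FREE devices.
 */
static inline void
example_worker_iteration(uint8_t dev_id, uint8_t port_id, uint8_t next_queue,
		void (*example_process)(struct rte_event *))
{
	struct rte_event ev[32];
	uint16_t i, n;

	n = rte_event_dequeue_burst(dev_id, port_id, ev, RTE_DIM(ev), 0);
	for (i = 0; i < n; i++) {
		example_process(&ev[i]);
		ev[i].op = RTE_EVENT_OP_FORWARD;
		ev[i].queue_id = next_queue;
	}
	if (n > 0)
		rte_event_enqueue_forward_burst(dev_id, port_id, ev, n);
	rte_event_maintain(dev_id, port_id, 0);
}
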
#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)
/**< Force an immediate flush of any buffered events. */

/* Perform port maintenance; required on devices lacking
 * RTE_EVENT_DEV_CAP_MAINTENANCE_FREE when a port goes idle.
 */
static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
		return -EINVAL;
#endif
	rte_eventdev_trace_maintain(dev_id, port_id, op);

	if (fp_ops->maintain != NULL)
		fp_ops->maintain(port, op);

	return 0;
}

/* Switch the active link profile on an event port. */
static inline uint8_t
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

	return fp_ops->profile_switch(port, profile_id);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */