DPDK 22.11.0-rc2
rte_lpm.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright(c) 2020 Arm Limited
 */

#ifndef _RTE_LPM_H_
#define _RTE_LPM_H_

#include <errno.h>
#include <stdint.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_vect.h>
#include <rte_rcu_qsbr.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Max number of characters in an LPM table name. */
#define RTE_LPM_NAMESIZE                32

/* Maximum depth (prefix length) supported for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH               32

/* Total number of tbl24 entries (one per /24 prefix). */
#define RTE_LPM_TBL24_NUM_ENTRIES       (1 << 24)

/* Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256

/* Maximum number of tbl8 groups. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS     (1 << 24)

#define RTE_LPM_TBL8_NUM_GROUPS         256

/* Total number of tbl8 entries across RTE_LPM_TBL8_NUM_GROUPS groups. */
#define RTE_LPM_TBL8_NUM_ENTRIES        (RTE_LPM_TBL8_NUM_GROUPS * \
					 RTE_LPM_TBL8_GROUP_NUM_ENTRIES)

/* Run-time argument checks, compiled in only with RTE_LIBRTE_LPM_DEBUG. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif

/* Bitmask covering the valid and valid_group flags of a table entry. */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000

/* Flag set in a lookup result to indicate a successful lookup. */
#define RTE_LPM_LOOKUP_SUCCESS          0x01000000

/* Default maximum number of RCU defer-queue entries reclaimed in one go. */
#define RTE_LPM_RCU_DQ_RECLAIM_MAX      16

/* RCU reclamation modes. */
enum rte_lpm_qsbr_mode {
	/* Create a defer queue for reclaim (default). */
	RTE_LPM_QSBR_MODE_DQ = 0,
	/* Blocking mode: reclaim synchronously, no defer queue. */
	RTE_LPM_QSBR_MODE_SYNC
};

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Tbl24/tbl8 entry structure. */
__extension__
struct rte_lpm_tbl_entry {
	/*
	 * Next hop, or, for a tbl24 entry with valid_group set, the index
	 * of the tbl8 group in which the lookup continues.
	 */
	uint32_t next_hop    :24;
	/* The following three bit-fields share a single byte. */
	uint32_t valid       :1;	/* Validation flag. */
	/*
	 * For tbl24: set when the entry points to a tbl8 group.
	 * For tbl8: set when the group is in use.
	 */
	uint32_t valid_group :1;
	uint32_t depth       :6;	/* Rule depth (prefix length). */
};

#else

/* Same entry with the bit-fields reversed for big-endian hosts. */
__extension__
struct rte_lpm_tbl_entry {
	uint32_t depth       :6;
	uint32_t valid_group :1;
	uint32_t valid       :1;
	uint32_t next_hop    :24;
};

#endif

/* LPM configuration structure. */
struct rte_lpm_config {
	uint32_t max_rules;	/* Max number of rules. */
	uint32_t number_tbl8s;	/* Number of tbl8 groups to allocate. */
	int flags;		/* Currently unused. */
};

/* LPM structure. */
struct rte_lpm {
	/* LPM Tables. */
	struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
			__rte_cache_aligned;	/* LPM tbl24 table. */
	struct rte_lpm_tbl_entry *tbl8;		/* LPM tbl8 table. */
};

/* LPM RCU QSBR configuration structure. */
struct rte_lpm_rcu_config {
	struct rte_rcu_qsbr *v;	/* RCU QSBR variable. */
	/* Mode of RCU QSBR (RTE_LPM_QSBR_MODE_xxx).
	 * '0' for default: create a defer queue for reclaim.
	 */
	enum rte_lpm_qsbr_mode mode;
	uint32_t dq_size;	/* RCU defer queue size.
				 * default: lpm->number_tbl8s.
				 */
	uint32_t reclaim_thd;	/* Threshold to trigger auto reclaim. */
	uint32_t reclaim_max;	/* Max entries to reclaim in one go.
				 * default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
				 */
};

struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config);
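
/*
 * Illustrative sketch, not part of this header: one way to create an LPM
 * table on the caller's NUMA socket. The table name and the rule/tbl8
 * counts are arbitrary example values; rte_socket_id() and rte_exit()
 * come from the EAL headers (<rte_lcore.h>, <rte_debug.h>).
 *
 *	struct rte_lpm_config lpm_cfg = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm",
 *			rte_socket_id(), &lpm_cfg);
 *	if (lpm == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create LPM table\n");
 */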

struct rte_lpm *
rte_lpm_find_existing(const char *name);

void
rte_lpm_free(struct rte_lpm *lpm);

__rte_experimental
int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg);
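
/*
 * Illustrative sketch, not part of this header: attaching an RCU QSBR
 * variable so that freed tbl8 groups are reclaimed safely. The reader
 * count and the mode are example choices; rte_rcu_qsbr_get_memsize() and
 * rte_rcu_qsbr_init() come from <rte_rcu_qsbr.h>, rte_zmalloc() from
 * <rte_malloc.h>.
 *
 *	uint32_t max_readers = 4;		// e.g. four lookup lcores
 *	size_t sz = rte_rcu_qsbr_get_memsize(max_readers);
 *	struct rte_rcu_qsbr *qsv = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *
 *	rte_rcu_qsbr_init(qsv, max_readers);
 *
 *	struct rte_lpm_rcu_config rcu_cfg = {
 *		.v = qsv,
 *		.mode = RTE_LPM_QSBR_MODE_DQ,	// defer-queue reclamation
 *	};
 *	if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot attach RCU to LPM\n");
 */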

int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
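
/*
 * Illustrative sketch, not part of this header: adding a /24 route. The
 * address is given in host byte order; RTE_IPV4() from <rte_ip.h> is used
 * only for readability, and the next-hop ID is an arbitrary example value.
 *
 *	int ret = rte_lpm_add(lpm, RTE_IPV4(192, 168, 10, 0), 24, 7);
 *	if (ret < 0)
 *		printf("rte_lpm_add failed: %d\n", ret);
 */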

int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop);

int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);

void
rte_lpm_delete_all(struct rte_lpm *lpm);
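
/*
 * Illustrative sketch, not part of this header: removing the rule added
 * above, then flushing and freeing the whole table during shutdown.
 *
 *	rte_lpm_delete(lpm, RTE_IPV4(192, 168, 10, 0), 24);
 *
 *	rte_lpm_delete_all(lpm);	// drop every remaining rule
 *	rte_lpm_free(lpm);		// release the table itself
 */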

static inline int
rte_lpm_lookup(const struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
{
	unsigned tbl24_index = (ip >> 8);
	uint32_t tbl_entry;
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

	/* Copy tbl24 entry. */
	ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
	tbl_entry = *ptbl;

	/* No memory ordering is required in the lookup: a data-flow
	 * dependency exists between the two loads, so neither the compiler
	 * nor the hardware can reorder them.
	 */

	/* Copy tbl8 entry (only if needed). */
	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

		unsigned tbl8_index = (uint8_t)ip +
				(((uint32_t)tbl_entry & 0x00FFFFFF) *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

		ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
		tbl_entry = *ptbl;
	}

	*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
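
/*
 * Illustrative sketch, not part of this header: resolving one destination
 * address taken from a packet header. ip_dst_be is assumed to hold the
 * address in network byte order (e.g. read from an rte_ipv4_hdr);
 * rte_be_to_cpu_32() converts it to the host order this function expects.
 * send_to() and drop_packet() stand in for application code.
 *
 *	uint32_t next_hop;
 *	uint32_t ip = rte_be_to_cpu_32(ip_dst_be);
 *
 *	if (rte_lpm_lookup(lpm, ip, &next_hop) == 0)
 *		send_to(next_hop);
 *	else
 *		drop_packet();
 */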

#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)

static inline int
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *next_hops, const unsigned n)
{
	unsigned i;
	unsigned tbl24_indexes[n];
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
			(next_hops == NULL)), -EINVAL);

	for (i = 0; i < n; i++) {
		tbl24_indexes[i] = ips[i] >> 8;
	}

	for (i = 0; i < n; i++) {
		/* Simply copy tbl24 entry to output. */
		ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
		next_hops[i] = *ptbl;

		/* Overwrite output with tbl8 entry if needed. */
		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

			unsigned tbl8_index = (uint8_t)ips[i] +
					(((uint32_t)next_hops[i] & 0x00FFFFFF) *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

			ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
			next_hops[i] = *ptbl;
		}
	}
	return 0;
}
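
/*
 * Illustrative sketch, not part of this header: looking up a burst of
 * addresses at once. Each result must still be checked individually,
 * since the bulk call only reports parameter errors; BURST_SIZE,
 * forward() and drop() are placeholders for application code.
 *
 *	uint32_t ips[BURST_SIZE];	// destination addresses, host order
 *	uint32_t hops[BURST_SIZE];
 *	unsigned int j;
 *
 *	rte_lpm_lookup_bulk(lpm, ips, hops, BURST_SIZE);
 *	for (j = 0; j < BURST_SIZE; j++) {
 *		if (hops[j] & RTE_LPM_LOOKUP_SUCCESS)
 *			forward(j, hops[j] & 0x00FFFFFF);
 *		else
 *			drop(j);
 *	}
 */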

/* Mask four results. */
#define RTE_LPM_MASKX4_RES	UINT64_C(0x00ffffff00ffffff)

/*
 * Look up four IP addresses in parallel. Elements of hop[] whose lookup
 * misses are set to defv. The implementation is provided by the
 * architecture-specific header included below.
 */
static inline void
rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
	uint32_t defv);

#if defined(RTE_ARCH_ARM)
#ifdef RTE_HAS_SVE_ACLE
#include "rte_lpm_sve.h"
#else
#include "rte_lpm_neon.h"
#endif
#elif defined(RTE_ARCH_PPC_64)
#include "rte_lpm_altivec.h"
#elif defined(RTE_ARCH_X86)
#include "rte_lpm_sse.h"
#else
#include "rte_lpm_scalar.h"
#endif

#ifdef __cplusplus
}
#endif

#endif /* _RTE_LPM_H_ */