2 // Copyright (c) 2010-2017 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
20 #include "task_init.h"
/* Control messages sent to a worker core: start/stop/reset the per-task
   RX/TX packet-distribution and bandwidth measurements.
   NOTE(review): this is a fragmentary view — the enum's opening line and
   some enumerators are not visible here. */
LCONF_MSG_RX_DISTR_START,
LCONF_MSG_RX_DISTR_STOP,
LCONF_MSG_RX_DISTR_RESET,
LCONF_MSG_TX_DISTR_START,
LCONF_MSG_TX_DISTR_STOP,
LCONF_MSG_TX_DISTR_RESET,
LCONF_MSG_RX_BW_START,
LCONF_MSG_TX_BW_START,
/* Set by master core (if not set), unset by worker after consumption. */
/* Which LCONF_MSG_* operation is being requested. */
enum lconf_msg_type type;
/* Bit flags for the lcore's flags word: which measurements are currently
   active on this core (toggled by the LCONF_MSG_* requests above) and
   whether the core's main loop is running. */
#define LCONF_FLAG_RX_DISTR_ACTIVE 0x00000001
#define LCONF_FLAG_RUNNING 0x00000002
#define LCONF_FLAG_TX_DISTR_ACTIVE 0x00000004
#define LCONF_FLAG_RX_BW_ACTIVE 0x00000008
#define LCONF_FLAG_TX_BW_ACTIVE 0x00000010
/* All tasks running at the moment. This is empty when the core is stopped. */
struct task_base *tasks_run[MAX_TASKS_PER_CORE];
/* Per-task TX flush hooks, invoked on low load (see lconf_flush_all_queues). */
void (*flush_queues[MAX_TASKS_PER_CORE])(struct task_base *tbase);
/* Optional periodic callback. */
void (*period_func)(void *data);
/* call period_func after period_timeout cycles */
uint64_t period_timeout;
/* Cycle budget between control-ring polls — TODO confirm against lconf_run. */
uint64_t ctrl_timeout;
/* Per-task control handlers: _m variants take opaque messages,
   _p variants take packets (rte_mbuf). Each pairs with its ring below. */
void (*ctrl_func_m[MAX_TASKS_PER_CORE])(struct task_base *tbase, void **data, uint16_t n_msgs);
struct rte_ring *ctrl_rings_m[MAX_TASKS_PER_CORE];
void (*ctrl_func_p[MAX_TASKS_PER_CORE])(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
struct rte_ring *ctrl_rings_p[MAX_TASKS_PER_CORE];
/* Master-to-worker message slot (see struct lconf_msg above). */
struct lconf_msg msg __attribute__((aligned(4)));
/* All configured tasks (running or not) and their running state. */
struct task_base *tasks_all[MAX_TASKS_PER_CORE];
int task_is_running[MAX_TASKS_PER_CORE];
/* Following variables are not accessed in main loop */
char name[MAX_NAME_SIZE];
struct task_args targs[MAX_TASKS_PER_CORE];
/* Thread entry function executing this core's main loop. */
int (*thread_x)(struct lcore_cfg *lconf);
} __rte_cache_aligned;
/* Global per-lcore configuration table (huge-page backed — see
   lcore_cfg_alloc_hp) and the early, pre-huge-page copy. */
extern struct lcore_cfg *lcore_cfg;
extern struct lcore_cfg lcore_cfg_init[];
94 /* This function is only run on low load (when no bulk was sent within
95 last drain_timeout (16kpps if DRAIN_TIMEOUT = 2 ms) */
96 static inline void lconf_flush_all_queues(struct lcore_cfg *lconf)
98 struct task_base *task;
100 for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
101 task = lconf->tasks_all[task_id];
102 if (!(task->flags & FLAG_TX_FLUSH) || (task->flags & FLAG_NEVER_FLUSH)) {
103 task->flags |= FLAG_TX_FLUSH;
106 lconf->flush_queues[task_id](task);
110 static inline void lconf_set_req(struct lcore_cfg *lconf)
112 (*(volatile uint32_t *)&lconf->msg.req) = 1;
115 static inline void lconf_unset_req(struct lcore_cfg *lconf)
117 (*(volatile uint32_t *)&lconf->msg.req) = 0;
120 static inline int lconf_is_req(struct lcore_cfg *lconf)
122 return (*(volatile uint32_t *)&lconf->msg.req);
/* Process this core's pending flags/messages.
   Returns non-zero when terminate has been requested */
int lconf_do_flags(struct lcore_cfg *lconf);
/* Presumably returns the index of task within lconf's task arrays —
   TODO confirm against the definition in lconf.c. */
int lconf_get_task_id(const struct lcore_cfg *lconf, const struct task_base *task);
/* Non-zero when the given task is currently running on this core. */
int lconf_task_is_running(const struct lcore_cfg *lconf, uint8_t task_id);
/* Worker-core main loop entry point (signature matches rte_eal thread
   launch convention — the argument is unused). */
int lconf_run(void *dummy);
/* Allocate the lcore_cfg table in huge-page memory (replaces the early
   non-huge-page lcore_cfg_init storage — see core_targ_next_early). */
void lcore_cfg_alloc_hp(void);
/* Returns the next active lconf/targ pair. If *lconf = NULL, the
   first active lconf/targ pair is returned. If the last lconf/targ
   pair is passed, the function returns non-zero. */
int core_targ_next(struct lcore_cfg **lconf, struct task_args **targ, const int with_master);
/* Same as above, but uses non-huge page memory (used before
   lcore_cfg_alloc_hp is called). */
int core_targ_next_early(struct lcore_cfg **lconf, struct task_args **targ, const int with_master);
/* Look up the task_args for a given (lcore, task) pair. */
struct task_args *core_targ_get(uint32_t lcore_id, uint32_t task_id);
145 #endif /* _LCONF_H_ */