#ifndef __VSF_THREAD_H__
#define __VSF_THREAD_H__

#if VSF_KERNEL_CFG_SUPPORT_THREAD == ENABLED && VSF_USE_KERNEL == ENABLED
#include "../vsf_eda.h"

#if VSF_KERNEL_CFG_EDA_SUPPORT_TASK == ENABLED
#if     defined(__VSF_THREAD_CLASS_IMPLEMENT)
#   undef __VSF_THREAD_CLASS_IMPLEMENT
#   define __PLOOC_CLASS_IMPLEMENT__
#elif   defined(__VSF_THREAD_CLASS_INHERIT__)
#   undef __VSF_THREAD_CLASS_INHERIT__
#   define __PLOOC_CLASS_INHERIT__
#endif
#ifndef VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE
#   define VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE        1
#endif
#ifndef VSF_KERNEL_CFG_THREAD_STACK_GUARDIAN_SIZE
#   define VSF_KERNEL_CFG_THREAD_STACK_GUARDIAN_SIZE    0
#endif

#ifndef VSF_KERNEL_CFG_THREAD_STACK_ALIGN_BIT
#   ifdef VSF_ARCH_STACK_ALIGN_BIT
#       define VSF_KERNEL_CFG_THREAD_STACK_ALIGN_BIT    VSF_ARCH_STACK_ALIGN_BIT
#   else
#       define VSF_KERNEL_CFG_THREAD_STACK_ALIGN_BIT    3
#   endif
#endif
#define __VSF_THREAD_STACK_SAFE_SIZE(__stack)                                  \
            (   (   (   ((__stack) + VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE - 1)\
                    /   VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE)                 \
                *   VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE)                     \
            +   VSF_KERNEL_CFG_THREAD_STACK_GUARDIAN_SIZE)
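
/*
 * Illustrative note: __VSF_THREAD_STACK_SAFE_SIZE() rounds the requested stack
 * size up to a whole number of stack pages and then adds the guardian area.
 * Assuming, purely as an example, VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE == 512
 * and VSF_KERNEL_CFG_THREAD_STACK_GUARDIAN_SIZE == 64, a request for 1000
 * bytes yields ((1000 + 511) / 512) * 512 + 64 == 1088. With the defaults
 * above (page size 1, guardian 0) the macro leaves the size unchanged.
 */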
#ifdef VSF_ARCH_LIMIT_NO_SET_STACK
#   define VSF_KERNEL_THREAD_USE_HOST               ENABLED
#   ifdef VSF_ARCH_RTOS_DYNAMIC_STACK
#       define VSF_KERNEL_THREAD_DYNAMIC_STACK      ENABLED
#       if VSF_KERNEL_CFG_THREAD_STACK_CHECK == ENABLED
#           warning VSF_KERNEL_CFG_THREAD_STACK_CHECK is not usable for dynamic stack
#           undef VSF_KERNEL_CFG_THREAD_STACK_CHECK
#           define VSF_KERNEL_CFG_THREAD_STACK_CHECK    DISABLED
#       endif
#   endif
#endif
#define __declare_vsf_thread(__name)                                           \
            typedef struct __name __name;                                       \
            typedef struct thread_cb_##__name##_t thread_cb_##__name##_t;
#define declare_vsf_thread(__name)          __declare_vsf_thread(__name)
#define declare_vsf_thread_ex(__name)       __declare_vsf_thread(__name)

#define dcl_vsf_thread(__name)              declare_vsf_thread(__name)
#define dcl_vsf_thread_ex(__name)           declare_vsf_thread(__name)
#if VSF_KERNEL_THREAD_DYNAMIC_STACK == ENABLED
#   define __vsf_thread_set_stack_canery(__thread, __task)                     \
            (__thread)->canary = 0xDEADBEEF;
#else
#   define __vsf_thread_set_stack_canery(__thread, __task)                     \
            (__task)->canary = 0xDEADBEEF;
#endif
#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
#   define __vsf_thread_set_stack(__thread, __task, __stack_ptr, __stack_bytesize)\
            (__thread)->use_as__vsf_thread_cb_t.stack = (__stack_ptr);         \
            (__thread)->use_as__vsf_thread_cb_t.stack_size = (__stack_bytesize);\
            __vsf_thread_set_stack_canery(__thread, __task)
#else
#   define __vsf_thread_set_stack(__thread, __task, __stack_ptr, __stack_bytesize)\
            (__thread)->stack = (__stack_ptr);                                  \
            (__thread)->stack_size = (__stack_bytesize);                        \
            __vsf_thread_set_stack_canery(__thread, __task)
#endif
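
/*
 * __vsf_thread_set_stack() records the stack base and size in the thread
 * control block (directly in vsf_thread_t when sub-call support is disabled)
 * and plants the 0xDEADBEEF canary via __vsf_thread_set_stack_canery(), which
 * the optional stack-check facility (VSF_KERNEL_CFG_THREAD_STACK_CHECK) can
 * later inspect.
 */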
#if VSF_KERNEL_THREAD_DYNAMIC_STACK == ENABLED
#   define __vsf_thread_def_stack(__name, __bytesize)                          \
            enum {                                                              \
                vsf_thread##__name##_stack_bytesize = (__bytesize),             \
            };
#   define __vsf_thread_def_stack_member(__name, __bytesize)
#   define __vsf_thread_imp_stack(__name, __thread, __task)                    \
            __vsf_thread_set_stack((__thread), (__task), NULL, (vsf_thread##__name##_stack_bytesize))
#   define __vsf_eda_call_thread_prepare_stack(__name, __thread)               \
            .stack_size = (vsf_thread##__name##_stack_bytesize),
#else
#   define __vsf_thread_def_stack(__name, __bytesize)
#   define __vsf_thread_def_stack_member(__name, __bytesize)                   \
            uint64_t stack_arr[(__VSF_THREAD_STACK_SAFE_SIZE(__bytesize) + 7) / 8]\
                VSF_CAL_ALIGN(1 << VSF_KERNEL_CFG_THREAD_STACK_ALIGN_BIT);
#   define __vsf_thread_imp_stack(__name, __thread, __task)                    \
            __vsf_thread_set_stack((__thread), (__task), (__task)->stack_arr, sizeof((__task)->stack_arr))
#   define __vsf_eda_call_thread_prepare_stack(__name, __thread)               \
            .stack = (__thread)->stack_arr,                                     \
            .stack_size = sizeof((__thread)->stack_arr),
#endif
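
/*
 * With VSF_KERNEL_THREAD_DYNAMIC_STACK enabled the thread macros only record
 * the requested stack size and pass a NULL stack pointer, leaving the actual
 * allocation to the hosting (RTOS) port; otherwise the stack is an 8-byte
 * aligned array embedded in the task structure, sized through
 * __VSF_THREAD_STACK_SAFE_SIZE().
 */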
#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
#   define __def_vsf_thread(__name, __stack_bytesize, ...)                     \
            __vsf_thread_def_stack(__name, (__stack_bytesize))                  \
            struct thread_cb_##__name##_t {                                     \
                implement(vsf_thread_cb_t)                                      \
                __VA_ARGS__                                                     \
                __vsf_thread_def_stack_member(__name, (__stack_bytesize))       \
            };                                                                  \
            struct __name {                                                     \
                implement(vsf_thread_t)                                         \
                implement_ex(thread_cb_##__name##_t, param)                     \
            } VSF_CAL_ALIGN(8);                                                 \
            extern void vsf_thread_##__name##_start(struct __name *task,       \
                                                    vsf_prio_t priority);      \
            extern void vsf_thread_##__name##_entry(                           \
                                        struct thread_cb_##__name##_t *vsf_pthis);

#   define __implement_vsf_thread(__name)                                      \
            void vsf_thread_##__name##_entry(                                   \
                                        struct thread_cb_##__name##_t *vsf_pthis);\
            void vsf_thread_##__name##_start(   struct __name *task,            \
                                                vsf_prio_t priority)            \
            {                                                                    \
                VSF_KERNEL_ASSERT(NULL != task);                                 \
                thread_cb_##__name##_t *__vsf_pthis = &(task->param);            \
                __vsf_pthis->use_as__vsf_thread_cb_t.entry = (vsf_thread_entry_t *)\
                                        &vsf_thread_##__name##_entry;            \
                __vsf_thread_imp_stack(__name, __vsf_pthis, task)                \
                vsf_thread_start(   &(task->use_as__vsf_thread_t),               \
                                    &(__vsf_pthis->use_as__vsf_thread_cb_t),     \
                                    priority);                                   \
            }                                                                    \
            void vsf_thread_##__name##_entry(                                   \
                                        struct thread_cb_##__name##_t *vsf_pthis)

#   define __vsf_eda_call_thread_prepare(__name, __thread_cb)                  \
            do {                                                                 \
                thread_cb_##__name##_t *__vsf_pthis = (__thread_cb);             \
                const vsf_thread_prepare_cfg_t cfg = {                           \
                    .entry = (vsf_thread_entry_t *)                              \
                                        &vsf_thread_##__name##_entry,            \
                    __vsf_eda_call_thread_prepare_stack(__name, (__thread_cb))   \
                };                                                               \
                vk_eda_call_thread_prepare(&(__vsf_pthis->use_as__vsf_thread_cb_t),\
                                            (vsf_thread_prepare_cfg_t *)&cfg);   \
            } while (0)

#   define vsf_eda_call_thread_prepare(__name, __thread_cb)                    \
            __vsf_eda_call_thread_prepare(__name, __thread_cb)

#   define vsf_eda_call_thread(__thread_cb)                                    \
            vk_eda_call_thread(&((__thread_cb)->use_as__vsf_thread_cb_t))
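
/*
 * Example (illustrative sketch; user_thread_t and __user_thread_cb are
 * placeholder names): an event-driven task can run a thread as a sub-call by
 * preparing the thread control block and then issuing the call:
 *
 *      static vsf_thread(user_thread_t) __user_thread_cb;
 *
 *      // inside an eda event handler
 *      vsf_eda_call_thread_prepare(user_thread_t, &__user_thread_cb);
 *      vsf_eda_call_thread(&__user_thread_cb);
 *
 * Both wrappers expand to the vk_eda_call_thread_prepare()/vk_eda_call_thread()
 * primitives declared later in this header.
 */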
#   define __def_vsf_thread_ex(__name, ...)                                    \
            struct thread_cb_##__name##_t {                                     \
                implement(vsf_thread_cb_t)                                      \
                __VA_ARGS__                                                     \
            };                                                                  \
            struct __name {                                                     \
                implement(vsf_thread_t)                                         \
                implement_ex(thread_cb_##__name##_t, param)                     \
            } VSF_CAL_ALIGN(8);                                                 \
            extern void vsf_thread_##__name##_start(struct __name *task,       \
                                                    vsf_prio_t priority,        \
                                                    uint64_t *stack,            \
                                                    uint_fast32_t size);        \
            extern void vsf_thread_##__name##_entry(                           \
                                        struct thread_cb_##__name##_t *vsf_pthis);

#   define __implement_vsf_thread_ex(__name)                                   \
            void vsf_thread_##__name##_entry(                                   \
                                        struct thread_cb_##__name##_t *vsf_pthis);\
            void vsf_thread_##__name##_start(   struct __name *task,            \
                                                vsf_prio_t priority,             \
                                                uint64_t *stack,                 \
                                                uint_fast32_t size)              \
            {                                                                    \
                VSF_KERNEL_ASSERT(NULL != task && 0 != size && NULL != stack);   \
                thread_cb_##__name##_t *__vsf_pthis = &(task->param);            \
                __vsf_pthis->use_as__vsf_thread_cb_t.entry = (vsf_thread_entry_t *)\
                                        &vsf_thread_##__name##_entry;            \
                __vsf_pthis->use_as__vsf_thread_cb_t.stack = stack;              \
                __vsf_pthis->use_as__vsf_thread_cb_t.stack_size = size;          \
                vsf_thread_start(   &(task->use_as__vsf_thread_t),               \
                                    &(task->param.use_as__vsf_thread_cb_t),      \
                                    priority);                                   \
            }                                                                    \
            void vsf_thread_##__name##_entry(                                   \
                                        struct thread_cb_##__name##_t *vsf_pthis)
#   define __vsf_eda_call_thread_prepare_ex__(  __name,                        \
                                                __thread_cb,                    \
                                                __stack,                        \
                                                __stack_bytesize)               \
            do {                                                                 \
                VSF_KERNEL_ASSERT((NULL != (__stack)) && (0 != (__stack_bytesize)));\
                thread_cb_##__name##_t *__vsf_pthis = (__thread_cb);             \
                const vsf_thread_prepare_cfg_t cfg = {                           \
                    .entry = (vsf_thread_entry_t *)                              \
                                        &vsf_thread_##__name##_entry,            \
                    .stack = (__stack),                                          \
                    .stack_size = (__stack_bytesize),                            \
                };                                                               \
                vk_eda_call_thread_prepare(&(__vsf_pthis->use_as__vsf_thread_cb_t),\
                                            (vsf_thread_prepare_cfg_t *)&cfg);   \
            } while (0)

#   define vsf_eda_call_thread_prepare_ex(__name, __thread_cb, __stack, __stack_bytesize)\
            __vsf_eda_call_thread_prepare_ex__(__name, (__thread_cb), (__stack), (__stack_bytesize))

#   define vsf_eda_call_thread_ex(__thread_cb)                                 \
            vk_eda_call_thread(&((__thread_cb)->use_as__vsf_thread_cb_t))
#else
#   define __def_vsf_thread(__name, __stack_bytesize, ...)                     \
            struct thread_cb_##__name##_t {                                     \
                implement(vsf_thread_t)                                         \
                __VA_ARGS__                                                     \
            };                                                                  \
            __vsf_thread_def_stack(__name, (__stack_bytesize))                  \
            struct __name {                                                     \
                __vsf_thread_def_stack_member(__name, (__stack_bytesize))       \
                implement_ex(thread_cb_##__name##_t, param);                    \
            } VSF_CAL_ALIGN(8);                                                 \
            extern void vsf_thread_##__name##_start(struct __name *task,       \
                                                    vsf_prio_t priority);      \
            extern void vsf_thread_##__name##_entry(                           \
                                        struct thread_cb_##__name##_t *vsf_pthis);

#   define __implement_vsf_thread(__name)                                      \
            void vsf_thread_##__name##_entry(                                   \
                                        struct thread_cb_##__name##_t *vsf_pthis);\
            void vsf_thread_##__name##_start(   struct __name *task,            \
                                                vsf_prio_t priority)            \
            {                                                                    \
                VSF_KERNEL_ASSERT(NULL != task);                                 \
                vsf_thread_t *__vsf_pthis =                                      \
                                        &(task->param.use_as__vsf_thread_t);     \
                __vsf_pthis->entry = (vsf_thread_entry_t *)                      \
                                        &vsf_thread_##__name##_entry;            \
                __vsf_thread_imp_stack(__name, __vsf_pthis, task)                \
                vsf_thread_start(__vsf_pthis, priority);                         \
            }                                                                    \
            void vsf_thread_##__name##_entry(                                   \
                                        struct thread_cb_##__name##_t *vsf_pthis)
#   define __def_vsf_thread_ex(__name, ...)                                    \
            struct thread_cb_##__name##_t {                                     \
                implement(vsf_thread_t)                                         \
                __VA_ARGS__                                                     \
            };                                                                  \
            struct __name {                                                     \
                implement_ex(thread_cb_##__name##_t, param);                    \
            } VSF_CAL_ALIGN(8);                                                 \
            extern void vsf_thread_##__name##_start(struct __name *task,       \
                                                    vsf_prio_t priority,        \
                                                    uint64_t *stack,            \
                                                    uint_fast32_t size);        \
            extern void vsf_thread_##__name##_entry(                           \
                                        struct thread_cb_##__name##_t *vsf_pthis);

#   define __implement_vsf_thread_ex(__name)                                   \
            void vsf_thread_##__name##_entry(                                   \
                                        struct thread_cb_##__name##_t *vsf_pthis);\
            void vsf_thread_##__name##_start(   struct __name *task,            \
                                                vsf_prio_t priority,             \
                                                uint64_t *stack,                 \
                                                uint_fast32_t size)              \
            {                                                                    \
                VSF_KERNEL_ASSERT(NULL != task && 0 != size && NULL != stack);   \
                vsf_thread_t *__vsf_pthis =                                      \
                                        &(task->param.use_as__vsf_thread_t);     \
                __vsf_pthis->entry = (vsf_thread_entry_t *)                      \
                                        &vsf_thread_##__name##_entry;            \
                __vsf_pthis->stack = stack;                                      \
                __vsf_pthis->stack_size = size;                                  \
                vsf_thread_start(__vsf_pthis, priority);                         \
            }                                                                    \
            void vsf_thread_##__name##_entry(                                   \
                                        struct thread_cb_##__name##_t *vsf_pthis)
#endif
#define def_vsf_thread(__name, __stack_bytesize, ...)                          \
            __def_vsf_thread(__name, (__stack_bytesize), __VA_ARGS__)

#define define_vsf_thread(__name, __stack_bytesize, ...)                       \
            def_vsf_thread(__name, (__stack_bytesize), __VA_ARGS__)

#define def_vsf_thread_ex(__name, ...)                                         \
            __def_vsf_thread_ex(__name, __VA_ARGS__)

#define define_vsf_thread_ex(__name, ...)                                      \
            def_vsf_thread_ex(__name, __VA_ARGS__)

#define end_def_vsf_thread(...)
#define end_define_vsf_thread(...)

#define implement_vsf_thread(__name)        __implement_vsf_thread(__name)
#define implement_vsf_thread_ex(__name)     __implement_vsf_thread_ex(__name)

#define imp_vsf_thread(__name)              implement_vsf_thread(__name)
#define imp_vsf_thread_ex(__name)           implement_vsf_thread_ex(__name)
#define __init_vsf_thread(__name, __task, __pri)                               \
            vsf_thread_##__name##_start((__task), (__pri))

#define __init_vsf_thread_ex(__name, __task, __pri, __stack, __stack_bytesize) \
            vsf_thread_##__name##_start((__task), (__pri), (__stack), (__stack_bytesize))

#define init_vsf_thread(__name, __task, __pri)                                 \
            __init_vsf_thread(__name, (__task), (__pri))

#define init_vsf_thread_ex(__name, __task, __pri, __stack, __stack_bytesize)   \
            __init_vsf_thread_ex(__name, (__task), (__pri), (__stack), (__stack_bytesize))
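
/*
 * Example (illustrative sketch; user_thread_t, __user_thread and the member
 * cnt are placeholder names, and the stack size / priority are arbitrary):
 *
 *      declare_vsf_thread(user_thread_t)
 *
 *      def_vsf_thread(user_thread_t, 1024,
 *          int cnt;                        // optional private members
 *      )
 *
 *      implement_vsf_thread(user_thread_t)
 *      {
 *          while (1) {
 *              vsf_pthis->cnt++;
 *              vsf_thread_delay_ms(1000);
 *          }
 *      }
 *
 *      static user_thread_t __user_thread;
 *
 *      // after the kernel is running:
 *      init_vsf_thread(user_thread_t, &__user_thread, vsf_prio_0);
 *
 * init_vsf_thread_ex() matches def_vsf_thread_ex()/implement_vsf_thread_ex(),
 * where the caller supplies the (8-byte aligned) stack buffer and its size.
 */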
#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
#   define __vsf_thread(__name)             thread_cb_##__name##_t
#   define vsf_thread(__name)               __vsf_thread(__name)
#endif

#define vsf_thread_wfm                      vsf_thread_wait_for_msg
#define vsf_thread_wfe                      vsf_thread_wait_for_evt

#if VSF_KERNEL_CFG_SUPPORT_EVT_MESSAGE == ENABLED
#   define vsf_thread_wfem                  vsf_thread_wait_for_evt_msg
#endif
#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
#   define __vsf_thread_call_sub(__name, __target, ...)                        \
            vk_thread_call_eda( (uintptr_t)(__name),                            \
                                (uintptr_t)(__target),                          \
                                (0, ##__VA_ARGS__),                              \

#   define vsf_thread_call_sub(__name, __target, ...)                          \
            __vsf_thread_call_sub(__name, (__target), (0, ##__VA_ARGS__))

#   define vsf_thread_call_pt(__name, __target, ...)                           \
            (__target)->fsm_state = 0;                                           \
            vsf_thread_call_sub(vsf_pt_func(__name), (__target), (0, ##__VA_ARGS__))
#endif

#if VSF_KERNEL_CFG_EDA_SUPPORT_TASK == ENABLED && VSF_KERNEL_CFG_EDA_SUBCALL_HAS_RETURN_VALUE == ENABLED
#   define vsf_thread_call_task(__name, __target, ...)                         \
            vk_thread_call_task(vsf_task_func(__name), __target, (0, ##__VA_ARGS__))
#endif
#if VSF_KERNEL_CFG_EDA_SUPPORT_TIMER == ENABLED
#   define vsf_thread_delay_ms(__ms)    vsf_thread_delay(vsf_systimer_ms_to_tick(__ms))
#   define vsf_thread_delay_us(__us)    vsf_thread_delay(vsf_systimer_us_to_tick(__us))
#endif
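
/*
 * For example, vsf_thread_delay_ms(10) converts 10 ms to system-timer ticks
 * and blocks only the calling thread for that long; other tasks keep running.
 */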
#if VSF_KERNEL_CFG_SUPPORT_SYNC == ENABLED
#   define vsf_thread_sem_post(__sem)           vsf_eda_sem_post(__sem)
#   define vsf_thread_sem_pend(__sem, timeout)  __vsf_thread_wait_for_sync(__sem, timeout)

#   define vsf_thread_trig_set(__trig, ...)     vsf_eda_trig_set(__trig, ##__VA_ARGS__)
#   define vsf_thread_trig_reset(__trig)        vsf_eda_trig_reset(__trig)
#   define vsf_thread_trig_pend(__trig, timeout) __vsf_thread_wait_for_sync(__trig, timeout)

#   define vsf_thread_mutex_enter(__mtx, timeout) __vsf_thread_wait_for_sync(&(__mtx)->use_as__vsf_sync_t, timeout)
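
/*
 * Example (illustrative sketch; __sem is a placeholder vsf_sem_t initialized
 * elsewhere, e.g. with vsf_eda_sem_init(), and -1 is the usual "wait forever"
 * timeout):
 *
 *      // producer side, any task context
 *      vsf_thread_sem_post(&__sem);
 *
 *      // consumer side, thread context only
 *      if (VSF_SYNC_GET == vsf_thread_sem_pend(&__sem, -1)) {
 *          // semaphore obtained
 *      }
 */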
#   if VSF_KERNEL_CFG_THREAD_SIGNAL == ENABLED
typedef void vsf_thread_sighandler_t(vsf_thread_t *thread, int sig);
#   endif

#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
typedef void vsf_thread_entry_t(vsf_thread_cb_t *thread);
#else
typedef void vsf_thread_entry_t(vsf_thread_t *thread);
#endif

        vsf_thread_entry_t *entry;
#if VSF_KERNEL_CFG_THREAD_STACK_LARGE == ENABLED
#   if VSF_KERNEL_THREAD_USE_HOST == ENABLED
#   if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
end_def_class(vsf_thread_cb_t)
#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
        vsf_thread_sighandler_t *sighandler;
end_def_class(vsf_thread_t)

        vsf_thread_entry_t *entry;
} vsf_thread_prepare_cfg_t;

        vsf_thread_sighandler_t *sighandler;
        implement(vsf_thread_cb_t)
end_def_class(vsf_thread_t)
#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED

extern vsf_err_t vk_eda_call_thread_prepare(vsf_thread_cb_t *thread_cb,
                                            vsf_thread_prepare_cfg_t *cfg);

extern vsf_err_t vk_eda_call_thread(vsf_thread_cb_t *thread_cb);

#if VSF_KERNEL_CFG_EDA_SUPPORT_TASK == ENABLED && VSF_KERNEL_CFG_EDA_SUBCALL_HAS_RETURN_VALUE == ENABLED
                                        size_t local_buff_size,

extern vsf_err_t vk_thread_call_thread( vsf_thread_cb_t *thread_cb,
                                        vsf_thread_prepare_cfg_t *cfg);
#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
                                        vsf_thread_cb_t *thread_cb,

#if VSF_KERNEL_CFG_THREAD_STACK_CHECK == ENABLED
extern void vsf_thread_stack_check(void);
#endif
extern VSF_CAL_NO_RETURN void vsf_thread_exit(void);

extern vsf_thread_t *vsf_thread_get_cur(void);

extern void vsf_thread_wait_for_evt(vsf_evt_t evt);

extern void vsf_thread_sendevt(vsf_thread_t *thread, vsf_evt_t evt);
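
/*
 * Example (illustrative sketch; waiter_thread is a placeholder vsf_thread_t
 * pointer): a thread can block on a user event and be woken from another
 * task context:
 *
 *      // in the waiting thread
 *      vsf_thread_wait_for_evt(VSF_EVT_USER + 1);
 *
 *      // elsewhere, given the waiter's vsf_thread_t pointer
 *      vsf_thread_sendevt(waiter_thread, VSF_EVT_USER + 1);
 */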
#if VSF_KERNEL_CFG_SUPPORT_EVT_MESSAGE == ENABLED
extern uintptr_t vsf_thread_wait_for_msg(void);
#endif

#if VSF_KERNEL_CFG_EDA_SUPPORT_TIMER == ENABLED

void vsf_thread_yield(void);
#if VSF_KERNEL_CFG_SUPPORT_DYNAMIC_PRIOTIRY == ENABLED

#if VSF_KERNEL_CFG_THREAD_SIGNAL == ENABLED
extern void vsf_thread_signal(vsf_thread_t *thread, int sig);
#endif

#if VSF_KERNEL_CFG_SUPPORT_SYNC == ENABLED

#   if VSF_KERNEL_CFG_SUPPORT_BITMAP_EVENT == ENABLED