vsf_thread.h
1/*****************************************************************************
2 * Copyright(C)2009-2022 by VSF Team *
3 * *
4 * Licensed under the Apache License, Version 2.0 (the "License"); *
5 * you may not use this file except in compliance with the License. *
6 * You may obtain a copy of the License at *
7 * *
8 * http://www.apache.org/licenses/LICENSE-2.0 *
9 * *
10 * Unless required by applicable law or agreed to in writing, software *
11 * distributed under the License is distributed on an "AS IS" BASIS, *
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
13 * See the License for the specific language governing permissions and *
14 * limitations under the License. *
15 * *
16 ****************************************************************************/
17
18#ifndef __VSF_THREAD_H__
19#define __VSF_THREAD_H__
20
21/*============================ INCLUDES ======================================*/
22
 23#include "kernel/vsf_kernel_cfg.h"
 24
25#if VSF_KERNEL_CFG_SUPPORT_THREAD == ENABLED && VSF_USE_KERNEL == ENABLED
26#include "../vsf_eda.h"
27
28#if VSF_KERNEL_CFG_EDA_SUPPORT_TASK == ENABLED
29# include "./vsf_task.h"
30#endif
31
32#if defined(__VSF_THREAD_CLASS_IMPLEMENT)
33# undef __VSF_THREAD_CLASS_IMPLEMENT
34# define __PLOOC_CLASS_IMPLEMENT__
35#elif defined(__VSF_THREAD_CLASS_INHERIT__)
36# undef __VSF_THREAD_CLASS_INHERIT__
37# define __PLOOC_CLASS_INHERIT__
38#endif
39
40#include "utilities/ooc_class.h"
41
42#ifdef __cplusplus
43extern "C" {
44#endif
45
46/*============================ MACROS ========================================*/
47
48#ifndef VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE
49# define VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE 1
50#endif
51#ifndef VSF_KERNEL_CFG_THREAD_STACK_GUARDIAN_SIZE
52# define VSF_KERNEL_CFG_THREAD_STACK_GUARDIAN_SIZE 0
53#endif
54
55#ifndef VSF_KERNEL_CFG_THREAD_STACK_ALIGN_BIT
56# ifdef VSF_ARCH_STACK_ALIGN_BIT
57# define VSF_KERNEL_CFG_THREAD_STACK_ALIGN_BIT VSF_ARCH_STACK_ALIGN_BIT
58# else
59# define VSF_KERNEL_CFG_THREAD_STACK_ALIGN_BIT 3
60# endif
61#endif
62
63#define __VSF_THREAD_STACK_SAFE_SIZE(__stack) \
64 ( ( ( ((__stack) + VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE - 1) \
65 / VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE) \
66 * VSF_KERNEL_CFG_THREAD_STACK_PAGE_SIZE) \
67 + VSF_KERNEL_CFG_THREAD_STACK_GUARDIAN_SIZE)
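// Worked example of the rounding above (hypothetical numbers): with the
// default page size of 1 and a guardian size of 32, a request of 1000 bytes
// gives ((1000 + 0) / 1) * 1 + 32 = 1032; with a page size of 256 instead,
// the same request rounds up to ((1000 + 255) / 256) * 256 + 32 = 1056.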
68
69#ifdef VSF_ARCH_LIMIT_NO_SET_STACK
70// arch does not support set stack, use host_thread mode
71# define VSF_KERNEL_THREAD_USE_HOST ENABLED
72# ifdef VSF_ARCH_RTOS_DYNAMIC_STACK
73# define VSF_KERNEL_THREAD_DYNAMIC_STACK ENABLED
74# if VSF_KERNEL_CFG_THREAD_STACK_CHECK == ENABLED
75# warning VSF_KERNEL_CFG_THREAD_STACK_CHECK is not usable for dynamic stack
76# undef VSF_KERNEL_CFG_THREAD_STACK_CHECK
77# define VSF_KERNEL_CFG_THREAD_STACK_CHECK DISABLED
78# endif
79# endif
80#endif
81
82/*============================ MACROFIED FUNCTIONS ===========================*/
83
84#define __declare_vsf_thread(__name) \
85 typedef struct __name __name; \
86 typedef struct thread_cb_##__name##_t thread_cb_##__name##_t;
87#define declare_vsf_thread(__name) __declare_vsf_thread(__name)
88#define declare_vsf_thread_ex(__name) __declare_vsf_thread(__name)
89
90#define dcl_vsf_thread(__name) declare_vsf_thread(__name)
91#define dcl_vsf_thread_ex(__name) declare_vsf_thread(__name)
92
93#if VSF_KERNEL_THREAD_DYNAMIC_STACK == ENABLED
94# define __vsf_thread_set_stack_canery(__thread, __task) \
95 (__thread)->canary = 0xDEADBEEF;
96#else
97# define __vsf_thread_set_stack_canery(__thread, __task) \
98 (__task)->canary = 0xDEADBEEF;
99#endif
100
101#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
102# define __vsf_thread_set_stack(__thread, __task, __stack_ptr, __stack_bytesize)\
103 (__thread)->use_as__vsf_thread_cb_t.stack = (__stack_ptr); \
104 (__thread)->use_as__vsf_thread_cb_t.stack_size = (__stack_bytesize);\
105 __vsf_thread_set_stack_canery(__thread, __task)
106#else
107# define __vsf_thread_set_stack(__thread, __task, __stack_ptr, __stack_bytesize)\
108 (__thread)->stack = (__stack_ptr); \
109 (__thread)->stack_size = (__stack_bytesize); \
110 __vsf_thread_set_stack_canery(__thread, __task)
111#endif
112
113#if VSF_KERNEL_THREAD_DYNAMIC_STACK == ENABLED
114# define __vsf_thread_def_stack(__name, __bytesize) \
115 typedef enum { \
116 vsf_thread##__name##_stack_bytesize = (__bytesize), \
117 };
118# define __vsf_thread_def_stack_member(__name, __bytesize)
119# define __vsf_thread_imp_stack(__name, __thread, __task) \
120 __vsf_thread_set_stack((__thread), (__task), NULL, (vsf_thread##__name##_stack_bytesize))
121# define __vsf_eda_call_thread_prepare_stack(__name, __thread) \
122 .stack = NULL, \
123 .stack_size = (vsf_thread##__name##_stack_bytesize),
124#else
125# define __vsf_thread_def_stack(__name, __bytesize)
126# define __vsf_thread_def_stack_member(__name, __bytesize) \
127 uint32_t canary; \
128 uint64_t stack_arr[(__VSF_THREAD_STACK_SAFE_SIZE(__bytesize) + 7) / 8]\
129 VSF_CAL_ALIGN(1 << VSF_KERNEL_CFG_THREAD_STACK_ALIGN_BIT);
130# define __vsf_thread_imp_stack(__name, __thread, __task) \
131 __vsf_thread_set_stack((__thread), (__task), (__task)->stack_arr, sizeof((__task)->stack_arr))
132# define __vsf_eda_call_thread_prepare_stack(__name, __thread) \
133 .stack = (__thread)->stack_arr, \
134 .stack_size = sizeof((__thread)->stack_arr),
135#endif
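// Summary of the two branches above: with VSF_KERNEL_THREAD_DYNAMIC_STACK
// enabled, only the requested byte size is recorded (as the
// vsf_thread<name>_stack_bytesize enumerator) and the stack pointer stays
// NULL so the host/RTOS port can allocate it; otherwise the generated thread
// type embeds a canary word plus an aligned uint64_t stack_arr[] sized from
// __VSF_THREAD_STACK_SAFE_SIZE(__bytesize).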
136
137#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
138# define __def_vsf_thread(__name, __stack_bytesize, ...) \
139 __vsf_thread_def_stack(__name, (__stack_bytesize)) \
140 struct thread_cb_##__name##_t { \
141 implement(vsf_thread_cb_t) \
142 __VA_ARGS__ \
143 __vsf_thread_def_stack_member(__name, (__stack_bytesize)) \
144 }; \
145 struct __name { \
146 implement(vsf_thread_t) \
147 implement_ex(thread_cb_##__name##_t, param) \
148 } VSF_CAL_ALIGN(8); \
149 extern void vsf_thread_##__name##_start(struct __name *task, \
150 vsf_prio_t priority); \
151 extern void vsf_thread_##__name##_entry( \
152 struct thread_cb_##__name##_t *vsf_pthis);
153
154# define __implement_vsf_thread(__name) \
155 void vsf_thread_##__name##_entry( \
156 struct thread_cb_##__name##_t *vsf_pthis); \
157 void vsf_thread_##__name##_start( struct __name *task, \
158 vsf_prio_t priority) \
159 { \
160 VSF_KERNEL_ASSERT(NULL != task); \
161 thread_cb_##__name##_t *__vsf_pthis = &(task->param); \
162 __vsf_pthis->use_as__vsf_thread_cb_t.entry = (vsf_thread_entry_t *)\
163 &vsf_thread_##__name##_entry; \
164 __vsf_thread_imp_stack(__name, __vsf_pthis, task) \
165 vsf_thread_start( &(task->use_as__vsf_thread_t), \
166 &(__vsf_pthis->use_as__vsf_thread_cb_t), \
167 priority); \
168 } \
169 void vsf_thread_##__name##_entry( \
170 struct thread_cb_##__name##_t *vsf_pthis)
171
172# define __vsf_eda_call_thread_prepare(__name, __thread_cb) \
173 do { \
174 thread_cb_##__name##_t *__vsf_pthis = (__thread_cb); \
175 const vsf_thread_prepare_cfg_t cfg = { \
176 .entry = (vsf_thread_entry_t *) \
177 &vsf_thread_##__name##_entry, \
178 __vsf_eda_call_thread_prepare_stack(__name, (__thread_cb)) \
179 }; \
180 vk_eda_call_thread_prepare(&(__vsf_pthis->use_as__vsf_thread_cb_t),\
181 (vsf_thread_prepare_cfg_t *)&cfg);\
182 } while(0)
183
184# define vsf_eda_call_thread_prepare(__name, __thread_cb) \
185 __vsf_eda_call_thread_prepare(__name, __thread_cb)
186
187# define vsf_eda_call_thread(__thread_cb) \
188 vk_eda_call_thread(&((__thread_cb)->use_as__vsf_thread_cb_t))
189
190
191# define __def_vsf_thread_ex(__name, ...) \
192 struct thread_cb_##__name##_t { \
193 implement(vsf_thread_cb_t) \
194 __VA_ARGS__ \
195 }; \
196 struct __name { \
197 implement(vsf_thread_t) \
198 implement_ex(thread_cb_##__name##_t, param) \
199 }; \
200 extern void vsf_thread_##__name##_start( struct __name *task, \
201 vsf_prio_t priority, \
202 void *stack, \
203 uint_fast32_t size); \
204 extern void vsf_thread_##__name##_entry( \
205 struct thread_cb_##__name##_t *vsf_pthis);
206
207
208# define __implement_vsf_thread_ex(__name) \
209 void vsf_thread_##__name##_entry( \
210 struct thread_cb_##__name##_t *vsf_pthis); \
211 void vsf_thread_##__name##_start( struct __name *task, \
212 vsf_prio_t priority, \
213 void *stack, \
214 uint_fast32_t size) \
215 { \
216 VSF_KERNEL_ASSERT(NULL != task && 0 != size && NULL != stack); \
217 thread_cb_##__name##_t *__vsf_pthis = &(task->param); \
218 __vsf_pthis->use_as__vsf_thread_cb_t.entry = (vsf_thread_entry_t *)\
219 &vsf_thread_##__name##_entry; \
220 __vsf_pthis->use_as__vsf_thread_cb_t.stack = stack; \
221 __vsf_pthis->use_as__vsf_thread_cb_t.stack_size = size; \
222 vsf_thread_start( &(task->use_as__vsf_thread_t), \
223 &(task->param.use_as__vsf_thread_cb_t), \
224 priority); \
225 } \
226 void vsf_thread_##__name##_entry( \
227 struct thread_cb_##__name##_t *vsf_pthis)
228
229# define __vsf_eda_call_thread_prepare_ex__( __name, \
230 __thread_cb, \
231 __stack, \
232 __stack_bytesize) \
233 do { \
 234 VSF_KERNEL_ASSERT((NULL != (__stack)) && (0 != (__stack_bytesize))); \
235 thread_cb_##__name##_t *__vsf_pthis = (__thread_cb); \
236 const vsf_thread_prepare_cfg_t cfg = { \
237 .entry = (vsf_thread_entry_t *) \
238 &vsf_thread_##__name##_entry, \
239 .stack = (__stack), \
240 .stack_size = (__stack_bytesize), \
241 }; \
242 vk_eda_call_thread_prepare(&(__vsf_pthis->use_as__vsf_thread_cb_t),\
243 (vsf_thread_prepare_cfg_t *)&cfg);\
244 } while(0)
245
246
247# define vsf_eda_call_thread_prepare_ex( __name, \
248 __thread_cb, \
249 __stack, \
250 __stack_bytesize) \
251 __vsf_eda_call_thread_prepare_ex__( __name, \
252 (__thread_cb), \
253 (__stack), \
254 (__stack_bytesize))
255
256# define vsf_eda_call_thread_ex(__thread_cb) \
257 vk_eda_call_thread(&((__thread_cb)->use_as__vsf_thread_cb_t))
258
259#else
260# define __def_vsf_thread(__name, __stack_bytesize, ...) \
261 struct thread_cb_##__name##_t { \
262 implement(vsf_thread_t) \
263 __VA_ARGS__ \
264 }; \
265 __vsf_thread_def_stack(__name, (__stack_bytesize)) \
266 struct __name { \
267 __vsf_thread_def_stack_member(__name, (__stack_bytesize)) \
268 implement_ex(thread_cb_##__name##_t, param); \
269 } VSF_CAL_ALIGN(8); \
270 extern void vsf_thread_##__name##_start(struct __name *task, \
271 vsf_prio_t priority); \
272 extern void vsf_thread_##__name##_entry( \
273 struct thread_cb_##__name##_t *vsf_pthis);
274
275# define __implement_vsf_thread(__name) \
276 void vsf_thread_##__name##_entry( \
277 struct thread_cb_##__name##_t *vsf_pthis); \
278 void vsf_thread_##__name##_start( struct __name *task, \
279 vsf_prio_t priority) \
280 { \
281 VSF_KERNEL_ASSERT(NULL != task); \
282 vsf_thread_t *__vsf_pthis = \
283 &(task->param.use_as__vsf_thread_t); \
284 __vsf_pthis->entry = (vsf_thread_entry_t *) \
285 &vsf_thread_##__name##_entry; \
286 __vsf_thread_imp_stack(__name, __vsf_pthis, task) \
287 vsf_thread_start(__vsf_pthis, priority); \
288 } \
289 void vsf_thread_##__name##_entry( \
290 struct thread_cb_##__name##_t *vsf_pthis)
291
292# define __def_vsf_thread_ex(__name, ...) \
293 struct thread_cb_##__name##_t { \
294 implement(vsf_thread_t) \
295 __VA_ARGS__ \
296 }; \
297 struct __name { \
298 implement_ex(thread_cb_##__name##_t, param); \
299 } VSF_CAL_ALIGN(8); \
 300 extern void vsf_thread_##__name##_start( struct __name *task, \
 301 vsf_prio_t priority, void *stack, uint_fast32_t size); \
302 extern void vsf_thread_##__name##_entry( \
303 struct thread_cb_##__name##_t *vsf_pthis);
304
305
306# define __implement_vsf_thread_ex(__name) \
307 void vsf_thread_##__name##_entry( \
308 struct thread_cb_##__name##_t *vsf_pthis); \
309 void vsf_thread_##__name##_start( struct __name *task, \
310 vsf_prio_t priority, \
311 void *stack, \
312 uint_fast32_t size) \
313 { \
314 VSF_KERNEL_ASSERT(NULL != task && 0 != size && NULL != stack); \
315 vsf_thread_t *__vsf_pthis = \
316 &(task->param.use_as__vsf_thread_t); \
317 __vsf_pthis->entry = (vsf_thread_entry_t *) \
318 &vsf_thread_##__name##_entry; \
319 __vsf_pthis->stack = stack; \
320 __vsf_pthis->stack_size = size; \
321 vsf_thread_start(__vsf_pthis, priority); \
322 } \
323 void vsf_thread_##__name##_entry( \
324 struct thread_cb_##__name##_t *vsf_pthis)
325
326#endif
327
328
329#define def_vsf_thread(__name, __stack_bytesize, ...) \
330 __def_vsf_thread(__name, (__stack_bytesize), __VA_ARGS__)
331
332#define define_vsf_thread(__name, __stack_bytesize, ...) \
333 def_vsf_thread(__name, (__stack_bytesize), __VA_ARGS__)
334
335#define def_vsf_thread_ex(__name, ...) \
336 __def_vsf_thread_ex(__name, __VA_ARGS__)
337
338#define define_vsf_thread_ex(__name, ...) \
339 def_vsf_thread_ex(__name, __VA_ARGS__)
340
341#define end_def_vsf_thread(...)
342#define end_define_vsf_thread(...)
343
344#define implement_vsf_thread(__name) __implement_vsf_thread(__name)
345#define implement_vsf_thread_ex(__name) __implement_vsf_thread_ex(__name)
346
347#define imp_vsf_thread(__name) implement_vsf_thread(__name)
348#define imp_vsf_thread_ex(__name) implement_vsf_thread_ex(__name)
349
350#define __init_vsf_thread(__name, __task, __pri) \
351 vsf_thread_##__name##_start((__task), (__pri))
352
353#define __init_vsf_thread_ex(__name, __task, __pri, __stack, __stack_bytesize) \
354 vsf_thread_##__name##_start((__task), (__pri), (__stack), (__stack_bytesize))
355
356#define init_vsf_thread(__name, __task, __pri) \
357 __init_vsf_thread(__name, (__task), (__pri))
358
359#define init_vsf_thread_ex(__name, __task, __pri, __stack, __stack_bytesize) \
360 __init_vsf_thread_ex(__name, (__task), (__pri), (__stack), (__stack_bytesize))
361
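/* Usage sketch of the declare/def/implement/init macros above, assuming a
 * configuration with VSF_USE_KERNEL, VSF_KERNEL_CFG_SUPPORT_THREAD and
 * VSF_KERNEL_CFG_EDA_SUPPORT_TIMER set to ENABLED; user_thread_t,
 * __user_thread and counter are example names, not part of the kernel API:
 *
 *     declare_vsf_thread(user_thread_t)
 *
 *     def_vsf_thread(user_thread_t, 1024,
 *         int counter;                    // optional thread-local members
 *     )
 *
 *     implement_vsf_thread(user_thread_t)
 *     {
 *         // vsf_pthis points to this thread's thread_cb_user_thread_t
 *         while (1) {
 *             vsf_pthis->counter++;
 *             vsf_thread_delay_ms(1000);
 *         }
 *     }
 *
 *     static user_thread_t __user_thread;
 *
 *     void user_thread_start(void)
 *     {
 *         init_vsf_thread(user_thread_t, &__user_thread, vsf_prio_0);
 *     }
 */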
362#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
363# define __vsf_thread(__name) thread_cb_##__name##_t
364# define vsf_thread(__name) __vsf_thread(__name)
365
366#endif
367
368#define vsf_thread_wfm vsf_thread_wait_for_msg
369#define vsf_thread_wfe vsf_thread_wait_for_evt
370
371#if VSF_KERNEL_CFG_SUPPORT_EVT_MESSAGE == ENABLED
372# define vsf_thread_wfem vsf_thread_wait_for_evt_msg
373#endif
374
375
376#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
377# define __vsf_thread_call_sub(__name, __target, ...) \
378 vk_thread_call_eda( (uintptr_t)(__name), \
379 (uintptr_t)(__target), \
380 (0, ##__VA_ARGS__), \
381 0, \
382 0)
383
384
385# define vsf_thread_call_sub(__name, __target, ...) \
386 __vsf_thread_call_sub(__name, (__target), (0, ##__VA_ARGS__))
387
388
389# define vsf_thread_call_pt(__name, __target, ...) \
390 (__target)->fsm_state = 0; \
391 vsf_thread_call_sub(vsf_pt_func(__name), (__target), (0, ##__VA_ARGS__))
392
393#endif
394
395#if VSF_KERNEL_CFG_EDA_SUPPORT_TASK == ENABLED && VSF_KERNEL_CFG_EDA_SUBCALL_HAS_RETURN_VALUE == ENABLED
396# define vsf_thread_call_task(__name, __target, ...) \
397 vk_thread_call_task(vsf_task_func(__name), __target, (0, ##__VA_ARGS__))
398#endif
399
400#if VSF_KERNEL_CFG_EDA_SUPPORT_TIMER == ENABLED
401# define vsf_thread_delay_ms(__ms) vsf_thread_delay(vsf_systimer_ms_to_tick(__ms))
402# define vsf_thread_delay_us(__us) vsf_thread_delay(vsf_systimer_us_to_tick(__us))
403#endif
404
405#if VSF_KERNEL_CFG_SUPPORT_SYNC == ENABLED
406# define vsf_thread_sem_post(__sem) vsf_eda_sem_post(__sem)
407# define vsf_thread_sem_pend(__sem, timeout) __vsf_thread_wait_for_sync(__sem, timeout)
408
409# define vsf_thread_trig_set(__trig, ...) vsf_eda_trig_set(__trig, ##__VA_ARGS__)
410# define vsf_thread_trig_reset(__trig) vsf_eda_trig_reset(__trig)
411# define vsf_thread_trig_pend(__trig, timeout) __vsf_thread_wait_for_sync(__trig, timeout)
412
413# define vsf_thread_mutex_enter(__mtx, timeout) __vsf_thread_wait_for_sync(&(__mtx)->use_as__vsf_sync_t, timeout)
414#endif
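/* Usage sketch of the blocking sync wrappers above, assuming
 * VSF_KERNEL_CFG_SUPPORT_SYNC is ENABLED; __user_sem and __user_mutex are
 * example objects assumed to be initialized elsewhere (e.g. with
 * vsf_eda_sem_init() / vsf_eda_mutex_init()), and the negative timeout is
 * used here to mean "wait forever":
 *
 *     static vsf_sem_t __user_sem;
 *     static vsf_mutex_t __user_mutex;
 *
 *     // inside a thread body:
 *     if (VSF_SYNC_GET == vsf_thread_sem_pend(&__user_sem, -1)) {
 *         if (VSF_SYNC_GET == vsf_thread_mutex_enter(&__user_mutex, -1)) {
 *             // ... protected work ...
 *             vsf_thread_mutex_leave(&__user_mutex);
 *         }
 *     }
 *
 *     // from another thread or eda context:
 *     vsf_thread_sem_post(&__user_sem);
 */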
415
416/*============================ TYPES =========================================*/
417
418declare_class(vsf_thread_t)
419
420# if VSF_KERNEL_CFG_THREAD_SIGNAL == ENABLED
421typedef void vsf_thread_sighandler_t(vsf_thread_t *thread, int sig);
422# endif
423
424#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
425declare_class(vsf_thread_cb_t)
426typedef void vsf_thread_entry_t(vsf_thread_cb_t *thread);
427#else
428typedef void vsf_thread_entry_t(vsf_thread_t *thread);
429#endif
430
433def_class(vsf_thread_cb_t,
434
435 public_member(
436 vsf_thread_entry_t *entry;
437#if VSF_KERNEL_CFG_THREAD_STACK_LARGE == ENABLED
438 uint32_t stack_size;
439#else
440 uint16_t stack_size;
441#endif
442 uint64_t *stack;
443 )
444
445 private_member(
446#if VSF_KERNEL_THREAD_USE_HOST == ENABLED
447 vsf_arch_irq_thread_t host_thread;
448 vsf_arch_irq_request_t req, *rep;
449 vsf_evt_t evt;
450# if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
451 bool is_inited;
452# endif
453#else
454 jmp_buf *pos;
455 jmp_buf *ret;
456#endif
457 )
458)
459end_def_class(vsf_thread_cb_t)
461
462#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
463
464def_class( vsf_thread_t,
465 public_member(
466#if VSF_KERNEL_CFG_EDA_SUPPORT_TIMER == ENABLED
 467 implement(vsf_teda_t)
468#else
469 implement(vsf_eda_t)
470#endif
471#if VSF_KERNEL_CFG_THREAD_SIGNAL == ENABLED
 472 vsf_thread_sighandler_t *sighandler;
473#endif
474 )
475#if VSF_KERNEL_CFG_THREAD_SIGNAL == ENABLED
 476 private_member(
477 // set sig_pending if current thread is in subcall,
478 // sighandler will be called if sig_pending is set and subcall returns.
480 )
481#endif
482)
483end_def_class(vsf_thread_t)
484
485typedef struct {
486 vsf_thread_entry_t *entry;
487 void *stack;
488 uint_fast32_t stack_size;
489 //vsf_prio_t priority; //!< TODO do we need this??
490} vsf_thread_prepare_cfg_t;
491
492
493#else
494
497def_class(vsf_thread_t,
498 public_member(
499#if VSF_KERNEL_CFG_EDA_SUPPORT_TIMER == ENABLED
 500 implement(vsf_teda_t)
501#else
502 implement(vsf_eda_t)
503#endif
504#if VSF_KERNEL_CFG_THREAD_SIGNAL == ENABLED
 505 vsf_thread_sighandler_t *sighandler;
506#endif
507 implement(vsf_thread_cb_t)
508 )
509
510#if VSF_KERNEL_CFG_THREAD_SIGNAL == ENABLED
 511 private_member(
512 // set sig_pending if current thread is in subcall,
513 // sighandler will be called if sig_pending is set and subcall returns.
515 )
516#endif
517)
518end_def_class(vsf_thread_t)
520#endif
521/*============================ GLOBAL VARIABLES ==============================*/
522/*============================ PROTOTYPES ====================================*/
523
524#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
525VSF_CAL_SECTION(".text.vsf.kernel.vk_eda_call_thread_prepare")
526extern vsf_err_t vk_eda_call_thread_prepare( vsf_thread_cb_t *thread_cb,
527 vsf_thread_prepare_cfg_t *cfg);
528
529VSF_CAL_SECTION(".text.vsf.kernel.vk_eda_call_thread")
530extern vsf_err_t vk_eda_call_thread(vsf_thread_cb_t *thread_cb);
531
532#if VSF_KERNEL_CFG_EDA_SUPPORT_TASK == ENABLED && VSF_KERNEL_CFG_EDA_SUBCALL_HAS_RETURN_VALUE == ENABLED
533VSF_CAL_SECTION(".text.vsf.kernel.vk_thread_call_task")
534extern
535fsm_rt_t vk_thread_call_task(vsf_task_entry_t task_handler, uintptr_t param, size_t local_size);
536#endif
537
538VSF_CAL_SECTION(".text.vsf.kernel.vk_thread_call_eda")
539extern
540vsf_err_t vk_thread_call_eda( uintptr_t eda_handler,
541 uintptr_t param,
542 size_t local_size,
543 size_t local_buff_size,
544 uintptr_t local_buff);
545
546VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_call_thread")
547extern vsf_err_t vk_thread_call_thread( vsf_thread_cb_t *thread_cb,
548 vsf_thread_prepare_cfg_t *cfg);
549
550#endif
551
552
553VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread")
554#if VSF_KERNEL_CFG_EDA_SUPPORT_SUB_CALL == ENABLED
555extern vsf_err_t vsf_thread_start( vsf_thread_t *thread,
556 vsf_thread_cb_t *thread_cb,
557 vsf_prio_t priority);
558#else
559extern vsf_err_t vsf_thread_start(vsf_thread_t *this_ptr, vsf_prio_t priority);
560#endif
561
562#if VSF_KERNEL_CFG_THREAD_STACK_CHECK == ENABLED
563VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_stack_check")
564extern void vsf_thread_stack_check(void);
565#endif
566
567VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_exit")
568extern VSF_CAL_NO_RETURN void vsf_thread_exit(void);
569
570VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_get_cur")
571extern vsf_thread_t *vsf_thread_get_cur(void);
572
573VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_wait")
574extern vsf_evt_t vsf_thread_wait(void);
575
576VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_wait_for_evt")
577extern void vsf_thread_wait_for_evt(vsf_evt_t evt);
578
579VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_sendevt")
580extern void vsf_thread_sendevt(vsf_thread_t *thread, vsf_evt_t evt);
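/* Usage sketch of the event primitives above; MY_EVT_WAKEUP, worker_thread_t
 * and __worker are example names (VSF_EVT_USER is the first event value
 * reserved for applications in vsf_eda.h):
 *
 *     enum { MY_EVT_WAKEUP = VSF_EVT_USER + 0 };
 *
 *     static worker_thread_t __worker;    // defined via def_vsf_thread(worker_thread_t, ...)
 *
 *     // inside the worker thread body: block until MY_EVT_WAKEUP arrives
 *     vsf_thread_wait_for_evt(MY_EVT_WAKEUP);
 *
 *     // from another thread or eda context:
 *     vsf_thread_sendevt(&__worker.use_as__vsf_thread_t, MY_EVT_WAKEUP);
 */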
581
582#if VSF_KERNEL_CFG_SUPPORT_EVT_MESSAGE == ENABLED
583VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_wait_for_evt_msg")
584extern uintptr_t vsf_thread_wait_for_evt_msg(vsf_evt_t evt);
585#endif
586
587VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_wait_for_evt_msg")
588extern uintptr_t vsf_thread_wait_for_msg(void);
589
590#if VSF_KERNEL_CFG_EDA_SUPPORT_TIMER == ENABLED
591VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_delay")
592extern void vsf_thread_delay(vsf_systimer_tick_t tick);
593#endif
594
595VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_yield")
596void vsf_thread_yield(void);
597
598#if VSF_KERNEL_CFG_SUPPORT_DYNAMIC_PRIOTIRY == ENABLED
599VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_set_priority")
600extern vsf_prio_t vsf_thread_set_priority(vsf_prio_t priority);
601#endif
602
603#if VSF_KERNEL_CFG_THREAD_SIGNAL == ENABLED
604VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_signal")
605extern void vsf_thread_signal(vsf_thread_t *thread, int sig);
606#endif
607
608#if VSF_KERNEL_CFG_SUPPORT_SYNC == ENABLED
609VSF_CAL_SECTION(".text.vsf.kernel.__vsf_thread_wait_for_sync")
610vsf_sync_reason_t __vsf_thread_wait_for_sync(vsf_sync_t *sync, vsf_timeout_tick_t time_out);
611
612VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_mutex")
613extern vsf_err_t vsf_thread_mutex_leave(vsf_mutex_t *mtx);
614
615VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_queue")
616vsf_sync_reason_t vsf_thread_queue_send(vsf_eda_queue_t *queue, void *node, vsf_timeout_tick_t timeout);
617
618VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_queue")
619vsf_sync_reason_t vsf_thread_queue_recv(vsf_eda_queue_t *queue, void **node, vsf_timeout_tick_t timeout);
620
621# if VSF_KERNEL_CFG_SUPPORT_BITMAP_EVENT == ENABLED
622VSF_CAL_SECTION(".text.vsf.kernel.vsf_thread_bmpevt_pend")
623extern vsf_sync_reason_t vsf_thread_bmpevt_pend(
624 vsf_bmpevt_t *bmpevt,
625 vsf_bmpevt_pender_t *pender,
 626 vsf_timeout_tick_t timeout);
 627# endif
628#endif
629
630#ifdef __cplusplus
631}
632#endif
633
634#endif
635#endif