Macros

#define VSF_USER_ENTRY   user_main
#define VSF_SCHED_SAFE_CODE_REGION   DEFAULT_CODE_REGION_NONE
#define vsf_sched_lock()   0
#define vsf_sched_unlock(__level)   VSF_UNUSED_PARAM(__level)
#define vsf_sched_safe()   if (1)
#define __vsf_sched_safe(__code)   __code
#define vsf_sched_safe_exit()
#define vsf_protect_scheduler()   vsf_sched_lock()
#define vsf_unprotect_scheduler(__state)   vsf_sched_unlock((__state))
#define vsf_protect_sched()   (vsf_protect_t)vsf_protect_scheduler()
#define vsf_unprotect_sched(__prot)   vsf_unprotect_scheduler(__prot)
#define __vsf_kernel_host_request_init(__req)   __vsf_arch_irq_request_init(__req)
#define __vsf_kernel_host_request_fini(__req)   __vsf_arch_irq_request_fini(__req)
#define __vsf_kernel_host_request_send(__req)   __vsf_arch_irq_request_send(__req)
#define __vsf_kernel_host_request_pend(__req)   __vsf_arch_irq_request_pend(__req)
#define __vsf_kernel_host_thread_init(__thread, __name, __entry, __prio, __stack, __stacksize)   __vsf_arch_irq_init(__thread, __name, __entry, __prio)
#define __vsf_kernel_host_thread_restart(__thread, __request_pending)   __vsf_arch_irq_restart((__thread), (__request_pending))
#define __vsf_kernel_host_thread_exit(__thread)   __vsf_arch_irq_exit(__thread)
#define vsf_sleep(...)   __vsf_sleep((0, ##__VA_ARGS__))
#define vsf_protect_region_sched   vsf_protect_region_none
|
◆ VSF_USER_ENTRY
#define VSF_USER_ENTRY   user_main
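A minimal sketch of the user entry point, assuming VSF_OS_CFG_MAIN_MODE is VSF_OS_CFG_MAIN_MODE_THREAD so that it runs as a thread after kernel startup; the function body is hypothetical application code:

    int VSF_USER_ENTRY(void)        // expands to: int user_main(void)
    {
        // application initialization and main logic; in thread mode,
        // blocking kernel APIs such as vsf_delay_ms() may be used here
        return 0;
    }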
◆ VSF_SCHED_SAFE_CODE_REGION
#define VSF_SCHED_SAFE_CODE_REGION   DEFAULT_CODE_REGION_NONE

◆ vsf_sched_lock
#define vsf_sched_lock()   0
◆ vsf_sched_unlock
#define vsf_sched_unlock(__level)   VSF_UNUSED_PARAM(__level)

◆ vsf_sched_safe
#define vsf_sched_safe()   if (1)
◆ __vsf_sched_safe
#define __vsf_sched_safe(__code)   __code

◆ vsf_sched_safe_exit
#define vsf_sched_safe_exit()
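With the definitions above, the scheduler-safe region is compiled away (vsf_sched_safe() is just "if (1)" and vsf_sched_safe_exit() expands to nothing), so the sketch below only illustrates the usage pattern; "shared_counter" is a hypothetical shared variable:

    static volatile uint32_t shared_counter;    // hypothetical variable shared with other contexts

    static void bump_counter(void)
    {
        vsf_sched_safe() {
            // protected from rescheduling when a real VSF_SCHED_SAFE_CODE_REGION
            // is configured; with the definitions above it runs unprotected
            shared_counter++;
        }
    }

vsf_sched_safe_exit() is intended for leaving the region early, for example before an early return from inside the block; in this configuration it expands to nothing.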
◆ vsf_protect_scheduler
#define vsf_protect_scheduler()   vsf_sched_lock()

◆ vsf_unprotect_scheduler
#define vsf_unprotect_scheduler(__state)   vsf_sched_unlock((__state))

◆ vsf_protect_sched
#define vsf_protect_sched()   (vsf_protect_t)vsf_protect_scheduler()

◆ vsf_unprotect_sched
#define vsf_unprotect_sched(__prot)   vsf_unprotect_scheduler(__prot)
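A hedged critical-section sketch using the vsf_protect_sched() / vsf_unprotect_sched() pair; "event_pending" is a hypothetical shared flag, and with the no-op definitions above the pair costs nothing:

    static volatile bool event_pending;             // hypothetical flag shared with another context

    static bool fetch_and_clear_event(void)
    {
        vsf_protect_t orig = vsf_protect_sched();   // lock the scheduler (no-op in this configuration)
        bool pending = event_pending;               // read and clear the flag as one protected step
        event_pending = false;
        vsf_unprotect_sched(orig);                  // restore the saved state
        return pending;
    }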
◆ __vsf_kernel_host_request_init
#define __vsf_kernel_host_request_init(__req)   __vsf_arch_irq_request_init(__req)

◆ __vsf_kernel_host_request_fini
#define __vsf_kernel_host_request_fini(__req)   __vsf_arch_irq_request_fini(__req)

◆ __vsf_kernel_host_request_send
#define __vsf_kernel_host_request_send(__req)   __vsf_arch_irq_request_send(__req)

◆ __vsf_kernel_host_request_pend
#define __vsf_kernel_host_request_pend(__req)   __vsf_arch_irq_request_pend(__req)

◆ __vsf_kernel_host_thread_init
#define __vsf_kernel_host_thread_init(__thread, __name, __entry, __prio, __stack, __stacksize)   __vsf_arch_irq_init(__thread, __name, __entry, __prio)
◆ __vsf_kernel_host_thread_restart
#define __vsf_kernel_host_thread_restart(__thread, __request_pending)   __vsf_arch_irq_restart((__thread), (__request_pending))
◆ __vsf_kernel_host_thread_exit
#define __vsf_kernel_host_thread_exit(__thread)   __vsf_arch_irq_exit(__thread)
◆ vsf_sleep
#define vsf_sleep(...)   __vsf_sleep((0, ##__VA_ARGS__))
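The variadic wrapper defaults the sleep level to 0: vsf_sleep() expands to __vsf_sleep((0)) and vsf_sleep(n) to __vsf_sleep((0, n)), where the comma expression evaluates to n. A hedged sketch of the usual idle-loop usage (the surrounding loop is an assumption, not part of this header):

    while (1) {
        vsf_sleep();    // same as vsf_sleep(0): request the default low-power level
                        // until the kernel has pending work again
    }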
◆ vsf_protect_region_sched
#define vsf_protect_region_sched   vsf_protect_region_none
◆ vsf_sched_lock_status_t
◆ __vsf_eda_frame_buffer_t
◆ vsf_kernel_resource_t
◆ dcl_vsf_pool() [1/2]
dcl_vsf_pool(vsf_evt_node_pool)

◆ dcl_vsf_pool() [2/2]
dcl_vsf_pool(vsf_eda_frame_pool)
◆ __vsf_kernel_os_start()
extern void __vsf_kernel_os_start(void)

__vsf_kernel_os_start and __vsf_kernel_os_run_priority are ONLY used when the embedded weak vsf_main_entry is not used.
◆ __vsf_kernel_os_run_priority()
extern void __vsf_kernel_os_run_priority(vsf_prio_t priority)
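Given the note above, a custom entry that bypasses the weak vsf_main_entry is responsible for starting the kernel itself. The sketch below is an assumption about how such an entry might call these two functions; the weak vsf_main_entry shipped with the kernel is the authoritative reference:

    int main(void)
    {
        __vsf_kernel_os_start();                        // bring up kernel resources and start the OS
        while (1) {
            __vsf_kernel_os_run_priority(vsf_prio_0);   // drive pending tasks at the given priority
        }
    }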
◆ vsf_kernel_err_report()
Note
- This should not happen. Two possible reasons could be:
  - Forgetting to set VSF_OS_CFG_MAIN_MODE to VSF_OS_CFG_MAIN_MODE_THREAD while using VSF kernel APIs such as vsf_delay_ms and vsf_sem_pend.
  - Using any vsf_eda_xxxx APIs when VSF_OS_CFG_MAIN_MODE is not VSF_OS_CFG_MAIN_MODE_THREAD, i.e. calling APIs that depend on an eda context from a non-eda context.
- This should not happen. One possible reason is:
  - Starting a task (an eda, vsf_task, vsf_pt or vsf_thread) from the idle task. Please use vsf_prio_0 when starting a task from the idle task.
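To avoid the first class of errors, the main mode must be configured before blocking kernel APIs are used from the user entry. A hedged configuration sketch follows; placing it in vsf_usr_cfg.h is the usual convention, but that file name is an assumption about your project layout:

    // run the user entry in thread mode so that blocking kernel APIs
    // (vsf_delay_ms, vsf_sem_pend, ...) may be called from user_main
    #define VSF_OS_CFG_MAIN_MODE    VSF_OS_CFG_MAIN_MODE_THREAD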
◆ vsf_cpu_usage_start()
◆ vsf_cpu_usage_stop()
extern void vsf_cpu_usage_stop(void)
◆ __vsf_sleep()
extern void __vsf_sleep(int level)