@@ -17,6 +17,9 @@
 #include <errno.h>
 #include <ksched.h>
 #include <zephyr/sys/printk.h>
+#include <zephyr/logging/log.h>
+
+LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 
 static inline void flag_clear(uint32_t *flagp,
			       uint32_t bit)
@@ -599,6 +602,50 @@ bool k_work_cancel_sync(struct k_work *work,
 	return pending;
 }
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+static void workto_handler(struct _timeout *to)
+{
+	struct k_work_q *queue = CONTAINER_OF(to, struct k_work_q, workto);
+	k_spinlock_key_t key;
+	const char *name;
+	struct k_work *work;
+	k_work_handler_t handler;
+
+	key = k_spin_lock(&lock);
+	name = k_thread_name_get(&queue->thread);
+	work = queue->work;
+	handler = work->handler;
+	k_spin_unlock(&lock, key);
+
+	if (name != NULL) {
+		LOG_ERR("queue %s blocked by work %p with handler %p", name, work, handler);
+	} else {
+		LOG_ERR("queue %p blocked by work %p with handler %p", queue, work, handler);
+	}
+
+	k_thread_abort(&queue->thread);
+}
+
+static void work_timeout_start_locked(struct k_work_q *queue, struct k_work *work)
+{
+	if (K_TIMEOUT_EQ(queue->work_timeout, K_FOREVER)) {
+		return;
+	}
+
+	queue->work = work;
+	z_add_timeout(&queue->workto, workto_handler, queue->work_timeout);
+}
+
+static void work_timeout_stop_locked(struct k_work_q *queue)
+{
+	if (K_TIMEOUT_EQ(queue->work_timeout, K_FOREVER)) {
+		return;
+	}
+
+	z_abort_timeout(&queue->workto);
+}
+#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+
 /* Loop executed by a work queue thread.
  *
  * @param workq_ptr pointer to the work queue structure
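
The helpers above reference three queue fields (`workto`, `work`, `work_timeout`) whose declarations are not part of this diff; they would come from a matching header change. The following is a rough sketch of what that addition to `struct k_work_q` presumably looks like, inferred purely from the usage in this hunk:

/* Inferred sketch only; the real header change is not in this diff.
 * Field types follow from the usage above: CONTAINER_OF(to, struct
 * k_work_q, workto), queue->work, and K_TIMEOUT_EQ(queue->work_timeout, ...).
 */
struct k_work_q {
	struct k_thread thread;   /* existing field; target of k_thread_abort() above */
	/* ... other existing fields ... */
#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
	struct _timeout workto;   /* watchdog armed while an item executes */
	struct k_work *work;      /* item currently being executed */
	k_timeout_t work_timeout; /* per-item budget; K_FOREVER disables the watchdog */
#endif
};
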
@@ -678,6 +725,10 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)
 			continue;
 		}
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+		work_timeout_start_locked(queue, work);
+#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+
 		k_spin_unlock(&lock, key);
 
 		__ASSERT_NO_MSG(handler != NULL);
@@ -690,6 +741,10 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)
 		 */
 		key = k_spin_lock(&lock);
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+		work_timeout_stop_locked(queue);
+#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+
 		flag_clear(&work->flags, K_WORK_RUNNING_BIT);
 		if (flag_test(&work->flags, K_WORK_FLUSHING_BIT)) {
 			finalize_flush_locked(work);
@@ -761,6 +816,14 @@ void k_work_queue_start(struct k_work_q *queue,
 		queue->thread.base.user_options |= K_ESSENTIAL;
 	}
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+	if ((cfg != NULL) && (cfg->work_timeout_ms)) {
+		queue->work_timeout = K_MSEC(cfg->work_timeout_ms);
+	} else {
+		queue->work_timeout = K_FOREVER;
+	}
+#endif /* defined(CONFIG_WORKQUEUE_WORK_TIMEOUT) */
+
 	k_thread_start(&queue->thread);
 
 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
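
For illustration, a hedged usage sketch (not part of the patch): it starts a queue with a 500 ms per-item watchdog through the new `work_timeout_ms` config field, using the existing `k_work_queue_init()`/`k_work_queue_start()` APIs. The names `my_wq`, `my_wq_stack`, and `start_my_queue` are invented for the example. If a handler overruns the budget, `workto_handler()` above logs the offending work item and handler and then aborts the queue thread.

/* Usage sketch, assuming CONFIG_WORKQUEUE_WORK_TIMEOUT=y and the
 * work_timeout_ms field added to struct k_work_queue_config by this change.
 * my_wq, my_wq_stack, and start_my_queue are illustrative names.
 */
#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
static struct k_work_q my_wq;

void start_my_queue(void)
{
	struct k_work_queue_config cfg = {
		.name = "my_wq",
		.work_timeout_ms = 500, /* per-item budget; 0 would leave it at K_FOREVER */
	};

	k_work_queue_init(&my_wq);
	k_work_queue_start(&my_wq, my_wq_stack,
			   K_THREAD_STACK_SIZEOF(my_wq_stack),
			   K_PRIO_PREEMPT(8), &cfg);
}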