@@ -17,6 +17,9 @@
 #include <errno.h>
 #include <ksched.h>
 #include <zephyr/sys/printk.h>
+#include <zephyr/logging/log.h>
+
+LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 
 static inline void flag_clear(uint32_t *flagp,
 			      uint32_t bit)
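The new LOG_MODULE_DECLARE line attaches work.c to the kernel's existing `os` log module rather than registering a module of its own, so the LOG_WRN/LOG_INF calls introduced below are compiled in and filtered according to CONFIG_KERNEL_LOG_LEVEL.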
@@ -599,6 +602,55 @@ bool k_work_cancel_sync(struct k_work *work,
 	return pending;
 }
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+static void workto_handler(struct _timeout *to)
+{
+	struct k_work_q *queue = CONTAINER_OF(to, struct k_work_q, workto);
+	k_spinlock_key_t key;
+	const char *name;
+	struct k_work *work;
+	k_work_handler_t handler;
+
+	key = k_spin_lock(&lock);
+
+	flag_set(&queue->flags, K_WORK_QUEUE_BLOCKED_BIT);
+
+	name = k_thread_name_get(&queue->thread);
+	work = queue->work;
+	handler = work->handler;
+
+	if (name != NULL) {
+		LOG_WRN("queue %s blocked by work %p with handler %p", name, work, handler);
+	} else {
+		LOG_WRN("queue %p blocked by work %p with handler %p", queue, work, handler);
+	}
+
+	k_spin_unlock(&lock, key);
+}
+
+static void work_timeout_start_locked(struct k_work_q *queue, struct k_work *work)
+{
+	if (K_TIMEOUT_EQ(queue->work_timeout, K_FOREVER)) {
+		return;
+	}
+
+	queue->work = work;
+	z_add_timeout(&queue->workto, workto_handler, queue->work_timeout);
+}
+
+static void work_timeout_stop_locked(struct k_work_q *queue)
+{
+	if (K_TIMEOUT_EQ(queue->work_timeout, K_FOREVER)) {
+		return;
+	}
+
+	z_abort_timeout(&queue->workto);
+	if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_BLOCKED_BIT)) {
+		LOG_INF("queue %p unblocked", queue);
+	}
+}
+#endif
+
 /* Loop executed by a work queue thread.
  *
  * @param workq_ptr pointer to the work queue structure
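For orientation: these helpers rely on three struct k_work_q members that are added to the work queue header elsewhere in this change and do not appear in this diff. A minimal sketch of their likely shape, using only the names visible in the hunk above:

/* Sketch only -- the real declaration lives in the kernel headers.
 * Member names (workto, work, work_timeout) are taken from the code
 * above; their placement among the existing members is illustrative.
 */
struct k_work_q {
	/* ... existing members (thread, pending, notifyq, flags) ... */
#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
	struct _timeout workto;   /* per-queue watchdog timeout */
	struct k_work *work;      /* item currently being timed */
	k_timeout_t work_timeout; /* per-item budget; K_FOREVER disables it */
#endif
};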
@@ -678,6 +730,10 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)
 			continue;
 		}
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+		work_timeout_start_locked(queue, work);
+#endif
+
 		k_spin_unlock(&lock, key);
 
 		__ASSERT_NO_MSG(handler != NULL);
@@ -690,6 +746,10 @@ static void work_queue_main(void *workq_ptr, void *p2, void *p3)
 		 */
 		key = k_spin_lock(&lock);
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+		work_timeout_stop_locked(queue);
+#endif
+
 		flag_clear(&work->flags, K_WORK_RUNNING_BIT);
 		if (flag_test(&work->flags, K_WORK_FLUSHING_BIT)) {
 			finalize_flush_locked(work);
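These two hunks bracket each handler invocation in work_queue_main(): the per-item timeout is armed under the lock just before it is released to run the handler, and cancelled as soon as the handler returns and the lock is reacquired. Only a handler that overruns its budget ever sees workto_handler() fire.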
@@ -761,6 +821,14 @@ void k_work_queue_start(struct k_work_q *queue,
 		queue->thread.base.user_options |= K_ESSENTIAL;
 	}
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+	if ((cfg != NULL) && (cfg->work_timeout_ms != 0)) {
+		queue->work_timeout = K_MSEC(cfg->work_timeout_ms);
+	} else {
+		queue->work_timeout = K_FOREVER;
+	}
+#endif
+
 	k_thread_start(&queue->thread);
 
 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
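A minimal usage sketch, assuming the public struct k_work_queue_config gains the work_timeout_ms field this hunk reads; the queue name, stack size, and priority below are illustrative:

#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
static struct k_work_q my_wq;

void start_my_queue(void)
{
	/* Hypothetical example values; work_timeout_ms is the new field. */
	struct k_work_queue_config cfg = {
		.name = "my_wq",
		.work_timeout_ms = 500, /* warn if a handler runs > 500 ms */
	};

	k_work_queue_init(&my_wq);
	k_work_queue_start(&my_wq, my_wq_stack,
			   K_THREAD_STACK_SIZEOF(my_wq_stack),
			   K_PRIO_PREEMPT(8), &cfg);
}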
@@ -853,6 +921,13 @@ int k_work_queue_stop(struct k_work_q *queue, k_timeout_t timeout)
 	return 0;
 }
 
+#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
+bool k_work_queue_is_blocked(struct k_work_q *queue)
+{
+	return flag_test(&queue->flags, K_WORK_QUEUE_BLOCKED_BIT);
+}
+#endif
+
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 
 /* Timeout handler for delayable work.
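And a sketch of how the new k_work_queue_is_blocked() predicate might be consumed, for example by a supervision thread; watchdog_check() and its recovery action are hypothetical:

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

#if defined(CONFIG_WORKQUEUE_WORK_TIMEOUT)
/* Hypothetical supervisor hook: poll the queue and react if a work
 * item has overrun its budget and the queue is flagged as blocked.
 */
static void watchdog_check(struct k_work_q *queue)
{
	if (k_work_queue_is_blocked(queue)) {
		/* Recovery policy is application-specific: log, reset
		 * the offending subsystem, or reboot.
		 */
		printk("work queue %p appears stuck\n", queue);
	}
}
#endif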