@@ -346,6 +346,37 @@ static void put_probe_ref(void)
346
346
mutex_unlock (& blk_probe_mutex );
347
347
}
348
348
349
/*
 * blk_trace_start - transition a blktrace instance into the running state
 * @bt: the trace instance to start
 *
 * Moves @bt from Blktrace_setup or Blktrace_stopped to Blktrace_running
 * and links it onto the global running_trace_list.  Returns 0 on success,
 * -EINVAL if @bt is not in a startable state.
 *
 * NOTE(review): callers appear to hold q->debugfs_mutex (see the
 * lockdep_is_held() check in __blk_trace_startstop) — confirm all call
 * sites before relying on that for trace_state serialization.
 */
static int blk_trace_start(struct blk_trace *bt)
{
	/* Only a freshly set up or a previously stopped trace may start. */
	if (bt->trace_state != Blktrace_setup &&
	    bt->trace_state != Blktrace_stopped)
		return -EINVAL;

	blktrace_seq++;
	/*
	 * Full barrier between the sequence bump and the state change —
	 * preserved from the original code; do not reorder these stores.
	 */
	smp_mb();
	bt->trace_state = Blktrace_running;
	/* Publish @bt on the global list of running traces. */
	raw_spin_lock_irq(&running_trace_lock);
	list_add(&bt->running_list, &running_trace_list);
	raw_spin_unlock_irq(&running_trace_lock);
	/* Emit a timestamp note so trace consumers can anchor wall time. */
	trace_note_time(bt);

	return 0;
}
366
/*
 * blk_trace_stop - transition a blktrace instance out of the running state
 * @bt: the trace instance to stop
 *
 * Moves @bt from Blktrace_running to Blktrace_stopped, unlinks it from the
 * global running_trace_list, and flushes buffered data to the relay
 * channel.  Returns 0 on success, -EINVAL if @bt was not running.
 */
static int blk_trace_stop(struct blk_trace *bt)
{
	/* Stopping only makes sense for a currently running trace. */
	if (bt->trace_state != Blktrace_running)
		return -EINVAL;

	bt->trace_state = Blktrace_stopped;
	/* list_del_init keeps running_list safely re-linkable on restart. */
	raw_spin_lock_irq(&running_trace_lock);
	list_del_init(&bt->running_list);
	raw_spin_unlock_irq(&running_trace_lock);
	/* Push any not-yet-visible trace data out to relay readers. */
	relay_flush(bt->rchan);

	return 0;
}
349
380
static void blk_trace_cleanup (struct request_queue * q , struct blk_trace * bt )
350
381
{
351
382
synchronize_rcu ();
@@ -658,44 +689,17 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
658
689
659
690
static int __blk_trace_startstop (struct request_queue * q , int start )
660
691
{
661
- int ret ;
662
692
struct blk_trace * bt ;
663
693
664
694
bt = rcu_dereference_protected (q -> blk_trace ,
665
695
lockdep_is_held (& q -> debugfs_mutex ));
666
696
if (bt == NULL )
667
697
return - EINVAL ;
668
698
669
- /*
670
- * For starting a trace, we can transition from a setup or stopped
671
- * trace. For stopping a trace, the state must be running
672
- */
673
- ret = - EINVAL ;
674
- if (start ) {
675
- if (bt -> trace_state == Blktrace_setup ||
676
- bt -> trace_state == Blktrace_stopped ) {
677
- blktrace_seq ++ ;
678
- smp_mb ();
679
- bt -> trace_state = Blktrace_running ;
680
- raw_spin_lock_irq (& running_trace_lock );
681
- list_add (& bt -> running_list , & running_trace_list );
682
- raw_spin_unlock_irq (& running_trace_lock );
683
-
684
- trace_note_time (bt );
685
- ret = 0 ;
686
- }
687
- } else {
688
- if (bt -> trace_state == Blktrace_running ) {
689
- bt -> trace_state = Blktrace_stopped ;
690
- raw_spin_lock_irq (& running_trace_lock );
691
- list_del_init (& bt -> running_list );
692
- raw_spin_unlock_irq (& running_trace_lock );
693
- relay_flush (bt -> rchan );
694
- ret = 0 ;
695
- }
696
- }
697
-
698
- return ret ;
699
+ if (start )
700
+ return blk_trace_start (bt );
701
+ else
702
+ return blk_trace_stop (bt );
699
703
}
700
704
701
705
int blk_trace_startstop (struct request_queue * q , int start )
@@ -1614,13 +1618,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
1614
1618
if (bt == NULL )
1615
1619
return - EINVAL ;
1616
1620
1617
- if (bt -> trace_state == Blktrace_running ) {
1618
- bt -> trace_state = Blktrace_stopped ;
1619
- raw_spin_lock_irq (& running_trace_lock );
1620
- list_del_init (& bt -> running_list );
1621
- raw_spin_unlock_irq (& running_trace_lock );
1622
- relay_flush (bt -> rchan );
1623
- }
1621
+ blk_trace_stop (bt );
1624
1622
1625
1623
put_probe_ref ();
1626
1624
synchronize_rcu ();
0 commit comments