@@ -532,6 +532,26 @@ void dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 void dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp,
     struct zio_prop *zp);
 
+/*
+ * DB_RF_* are to be used for dbuf_read() or in limited other cases.
+ */
+typedef enum dmu_flags {
+	DB_RF_MUST_SUCCEED	= 0,		/* Suspend on I/O errors. */
+	DB_RF_CANFAIL		= 1 << 0,	/* Return on I/O errors. */
+	DB_RF_HAVESTRUCT	= 1 << 1,	/* dn_struct_rwlock is locked. */
+	DB_RF_NEVERWAIT		= 1 << 2,
+	DMU_READ_PREFETCH	= 0,		/* Try speculative prefetch. */
+	DMU_READ_NO_PREFETCH	= 1 << 3,	/* Don't prefetch speculatively. */
+	DB_RF_NOPREFETCH	= DMU_READ_NO_PREFETCH,
+	DMU_READ_NO_DECRYPT	= 1 << 4,	/* Don't decrypt. */
+	DB_RF_NO_DECRYPT	= DMU_READ_NO_DECRYPT,
+	DMU_DIRECTIO		= 1 << 5,	/* Bypass ARC. */
+	DMU_UNCACHEDIO		= 1 << 6,	/* Reduce caching. */
+	DMU_PARTIAL_FIRST	= 1 << 7,	/* First partial access. */
+	DMU_PARTIAL_MORE	= 1 << 8,	/* Following partial access. */
+	DMU_KEEP_CACHING	= 1 << 9,	/* Don't affect caching. */
+} dmu_flags_t;
+
 /*
  * The bonus data is accessed more or less like a regular buffer.
  * You must dmu_bonus_hold() to get the buffer, which will give you a
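The dmu_flags_t values introduced above are distinct bits (aside from the zero values and the DB_RF_* aliases), so callers can OR several behaviors into one argument. A minimal sketch, assuming the dmu_read() signature as updated later in this patch; the function name and sizes here are hypothetical:

#include <sys/dmu.h>

/*
 * Hypothetical helper: read 4 KiB from an object without triggering
 * speculative prefetch and with reduced caching. Both flag values
 * come from the dmu_flags_t enum added above.
 */
static int
read_cold_block(objset_t *os, uint64_t object, uint64_t offset, void *buf)
{
	return (dmu_read(os, object, offset, 4096, buf,
	    DMU_READ_NO_PREFETCH | DMU_UNCACHEDIO));
}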
@@ -547,7 +567,7 @@ void dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp,
 int dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag,
     dmu_buf_t **dbp);
 int dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
-    uint32_t flags);
+    dmu_flags_t flags);
 int dmu_bonus_max(void);
 int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *);
 int dmu_set_bonustype(dmu_buf_t *, dmu_object_type_t, dmu_tx_t *);
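As the header comment above the hunk notes, bonus data must be held before it is accessed. A hedged sketch of the usual hold/release pattern, using dmu_buf_rele() and the FTAG tag convention from this header; error handling is kept minimal:

#include <sys/dmu.h>

/*
 * Sketch: copy out an object's bonus data. db_data and db_size are
 * the standard dmu_buf_t fields; the hold is released when done.
 */
static int
copy_bonus(objset_t *os, uint64_t object, void *out, uint64_t len)
{
	dmu_buf_t *db;
	int err = dmu_bonus_hold(os, object, FTAG, &db);

	if (err != 0)
		return (err);
	memcpy(out, db->db_data, MIN(len, db->db_size));
	dmu_buf_rele(db, FTAG);
	return (0);
}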
@@ -558,9 +578,9 @@ int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *);
  * Special spill buffer support used by "SA" framework
  */
 
-int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, const void *tag,
-    dmu_buf_t **dbp);
-int dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags,
+int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, dmu_flags_t flags,
+    const void *tag, dmu_buf_t **dbp);
+int dmu_spill_hold_by_dnode(dnode_t *dn, dmu_flags_t flags,
     const void *tag, dmu_buf_t **dbp);
 int dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp);
 
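The spill-hold routines now take dmu_flags_t as well. A small sketch of holding a spill buffer through an already-held bonus buffer, passing DB_RF_CANFAIL so an I/O error is returned as an errno rather than suspending; this wrapper is illustrative, not part of the header:

#include <sys/dmu.h>

/*
 * Sketch: hold the spill buffer associated with a held bonus buffer.
 * DB_RF_CANFAIL asks the read path to return on I/O errors, per the
 * enum comment above.
 */
static int
hold_spill(dmu_buf_t *bonus, dmu_buf_t **spill_dbp)
{
	return (dmu_spill_hold_by_bonus(bonus, DB_RF_CANFAIL, FTAG,
	    spill_dbp));
}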
@@ -579,17 +599,17 @@ int dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp);
  * The object number must be a valid, allocated object number.
  */
 int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
-    const void *tag, dmu_buf_t **, int flags);
+    const void *tag, dmu_buf_t **, dmu_flags_t flags);
 int dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
     uint64_t length, int read, const void *tag, int *numbufsp,
     dmu_buf_t ***dbpp);
 int dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
     const void *tag, dmu_buf_t **dbp);
 int dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
-    const void *tag, dmu_buf_t **dbp, int flags);
+    const void *tag, dmu_buf_t **dbp, dmu_flags_t flags);
 int dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
     uint64_t length, boolean_t read, const void *tag, int *numbufsp,
-    dmu_buf_t ***dbpp, uint32_t flags);
+    dmu_buf_t ***dbpp, dmu_flags_t flags);
 int dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset, const void *tag,
     dmu_buf_t **dbp);
 
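dmu_buf_hold() now takes dmu_flags_t directly rather than a plain int. A minimal sketch of holding the dbuf that covers a given offset and inspecting its contents; per the comment above the hunk, the object number must be valid and allocated, and db_offset/db_data are the standard dmu_buf_t fields:

#include <sys/dmu.h>

/*
 * Sketch: hold the buffer covering `offset`, read one byte out of it,
 * then release the hold.
 */
static int
peek_byte(objset_t *os, uint64_t object, uint64_t offset, uint8_t *out)
{
	dmu_buf_t *db;
	int err = dmu_buf_hold(os, object, offset, FTAG, &db,
	    DMU_READ_NO_PREFETCH);

	if (err != 0)
		return (err);
	*out = ((uint8_t *)db->db_data)[offset - db->db_offset];
	dmu_buf_rele(db, FTAG);
	return (0);
}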
@@ -781,6 +801,7 @@ struct blkptr *dmu_buf_get_blkptr(dmu_buf_t *db);
  * (ie. you've called dmu_tx_hold_object(tx, db->db_object)).
  */
 void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);
+void dmu_buf_will_dirty_flags(dmu_buf_t *db, dmu_tx_t *tx, dmu_flags_t flags);
 boolean_t dmu_buf_is_dirty(dmu_buf_t *db, dmu_tx_t *tx);
 void dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
     const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx);
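The new dmu_buf_will_dirty_flags() variant lets the caller pass dmu_flags_t when dirtying a buffer. A hedged sketch; the flag choice (DMU_UNCACHEDIO) is only there to illustrate passing a flag through, and the transaction is assumed to already hold the object, as the comment above the hunk requires:

#include <sys/dmu.h>

/*
 * Sketch: dirty a held buffer inside an assigned transaction, then
 * overwrite its contents.
 */
static void
fill_buf(dmu_buf_t *db, dmu_tx_t *tx, const void *src, uint64_t len)
{
	dmu_buf_will_dirty_flags(db, tx, DMU_UNCACHEDIO);
	memcpy(db->db_data, src, MIN(len, db->db_size));
}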
@@ -874,40 +895,36 @@ int dmu_free_long_object(objset_t *os, uint64_t object);
  * Canfail routines will return 0 on success, or an errno if there is a
  * nonrecoverable I/O error.
  */
-#define	DMU_READ_PREFETCH	0 /* prefetch */
-#define	DMU_READ_NO_PREFETCH	1 /* don't prefetch */
-#define	DMU_READ_NO_DECRYPT	2 /* don't decrypt */
-#define	DMU_DIRECTIO		4 /* use Direct I/O */
-
 int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
-    void *buf, uint32_t flags);
+    void *buf, dmu_flags_t flags);
 int dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
-    uint32_t flags);
+    dmu_flags_t flags);
 void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
     const void *buf, dmu_tx_t *tx);
 int dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
-    const void *buf, dmu_tx_t *tx);
-int dmu_write_by_dnode_flags(dnode_t *dn, uint64_t offset, uint64_t size,
-    const void *buf, dmu_tx_t *tx, uint32_t flags);
+    const void *buf, dmu_tx_t *tx, dmu_flags_t flags);
 void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
     dmu_tx_t *tx);
 #ifdef _KERNEL
-int dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size);
-int dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size);
-int dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size);
+int dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
+    dmu_flags_t flags);
+int dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
+    dmu_flags_t flags);
+int dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size,
+    dmu_flags_t flags);
 int dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
-    dmu_tx_t *tx);
+    dmu_tx_t *tx, dmu_flags_t flags);
 int dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
-    dmu_tx_t *tx);
+    dmu_tx_t *tx, dmu_flags_t flags);
 int dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size,
-    dmu_tx_t *tx);
+    dmu_tx_t *tx, dmu_flags_t flags);
 #endif
 struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
 void dmu_return_arcbuf(struct arc_buf *buf);
 int dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset,
-    struct arc_buf *buf, dmu_tx_t *tx);
+    struct arc_buf *buf, dmu_tx_t *tx, dmu_flags_t flags);
 int dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
-    struct arc_buf *buf, dmu_tx_t *tx);
+    struct arc_buf *buf, dmu_tx_t *tx, dmu_flags_t flags);
 #define	dmu_assign_arcbuf	dmu_assign_arcbuf_by_dbuf
 extern uint_t zfs_max_recordsize;
 
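With the old #define flags folded into dmu_flags_t and dmu_write_by_dnode_flags() merged into dmu_write_by_dnode(), every read/write entry point now takes the same flag type. A hedged sketch of a chunked write that tags the first chunk DMU_PARTIAL_FIRST and the rest DMU_PARTIAL_MORE; the chunk size and looping policy are illustrative assumptions, not something this header prescribes:

#include <sys/dmu.h>

/*
 * Sketch: write `size` bytes in fixed-size chunks, marking the first
 * chunk as the start of a partial access and later chunks as its
 * continuation, per the flag comments in the enum above.
 */
static int
write_chunked(dnode_t *dn, uint64_t off, uint64_t size, const char *buf,
    dmu_tx_t *tx)
{
	uint64_t done = 0;

	while (done < size) {
		uint64_t n = MIN(size - done, 128ULL * 1024);
		dmu_flags_t fl = (done == 0) ?
		    DMU_PARTIAL_FIRST : DMU_PARTIAL_MORE;
		int err = dmu_write_by_dnode(dn, off + done, n, buf + done,
		    tx, fl);

		if (err != 0)
			return (err);
		done += n;
	}
	return (0);
}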