Skip to content

Commit d4d3372

Browse files
Author: zhengmiao (committed)
Merge branch 'limengzhang/refactor_dataset_wrapper' into 'refactor_dev'
[Refactor] Refactor DatasetWrapper See merge request openmmlab-enterprise/openmmlab-ce/mmsegmentation!36
2 parents eef12a0 + f2bac79 commit d4d3372

File tree

11 files changed

+316
-435
lines changed

11 files changed

+316
-435
lines changed

configs/_base_/datasets/chase_db1.py

+10-7
Original file line numberDiff line numberDiff line change
@@ -26,14 +26,17 @@
2626
num_workers=4,
2727
persistent_workers=True,
2828
sampler=dict(type='InfiniteSampler', shuffle=True),
29-
type='RepeatDataset',
30-
times=40000,
3129
dataset=dict(
32-
type=dataset_type,
33-
data_root=data_root,
34-
data_prefix=dict(
35-
img_path='images/training', seg_map_path='annotations/training'),
36-
pipeline=train_pipeline))
30+
type='RepeatDataset',
31+
times=40000,
32+
dataset=dict(
33+
type=dataset_type,
34+
data_root=data_root,
35+
data_prefix=dict(
36+
img_path='images/training',
37+
seg_map_path='annotations/training'),
38+
pipeline=train_pipeline)))
39+
3740
val_dataloader = dict(
3841
batch_size=1,
3942
num_workers=4,

configs/_base_/datasets/drive.py

+9-7
Original file line numberDiff line numberDiff line change
@@ -25,14 +25,16 @@
2525
num_workers=4,
2626
persistent_workers=True,
2727
sampler=dict(type='InfiniteSampler', shuffle=True),
28-
type='RepeatDataset',
29-
times=40000,
3028
dataset=dict(
31-
type=dataset_type,
32-
data_root=data_root,
33-
data_prefix=dict(
34-
img_path='images/training', seg_map_path='annotations/training'),
35-
pipeline=train_pipeline))
29+
type='RepeatDataset',
30+
times=40000,
31+
dataset=dict(
32+
type=dataset_type,
33+
data_root=data_root,
34+
data_prefix=dict(
35+
img_path='images/training',
36+
seg_map_path='annotations/training'),
37+
pipeline=train_pipeline)))
3638
val_dataloader = dict(
3739
batch_size=1,
3840
num_workers=4,

configs/_base_/datasets/hrf.py

+9-7
Original file line numberDiff line numberDiff line change
@@ -25,14 +25,16 @@
2525
num_workers=4,
2626
persistent_workers=True,
2727
sampler=dict(type='InfiniteSampler', shuffle=True),
28-
type='RepeatDataset',
29-
times=40000,
3028
dataset=dict(
31-
type=dataset_type,
32-
data_root=data_root,
33-
data_prefix=dict(
34-
img_path='images/training', seg_map_path='annotations/training'),
35-
pipeline=train_pipeline))
29+
type='RepeatDataset',
30+
times=40000,
31+
dataset=dict(
32+
type=dataset_type,
33+
data_root=data_root,
34+
data_prefix=dict(
35+
img_path='images/training',
36+
seg_map_path='annotations/training'),
37+
pipeline=train_pipeline)))
3638
val_dataloader = dict(
3739
batch_size=1,
3840
num_workers=4,
configs/_base_/datasets/pascal_voc12_aug.py

+59-6
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,62 @@
1-
_base_ = './pascal_voc12.py'
21
# dataset settings
2+
dataset_type = 'PascalVOCDataset'
3+
data_root = 'data/VOCdevkit/VOC2012'
4+
crop_size = (512, 512)
5+
train_pipeline = [
6+
dict(type='LoadImageFromFile'),
7+
dict(type='LoadAnnotations'),
8+
dict(type='RandomResize', scale=(2048, 512), ratio_range=(0.5, 2.0)),
9+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
10+
dict(type='RandomFlip', prob=0.5),
11+
dict(type='PhotoMetricDistortion'),
12+
dict(type='Pad', size=crop_size),
13+
dict(type='PackSegInputs')
14+
]
15+
16+
test_pipeline = [
17+
dict(type='LoadImageFromFile'),
18+
dict(type='Resize', scale=(2048, 512), keep_ratio=True),
19+
# add loading annotation after ``Resize`` because ground truth
20+
# does not need to do resize data transform
21+
dict(type='LoadAnnotations'),
22+
dict(type='PackSegInputs')
23+
]
24+
25+
dataset_train = dict(
26+
type=dataset_type,
27+
data_root=data_root,
28+
data_prefix=dict(img_path='JPEGImages', seg_map_path='SegmentationClass'),
29+
ann_file='ImageSets/Segmentation/train.txt',
30+
pipeline=train_pipeline)
31+
32+
dataset_aug = dict(
33+
type=dataset_type,
34+
data_root=data_root,
35+
data_prefix=dict(
36+
img_path='JPEGImages', seg_map_path='SegmentationClassAug'),
37+
ann_file='ImageSets/Segmentation/aug.txt',
38+
pipeline=train_pipeline)
39+
340
train_dataloader = dict(
41+
batch_size=4,
42+
num_workers=4,
43+
persistent_workers=True,
44+
sampler=dict(type='InfiniteSampler', shuffle=True),
45+
dataset=dict(type='ConcatDataset', datasets=[dataset_train, dataset_aug]))
46+
47+
val_dataloader = dict(
48+
batch_size=1,
49+
num_workers=4,
50+
persistent_workers=True,
51+
sampler=dict(type='DefaultSampler', shuffle=False),
452
dataset=dict(
5-
ann_dir=['SegmentationClass', 'SegmentationClassAug'],
6-
ann_file=[
7-
'ImageSets/Segmentation/train.txt',
8-
'ImageSets/Segmentation/aug.txt'
9-
]))
53+
type=dataset_type,
54+
data_root=data_root,
55+
data_prefix=dict(
56+
img_path='JPEGImages', seg_map_path='SegmentationClass'),
57+
ann_file='ImageSets/Segmentation/val.txt',
58+
pipeline=test_pipeline))
59+
test_dataloader = val_dataloader
60+
61+
val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
62+
test_evaluator = val_evaluator

configs/_base_/datasets/stare.py

+9-7
Original file line numberDiff line numberDiff line change
@@ -25,14 +25,16 @@
2525
num_workers=4,
2626
persistent_workers=True,
2727
sampler=dict(type='InfiniteSampler', shuffle=True),
28-
type='RepeatDataset',
29-
times=40000,
3028
dataset=dict(
31-
type=dataset_type,
32-
data_root=data_root,
33-
data_prefix=dict(
34-
img_path='images/training', seg_map_path='annotations/training'),
35-
pipeline=train_pipeline))
29+
type='RepeatDataset',
30+
times=40000,
31+
dataset=dict(
32+
type=dataset_type,
33+
data_root=data_root,
34+
data_prefix=dict(
35+
img_path='images/training',
36+
seg_map_path='annotations/training'),
37+
pipeline=train_pipeline)))
3638
val_dataloader = dict(
3739
batch_size=1,
3840
num_workers=4,

mmseg/datasets/__init__.py

+10-10
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,14 @@
11
# Copyright (c) OpenMMLab. All rights reserved.
2+
from mmengine.dataset import ConcatDataset, RepeatDataset
3+
4+
from mmseg.registry import DATASETS, TRANSFORMS
25
from .ade import ADE20KDataset
3-
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
46
from .chase_db1 import ChaseDB1Dataset
57
from .cityscapes import CityscapesDataset
68
from .coco_stuff import COCOStuffDataset
79
from .custom import CustomDataset
810
from .dark_zurich import DarkZurichDataset
9-
from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset,
10-
RepeatDataset)
11+
from .dataset_wrappers import MultiImageMixDataset
1112
from .drive import DRIVEDataset
1213
from .hrf import HRFDataset
1314
from .isaid import iSAIDDataset
@@ -20,11 +21,10 @@
2021
from .voc import PascalVOCDataset
2122

2223
__all__ = [
23-
'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
24-
'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset',
25-
'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset',
26-
'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset',
27-
'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset',
28-
'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset',
29-
'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset'
24+
'CustomDataset', 'ConcatDataset', 'RepeatDataset', 'DATASETS',
25+
'TRANSFORMS', 'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset',
26+
'PascalContextDataset', 'PascalContextDataset59', 'ChaseDB1Dataset',
27+
'DRIVEDataset', 'HRFDataset', 'STAREDataset', 'DarkZurichDataset',
28+
'NightDrivingDataset', 'COCOStuffDataset', 'LoveDADataset',
29+
'MultiImageMixDataset', 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset'
3030
]

mmseg/datasets/builder.py

-191
This file was deleted.

0 commit comments

Comments (0)