from omegaconf import OmegaConf
from torch.utils.data import DataLoader, default_collate, ConcatDataset
from fvcore.common.registry import Registry

from .datasets.dataset_wrapper import DATASETWRAPPER_REGISTRY

DATASET_REGISTRY = Registry("dataset")
DATASET_REGISTRY.__doc__ = """
Registry for datasets: maps a dataset name to the callable that builds the dataset object.
Currently this behaves much like registering dataset loading functions, but keeping the
registered entries as classes leaves room for future extension.
"""

def get_dataset(cfg, split):
    """Build the list of (wrapped) datasets configured under ``cfg.data.<split>``."""
    assert cfg.data.get(split), f"No valid dataset name in {split}."
    dataset_list = []
    print(f"{split}: {', '.join(cfg.data.get(split))}")
    for dataset_name in cfg.data.get(split):
        _dataset = DATASET_REGISTRY.get(dataset_name)(cfg, split)
        assert len(_dataset), f"Dataset '{dataset_name}' is empty!"
        # cfg.data_wrapper is either a single wrapper name (str) or a per-split mapping.
        wrapper = cfg.data_wrapper if isinstance(cfg.data_wrapper, str) \
            else cfg.data_wrapper.get(split, cfg.data_wrapper)
        _dataset = DATASETWRAPPER_REGISTRY.get(wrapper)(cfg, _dataset, split=split)
        # Conduct voxelization
        # TODO: fix voxel config
        if cfg.data.get('use_voxel', None):
            _dataset = DATASETWRAPPER_REGISTRY.get('VoxelDatasetWrapper')(cfg, _dataset)
        dataset_list.append(_dataset)

    print('=' * 50)
    print(f'{"Dataset":<20} {"Size":>6}')
    total = sum(len(dataset) for dataset in dataset_list)
    for dataset_name, dataset in zip(cfg.data.get(split), dataset_list):
        print(f'{dataset_name:<20} {len(dataset):>6} ({len(dataset) / total * 100:.1f}%)')
    print(f'{"Total":<20} {total:>6}')
    print('=' * 50)
    # Training-style splits are merged into a single ConcatDataset; evaluation splits
    # keep a list so build_dataloader() can create one DataLoader per dataset.
    if split in ['warmup', 'pretrain', 'train']:
        dataset_list = ConcatDataset(dataset_list)
    return dataset_list
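

# Sketch (assumption) of the config fields read by get_dataset() and build_dataloader();
# the dataset and wrapper names below are placeholders, not required values:
#
#     cfg = OmegaConf.create({
#         'data': {'train': ['MyDataset'], 'val': ['MyDataset'], 'use_voxel': False},
#         'data_wrapper': 'MyDatasetWrapper',            # or a per-split mapping
#         'dataloader': {'batchsize': 4, 'batchsize_eval': 1, 'num_workers': 4},
#     })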


def build_dataloader(cfg, split='train'):
    """_summary_
    Unittest:
        dataloader_train = build_dataloader(default_cfg, split='train')
        for _item in dataloader_train:
            print(_item.keys())

    Args:
        cfg (_type_): _description_
        split (str, optional): _description_. Defaults to 'train'.

    Returns:
        _type_: _description_
    """
    
    if split in ['warmup', 'pretrain']:
        dataset = get_dataset(cfg, split)
        # Use the custom collate_fn of the first wrapped dataset if it defines one.
        collate_fn = getattr(dataset.datasets[0], 'collate_fn', default_collate)
        return DataLoader(dataset,
                          batch_size=cfg.dataloader.batchsize,
                          num_workers=cfg.dataloader.num_workers,
                          collate_fn=collate_fn,
                          pin_memory=True,  # TODO: Test speed
                          persistent_workers=False,
                          shuffle=True,
                          drop_last=True)
    else:
        collate_fn = default_collate
        if split == 'train':
            dataset = get_dataset(cfg, split)
            return DataLoader(dataset,
                              batch_size=cfg.dataloader.get('batchsize_eval', cfg.dataloader.batchsize),
                              num_workers=8,
                              collate_fn=collate_fn,
                              pin_memory=True,  # TODO: Test speed
                              persistent_workers=True,
                              drop_last=True,
                              prefetch_factor=4,
                              shuffle=True)
        else:
            # Evaluation splits: one DataLoader per configured dataset.
            loader_list = []
            for dataset in get_dataset(cfg, split):
                loader_list.append(
                    DataLoader(dataset,
                               batch_size=cfg.dataloader.get('batchsize_eval', cfg.dataloader.batchsize),
                               num_workers=8,
                               collate_fn=collate_fn,
                               pin_memory=True,  # TODO: Test speed
                               shuffle=False,
                               prefetch_factor=4,
                               persistent_workers=True))

            # TODO: temporary solution for backward compatibility: return a single
            # DataLoader when only one evaluation dataset is configured.
            if len(loader_list) == 1:
                return loader_list[0]
            return loader_list

if __name__ == '__main__':
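    # Minimal smoke test (hypothetical): load a config from disk and iterate one batch.
    # The config path below is an assumption; point it at an actual experiment config.
    #
    #     cfg = OmegaConf.load('configs/default.yaml')
    #     loader = build_dataloader(cfg, split='train')
    #     for batch in loader:
    #         print(batch.keys())
    #         break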
    pass