main.py
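"""Training entry point.

Builds everything from a YAML config: loads it, applies command-line
overrides, resolves dataset paths relative to the given root directory,
instantiates the model, optimizer, diffusion schedulers, datasets, and
trainer, optionally resumes from a checkpoint, and then runs training.
"""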
import os

import torch
import torch.optim.adam  # not referenced directly; presumably kept so the Adam module is loaded for config-driven instantiation
from pprint import pprint

from tools.utils import (
    parse_args,
    load_yaml_config,
    merge_opts_to_config,
    instantiate_from_config,
    seed_everything,
    create_mlab_eng,
    Logger,
)
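
# Expected top-level config keys, inferred from the accesses in main() below
# (a sketch, not an authoritative schema). Each entry appears to be a
# {'target': <import path>, 'params': {...}} mapping consumed by
# instantiate_from_config:
#
#   model, optimizer, criterion, trainer,
#   diff_train_scheduler, diff_val_scheduler,
#   train_dataset, val_dataset, train_loader, val_loader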


def main():
    args = parse_args()

    # Load the YAML config and fold in any command-line overrides.
    config = load_yaml_config(args.config_file)
    config = merge_opts_to_config(config, args.opts)

    logger = Logger(args)
    logger.save_config(config, name='config.yaml')

    # Resolve dataset and ground-truth paths relative to the root directory.
    config['train_dataset']['params']['root'] = os.path.join(
        args.root_dir,
        config['train_dataset']['params']['root']
    )
    config['train_dataset']['params']['stim_to_sub_pth'] = os.path.join(
        config['train_dataset']['params']['root'],
        config['train_dataset']['params']['stim_to_sub_pth']
    )
    config['val_dataset']['params']['root'] = os.path.join(
        args.root_dir,
        config['val_dataset']['params']['root']
    )
    config['val_dataset']['params']['stim_to_sub_pth'] = os.path.join(
        config['val_dataset']['params']['root'],
        config['val_dataset']['params']['stim_to_sub_pth']
    )
    config['trainer']['params']['ground_truth_dir'] = os.path.join(
        config['train_dataset']['params']['root'],
        config['trainer']['params']['ground_truth_dir']
    )
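
    # For example (illustrative values only): with root_dir='/data' and a
    # configured train root of 'vim-2', the dataset root resolves to
    # '/data/vim-2', and stim_to_sub_pth is then joined beneath that root.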

    pprint(config)
    logger.save_config(config, name='config_w_abs_paths.yaml')
    logger.save_git_commit()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'Using device: {device}')
    seed_everything(args.seed)

    model = instantiate_from_config(config['model']).to(device)
    # Report the number of trainable parameters.
    print(f'Trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}')

    optimizer = instantiate_from_config(
        config['optimizer'],
        params=model.parameters()
    )

    # Optionally resume model and optimizer state from a checkpoint.
    ckpt_pth = args.ckpt_pth
    if ckpt_pth is not None:
        print(f'Loading checkpoint from {ckpt_pth}')
        checkpoint = torch.load(ckpt_pth, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    train_scheduler = instantiate_from_config(config['diff_train_scheduler'])
    train_dataset = instantiate_from_config(config['train_dataset'])
    train_loader = instantiate_from_config(
        config['train_loader'],
        dataset=train_dataset
    )
    criterion = instantiate_from_config(config['criterion'])

    # Build the validation pipeline only when evaluation is enabled.
    if config['trainer']['params']['should_evaluate']:
        val_scheduler = instantiate_from_config(config['diff_val_scheduler'])
        # DDIM needs its inference timesteps set before sampling.
        if config['diff_val_scheduler']['target'] == 'diffusers.schedulers.DDIMScheduler':
            val_scheduler.set_timesteps(config['diff_val_scheduler']['num_inference_steps'])
        val_dataset = instantiate_from_config(config['val_dataset'])
        val_loader = instantiate_from_config(
            config['val_loader'],
            dataset=val_dataset
        )
        matlab_eng = create_mlab_eng(
            os.path.join(config['train_dataset']['params']['root'], 'DatabaseCode'),
        )
    else:
        matlab_eng = None
        val_loader = None
        val_scheduler = None

    trainer = instantiate_from_config(
        config=config['trainer'],
        cfg=config,
        args=args,
        model=model,
        train_scheduler=train_scheduler,
        val_scheduler=val_scheduler,
        train_loader=train_loader,
        val_loader=val_loader,
        optimizer=optimizer,
        criterion=criterion,
        matlab_eng=matlab_eng,
        device=device,
        logger=logger,
    )
    trainer.train()


if __name__ == "__main__":
    main()
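
# Example invocation (a sketch: the exact flag names are defined in
# tools.utils.parse_args, which is not part of this file, so the flags
# below are assumptions):
#
#   python main.py --config_file configs/train.yaml --root_dir /path/to/data \
#       --seed 42 --ckpt_pth checkpoints/last.pt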