-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathscript-train.py
More file actions
153 lines (133 loc) · 4.4 KB
/
script-train.py
File metadata and controls
153 lines (133 loc) · 4.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
import torch
import os
from visionutils.utils import collate_fn
from visionutils.engine import train_one_epoch, evaluate
from utils.nn import (
ClusterDataset,
get_transform,
get_instance_frcnn_model,
)
import argparse as argp
def _make_parser() -> argp.ArgumentParser:
parser = argp.ArgumentParser(
description="Train a neural network on a set of images of galaxy clusters"
)
parser.add_argument("root", help="Path to the dataset of images")
parser.add_argument(
"--tile_type",
"-tt",
type=str,
default="indv",
help="What type of images to train on.",
)
parser.add_argument(
"--seed",
"-s",
type=int,
default=1,
help="Seed to use for splitting train/test.",
)
parser.add_argument(
"--test_num",
"-tn",
type=int,
default=200,
help="Number of training images to reserve.",
)
parser.add_argument(
"--backbone",
"-bb",
type=str,
default="mobilenet",
help="Backbone class to use.",
)
parser.add_argument(
"--backbone_path",
"-bp",
type=str,
default="/mnt/welch/USERS/jorlo/ml-clusters/models/torch-act/",
help="Path to backbone save location",
) # TODO: maybe this should be required
parser.add_argument(
"--num_epochs",
"-ne",
type=int,
default=10,
help="Number of epochs to train for.",
)
return parser
def main():
    """Entry point: train an instance Faster R-CNN on galaxy-cluster images.

    Parses command-line options, builds train/test ``ClusterDataset`` views
    over the same image directory, splits them with a seeded permutation,
    loads the requested backbone, optionally resumes from an existing
    checkpoint, then trains for ``--num_epochs`` epochs, evaluating and
    saving the model weights after each epoch.
    """
    parser = _make_parser()
    args = parser.parse_args()

    # Two dataset objects over the same files: one with training-time
    # transforms (augmentation), one with eval-time transforms. The split
    # below uses the same index permutation for both, so the subsets are
    # disjoint.
    dataset = ClusterDataset(
        args.root,
        get_transform(train=True),
        cluster_dir="{}_freq_stamps".format(args.tile_type),
        mask_dir="{}_freq_masks".format(args.tile_type),
    )
    dataset_test = ClusterDataset(
        args.root,
        get_transform(train=False),
        cluster_dir="{}_freq_stamps".format(args.tile_type),
        mask_dir="{}_freq_masks".format(args.tile_type),
    )

    # Reproducible train/test split: seed the RNG, then reserve the last
    # --test_num shuffled indices for evaluation.
    torch.manual_seed(args.seed)
    indices = torch.randperm(len(dataset)).tolist()
    test_num = args.test_num
    dataset = torch.utils.data.Subset(dataset, indices[:-test_num])
    dataset_test = torch.utils.data.Subset(dataset_test, indices[-test_num:])

    # Training and validation data loaders.
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=8,
        shuffle=True,
        num_workers=4,
        collate_fn=collate_fn,
    )
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=1,
        shuffle=False,
        num_workers=4,
        collate_fn=collate_fn,
    )

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    # Our dataset has two classes only - background and cluster.
    num_classes = 2
    backbone = args.backbone
    backbone_path = args.backbone_path

    # Get the model using our helper function. os.path.join tolerates a
    # missing trailing slash on --backbone_path, which the previous plain
    # string concatenation silently mangled into a nonexistent path.
    model = get_instance_frcnn_model(
        num_classes,
        backbone_path=os.path.join(backbone_path, "act-{}.pth".format(backbone)),
        backbone_type=backbone,
    )
    # Move model to the right device.
    model.to(device)

    # Construct an optimizer over only the trainable parameters.
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(
        params, lr=0.005, momentum=0.9, weight_decay=0.0005
    )  # TODO: make parameters command line adjustable
    # Learning rate scheduler which decreases the learning rate by 10x
    # every 3 epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)

    num_epochs = args.num_epochs
    model_path = "/mnt/welch/USERS/jorlo/ml-clusters/models/torch-act/act-{}-frcnn-{}-tiles.pth".format(
        backbone, args.tile_type
    )  # TODO: fix this path

    # Resume from an existing checkpoint when one is present.
    load_existing_weights = True  # fixed typo: was "load_exiting_weights"
    if load_existing_weights and os.path.exists(model_path):
        # map_location lets a checkpoint saved on a GPU host load on a
        # CPU-only host (the script explicitly supports CPU fallback above).
        model.load_state_dict(torch.load(model_path, map_location=device))

    for epoch in range(num_epochs):
        # Train for one epoch, printing every 10 iterations.
        train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
        # Update the learning rate.
        lr_scheduler.step()
        # Evaluate on the test dataset.
        evaluate(model, data_loader_test, device=device)
        # Checkpoint after every epoch so an interrupted run can resume.
        torch.save(model.state_dict(), model_path)
# Run training only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()