Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit fe0e493

Browse files
authored
Update train.py
1 parent 4fd288e commit fe0e493

File tree

1 file changed

+8
-8
lines changed

1 file changed

+8
-8
lines changed

train.py

Lines changed: 8 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -40,7 +40,7 @@ def fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epo
4040
with torch.no_grad():
4141
if cuda:
4242
images = Variable(torch.from_numpy(images).type(torch.FloatTensor)).cuda()
43-
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]
43+
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)).cuda() for ann in targets]
4444
else:
4545
images = Variable(torch.from_numpy(images).type(torch.FloatTensor))
4646
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]
@@ -66,11 +66,11 @@ def fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epo
6666

6767
with torch.no_grad():
6868
if cuda:
69-
images = Variable(torch.from_numpy(images).cuda().type(torch.FloatTensor))
70-
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]
69+
images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor)).cuda()
70+
targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)).cuda() for ann in targets_val]
7171
else:
72-
images = Variable(torch.from_numpy(images).type(torch.FloatTensor))
73-
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]
72+
images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor))
73+
targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]
7474
optimizer.zero_grad()
7575
outputs = net(images_val)
7676
losses = []
@@ -154,7 +154,7 @@ def fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epo
154154
Init_Epoch = 0
155155
Freeze_Epoch = 25
156156

157-
optimizer = optim.Adam(net.parameters(),lr)
157+
optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)
158158
if Cosine_lr:
159159
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
160160
else:
@@ -183,7 +183,7 @@ def fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epo
183183
Freeze_Epoch = 25
184184
Unfreeze_Epoch = 50
185185

186-
optimizer = optim.Adam(net.parameters(),lr)
186+
optimizer = optim.Adam(net.parameters(),lr,weight_decay=5e-4)
187187
if Cosine_lr:
188188
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
189189
else:
@@ -204,4 +204,4 @@ def fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epo
204204

205205
for epoch in range(Freeze_Epoch,Unfreeze_Epoch):
206206
fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Unfreeze_Epoch,Cuda)
207-
lr_scheduler.step()
207+
lr_scheduler.step()

0 commit comments

Comments (0)