Commit 363aa89

added 14_cnn
1 parent 00657e4 commit 363aa89

File tree

1 file changed: +130 lines, -0 lines


14_cnn.py

Lines changed: 130 additions & 0 deletions
@@ -0,0 +1,130 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyper-parameters
num_epochs = 5
batch_size = 4
learning_rate = 0.001

# dataset has PILImage images of range [0, 1].
# We transform them to Tensors of normalized range [-1, 1]
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
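# Normalize computes (x - mean) / std per channel, here (x - 0.5) / 0.5,
# which maps pixel values from [0, 1] linearly onto [-1, 1].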

# CIFAR10: 60000 32x32 color images in 10 classes, with 6000 images per class
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=True, transform=transform)

test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                            download=True, transform=transform)

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
                                          shuffle=False)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')


def imshow(img):
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()


# get some random training images
dataiter = iter(train_loader)
images, labels = next(dataiter)  # dataiter.next() is no longer supported on DataLoader iterators

# show images
imshow(torchvision.utils.make_grid(images))

class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # -> n, 3, 32, 32
        x = self.pool(F.relu(self.conv1(x)))  # -> n, 6, 14, 14
        x = self.pool(F.relu(self.conv2(x)))  # -> n, 16, 5, 5
        x = x.view(-1, 16 * 5 * 5)            # -> n, 400
        x = F.relu(self.fc1(x))               # -> n, 120
        x = F.relu(self.fc2(x))               # -> n, 84
        x = self.fc3(x)                       # -> n, 10
        return x
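
# How the fc1 input size 16 * 5 * 5 follows from the layer shapes above:
#   input:             3 x 32 x 32
#   conv1 (kernel 5):  6 x 28 x 28   (32 - 5 + 1 = 28, no padding)
#   pool (2, 2):       6 x 14 x 14
#   conv2 (kernel 5): 16 x 10 x 10   (14 - 5 + 1 = 10)
#   pool (2, 2):      16 x  5 x  5   -> flattened to 400 features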


model = ConvNet().to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
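# nn.CrossEntropyLoss applies LogSoftmax + NLLLoss internally, so the model
# returns raw logits from fc3 and no softmax is added in forward().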

n_total_steps = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # origin shape: [4, 3, 32, 32] -> 4 images, 3 color channels, 32x32 pixels
        # input_layer: 3 input channels, 6 output channels, 5 kernel size
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 2000 == 0:
            print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{n_total_steps}], Loss: {loss.item():.4f}')

print('Finished Training')
PATH = './cnn.pth'
torch.save(model.state_dict(), PATH)
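
# Example (a minimal sketch, not used by the evaluation below): the saved
# checkpoint can be reloaded into a fresh model instance for later inference.
loaded_model = ConvNet().to(device)  # 'loaded_model' is just an illustrative name
loaded_model.load_state_dict(torch.load(PATH, map_location=device))
loaded_model.eval()  # inference mode (a no-op here, since there is no dropout/batchnorm)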

with torch.no_grad():
    n_correct = 0
    n_samples = 0
    n_class_correct = [0 for i in range(10)]
    n_class_samples = [0 for i in range(10)]
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # max returns (value, index)
        _, predicted = torch.max(outputs, 1)
        n_samples += labels.size(0)
        n_correct += (predicted == labels).sum().item()

        for i in range(batch_size):
            label = labels[i]
            pred = predicted[i]
            if label == pred:
                n_class_correct[label] += 1
            n_class_samples[label] += 1

    acc = 100.0 * n_correct / n_samples
    print(f'Accuracy of the network: {acc} %')

    for i in range(10):
        acc = 100.0 * n_class_correct[i] / n_class_samples[i]
        print(f'Accuracy of {classes[i]}: {acc} %')

0 commit comments
