# 0) data import and set as pytorch dataset
# NOTE(review): this fragment is a duplicated article snippet — the complete
# script appears again further down. At this point `DataLoader` and
# `batch_size` are not yet defined; they come from the imports and
# hyper-parameter section below. Presumably meant to be read, not run, here.
import torchvision
import torchvision.transforms as transforms
# MNIST
# download=False assumes MNIST files already exist under ./data — TODO confirm
train_dataset = torchvision.datasets.MNIST(root='./data', train=True,
transform=transforms.ToTensor(), download=False)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False,
transform=transforms.ToTensor(), download=False)
# wrap datasets in shuffled mini-batch loaders
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size,
shuffle=True)
# 1) model build
class ConvNet(nn.Module):
    """Small CNN for 1x28x28 MNIST digits: two conv+pool stages, then 3 FC layers.

    NOTE(review): the scraped source lost all indentation, which made this
    class definition a syntax error. The logic is restored unchanged with
    standard 4-space indentation.
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        # input image shape is 1 * 28 * 28 (1 color channel, 28x28 pixels)
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=3, kernel_size=5)  # output shape = 3 * 24 * 24
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)  # halves H and W: 3 * 12 * 12
        # input shape is 3 * 12 * 12
        self.conv2 = nn.Conv2d(in_channels=3, out_channels=9, kernel_size=5)  # output shape = 9 * 8 * 8
        # the same pool applied again gives 9 * 4 * 4 = 144 flattened features
        self.fc1 = nn.Linear(9 * 4 * 4, 100)
        self.fc2 = nn.Linear(100, 50)
        # last fully connected layer output must equal the number of classes
        self.fc3 = nn.Linear(50, 10)

    def forward(self, x):
        # first conv: conv -> relu -> pool
        x = self.pool(F.relu(self.conv1(x)))
        # second conv: conv -> relu -> pool
        x = self.pool(F.relu(self.conv2(x)))
        # flatten all dimensions except batch
        x = torch.flatten(x, 1)
        # fully connected head; final layer returns raw logits
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
# instantiate the model (this duplicated snippet keeps it on CPU; the full
# script further down re-creates the model on the selected device)
model = ConvNet()
# in_channels: the first conv layer's in_channels must match the input image's
# characteristics — i.e. how many color channels the image has
# (在最前面一定要符合图片的特征，也就是有几通道的图片颜色)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision
import torchvision.transforms as transforms
# Device configuration
# prefer the GPU when CUDA is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 4
batch_size = 10
learning_rate = 0.001
# 0) data import and set as pytorch dataset
# MNIST
# NOTE(review): download=False assumes MNIST files already exist under ./data;
# run once with download=True if they are missing — TODO confirm
train_dataset = torchvision.datasets.MNIST(root='./data', train=True,
transform=transforms.ToTensor(), download=False)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False,
transform=transforms.ToTensor(), download=False)
# wrap the datasets in shuffled mini-batch loaders
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size,
shuffle=True)
# 1) model build
class ConvNet(nn.Module):
    """CNN for MNIST (1x28x28): conv1 -> pool -> conv2 -> pool -> fc1 -> fc2 -> fc3.

    NOTE(review): indentation was lost in the scraped source (every line at
    column 0 is a syntax error inside a class); restored with 4-space indents,
    logic unchanged.
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        # image shape is 1 * 28 * 28, where 1 is the single color channel
        # and 28 * 28 is the image size
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=3, kernel_size=5)  # output shape = 3 * 24 * 24
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)  # output shape = 3 * 12 * 12
        # input shape is 3 * 12 * 12
        self.conv2 = nn.Conv2d(in_channels=3, out_channels=9, kernel_size=5)  # output shape = 9 * 8 * 8
        # the pool is applied again after conv2, so the flat size is 9 * 4 * 4
        self.fc1 = nn.Linear(9 * 4 * 4, 100)
        self.fc2 = nn.Linear(100, 50)
        # last fully connected layer output must match the 10 digit classes
        self.fc3 = nn.Linear(50, 10)

    def forward(self, x):
        # first conv stage
        x = self.pool(F.relu(self.conv1(x)))
        # second conv stage
        x = self.pool(F.relu(self.conv2(x)))
        # flatten all dimensions except batch
        x = torch.flatten(x, 1)
        # fully connected layers; fc3 outputs raw logits
        # (CrossEntropyLoss below applies softmax internally)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
# instantiate the model and move it to the selected device (GPU or CPU)
# fix: removed a stray duplicated `.to(device)` left on its own line by the
# scrape — a bare attribute access line is a syntax error in Python
model = ConvNet().to(device)
# 2) loss and optimizer
# CrossEntropyLoss combines log-softmax and NLL, so the model emits raw logits
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# 3) Training loop
# NOTE(review): the scraped source lost all loop indentation (syntax error);
# restored with standard indents, statement order unchanged.
n_total_steps = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # move the batch to the same device as the model
        images = images.to(device)
        labels = labels.to(device)
        # init optimizer: clear gradients accumulated by the previous step
        optimizer.zero_grad()
        # forward -> backward -> update
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # log every 1000 steps (60000 images / batch_size 10 = 6000 steps/epoch)
        if (i + 1) % 1000 == 0:
            print(f'epoch {epoch+1}/{num_epochs}, step {i+1}/{n_total_steps}, loss = {loss.item():.4f}')
print('Finished Training')
# 4) Testing loop
# NOTE(review): indentation restored (lost in the scrape); gradients disabled
# during evaluation since no weight updates happen here.
with torch.no_grad():
    n_correct = 0
    n_samples = 0
    n_class_correct = [0 for i in range(10)]
    n_class_samples = [0 for i in range(10)]
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # max returns (value, index); the index along dim 1 is the predicted class
        _, predicted = torch.max(outputs, 1)
        n_samples += labels.size(0)
        n_correct += (predicted == labels).sum().item()
        # fix: iterate the actual batch length, not batch_size — the final
        # batch can be smaller and would raise IndexError otherwise
        for i in range(labels.size(0)):
            label = labels[i]
            pred = predicted[i]
            if label == pred:
                n_class_correct[label] += 1
            n_class_samples[label] += 1
    # overall accuracy across the whole test set
    acc = 100.0 * n_correct / n_samples
    print(f'Accuracy of the network: {acc} %')
    # per-class accuracy for each of the 10 digits
    for i in range(10):
        acc = 100.0 * n_class_correct[i] / n_class_samples[i]
        print(f'Accuracy of {i}: {acc} %')
<<: Day 29:K-近邻演算法(k-nearest neighbors)
>>: Day 27:语系包在 i 身上-Vue I18n 前置作业
在Genero FGL上也可以做出 RESTful 的 WEB Service。 先将回应WEB R...
大家好,我是YIYI,今天是我开始铁人赛的第一天。请大家多多指教! 为什麽会想自己制作APP? 会想...
GitHub Issue 有点像是专案管理系统内管理工作事项的功能,但它能达到功能更多:无论是个人或...
我相信,很少人是做好准备才当上主管。通常是凭自己的技术过硬、绩效超群而被赋予领导职,然後开始学管理。...
所谓的主力,就是有绝对多的资金,或是大量持有某一档股票,最大的优势就是容易操控股票价格。 今年一堆散...