eval() or train()
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
把 nn.Module 改成 pl.LightningModule,这样的改写不会影响任何旧的写法,因为 pl.LightningModule 提供了 nn.Module 中的所有 function,并且多提供了 Lightning 会用到的 function。
class FeedForwardNeuralNet(pl.LightningModule):
    """A simple two-layer feed-forward classifier.

    Subclassing ``pl.LightningModule`` instead of ``nn.Module`` keeps every
    ``nn.Module`` capability while adding the hooks Lightning needs.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super().__init__()
        # linear -> ReLU -> linear; attribute names (l1/relu/l2) are kept
        # so checkpoints / state_dict keys stay identical.
        self.l1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # One pass through the two-layer network; raw logits are returned
        # (no softmax — the loss function applies it).
        hidden = self.relu(self.l1(x))
        return self.l2(hidden)
Lightning 会呼叫 configure_optimizers,所以我们会多一个:
def configure_optimizers(self, learning_rate=1e-3):
    """Return the optimizer Lightning should use for training.

    Lightning calls ``configure_optimizers()`` with no extra arguments, so
    ``learning_rate`` must carry a default; the original made it a required
    parameter, which would raise ``TypeError`` inside ``trainer.fit()``.

    :param learning_rate: Adam step size (default 1e-3, Adam's own default).
    :returns: a ``torch.optim.Adam`` over this module's parameters.
    """
    optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
    return optimizer
def training_step(self, batch, batch_idx):
    """Compute the cross-entropy loss for one training mini-batch.

    :param batch: ``(datas, labels)`` tuple as produced by the DataLoader.
    :param batch_idx: index of the batch within the epoch (unused here,
        but required by Lightning's hook signature).
    :returns: scalar loss tensor that Lightning will backpropagate.
    """
    datas, labels = batch
    # self(datas) routes through forward() and returns raw logits.
    outputs = self(datas)
    # F is torch.nn.functional — the file's imports were missing it;
    # cross_entropy applies log-softmax + NLL in one call.
    loss = F.cross_entropy(outputs, labels)
    return loss
其中 loss 的计算使用 torch.nn.functional 套件的 functions。完整的 model 如下:
class FeedForwardNeuralNet(pl.LightningModule):
    """Two-layer feed-forward classifier trained via PyTorch Lightning.

    Implements the three hooks Lightning needs: ``forward``,
    ``training_step`` and ``configure_optimizers``.
    """

    def __init__(self, input_size, hidden_size, num_classes, learning_rate=1e-3):
        """
        :param input_size: number of input features per sample.
        :param hidden_size: width of the hidden layer.
        :param num_classes: number of output classes (logit dimension).
        :param learning_rate: Adam step size. Stored on the instance because
            the original ``configure_optimizers`` read an undefined global
            ``learning_rate``, which would raise NameError at fit time.
        """
        super().__init__()
        # linear -> ReLU -> linear
        self.l1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.l2 = nn.Linear(hidden_size, num_classes)
        self.learning_rate = learning_rate

    def forward(self, x):
        # Returns raw logits; softmax is folded into the loss.
        out = self.l1(x)
        out = self.relu(out)
        out = self.l2(out)
        return out

    def training_step(self, batch, batch_idx):
        """Cross-entropy loss for one mini-batch; Lightning backprops it."""
        datas, labels = batch
        outputs = self(datas)
        # F is torch.nn.functional (added to the file's imports).
        loss = F.cross_entropy(outputs, labels)
        return loss

    def configure_optimizers(self):
        """Adam over all parameters, using the rate given at construction."""
        return torch.optim.Adam(self.parameters(), lr=self.learning_rate)
接下来使用 Trainer 进行训练:
from pytorch_lightning import Trainer
# torchvision was used below but never imported anywhere in the file.
import torchvision
import torchvision.transforms as transforms

# Hyperparameters — the originals were undefined NameErrors.
# MNIST images are 28x28 = 784 pixels; 10 digit classes.
input_size = 784
hidden_size = 500
num_classes = 10
batch_size = 100

train_dataset = torchvision.datasets.MNIST(root='./data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=False)
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

model = FeedForwardNeuralNet(input_size, hidden_size, num_classes)
# A single Trainer suffices; the original constructed one twice.
trainer = Trainer()
trainer.fit(model, train_loader)
<<: [Day30]程序菜鸟自学C++资料结构演算法 – 心得总结
emlog是完全开源的软件,但编写,支持和分发仍然需要花费很多时间和费用。投我以桃,报之以李,作为付...
13. Roman to Integer 今天我们一起挑战leetcode第13题Roman to ...
为什麽要用 git Git 是一个好用的分散式版本控管软件,在我们要开始写 html 之前,推荐要先...
根据ARM官方的介绍,M55是第一个支援v8.1-M架构的M系列处理器。而M系列主要是针对对於成本和...
ScrollView 今天要介绍的元件,当介面的内容开始变多时就派上用场了,毕竟手机萤幕或着各类3c...