from PIL import Image
import cv2
import matplotlib.pyplot as plt
import torchvision
from torchvision import transforms
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import numpy as np
import tqdm as tqdm
class LeNet(nn.Module):
    """Classic LeNet-5-style CNN for 28x28 single-channel images (10 classes).

    Architecture: two conv+sigmoid+avg-pool stages, then three fully
    connected layers. After the second pooling stage a 28x28 input has
    been reduced to 16 feature maps of 5x5, hence the 16*25 flatten size.
    """

    def __init__(self) -> None:
        super().__init__()
        layers = [
            # Stage 1: padding=2 keeps 28x28, pooling halves it to 14x14.
            nn.Conv2d(1, 6, kernel_size=5, padding=2),
            nn.Sigmoid(),
            nn.AvgPool2d(kernel_size=2, stride=2),
            # Stage 2: 14x14 -> 10x10 (no padding), pooling -> 5x5.
            nn.Conv2d(6, 16, kernel_size=5),
            nn.Sigmoid(),
            nn.AvgPool2d(kernel_size=2, stride=2),
            # Classifier head on the flattened 16*5*5 = 400 features.
            nn.Flatten(),
            nn.Linear(16 * 25, 120),
            nn.Sigmoid(),
            nn.Linear(120, 84),
            nn.Sigmoid(),
            nn.Linear(84, 10),
        ]
        self.sequential = nn.Sequential(*layers)

    def forward(self, x):
        """Run the full pipeline; returns raw class logits of shape (N, 10)."""
        return self.sequential(x)
class MLP(nn.Module):
    """Simple fully connected baseline for 28x28 images (10 classes).

    Flattens the input to 784 features, then applies three linear layers
    with sigmoid activations between them; the final layer emits logits.
    """

    def __init__(self) -> None:
        super().__init__()
        self.sequential = nn.Sequential(
            nn.Flatten(),               # (N, 1, 28, 28) -> (N, 784)
            nn.Linear(28 * 28, 120),
            nn.Sigmoid(),
            nn.Linear(120, 84),
            nn.Sigmoid(),
            nn.Linear(84, 10),          # raw logits, no softmax
        )

    def forward(self, x):
        """Return class logits of shape (N, 10) for input batch x."""
        return self.sequential(x)
# ---- Hyperparameters -------------------------------------------------------
epochs = 15
batch = 32
lr = 0.9  # high LR is the classic recipe for sigmoid-activated LeNet + SGD
loss = nn.CrossEntropyLoss()
model = LeNet()
optimizer = torch.optim.SGD(model.parameters(), lr)
# Fall back to CPU so the script still runs on machines without a GPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# ---- Data ------------------------------------------------------------------
root = r"./"
trans_compose = transforms.Compose([transforms.ToTensor(),
                                    ])
train_data = torchvision.datasets.MNIST(root, train=True, transform=trans_compose, download=True)
test_data = torchvision.datasets.MNIST(root, train=False, transform=trans_compose, download=True)
train_loader = DataLoader(train_data, batch_size=batch, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch, shuffle=False)

model.to(device)
loss.to(device)
# model.apply(init_weights)

# ---- Train / evaluate ------------------------------------------------------
for epoch in range(epochs):
    train_loss = 0.0
    test_loss = 0.0
    correct_train = 0
    correct_test = 0
    model.train()  # ensure training-mode behavior for any mode-dependent layers
    for index, (x, y) in enumerate(train_loader):
        x = x.to(device)
        y = y.to(device)
        predict = model(x)
        L = loss(predict, y)
        optimizer.zero_grad()
        L.backward()
        optimizer.step()
        # .item() detaches the scalar; accumulating the tensor itself would
        # keep every batch's autograd graph alive and leak memory.
        train_loss = train_loss + L.item()
        correct_train += (predict.argmax(dim=1) == y).sum().item()
    # Divide by the dataset size, not batch*len(loader): the last batch may
    # be smaller than `batch`, which would understate accuracy.
    acc_train = correct_train / len(train_data)
    model.eval()  # disable training-only behavior during evaluation
    with torch.no_grad():
        for index, (x, y) in enumerate(test_loader):
            x, y = x.to(device), y.to(device)
            predict = model(x)
            L1 = loss(predict, y)
            test_loss = test_loss + L1.item()
            correct_test += (predict.argmax(dim=1) == y).sum().item()
    acc_test = correct_test / len(test_data)
    # Report the mean per-batch loss: divide by the number of batches,
    # not by the batch size.
    print(f'epoch:{epoch},train_loss:{train_loss/len(train_loader)},test_loss:{test_loss/len(test_loader)},acc_train:{acc_train},acc_test:{acc_test}')
# NOTE: The model's input images are single-channel grayscale. The
# visualization appeared yellow only because of the colormap; the underlying
# data is black-and-white. The color-inversion step shows how important
# preprocessing is — much raw data cannot be used for inference without
# being cleaned first.