pytorch卷积神经网络CNN实例

pytorch卷积神经网络训练

关于卷积神经网络(CNN)的基础知识此处就不再多说,详细的资料参考我在CSDN的说明
CNN卷积神经网络原理流程整理python

如下是一个可视化展现卷积过程的网站
https://www.cs.ryerson.ca/~aharley/vis/conv/

1、使用pytorch训练MINST手写数字数据集

1. 导入相关库

import torch
import numpy as np
from torch.autograd import Variable
from torchvision import datasets,transforms
from torch.utils.data import DataLoader
from torch import nn
from torch import optim

2. 载入和下载数据集

# Load MNIST (downloaded on first run) and convert images to tensors.
to_tensor = transforms.ToTensor()
train_dataset = datasets.MNIST(root='./data/06_MNIST/', train=True,
                               transform=to_tensor, download=True)
test_dataset = datasets.MNIST(root='./data/06_MNIST/', train=False,
                              transform=to_tensor, download=True)

# Wrap both datasets in mini-batch loaders; reshuffle every epoch.
batch_size = 64
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)

3. 定义网络结构

# 定义网络结构
# 定义网络结构
class CNN(nn.Module):
    """Two conv blocks plus two fully connected layers for 28x28 MNIST digits.

    The forward pass returns raw class logits for the 10 digit classes.

    FIX: the original ended with ``nn.Softmax(dim=1)`` while training used
    ``nn.CrossEntropyLoss``.  CrossEntropyLoss already combines log-softmax
    and NLL internally, so feeding it probabilities squashes gradients and
    slows learning.  The Softmax layer is removed; argmax-based accuracy is
    unchanged, and callers needing probabilities can apply
    ``torch.softmax(out, dim=1)`` themselves.
    """

    def __init__(self):
        # nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding):
        # a 5x5 kernel with padding=2 keeps the spatial size, so only the
        # 2x2 max-pool halves each dimension.
        super(CNN, self).__init__()
        # conv + ReLU + pool: 1x28x28 -> 32x14x14
        self.conv1 = nn.Sequential(nn.Conv2d(1, 32, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2, 2))
        # 32x14x14 -> 64x7x7
        self.conv2 = nn.Sequential(nn.Conv2d(32, 64, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2, 2))
        # flattened features -> 1000 hidden units, dropout for regularization
        self.fc1 = nn.Sequential(nn.Linear(64 * 7 * 7, 1000), nn.Dropout(p=0.5), nn.ReLU())
        # final classifier: 10 logits (no Softmax — see class docstring)
        self.fc2 = nn.Linear(1000, 10)

    def forward(self, x):
        # x: [batch, 1, 28, 28]
        x = self.conv1(x)
        x = self.conv2(x)
        # [batch, 64, 7, 7] -> [batch, 64*7*7]
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        return self.fc2(x)

4.定义模型

# Build the model, loss and optimizer.
LR = 0.001  # Adam learning rate
model = CNN()
crossEntropyloss = nn.CrossEntropyLoss()  # expects raw logits + integer labels
optimizer = optim.Adam(model.parameters(), lr=LR)

5. 训练模型

def train():
    """Run one optimization epoch, then print accuracy over the training set.

    Uses the module-level ``model``, ``train_loader``, ``crossEntropyloss``
    and ``optimizer``; prints the epoch's training accuracy.
    """
    model.train()  # training mode: dropout active
    for inputs, labels in train_loader:
        out = model(inputs)
        loss = crossEntropyloss(out, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # FIX: measure accuracy in eval mode (the original left dropout on,
    # which distorts the measurement) and without gradient tracking
    # (saves time and memory on a pure inference pass).
    model.eval()
    correct = 0
    with torch.no_grad():
        for inputs, labels in train_loader:
            out = model(inputs)
            _, predictions = torch.max(out, 1)
            correct += (predictions == labels).sum()
    print("Train acc:{0}".format(correct.item() / len(train_dataset)))
        
def test():
    """Print accuracy of the module-level ``model`` over the MNIST test set."""
    model.eval()  # disable dropout for inference
    correct = 0
    # FIX: no_grad skips autograd bookkeeping on this inference-only pass.
    with torch.no_grad():
        for inputs, labels in test_loader:
            out = model(inputs)
            _, predictions = torch.max(out, 1)
            correct += (predictions == labels).sum()
    print("Test acc:{0}".format(correct.item() / len(test_dataset)))

6. 运行模型

if __name__ == '__main__':
    # Train for a fixed number of epochs, evaluating after each one.
    num_epochs = 10
    for epoch in range(num_epochs):
        print('epoch:', epoch)
        train()
        test()

7. 结果:

2、pytorch训练CIFAR-10彩色图片数据集

主要的步骤与上述方法相同

1. 加载与查看数据

# 导入数据: load CIFAR-10 (downloaded on first run), images as tensors.
CIFAR_train_dataset = datasets.CIFAR10(root='./data/', train=True,
                                       download=True,
                                       transform=transforms.ToTensor())
CIFAR_test_dataset = datasets.CIFAR10(root='./data/', train=False,
                                      download=True,
                                      transform=transforms.ToTensor())

# 查看数据: inspect one sample and the raw array shapes.
imgdata, label = CIFAR_train_dataset[90]
print('label:', label)
print('imgdata类型:', type(imgdata))
# FIX: the two labels below were swapped in the original — the training-set
# shape was printed as "测试集" (test set) and vice versa.
print('训练集', CIFAR_train_dataset.data.shape)
print('测试集', CIFAR_test_dataset.data.shape)

# 数据装载: mini-batch loaders, reshuffled every epoch.
batch_size = 64
CIFAR_train_loader = DataLoader(dataset=CIFAR_train_dataset,
                                batch_size=batch_size,
                                shuffle=True)
CIFAR_test_loader = DataLoader(dataset=CIFAR_test_dataset,
                               batch_size=batch_size,
                               shuffle=True)

2.绘图查看图片集

import matplotlib.pyplot as plt  # FIX: plt was used but never imported in this file

# Show the first 12 training images in a 3x4 grid titled with class names.
fig, ax = plt.subplots(
    nrows=3,
    ncols=4,
    sharex=True,
    sharey=True)  # sharex/sharey: subplots share the same axes

ax = ax.flatten()

# Invert class_to_idx once instead of scanning it for every image.
idx_to_class = {v: k for k, v in CIFAR_train_dataset.class_to_idx.items()}

for i in range(12):
    # only the first 12 images are shown
    img = CIFAR_train_dataset.data[i]
    ax[i].imshow(img, cmap='Greys', interpolation='nearest')
    ax[i].set_title(idx_to_class[CIFAR_train_dataset.targets[i]])

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()

查看图片

3.定义网络结构

说明:CNN网络卷积层数量、卷积层参数、卷积层以后的激活函数、池化层的参数都须要本身再调整,我所填写的参数获得的结果并非很好。

# 定义网络结构
class CNN2(nn.Module):
    """Two conv blocks plus two fully connected layers for 32x32 RGB CIFAR-10.

    The forward pass returns raw class logits for the 10 classes.

    FIX: like the MNIST model, the original ended with ``nn.Softmax(dim=1)``
    while training used ``nn.CrossEntropyLoss``.  CrossEntropyLoss already
    applies log-softmax internally, so the extra Softmax squashes gradients
    and slows learning; it is removed.  Argmax-based accuracy is unchanged.
    """

    def __init__(self):
        # nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding):
        # a 5x5 kernel with padding=2 keeps spatial size; each 2x2 max-pool
        # halves it.
        super(CNN2, self).__init__()
        # conv + ReLU + pool: 3x32x32 -> 32x16x16
        self.conv1 = nn.Sequential(nn.Conv2d(3, 32, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2, 2))
        # 32x16x16 -> 64x8x8
        self.conv2 = nn.Sequential(nn.Conv2d(32, 64, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2, 2))
        # flattened features -> 1000 hidden units, dropout for regularization
        self.fc1 = nn.Sequential(nn.Linear(64 * 8 * 8, 1000), nn.Dropout(p=0.5), nn.ReLU())
        # final classifier: 10 logits (no Softmax — see class docstring)
        self.fc2 = nn.Linear(1000, 10)

    def forward(self, x):
        # x: [batch, 3, 32, 32]
        x = self.conv1(x)
        x = self.conv2(x)
        # [batch, 64, 8, 8] -> [batch, 64*8*8]
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        return self.fc2(x)

4.定义模型与训练模型

# Build the CIFAR-10 model, loss and optimizer.
LR = 0.001  # Adam learning rate
model = CNN2()
crossEntropyloss = nn.CrossEntropyLoss()  # expects raw logits + integer labels
optimizer = optim.Adam(model.parameters(), lr=LR)

def train(epoch):
    """Run one optimization epoch over CIFAR-10 with periodic loss logging,
    then print accuracy over the training set.

    Args:
        epoch: zero-based epoch index, used only in the log line.
    """
    model.train()  # training mode: dropout active
    running_loss = 0.0
    for i, (inputs, labels) in enumerate(CIFAR_train_loader):
        out = model(inputs)
        loss = crossEntropyloss(out, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # FIX: the original accumulated the COUNT of correct predictions
        # into running_loss and printed it as "loss".  Accumulate the
        # actual loss value and report the mean over the logging window.
        running_loss += loss.item()
        if i % 10 == 0:    # log every 10 batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 10))
            running_loss = 0.0

    # FIX: measure accuracy in eval mode (dropout off) and without
    # gradient tracking on this inference-only pass.
    model.eval()
    correct = 0
    with torch.no_grad():
        for inputs, labels in CIFAR_train_loader:
            out = model(inputs)
            _, predictions = torch.max(out, 1)
            correct += (predictions == labels).sum()
    print("Train acc:{0}".format(correct.item() / len(CIFAR_train_dataset)))
        
def test():
    """Print accuracy of the module-level ``model`` over the CIFAR-10 test set."""
    model.eval()  # disable dropout for inference
    correct = 0
    # FIX: no_grad skips autograd bookkeeping on this inference-only pass.
    with torch.no_grad():
        for inputs, labels in CIFAR_test_loader:
            out = model(inputs)
            _, predictions = torch.max(out, 1)
            correct += (predictions == labels).sum()
    print("Test acc:{0}".format(correct.item() / len(CIFAR_test_dataset)))

if __name__ == '__main__':
    # Train for a fixed number of epochs, evaluating after each one.
    num_epochs = 10
    for epoch in range(num_epochs):
        print('epoch:', epoch)
        train(epoch)
        test()

最后结果就不展示了,要获得好的训练结果就须要调整参数和神经网络的结构。

相关文章
相关标签/搜索