
Deep Learning (5, Part 2): Implementing a Simple CNN in PyTorch

2020-06-24 · 升不上三段的大鱼

In the previous post, Deep Learning (5): Implementing a CNN from Scratch in Python, I tried writing a CNN by hand. This post builds a CNN with the same structure in PyTorch for comparison.

First, the libraries used:

import numpy as np
import pandas as pd
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
import os

1. Loading the data

PyTorch ships its own DataLoader; all you need to do is write a Dataset class and hand it to DataLoader.

class Dataset(torch.utils.data.Dataset):
    def __init__(self, csv_file, root_dir, train, test, transform=None):
        """
        Args:
            csv_file (string): Path to a csv file with image names and labels.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        # TODO: rewrite dataloader
        self.root_dir = root_dir
        self.labels = pd.read_csv(csv_file)
        self.train = train
        self.test = test
        self.transform = transform

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        if torch.is_tensor(index):
            index = index.tolist()

        img_name = os.path.join(self.root_dir,
                                self.labels.iloc[index, 0])
        image = Image.open(img_name + '.jpg')
        # integer class index, as expected by F.cross_entropy
        label = int(self.labels.iloc[index, 1])
        if self.transform:
            image = self.transform(image)
        return image, label
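
For reference, the class above assumes the csv holds the image file name (without extension) in the first column and an integer class label in the second, something like (a made-up example):

img_0001,0
img_0002,1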

# set dataloader
train_loader = torch.utils.data.DataLoader(
    dataset=Dataset(
        csv_file=r'labels.csv',  # placeholder path
        root_dir=r'directory',
        train=True,
        test=False,
        transform=transforms.Compose([transforms.Resize((64, 64)),
                                      transforms.RandomHorizontalFlip(p=0.5),
                                      transforms.RandomVerticalFlip(p=0.5),
                                      transforms.ToTensor()
                                      ])),
    batch_size=10,
    shuffle=True,
    num_workers=4
)

test_loader = torch.utils.data.DataLoader(
    dataset=Dataset(
        csv_file=r'labels.csv',  # placeholder path
        root_dir=r'directory',
        train=False,
        test=True,
        transform=transforms.Compose([transforms.Resize((64, 64)),
                                      transforms.ToTensor()
                                      ])),
    batch_size=10,
    shuffle=False,
    num_workers=4
)
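
As a quick sanity check (a minimal sketch; the csv and directory paths above are placeholders), you can pull one batch from the loader and inspect its shape:

# fetch a single batch and inspect shapes
images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([10, 3, 64, 64]) with batch_size=10 and Resize((64, 64))
print(labels.shape)  # torch.Size([10])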

2. Defining the network (so easy)

class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=4, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(3844,120)
        self.fc2 = nn.Linear(120,2)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)),(2,2))
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
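
Where does the 3844 in fc1 come from? With a 64x64 input, the 3x3 convolution (stride 1, no padding) produces 62x62 feature maps, and the 2x2 max pool halves that to 31x31; with 4 output channels that is 4 * 31 * 31 = 3844. A quick shape check with a random input (a throwaway sketch):

# verify the flattened size fc1 expects
check_net = Net()
x = torch.randn(1, 3, 64, 64)  # one fake RGB image
print(check_net(x).shape)      # torch.Size([1, 2])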

3. Defining the optimizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = Net().to(device)
optimizer = optim.SGD(net.parameters(), lr=0.01)
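
Plain SGD with lr=0.01 is what the run below uses. If convergence is slow, adding momentum or switching to Adam are common tweaks; shown here only as options, not what produced the results below:

# optional variants (not used for the results below)
# optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
# optimizer = optim.Adam(net.parameters(), lr=1e-3)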

4. Defining the training and test functions

def train(epoch):
    net.train()  # switch the network to training mode
    correct = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            output = net(data)
            loss = F.cross_entropy(output, target)
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
            loss.backward()
            optimizer.step()

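    # note: loss here is from the last batch of the epoch, not an average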
    print('Train Epoch: {} \nLoss: {:.6f} \tAccuracy: {:.4f}'.format(
        epoch, loss.item(),
        correct / len(train_loader.dataset)))

def test():
    with torch.no_grad():
        net.eval()  # evaluation mode, not training
        test_loss = 0
        correct = 0
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = net(data)

            # sum up batch loss
            test_loss += F.cross_entropy(output, target, reduction='sum').item()  # reduction='sum' replaces the deprecated size_average=False
            # get the index of the max log-probability
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
              .format(test_loss, correct, len(test_loader.dataset),
                      100. * correct / len(test_loader.dataset)))

Everything is ready; let's run it.

if __name__ == '__main__':
    # for multiprocessing
    torch.multiprocessing.freeze_support()

    print("Params to learn:")
    for name, param in net.named_parameters():
        print("\t", name)

    for i in range(20):
        train(i)

    test()

A look at the results: this is miles better than my handwritten version.

Params to learn:
     conv1.weight
     conv1.bias
     fc1.weight
     fc1.bias
     fc2.weight
     fc2.bias
Train Epoch: 0 
Loss: 0.696898  Accuracy: 0.5110
Train Epoch: 1 
Loss: 0.690233  Accuracy: 0.5198
Train Epoch: 2 
Loss: 0.699285  Accuracy: 0.5330
Train Epoch: 3 
Loss: 0.721311  Accuracy: 0.5517
Train Epoch: 4 
Loss: 0.703058  Accuracy: 0.5645
Train Epoch: 5 
Loss: 0.696190  Accuracy: 0.5757
Train Epoch: 6 
Loss: 0.736775  Accuracy: 0.5960
Train Epoch: 7 
Loss: 0.657192  Accuracy: 0.6020
Train Epoch: 8 
Loss: 0.732337  Accuracy: 0.6258
Train Epoch: 9 
Loss: 0.694613  Accuracy: 0.6265
Train Epoch: 10 
Loss: 0.515953  Accuracy: 0.6465
Train Epoch: 11 
Loss: 0.598678  Accuracy: 0.6298
Train Epoch: 12 
Loss: 0.630211  Accuracy: 0.6368
Train Epoch: 13 
Loss: 0.594365  Accuracy: 0.6448
Train Epoch: 14 
Loss: 0.543401  Accuracy: 0.6435
Train Epoch: 15 
Loss: 0.458548  Accuracy: 0.6522
Train Epoch: 16 
Loss: 0.621378  Accuracy: 0.6577
Train Epoch: 17 
Loss: 0.537145  Accuracy: 0.6627
Train Epoch: 18 
Loss: 0.500808  Accuracy: 0.6773
Train Epoch: 19 
Loss: 0.532935  Accuracy: 0.6667


Test set: Average loss: 0.6555, Accuracy: 632/1000 (63%)