pytorch

PyTorch learning notes

2022-11-18  午字横
1: How to create an Anaconda environment

(base) indicates the base (default) environment.

conda create -n pytorch python=3.6
conda activate pytorch # activate the environment
pip list # list the packages in the current environment

-n gives the environment a name, here pytorch.
python=3.6 pins the Python version installed into the environment.
After activation, the (base) prefix on the prompt changes to the name of the activated environment.

2: Installing PyTorch
(screenshot: the install command selector on pytorch.org)

Package: choose conda on Windows, pip on Linux.
Language: match the environment created in Anaconda; here choose Python 3.6.
CUDA: choose None if you have no GPU; with a GPU, 9.2 is the recommended choice here.
Finally, copy the generated command and run it in the new environment.

python
import torch
torch.cuda.is_available() # check whether a CUDA-capable GPU is available on this machine
conda install pytorch torchvision torchaudio cpuonly -c pytorch # CPU-only install

conda info --envs # list all environments that have been created
activate your_envs # enter your environment

pip install notebook # install Jupyter Notebook into the new environment
# note: do NOT use conda install nb_conda here, it will fail!!
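
To make the new environment selectable as a kernel inside Jupyter, one common approach (an assumption here, not covered in the original notes) is to register it with ipykernel:

pip install ipykernel
python -m ipykernel install --user --name pytorch --display-name pytorch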

Using TensorBoard

In conda, activate the new environment, change into the folder that contains the logs, and map a port:

tensorboard --logdir=logs --port=6007 

from torch.utils.tensorboard import SummaryWriter
s=SummaryWriter('logs') # event files are written into the logs/ folder
s.close()
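
As a minimal sketch of what the writer is for (the y=2x values are illustrative, not from the notes), logging a scalar curve looks like this:

from torch.utils.tensorboard import SummaryWriter

s=SummaryWriter('logs')
for i in range(100):
    s.add_scalar('y=2x', 2*i, i) # tag, value, global step
s.close()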

Installing OpenCV

If installing OpenCV fails with the error below, try switching to a domestic (inside-the-firewall) network environment.
ERROR: Could not find a version that satisfies the requirement opencv-python (from versions: none)
ERROR: No matching distribution found for opencv-python
Install command:
pip install opencv-python

Import
import cv2
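
A minimal sketch of reading an image with OpenCV and converting it to a tensor (the file name test.jpg is a placeholder assumption):

import cv2
from torchvision import transforms

img=cv2.imread('test.jpg') # HWC numpy ndarray, channels in BGR order
img=cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # convert to RGB
tensor=transforms.ToTensor()(img) # ToTensor also accepts numpy ndarrays
print(tensor.shape) # CHW float tensor scaled to [0,1]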

Using the transforms utilities

Import

from torchvision import transforms
tensor_trans=transforms.ToTensor()
_t=tensor_trans(img) # img is a PIL image (or numpy ndarray) opened earlier
print(_t) # a CHW float tensor with values scaled to [0,1]

Using the built-in __call__
Resize() takes a PIL image and returns a PIL image resized to the given size:
tran_resize=transforms.Resize((12,12))
img_resize=tran_resize(img) # still a PIL image, now 12x12
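
Transforms can be used like functions because they implement the magic method __call__. A toy sketch of the mechanism (the Person class here is purely illustrative, not from the notes):

class Person:
    def __call__(self, name):
        print('__call__ ' + name)

    def hello(self, name):
        print('hello ' + name)

p=Person()
p('zhangsan')        # triggers __call__, just like calling a transform
p.hello('zhangsan')  # ordinary method call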

Compose: note that it takes a list [] of transforms.
"""tran_resize_02=transforms.Resize((120,120))
tran_totensor=transforms.ToTensor()
trans_compose=transforms.Compose([tran_resize_02,tran_totensor])
img_resize_02=trans_compose(img)
print(img_resize_02.shape)"""
trans_compose=transforms.Compose([transforms.Resize((120,120)),transforms.ToTensor()])
img_resize_02=trans_compose(img)
print(img_resize_02.shape)
s.add_image('compose_test',img_resize_02,0)

Dataset and DataLoader

import torchvision
from torch.utils.tensorboard import SummaryWriter
from PIL import Image

dataset_transform=torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])
train_set=torchvision.datasets.CIFAR10(root='./dataset',train=True,transform=dataset_transform,download=True)
test_set=torchvision.datasets.CIFAR10(root='./dataset',train=False,transform=dataset_transform,download=True)

s=SummaryWriter('logs')
for i in range(10):
    img,target=train_set[i]
    s.add_image('traindata_test',img,i)

s.close()

import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

test_data=torchvision.datasets.CIFAR10('./dataset',train=False,transform=torchvision.transforms.ToTensor())
# test_loader=DataLoader(test_data,4,True,0,True)
test_loader=DataLoader(dataset=test_data,batch_size=64,shuffle=True,num_workers=0,drop_last=True)

img,target=test_data[0]
print(img.shape)
print(target)

s=SummaryWriter('logs')
for epoch in range(2):
    step = 0
    for i in test_loader:
        imgs, targets = i
        s.add_images('Epoch: {}'.format(epoch), imgs, step) # one tag per epoch so the second pass does not overwrite the first
        step = step + 1

s.close()
torchvision ships with several built-in datasets that can be loaded directly, which saves you from downloading and preparing data yourself.
dilation: dilated (atrous) convolution
With dilation=2, adjacent kernel elements are spaced one pixel apart.


(figure: illustration of a dilated convolution kernel)
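
A minimal sketch of the effect of dilation on the output size (the tensor sizes are illustrative assumptions):

import torch
from torch import nn

x=torch.randn(1,3,32,32)
conv=nn.Conv2d(3,8,kernel_size=3,dilation=2) # effective receptive field is 5x5
print(conv(x).shape) # torch.Size([1, 8, 28, 28]): 32 - (2*(3-1)+1) + 1 = 28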

Max pooling

What max pooling is for
The most direct effects of a pooling layer are dimensionality reduction: it cuts the parameter count, removes redundant information, compresses features, simplifies the network, and reduces computation and memory use.
The book Dive into Deep Learning mentions one more effect: it alleviates the convolution layer's excessive sensitivity to position, which is essentially feature invariance.

# max pooling in code
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
"""input=torch.randn([5,5])
print(input.shape)

input=torch.reshape(input,(-1,1,5,5))
print(input.shape)"""

dataset =torchvision.datasets.CIFAR10('./dataset',train=False,
                                     transform=transforms.ToTensor(),
                                     download=True
                                     )
dataloader=DataLoader(dataset,batch_size=64)

class My_Module(nn.Module):
    def __init__(self):
        super(My_Module, self).__init__()
        self.max_pool=nn.MaxPool2d(kernel_size=3,stride=1,
                                   padding=0,ceil_mode=True)

    def forward(self,input):
        output=self.max_pool(input)
        return output
s=SummaryWriter('logs')

m=My_Module()
"""output=m(input)
print(output.shape)"""
step=0
for data in dataloader:
    imgs,targets=data
    s.add_images('maxpool_test_before',imgs,step)
    imgs=m(imgs)
    s.add_images('maxpool_test',imgs,step)
    step=step+1

s.close()

Non-linear activations

ReLU: inputs below 0 are set to 0; positive inputs pass through unchanged, i.e. ReLU(x)=max(0,x).
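
A quick sketch of that rule on a hand-made tensor (the values are illustrative):

import torch
from torch import nn

x=torch.tensor([[-1.0, 2.0],
                [3.0, -4.0]])
relu=nn.ReLU()
print(relu(x)) # tensor([[0., 2.], [3., 0.]])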

import torch
import torchvision.datasets
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

"""input =torch.randn([2,2])
print(input)
print(input.shape)"""

dataset =torchvision.datasets.CIFAR10('./dataset',train=False,
                                     transform=transforms.ToTensor(),
                                     download=True
                                     )
dataloader=DataLoader(dataset,batch_size=64)


"""input=torch.reshape(input,(-1,1,2,2))
print(input)
print(input.shape)"""

class My_Module(nn.Module):
    def __init__(self):
        super(My_Module, self).__init__()
        self.relu=nn.ReLU(inplace=False)
        self.sigmoid=nn.Sigmoid()

    def forward(self,input):
        output=self.sigmoid(input) # Sigmoid squashes values into (0,1), so the images remain viewable
        return output

m=My_Module()
"""output=m(input)
print(output)"""
s=SummaryWriter('logs')
step=0
for data in dataloader:
    imgs,targets=data
    imgs=m(imgs)
    s.add_images('sigmoid_test_05',imgs,global_step=step)
    step+=1

s.close()


Normalization layers

There is a paper showing that adding normalization speeds up training.
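
A minimal sketch of such a layer, assuming nn.BatchNorm2d is the one these notes refer to:

import torch
from torch import nn

x=torch.randn(4,3,32,32) # a batch of 4 three-channel feature maps
bn=nn.BatchNorm2d(num_features=3) # one (mean, var) pair tracked per channel
y=bn(x)
print(y.shape) # shape unchanged: torch.Size([4, 3, 32, 32])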

Linear layers

import torch
import torchvision.datasets
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

dataset =torchvision.datasets.CIFAR10('./dataset',train=False,
                                     transform=transforms.ToTensor(),
                                     download=True
                                     )
dataloader=DataLoader(dataset,batch_size=64,drop_last=True)


class My_Module(nn.Module):
    def __init__(self):
        super(My_Module, self).__init__()
        self.relu=nn.ReLU(inplace=False)
        self.sigmoid=nn.Sigmoid()
        self.linear=nn.Linear(196608,10) # 196608 = 64*3*32*32: one whole batch flattened into a single row

    def forward(self,input):
        output =self.linear(input)
        return  output

m=My_Module()

s=SummaryWriter('logs')
step=0
for data in dataloader:
    imgs,targets=data
    print(imgs.shape)
    imgs=torch.reshape(imgs,(1,1,1,-1)) # flatten the entire batch into one long row
    print(imgs.shape)
    imgs=m(imgs)
    print(imgs.shape)
    s.add_images('sigmoid_test_07',imgs,global_step=step)
    step+=1

s.close()
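
torch.flatten is the more idiomatic way to do that flattening; a short standalone sketch, with a fresh random batch standing in for imgs:

import torch
from torch import nn

imgs=torch.randn(64,3,32,32) # a stand-in batch with the same shape as above
linear=nn.Linear(196608,10)
flat=torch.flatten(imgs) # shape (196608,) instead of (1,1,1,196608)
print(linear(flat).shape) # torch.Size([10]): nn.Linear also accepts 1-D input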


The number of convolution kernels determines the number of output channels. A single kernel slides over all three RGB channels and then sums the three resulting matrices element-wise into one, i.e. they are merged, so each kernel produces exactly one output channel.
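A quick sketch of that rule (the sizes are illustrative assumptions):

import torch
from torch import nn

x=torch.randn(1,3,32,32) # one RGB image
conv=nn.Conv2d(in_channels=3,out_channels=8,kernel_size=3,padding=1)
print(conv(x).shape) # torch.Size([1, 8, 32, 32]): 8 kernels give 8 output channels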

import torch
from torch import nn
from torch.nn import Conv2d,MaxPool2d,Flatten,Linear
from torch.utils.tensorboard import SummaryWriter
class My_Module(nn.Module):
    def __init__(self):
        super(My_Module, self).__init__()
        """self.conv1=Conv2d(3,32,5,stride=1,padding=2)
        self.maxpool1=MaxPool2d(2)
        self.conv2=Conv2d(32,32,5,stride=1,padding=2)
        self.maxpool2=MaxPool2d(2)
        self.conv3=Conv2d(32,64,5,stride=1,padding=2)
        self.maxpool3=MaxPool2d(2)
        self.flatten=Flatten()
        self.linear1=Linear(1024,64)
        self.linear2=Linear(64,10)"""
        self.model_1=nn.Sequential(
            Conv2d(3, 32, 5, stride=1, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, stride=1, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, stride=1, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64), # 1024 = 64 channels * 4 * 4 after three 2x2 max pools on a 32x32 input
            Linear(64, 10)
        )

    def forward(self,input):
        """x=self.conv1(input)
        x=self.maxpool1(x)
        x=self.conv2(x)
        x=self.maxpool2(x)
        x=self.conv3(x)
        x=self.maxpool3(x)
        x=self.flatten(x)
        x=self.linear1(x)
        x=self.linear2(x)"""
        x=self.model_1(input)
        return x

m=My_Module()
print(m)

input=torch.ones((64,3,32,32))
output=m(input)
print(output.shape)

s=SummaryWriter('logs')
s.add_graph(m,input)
s.close()

Loss Functions

Overall workflow: prepare the data, load the data, build the model, set the loss function, set the optimizer, train, then validate, and finally aggregate and display the results.
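
A minimal sketch of that workflow, reusing the My_Module class defined in the previous block (the learning rate and the choice of loss are assumptions, not from the original notes):

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms

train_set=torchvision.datasets.CIFAR10('./dataset',train=True,
                                       transform=transforms.ToTensor(),download=True)
train_loader=DataLoader(train_set,batch_size=64,shuffle=True)

m=My_Module() # the Sequential model defined above
loss_fn=nn.CrossEntropyLoss() # a common choice for 10-class classification
optimizer=torch.optim.SGD(m.parameters(),lr=0.01)

for imgs,targets in train_loader:
    outputs=m(imgs)
    loss=loss_fn(outputs,targets)
    optimizer.zero_grad() # clear gradients from the previous step
    loss.backward() # backpropagate
    optimizer.step() # update the weights
    print(loss.item())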
