• P27 Networks with Parallel Concatenations: GoogLeNet / Inception V3


    1. The name comes from Google (the spelling GoogLeNet also pays homage to LeNet).

    2. What is the best kernel size for a convolution layer? GoogLeNet's answer: use all of them.

    3. The Inception block is the core building block.

    Compared with a single 3x3 or 5x5 convolution layer, an Inception block has fewer parameters and lower computational cost (see the parameter-count sketch right after this list).

    4. The network is organized into five stages.

    5. There are later variants, most famously Inception V3 and Inception-ResNet, as well as Xception, which I have used.

    6. Memorable quote: "nobody knows where the numbers inside come from" (verbatim).
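    A quick back-of-the-envelope check of the claim in point 3 (a minimal sketch, not from the original post; the channel configuration 192 in, 64+128+32+32 = 256 out matches the first Inception block of stage b3 below):

    # Hand-count parameters (weights + biases) for one Inception block with
    # 192 input channels and 64 + 128 + 32 + 32 = 256 output channels.
    p1 = 192 * 64 + 64                               # path 1: 1x1 conv
    p2 = (192 * 96 + 96) + (96 * 128 * 3 * 3 + 128)  # path 2: 1x1 then 3x3
    p3 = (192 * 16 + 16) + (16 * 32 * 5 * 5 + 32)    # path 3: 1x1 then 5x5
    p4 = 192 * 32 + 32                               # path 4: 3x3 max-pool then 1x1
    inception = p1 + p2 + p3 + p4
    single_3x3 = 192 * 256 * 3 * 3 + 256             # one 3x3 conv, same in/out channels
    single_5x5 = 192 * 256 * 5 * 5 + 256             # one 5x5 conv, same in/out channels
    print(inception, single_3x3, single_5x5)         # about 0.16M vs 0.44M vs 1.23M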

    import torch
    from torch import nn
    from torch.nn import functional as F
    from d2l import torch as d2l

    class Inception(nn.Module):
        # c1, c2, c3, c4 set the output channel counts of the four parallel paths
        def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
            super(Inception, self).__init__(**kwargs)
            # path 1: a single 1x1 convolution
            self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
            # path 2: two convolution layers, 1x1 followed by 3x3
            self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
            self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
            # path 3: 1x1 followed by 5x5
            self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
            self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
            # path 4: 3x3 max-pooling followed by 1x1
            self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
            self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)

        def forward(self, x):
            p1 = F.relu(self.p1_1(x))                     # ReLU on path 1
            p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))  # ReLU after each layer on path 2
            p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
            p4 = F.relu(self.p4_2(self.p4_1(x)))
            # concatenate the four outputs along the channel dimension (dim=1)
            return torch.cat((p1, p2, p3, p4), dim=1)
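    A quick shape check of a single block (not in the original post; the channel configuration is that of the first block in b3 below). The spatial size is unchanged and the output channels are the four paths concatenated, 64 + 128 + 32 + 32 = 256:

    blk = Inception(192, 64, (96, 128), (16, 32), 32)
    X = torch.rand(1, 192, 12, 12)
    print(blk(X).shape)  # torch.Size([1, 256, 12, 12])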
    # Build each of the five stages
    # Stage 1: a 7x7 convolution layer with 64 output channels
    b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
                       nn.ReLU(),
                       nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    # Stage 2: two convolution layers: a 1x1 with 64 channels, then a 3x3 that triples the channels to 192
    b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1),
                       nn.ReLU(),
                       nn.Conv2d(64, 192, kernel_size=3, padding=1),
                       nn.ReLU(),
                       nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    # Stage 3: two Inception blocks in series; the numbers inside are all channel counts
    b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
                       Inception(256, 128, (128, 192), (32, 96), 64),
                       nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    # Stage 4: five Inception blocks in series; "nobody knows where the numbers come from" (verbatim); all channel counts
    b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
                       Inception(512, 160, (112, 224), (24, 64), 64),
                       Inception(512, 128, (128, 256), (24, 64), 64),
                       Inception(512, 112, (144, 288), (32, 64), 64),
                       Inception(528, 256, (160, 320), (32, 128), 128),
                       nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    # Stage 5: two Inception blocks, then global average pooling and flatten
    b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
                       Inception(832, 384, (192, 384), (48, 128), 128),
                       nn.AdaptiveAvgPool2d((1, 1)),
                       nn.Flatten())
    # The last stage outputs a 1024-dimensional vector; the linear layer maps it to the 10 classes
    net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))
    # Test with Fashion-MNIST, with images resized to 96x96
    X = torch.rand(size=(1, 1, 96, 96))
    for layer in net:
        X = layer(X)
        print(layer.__class__.__name__, 'output shape:\t', X.shape)
    Sequential output shape:    torch.Size([1, 64, 24, 24])
    Sequential output shape:    torch.Size([1, 192, 12, 12])
    Sequential output shape:    torch.Size([1, 480, 6, 6])
    Sequential output shape:    torch.Size([1, 832, 3, 3])
    Sequential output shape:    torch.Size([1, 1024])
    Linear output shape:    torch.Size([1, 10])
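    The channel counts in this printout are easy to verify: each Inception block's output channels are the sum of its four paths, and that sum is the next block's in_channels (a small check, not in the original post):

    # Output channels of an Inception block = sum over its four paths
    assert 64 + 128 + 32 + 32 == 256      # b3 block 1 -> in_channels of b3 block 2
    assert 128 + 192 + 96 + 64 == 480     # b3 block 2 -> input of b4
    assert 256 + 320 + 128 + 128 == 832   # b4 block 5 -> input of b5
    assert 384 + 384 + 128 + 128 == 1024  # b5 block 2 -> nn.Linear(1024, 10)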
    
    # Train the model
    lr, num_epochs, batch_size = 0.1, 10, 128
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
    d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
    # loss 0.236, train acc 0.909, test acc 0.898

    A pity that PyCharm does not display the training plot.
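    A possible workaround (a sketch, assuming d2l's usual matplotlib backend; d2l.plt is d2l's alias for matplotlib.pyplot):

    # After d2l.train_ch6(...) returns, force matplotlib to render the figure
    d2l.plt.savefig('googlenet_train.png')  # save the training curves to a file
    d2l.plt.show()                          # or pop up an interactive window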

  • Original post: https://blog.csdn.net/anelance/article/details/126687531