torch.nn.Sequential(*args)

From the Example in the official documentation, we can see that Sequential packages multiple layers into a single module, which keeps the code compact and makes the network easier to visualize.
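To get a feel for the API, here is a minimal sketch in the spirit of the docs example (the layer sizes below are arbitrary and unrelated to the network we build later):

import torch
from torch import nn

# A small Sequential model: layers are applied in the order they are listed.
model = nn.Sequential(
    nn.Conv2d(1, 20, 5),   # 1 input channel -> 20 output channels, 5x5 kernel
    nn.ReLU(),
    nn.Conv2d(20, 64, 5),  # 20 -> 64 channels
    nn.ReLU(),
)

x = torch.zeros((1, 1, 28, 28))
print(model(x).shape)  # torch.Size([1, 64, 20, 20])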

In the structure we are going to build, the "Hidden units" part is really just two linear layers.
Expanding all of the hidden layers, the full network architecture is as follows:



Check the output-size formula for convolution layers given in the official docs:

H_out = floor((H_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride + 1)

Let's analyze: the input is 32×32 and the output must also be 32×32, with kernel_size = 5, stride = 1, dilation = 1. Plugging in: 32 = (32 + 2*padding - 4 - 1) / 1 + 1, which gives padding = 2.

So padding = 2, i.e. a 2-pixel border is added around the input. The channel count goes from 3 to 32 because the layer has 32 convolution kernels (one per output channel), not a single kernel.
In summary: 3 input channels; 32 output channels; stride = 1; padding = 2; dilation = 1 (default); kernel_size = 5;
torch.nn.Conv2d(in_channels=3,out_channels=32,kernel_size=5,stride=1,padding=2)
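As a quick sanity check (just a sketch, using the layer and input sizes discussed above), we can confirm that padding = 2 keeps the 32×32 spatial size:

import torch

conv = torch.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2)
x = torch.zeros((1, 3, 32, 32))   # one 3-channel 32x32 image
print(conv(x).shape)              # torch.Size([1, 32, 32, 32]) -> spatial size unchanged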

torch.nn.MaxPool2d(kernel_size=2)
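MaxPool2d uses stride = kernel_size by default, so each pooling layer halves the spatial size (32 → 16 here). A quick check:

import torch

pool = torch.nn.MaxPool2d(kernel_size=2)   # stride defaults to kernel_size
x = torch.zeros((1, 32, 32, 32))
print(pool(x).shape)                        # torch.Size([1, 32, 16, 16])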

In the same way, the official formula gives padding = 2 for the next convolution as well.

From the two calculations above we can see that whenever a (5, 5) kernel with stride 1 must leave the spatial size unchanged, the padding has to be 2.
So padding = 2, again a 2-pixel border. Here the channel count stays at 32, which means this layer also has 32 kernels; each kernel simply spans all 32 input channels, as the weight shape below shows.
In summary: 32 input channels; 32 output channels; stride = 1; padding = 2; dilation = 1 (default); kernel_size = 5;
torch.nn.Conv2d(in_channels=32,out_channels=32,kernel_size=5,stride=1,padding=2)
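A small check on the kernel count (sketch): a Conv2d weight tensor has shape (out_channels, in_channels, kH, kW), i.e. one kernel per output channel:

import torch

conv = torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2)
print(conv.weight.shape)   # torch.Size([32, 32, 5, 5]) -> 32 kernels, each covering all 32 input channels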

torch.nn.MaxPool2d(kernel_size=2)

torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2)

torch.nn.MaxPool2d(kernel_size=2)
torch.nn.Flatten()

After the three conv + pool stages, the 32×32 input has been downsampled to 4×4 with 64 channels, so flattening yields 64 × 4 × 4 = 1024 features.

torch.nn.Linear(1024,64)

torch.nn.Linear(64,10)
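A short sketch that traces the shapes layer by layer (same layers as above) confirms where the 1024 comes from:

import torch
from torch import nn

layers = nn.Sequential(
    nn.Conv2d(3, 32, 5, stride=1, padding=2),
    nn.MaxPool2d(2),
    nn.Conv2d(32, 32, 5, stride=1, padding=2),
    nn.MaxPool2d(2),
    nn.Conv2d(32, 64, 5, stride=1, padding=2),
    nn.MaxPool2d(2),
    nn.Flatten(),
)

x = torch.zeros((1, 3, 32, 32))
for layer in layers:
    x = layer(x)
    print(layer.__class__.__name__, tuple(x.shape))
# Conv2d    (1, 32, 32, 32)
# MaxPool2d (1, 32, 16, 16)
# Conv2d    (1, 32, 16, 16)
# MaxPool2d (1, 32, 8, 8)
# Conv2d    (1, 64, 8, 8)
# MaxPool2d (1, 64, 4, 4)
# Flatten   (1, 1024)   -> matches Linear(1024, 64)

With the shapes confirmed, here is the full model written layer by layer, without Sequential: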
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter

class Beyond(nn.Module):
    def __init__(self):
        super(Beyond, self).__init__()
        self.conv_1 = torch.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2)
        self.maxpool_1 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv_2 = torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2)
        self.maxpool_2 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv_3 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2)
        self.maxpool_3 = torch.nn.MaxPool2d(kernel_size=2)
        self.flatten = torch.nn.Flatten()
        self.linear_1 = torch.nn.Linear(1024, 64)
        self.linear_2 = torch.nn.Linear(64, 10)

    def forward(self, x):
        x = self.conv_1(x)
        x = self.maxpool_1(x)
        x = self.conv_2(x)
        x = self.maxpool_2(x)
        x = self.conv_3(x)
        x = self.maxpool_3(x)
        x = self.flatten(x)
        x = self.linear_1(x)
        x = self.linear_2(x)
        return x

beyond = Beyond()
print(beyond)
"""
Beyond(
  (conv_1): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool_1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (conv_2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool_2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (conv_3): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool_3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (flatten): Flatten(start_dim=1, end_dim=-1)
  (linear_1): Linear(in_features=1024, out_features=64, bias=True)
  (linear_2): Linear(in_features=64, out_features=10, bias=True)
)
"""
input = torch.zeros((64,3,32,32))
print(input.shape)#torch.Size([64, 3, 32, 32])
output = beyond(input)
print(output.shape)#torch.Size([64, 10])
# write the network graph to TensorBoard for visualization
writer = SummaryWriter("y_log")
writer.add_graph(beyond,input)
writer.close()
In the terminal, run tensorboard --logdir=y_log --port=7870, where logdir is the path to the event files and port selects the port to serve on.
Here TensorBoard is opened on port 7870; if --port is omitted, it defaults to port 6006.
The same network can be written much more compactly with Sequential:


import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter

class Beyond(nn.Module):
    def __init__(self):
        super(Beyond, self).__init__()
        self.model = torch.nn.Sequential(
            torch.nn.Conv2d(3, 32, 5, padding=2),
            torch.nn.MaxPool2d(2),
            torch.nn.Conv2d(32, 32, 5, padding=2),
            torch.nn.MaxPool2d(2),
            torch.nn.Conv2d(32, 64, 5, padding=2),
            torch.nn.MaxPool2d(2),
            torch.nn.Flatten(),
            torch.nn.Linear(1024, 64),
            torch.nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x

beyond = Beyond()
print(beyond)
"""
Beyond(
(conv_1): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(maxpool_1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(conv_2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(maxpool_2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(conv_3): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(maxpool_3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(flatten): Flatten(start_dim=1, end_dim=-1)
(linear_1): Linear(in_features=1024, out_features=64, bias=True)
(linear_2): Linear(in_features=64, out_features=10, bias=True)
)
"""
input = torch.zeros((64,3,32,32))
print(input.shape)#torch.Size([64, 3, 32, 32])
output = beyond(input)
print(output.shape)#torch.Size([64, 10])
# write the network graph to TensorBoard for visualization
writer = SummaryWriter("y_log")
writer.add_graph(beyond,input)
writer.close()
As before, run tensorboard --logdir=y_log --port=7870 in the terminal to view the graph (the default port is 6006 if --port is omitted).


The two versions behave identically; the Sequential version is simply more concise, and its TensorBoard graph is easier to read.
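If you also want readable layer names inside the Sequential block (instead of the default numeric indices 0, 1, 2, ...), Sequential accepts an OrderedDict. A minimal sketch, with arbitrary example layers and names:

from collections import OrderedDict
from torch import nn

model = nn.Sequential(OrderedDict([
    ("conv_1", nn.Conv2d(3, 32, 5, padding=2)),   # names replace the default indices 0, 1, 2, ...
    ("pool_1", nn.MaxPool2d(2)),
    ("flatten", nn.Flatten()),
    ("linear_1", nn.Linear(32 * 16 * 16, 10)),    # 32 channels * 16 * 16 after one pooling of a 32x32 input
]))

print(model)   # submodules are listed as (conv_1), (pool_1), (flatten), (linear_1)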