适合新手搭建ResNet50残差网络的架构图+代码(最全)
网上的教程大多复杂难懂,不适合新手,本来神经网络就难,这些教程本身更难,对新手极度不友好,因此自己做的这个架构图和写的代码,面向新手,大神跳过
from torch import nn
import torch
from torchviz import make_dot
class box(nn.Module):
    """Bottleneck residual block for ResNet-50 (1x1 reduce -> 3x3 -> 1x1 expand).

    Args:
        in_channels: Nominal input width of the block. For the very first
            stage (``index == 0``) the caller passes twice the real width
            (e.g. 128 for a 64-channel input) so that all stages share one
            channel formula; it is halved below.
        index: Stage index; 0 marks the first stage, which keeps spatial
            size (the stem's max-pool already halved it).
        stride: Stride of the 3x3 conv; 2 downsamples spatially.
        downsample: True -> projection ("dashed") shortcut with conv+BN on
            the identity path; False -> plain identity ("solid") shortcut.
    """

    def __init__(self, in_channels, index=999, stride=1, downsample=False):
        super(box, self).__init__()
        last_stride = 2  # stride of the 1x1 projection conv on the shortcut
        if downsample:
            # Projection shortcut: channels expand 2x, spatial size halves.
            f_out_channels = in_channels * 2
            out_channels = in_channels // 2
            if index == 0:
                # First stage: real input width is half the nominal value.
                in_channels = in_channels // 2
                out_channels = in_channels
                f_out_channels = in_channels * 4
                last_stride = 1  # no spatial downsampling in stage 1
                stride = 1
        else:
            # Identity shortcut: channel count is unchanged.
            f_out_channels = in_channels
            out_channels = in_channels // 4
        self.downsample = downsample
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # NOTE(review): bias kept for checkpoint compatibility, though a bias
        # immediately before BatchNorm is redundant.
        self.conv3 = nn.Conv2d(in_channels=out_channels, out_channels=f_out_channels,
                               kernel_size=1, stride=1, padding=0)
        self.bn3 = nn.BatchNorm2d(f_out_channels)
        self.relu = nn.ReLU(inplace=True)
        # Projection path; only applied in forward() when downsample is True.
        self.fe = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=f_out_channels,
                      kernel_size=1, stride=last_stride, padding=0, bias=False),
            nn.BatchNorm2d(f_out_channels),
        )

    def forward(self, x):
        """Run the bottleneck and add the (possibly projected) shortcut."""
        identity = self.fe(x) if self.downsample else x
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))
        out = x + identity
        return self.relu(out)
class New50(nn.Module):
    """A hand-built ResNet-50 classifier assembled from ``box`` bottlenecks.

    Args:
        in_out: One tuple per stage:
            (first-block input width, block count, use projection shortcut,
             input width of the stage's remaining blocks).
        num_classes: Size of the final classification layer.
    """

    def __init__(self, in_out, num_classes=4):
        super(New50, self).__init__()
        # Stem: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Head: global average pool to (1, 1), then a linear classifier.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * 4, num_classes)

        blocks = []
        for stage, (first_width, repeat, project, rest_width) in enumerate(in_out):
            # First block of each stage may downsample via a projection shortcut.
            blocks.append(box(in_channels=first_width, stride=2,
                              downsample=project, index=stage))
            # Remaining blocks keep the width and use identity shortcuts.
            for _ in range(1, repeat):
                blocks.append(box(in_channels=rest_width))
        self.fes = nn.Sequential(*blocks)

    def forward(self, x):
        """Stem -> residual stages -> pooled features -> class logits."""
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.fes(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        return self.fc(x)
# One tuple per stage:
#   (input width of the stage's first block,
#    number of blocks in the stage,
#    whether the first block uses a projection ("dashed") shortcut,
#    input width of the stage's remaining blocks)
in_out = [(128, 3, True, 256), (256, 4, True, 512), (512, 6, True, 1024), (1024, 3, True, 2048)]

s = New50(in_out=in_out)


def resnet500():
    """Build a fresh ResNet-50 model with the default stage configuration."""
    return New50(in_out=in_out)
后续还会上传ResNet30,FCN,UNet等架构图和代码。
本文转载自: https://blog.csdn.net/qq_44697987/article/details/128178998
版权归原作者 克里斯的星星 所有, 如有侵权,请联系我们删除。