PyTorch Notes: VGG 16
For the theoretical background, see: 机器学习笔记：VGG 16 (UQI-LIUWJ's blog on CSDN).
1 直接調(diào)用
```python
import torch
import torchvision

model = torchvision.models.vgg16()
```
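Called this way, `vgg16()` returns a randomly initialized network. To start from the ImageNet-pretrained weights instead, pass a weights argument; a minimal sketch (the `weights` enum is the torchvision >= 0.13 API, while older releases used `pretrained=True`):

```python
import torch
import torchvision

# Load ImageNet-pretrained weights (torchvision >= 0.13 API)
model = torchvision.models.vgg16(weights=torchvision.models.VGG16_Weights.DEFAULT)
model.eval()  # inference mode: disables the Dropout layers

# Dummy forward pass; VGG 16 expects 3x224x224 inputs
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000]), one score per ImageNet class
```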
1.1 Inspecting the model and its parameters with torchsummary

```python
from torchsummary import summary

summary(model, (3, 224, 224))
'''
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 64, 224, 224]           1,792
              ReLU-2         [-1, 64, 224, 224]               0
            Conv2d-3         [-1, 64, 224, 224]          36,928
              ReLU-4         [-1, 64, 224, 224]               0
         MaxPool2d-5         [-1, 64, 112, 112]               0
            Conv2d-6        [-1, 128, 112, 112]          73,856
              ReLU-7        [-1, 128, 112, 112]               0
            Conv2d-8        [-1, 128, 112, 112]         147,584
              ReLU-9        [-1, 128, 112, 112]               0
        MaxPool2d-10          [-1, 128, 56, 56]               0
           Conv2d-11          [-1, 256, 56, 56]         295,168
             ReLU-12          [-1, 256, 56, 56]               0
           Conv2d-13          [-1, 256, 56, 56]         590,080
             ReLU-14          [-1, 256, 56, 56]               0
           Conv2d-15          [-1, 256, 56, 56]         590,080
             ReLU-16          [-1, 256, 56, 56]               0
        MaxPool2d-17          [-1, 256, 28, 28]               0
           Conv2d-18          [-1, 512, 28, 28]       1,180,160
             ReLU-19          [-1, 512, 28, 28]               0
           Conv2d-20          [-1, 512, 28, 28]       2,359,808
             ReLU-21          [-1, 512, 28, 28]               0
           Conv2d-22          [-1, 512, 28, 28]       2,359,808
             ReLU-23          [-1, 512, 28, 28]               0
        MaxPool2d-24          [-1, 512, 14, 14]               0
           Conv2d-25          [-1, 512, 14, 14]       2,359,808
             ReLU-26          [-1, 512, 14, 14]               0
           Conv2d-27          [-1, 512, 14, 14]       2,359,808
             ReLU-28          [-1, 512, 14, 14]               0
           Conv2d-29          [-1, 512, 14, 14]       2,359,808
             ReLU-30          [-1, 512, 14, 14]               0
        MaxPool2d-31            [-1, 512, 7, 7]               0
 AdaptiveAvgPool2d-32           [-1, 512, 7, 7]               0
           Linear-33                 [-1, 4096]     102,764,544
             ReLU-34                 [-1, 4096]               0
          Dropout-35                 [-1, 4096]               0
           Linear-36                 [-1, 4096]      16,781,312
             ReLU-37                 [-1, 4096]               0
          Dropout-38                 [-1, 4096]               0
           Linear-39                 [-1, 1000]       4,097,000
================================================================
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 218.78
Params size (MB): 527.79
Estimated Total Size (MB): 747.15
----------------------------------------------------------------
'''
```
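As a sanity check on the table, the per-layer counts follow directly from the shapes: a 3×3 convolution has (3·3·in_channels + 1)·out_channels parameters, and a linear layer has (in_features + 1)·out_features. A small snippet (not from the original post; it assumes the `model` object from above) reproducing a few of the reported numbers:

```python
# Conv2d-1: 3x3 kernels over 3 input channels, 64 filters, plus 64 biases
print((3 * 3 * 3 + 1) * 64)                        # 1792

# Linear-33: 512*7*7 flattened features -> 4096 units, plus 4096 biases
print((512 * 7 * 7 + 1) * 4096)                    # 102764544

# Total parameter count of the built-in model
print(sum(p.numel() for p in model.parameters()))  # 138357544
```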
2 Building it by hand

Apart from the AdaptiveAvgPool2d at layer 32, the network below is identical to the built-in model (a sketch of adding that layer back follows the summary output).
```python
import torch
import torch.nn as nn

class VGG16(nn.Module):
    def __init__(self):
        super(VGG16, self).__init__()
        # input to this block: 224*224*3
        self.block1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # input to this block: 112*112*64
        self.block2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # input to this block: 56*56*128
        self.block3 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # input to this block: 28*28*256
        self.block4 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # input to this block: 14*14*512
        self.block5 = nn.Sequential(
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # input to this block: 7*7*512
        self.fc_layer = nn.Sequential(
            nn.Linear(7 * 7 * 512, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 1000))
        # input to this layer: 1000 logits
        # softmax must run over the class dimension (dim=1), not the
        # batch dimension (the original dim=0 was a bug)
        self.Softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = x.view(x.shape[0], -1)  # flatten to [batch, 512*7*7]
        x = self.fc_layer(x)
        x = self.Softmax(x)
        return x

vgg = VGG16()

from torchsummary import summary
summary(vgg, (3, 244, 244))
'''
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 64, 244, 244]           1,792
              ReLU-2         [-1, 64, 244, 244]               0
            Conv2d-3         [-1, 64, 244, 244]          36,928
              ReLU-4         [-1, 64, 244, 244]               0
         MaxPool2d-5         [-1, 64, 122, 122]               0
            Conv2d-6        [-1, 128, 122, 122]          73,856
              ReLU-7        [-1, 128, 122, 122]               0
            Conv2d-8        [-1, 128, 122, 122]         147,584
              ReLU-9        [-1, 128, 122, 122]               0
        MaxPool2d-10          [-1, 128, 61, 61]               0
           Conv2d-11          [-1, 256, 61, 61]         295,168
             ReLU-12          [-1, 256, 61, 61]               0
           Conv2d-13          [-1, 256, 61, 61]         590,080
             ReLU-14          [-1, 256, 61, 61]               0
           Conv2d-15          [-1, 256, 61, 61]         590,080
             ReLU-16          [-1, 256, 61, 61]               0
        MaxPool2d-17          [-1, 256, 30, 30]               0
           Conv2d-18          [-1, 512, 30, 30]       1,180,160
             ReLU-19          [-1, 512, 30, 30]               0
           Conv2d-20          [-1, 512, 30, 30]       2,359,808
             ReLU-21          [-1, 512, 30, 30]               0
           Conv2d-22          [-1, 512, 30, 30]       2,359,808
             ReLU-23          [-1, 512, 30, 30]               0
        MaxPool2d-24          [-1, 512, 15, 15]               0
           Conv2d-25          [-1, 512, 15, 15]       2,359,808
             ReLU-26          [-1, 512, 15, 15]               0
           Conv2d-27          [-1, 512, 15, 15]       2,359,808
             ReLU-28          [-1, 512, 15, 15]               0
           Conv2d-29          [-1, 512, 15, 15]       2,359,808
             ReLU-30          [-1, 512, 15, 15]               0
        MaxPool2d-31            [-1, 512, 7, 7]               0
           Linear-32                 [-1, 4096]     102,764,544
             ReLU-33                 [-1, 4096]               0
          Dropout-34                 [-1, 4096]               0
           Linear-35                 [-1, 4096]      16,781,312
             ReLU-36                 [-1, 4096]               0
          Dropout-37                 [-1, 4096]               0
           Linear-38                 [-1, 1000]       4,097,000
          Softmax-39                 [-1, 1000]               0
================================================================
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.68
Forward/backward pass size (MB): 258.33
Params size (MB): 527.79
Estimated Total Size (MB): 786.80
----------------------------------------------------------------
'''
```
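Note that this summary was generated with a 244×244 input rather than the canonical 224×224 (likely a typo); both happen to end at a 7×7 feature map after the five poolings (224→112→56→28→14→7 and 244→122→61→30→15→7, with floor division), so the classifier still lines up. To match the built-in model layer for layer, and to accept arbitrary input resolutions, you can insert the missing adaptive pooling before the flatten. A minimal sketch, not from the original post:

```python
import torch
import torch.nn as nn

# The adaptive pooling that torchvision inserts as layer 32. It maps any
# spatial size down to 7x7, so the 7*7*512 -> 4096 linear layer fits
# regardless of input resolution.
avgpool = nn.AdaptiveAvgPool2d((7, 7))

# In VGG16.forward() it would go between block5 and the flatten:
#   x = self.block5(x)
#   x = avgpool(x)
#   x = x.view(x.shape[0], -1)

feat = torch.randn(1, 512, 9, 9)  # e.g. what block5 produces for a 300x300 input
print(avgpool(feat).shape)        # torch.Size([1, 512, 7, 7])
```

One more difference worth flagging: torchvision's vgg16 returns raw logits with no Softmax layer, while the hand-built version applies softmax inside forward. That is fine for inference, but drop it when training with nn.CrossEntropyLoss, which applies log-softmax internally.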
Summary

That covers both routes to VGG 16 in PyTorch: calling torchvision.models.vgg16 directly, and rebuilding the same architecture by hand. Hopefully these notes are a useful reference.