利用PyTorch实现VGG16教程


Posted in Python onJune 24, 2020

我就废话不多说了,大家还是直接看代码吧~

import torch
import torch.nn as nn
import torch.nn.functional as F
class VGG16(nn.Module):
    """VGG16 (configuration D, Simonyan & Zisserman 2014) for 3x224x224 input.

    Fix vs. the original snippet: every 3x3 convolution uses padding=1 and
    every max-pool is a plain 2x2/stride-2 pool, matching the canonical
    VGG16 layout (spatial size 224 -> 112 -> 56 -> 28 -> 14 -> 7). The
    original left the first conv of each stage unpadded and compensated
    with padded pooling, which deviates from the paper. Weight shapes and
    the output shape are unchanged, so existing callers still work.
    """

    def __init__(self, num_classes=1000):
        """Build the network.

        Args:
            num_classes: size of the final classification layer
                (default 1000, as in the original ImageNet head).
        """
        super(VGG16, self).__init__()

        # Stage 1: 3 x 224 x 224 -> 64 x 112 x 112
        self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
        self.maxpool1 = nn.MaxPool2d(2, 2)

        # Stage 2: 64 x 112 x 112 -> 128 x 56 x 56
        self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
        self.maxpool2 = nn.MaxPool2d(2, 2)

        # Stage 3: 128 x 56 x 56 -> 256 x 28 x 28
        self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
        self.maxpool3 = nn.MaxPool2d(2, 2)

        # Stage 4: 256 x 28 x 28 -> 512 x 14 x 14
        self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.maxpool4 = nn.MaxPool2d(2, 2)

        # Stage 5: 512 x 14 x 14 -> 512 x 7 x 7
        self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.maxpool5 = nn.MaxPool2d(2, 2)

        # Classifier head on the flattened 512 * 7 * 7 feature map.
        self.fc1 = nn.Linear(512 * 7 * 7, 4096)
        self.fc2 = nn.Linear(4096, 4096)
        self.fc3 = nn.Linear(4096, num_classes)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, num_classes)."""
        batch_size = x.size(0)  # x.size(0) is the batch size

        out = F.relu(self.conv1_1(x))
        out = F.relu(self.conv1_2(out))
        out = self.maxpool1(out)

        out = F.relu(self.conv2_1(out))
        out = F.relu(self.conv2_2(out))
        out = self.maxpool2(out)

        out = F.relu(self.conv3_1(out))
        out = F.relu(self.conv3_2(out))
        out = F.relu(self.conv3_3(out))
        out = self.maxpool3(out)

        out = F.relu(self.conv4_1(out))
        out = F.relu(self.conv4_2(out))
        out = F.relu(self.conv4_3(out))
        out = self.maxpool4(out)

        out = F.relu(self.conv5_1(out))
        out = F.relu(self.conv5_2(out))
        out = F.relu(self.conv5_3(out))
        out = self.maxpool5(out)

        # Flatten to (batch, 512 * 7 * 7) before the fully-connected head.
        out = out.view(batch_size, -1)

        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)

        return F.log_softmax(out, dim=1)

补充知识:Pytorch实现VGG(GPU版)

看代码吧~

import torch
from torch import nn
from torch import optim
from PIL import Image
import numpy as np

print(torch.cuda.is_available())
device = torch.device('cuda:0')
path = "/content/drive/My Drive/Colab Notebooks/data/dog_vs_cat/"


def _load_image(file_path):
    """Load one image as a float32 (224, 224, 3) array.

    .convert("RGB") guards against grayscale/CMYK JPEGs that would otherwise
    break the (224, 224, 3) assignment; Image.LANCZOS replaces the
    Image.ANTIALIAS alias that was removed in Pillow 10.
    """
    image = Image.open(file_path).convert("RGB")
    resized_image = image.resize((224, 224), Image.LANCZOS)
    return np.array(resized_image, dtype="float32")


# 1000 cats (label 0) followed by 1000 dogs (label 1), NHWC layout.
train_X = np.empty((2000, 224, 224, 3), dtype="float32")
train_Y = np.empty((2000,), dtype="int")

for i in range(1000):
    train_X[i] = _load_image(path + "cat." + str(i) + ".jpg")
    train_Y[i] = 0

for i in range(1000):
    train_X[i + 1000] = _load_image(path + "dog." + str(i) + ".jpg")
    train_Y[i + 1000] = 1

# Scale pixel values into [0, 1].
train_X /= 255

# Shuffle samples and labels with the same random permutation.
index = np.arange(2000)
np.random.shuffle(index)
train_X = train_X[index]
train_Y = train_Y[index]

# PyTorch expects NCHW: move the channel axis to position 1
# (replaces the per-channel copy loop of the original).
train_XX = np.ascontiguousarray(np.transpose(train_X, (0, 3, 1, 2)))
# 创建网络

class Net(nn.Module):
    """VGG16-style CNN (BatchNorm after each conv stage) that maps a
    3x224x224 image batch to 2 raw class logits (cat vs. dog)."""

    @staticmethod
    def _stage(in_ch, out_ch, n_convs):
        """Build one conv stage: n_convs (Conv2d + ReLU) pairs, then
        BatchNorm2d and a 2x2/stride-2 max-pool. Layer order and
        hyper-parameters match the hand-written stages, so state_dict
        keys are identical."""
        layers = []
        ch = in_ch
        for _ in range(n_convs):
            layers.append(nn.Conv2d(in_channels=ch, out_channels=out_ch,
                                    kernel_size=3, stride=1, padding=1))
            layers.append(nn.ReLU())
            ch = out_ch
        layers.append(nn.BatchNorm2d(out_ch, eps=1e-5, momentum=0.1, affine=True))
        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        return nn.Sequential(*layers)

    def __init__(self):
        super(Net, self).__init__()
        # Five conv stages, each halving the spatial size: 224 -> 7.
        self.conv1 = self._stage(3, 64, 2)
        self.conv2 = self._stage(64, 128, 2)
        self.conv3 = self._stage(128, 256, 3)
        self.conv4 = self._stage(256, 512, 3)
        self.conv5 = self._stage(512, 512, 3)
        # Fully-connected head producing the 2 class logits.
        self.dense1 = nn.Sequential(
            nn.Linear(7 * 7 * 512, 4096),
            nn.ReLU(),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Linear(4096, 2),
        )

    def forward(self, x):
        """Return raw logits of shape (batch, 2)."""
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            x = stage(x)
        x = x.view(-1, 7 * 7 * 512)
        return self.dense1(x)

batch_size = 16
net = Net().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0005)

# Full loss history, one entry per optimisation step (the original cleared
# this list every step, so np.mean(train_loss) was always just the last
# value and nothing survived for plotting).
train_loss = []
net.train()
for epoch in range(10):

    for i in range(2000 // batch_size):
        # Slice one mini-batch out of the preprocessed NCHW arrays.
        x = train_XX[i * batch_size:(i + 1) * batch_size]
        y = train_Y[i * batch_size:(i + 1) * batch_size]

        # Move to the GPU chosen above; .to(device) replaces the
        # inconsistent bare .cuda() calls of the original.
        x = torch.from_numpy(x).to(device)           # (batch, 3, 224, 224)
        y = torch.from_numpy(y).long().to(device)    # (batch,) integer labels

        out = net(x)                 # logits, shape (batch_size, 2)

        loss = criterion(out, y)     # cross-entropy vs. integer labels
        optimizer.zero_grad()        # clear gradients from the previous step
        loss.backward()              # back-propagate
        optimizer.step()             # apply the parameter update
        train_loss.append(loss.item())

        # Same number printed as before (mean of a single value == the value),
        # but the history is kept instead of being discarded.
        print(epoch, i * batch_size, train_loss[-1])

# Evaluate on the (training) set. Note: this measures fit, not generalisation.
# Fixes vs. the original: net.eval() is required so BatchNorm uses its running
# statistics — in training mode a batch of one yields meaningless batch stats
# and keeps corrupting the running averages; torch.no_grad() avoids building
# autograd graphs during inference.
net.eval()
total_correct = 0
with torch.no_grad():
    for i in range(2000):
        x = train_XX[i].reshape(1, 3, 224, 224)
        y = train_Y[i]

        # .to(device) for consistency with the device object defined above.
        x = torch.from_numpy(x).to(device)
        out = net(x).cpu().numpy()   # no detach() needed under no_grad
        pred = np.argmax(out)
        if pred == y:
            total_correct += 1
        print(total_correct)         # running correct count, as before

acc = total_correct / 2000.0
print('test acc:', acc)

# Release cached GPU memory now that training and evaluation are done.
torch.cuda.empty_cache()

将上面代码中batch_size改为32,训练次数改为100轮,得到如下准确率

利用PyTorch实现VGG16教程

过拟合了~

以上这篇利用PyTorch实现VGG16教程就是小编分享给大家的全部内容了,希望能给大家一个参考,也希望大家多多支持三水点靠木。

Python 相关文章推荐
爬山算法简介和Python实现实例
Apr 26 Python
Python简单读取json文件功能示例
Nov 30 Python
使用django-crontab实现定时任务的示例
Feb 26 Python
python3.6.3安装图文教程 TensorFlow安装配置方法
Jun 24 Python
Python 数据库操作 SQLAlchemy的示例代码
Feb 18 Python
详解Python 函数如何重载?
Apr 23 Python
Python实现时间序列可视化的方法
Aug 06 Python
python SVD压缩图像的实现代码
Nov 05 Python
Python操作Jira库常用方法解析
Apr 10 Python
Python如何操作docker redis过程解析
Aug 10 Python
仅用几行Python代码就能复制她的U盘文件?
Jun 26 Python
Python中字符串对象语法分享
Feb 24 Python
python安装读取grib库总结(推荐)
Jun 24 #Python
Pytorch mask-rcnn 实现细节分享
Jun 24 #Python
在Pytorch中使用Mask R-CNN进行实例分割操作
Jun 24 #Python
OpenCV+python实现实时目标检测功能
Jun 24 #Python
基于Python下载网络图片方法汇总代码实例
Jun 24 #Python
Python 分布式缓存之Reids数据类型操作详解
Jun 24 #Python
PyTorch中model.zero_grad()和optimizer.zero_grad()用法
Jun 24 #Python
You might like
mac下安装nginx和php
2013/11/04 PHP
详解Window7 下开发php扩展
2015/12/31 PHP
php的常量和变量实例详解
2017/06/27 PHP
PHP 应用容器化以及部署方法
2018/02/12 PHP
Javascript中定义方法的另类写法(批量定义js对象的方法)
2011/02/25 Javascript
屏蔽网页右键复制和ctrl+c复制的js代码
2013/01/04 Javascript
node.js中的favicon.ico请求问题处理
2014/12/15 Javascript
原生JS实现仿淘宝网左侧商品分类菜单效果代码
2015/09/10 Javascript
jquery获取css的color值返回RGB的方法
2015/12/18 Javascript
在WordPress中加入Google搜索功能的简单步骤讲解
2016/01/04 Javascript
Javascript实现通过选择周数显示开始日和结束日的实现代码
2016/05/30 Javascript
AngularJS 中的Promise --- $q服务详解
2016/09/14 Javascript
html判断当前页面是否在iframe中的实例
2016/11/30 Javascript
webpack4 SCSS提取和懒加载的示例
2018/09/03 Javascript
axios如何取消重复无用的请求详解
2019/12/15 Javascript
JavaScript实现猜数字游戏
2020/05/20 Javascript
[04:19]完美世界携手游戏风云打造 卡尔工作室模型介绍篇
2013/04/24 DOTA
[01:10:49]Secret vs VGJ.S 2018国际邀请赛淘汰赛BO3 第二场 8.24
2018/08/25 DOTA
python 示例分享---逻辑推理编程解决八皇后
2014/07/20 Python
Python的Flask框架的简介和安装方法
2015/11/13 Python
python 异常处理总结
2016/10/18 Python
快速实现基于Python的微信聊天机器人示例代码
2017/03/03 Python
python中cPickle类使用方法详解
2018/08/27 Python
python数据结构学习之实现线性表的顺序
2018/09/28 Python
Python学习笔记之视频人脸检测识别实例教程
2019/03/06 Python
图片上传插件ImgUploadJS:用HTML5 File API 实现截图粘贴上传、拖拽上传
2016/01/20 HTML / CSS
计算机专业职业规划
2014/02/28 职场文书
人民教师求职自荐信
2014/03/12 职场文书
试用期自我鉴定范文
2014/03/20 职场文书
小学教师读书活动总结
2014/07/08 职场文书
应届毕业生求职简历自我评价
2015/03/02 职场文书
解除劳动合同通知书范本
2015/04/16 职场文书
2015年教研组工作总结
2015/05/04 职场文书
2015年七夕情人节感言
2015/08/03 职场文书
使用Redis实现秒杀功能的简单方法
2021/05/08 Redis
Go语言基础知识点介绍
2021/07/04 Golang