Implementing VGG16 with PyTorch: A Tutorial


Posted in Python on June 24, 2020

Without further ado, let's go straight to the code.

import torch
import torch.nn as nn
import torch.nn.functional as F


class VGG16(nn.Module):

    def __init__(self):
        super(VGG16, self).__init__()

        # input: 3 * 224 * 224
        self.conv1_1 = nn.Conv2d(3, 64, 3)                     # 64 * 222 * 222
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=(1, 1))    # 64 * 222 * 222
        self.maxpool1 = nn.MaxPool2d((2, 2), padding=(1, 1))   # pooling 64 * 112 * 112

        self.conv2_1 = nn.Conv2d(64, 128, 3)                   # 128 * 110 * 110
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=(1, 1))  # 128 * 110 * 110
        self.maxpool2 = nn.MaxPool2d((2, 2), padding=(1, 1))   # pooling 128 * 56 * 56

        self.conv3_1 = nn.Conv2d(128, 256, 3)                  # 256 * 54 * 54
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=(1, 1))  # 256 * 54 * 54
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=(1, 1))  # 256 * 54 * 54
        self.maxpool3 = nn.MaxPool2d((2, 2), padding=(1, 1))   # pooling 256 * 28 * 28

        self.conv4_1 = nn.Conv2d(256, 512, 3)                  # 512 * 26 * 26
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 26 * 26
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 26 * 26
        self.maxpool4 = nn.MaxPool2d((2, 2), padding=(1, 1))   # pooling 512 * 14 * 14

        self.conv5_1 = nn.Conv2d(512, 512, 3)                  # 512 * 12 * 12
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 12 * 12
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 12 * 12
        self.maxpool5 = nn.MaxPool2d((2, 2), padding=(1, 1))   # pooling 512 * 7 * 7
        # flattening (view) happens in forward()

        self.fc1 = nn.Linear(512 * 7 * 7, 4096)
        self.fc2 = nn.Linear(4096, 4096)
        self.fc3 = nn.Linear(4096, 1000)
        # log_softmax over the 1000 classes is applied in forward()

    def forward(self, x):

        # x.size(0) is the batch size
        in_size = x.size(0)

        out = self.conv1_1(x)    # 222
        out = F.relu(out)
        out = self.conv1_2(out)  # 222
        out = F.relu(out)
        out = self.maxpool1(out) # 112

        out = self.conv2_1(out)  # 110
        out = F.relu(out)
        out = self.conv2_2(out)  # 110
        out = F.relu(out)
        out = self.maxpool2(out) # 56

        out = self.conv3_1(out)  # 54
        out = F.relu(out)
        out = self.conv3_2(out)  # 54
        out = F.relu(out)
        out = self.conv3_3(out)  # 54
        out = F.relu(out)
        out = self.maxpool3(out) # 28

        out = self.conv4_1(out)  # 26
        out = F.relu(out)
        out = self.conv4_2(out)  # 26
        out = F.relu(out)
        out = self.conv4_3(out)  # 26
        out = F.relu(out)
        out = self.maxpool4(out) # 14

        out = self.conv5_1(out)  # 12
        out = F.relu(out)
        out = self.conv5_2(out)  # 12
        out = F.relu(out)
        out = self.conv5_3(out)  # 12
        out = F.relu(out)
        out = self.maxpool5(out) # 7

        # flatten to (batch_size, 512 * 7 * 7)
        out = out.view(in_size, -1)

        out = self.fc1(out)
        out = F.relu(out)
        out = self.fc2(out)
        out = F.relu(out)
        out = self.fc3(out)

        out = F.log_softmax(out, dim=1)
        return out
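A note on shapes: unlike the canonical VGG16 configuration, this version leaves the first convolution of each block unpadded and pads the max-pooling layers instead, yet the feature map still ends up at 512 * 7 * 7 before the fully connected layers (the per-layer comments above trace the arithmetic). A minimal sanity check might look like this:

# Sanity check: a (N, 3, 224, 224) batch should come out as (N, 1000) log-probabilities.
if __name__ == '__main__':
    model = VGG16()
    dummy = torch.randn(2, 3, 224, 224)  # fake batch of two RGB images
    out = model(dummy)
    print(out.shape)                     # expected: torch.Size([2, 1000])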

Supplementary knowledge: implementing VGG with PyTorch (GPU version)

Here is the code:

import torch
from torch import nn
from torch import optim
from PIL import Image
import numpy as np

print(torch.cuda.is_available())
device = torch.device('cuda:0')
path = "/content/drive/My Drive/Colab Notebooks/data/dog_vs_cat/"

train_X = np.empty((2000, 224, 224, 3), dtype="float32")
train_Y = np.empty((2000,), dtype="int")
train_XX = np.empty((2000, 3, 224, 224), dtype="float32")

# Load 1000 cat images, resized to 224 x 224, with label 0
for i in range(1000):
    file_path = path + "cat." + str(i) + ".jpg"
    image = Image.open(file_path)
    resized_image = image.resize((224, 224), Image.ANTIALIAS)
    img = np.array(resized_image)
    train_X[i, :, :, :] = img
    train_Y[i] = 0

# Load 1000 dog images, resized to 224 x 224, with label 1
for i in range(1000):
    file_path = path + "dog." + str(i) + ".jpg"
    image = Image.open(file_path)
    resized_image = image.resize((224, 224), Image.ANTIALIAS)
    img = np.array(resized_image)
    train_X[i + 1000, :, :, :] = img
    train_Y[i + 1000] = 1

# Scale pixel values to [0, 1]
train_X /= 255

# Shuffle images and labels with the same permutation
index = np.arange(2000)
np.random.shuffle(index)

train_X = train_X[index, :, :, :]
train_Y = train_Y[index]

# Reorder from (N, H, W, C) to (N, C, H, W), the layout PyTorch expects
for i in range(3):
    train_XX[:, i, :, :] = train_X[:, :, :, i]
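# Note: the channel-reordering loop above could equivalently be written as a single
# transpose (a sketch; train_XX would then not need to be pre-allocated):
# train_XX = train_X.transpose(0, 3, 1, 2).copy()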
# Build the network

class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=64, eps=1e-05, momentum=0.1, affine=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(128, eps=1e-5, momentum=0.1, affine=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(256, eps=1e-5, momentum=0.1, affine=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(512, eps=1e-5, momentum=0.1, affine=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(512, eps=1e-5, momentum=0.1, affine=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.dense1 = nn.Sequential(
            nn.Linear(7 * 7 * 512, 4096),
            nn.ReLU(),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Linear(4096, 2)
        )

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = x.view(-1, 7 * 7 * 512)
        x = self.dense1(x)
        return x

batch_size = 16
net = Net().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0005)

train_loss = []
for epoch in range(10):

    for i in range(2000 // batch_size):
        x = train_XX[i * batch_size:i * batch_size + batch_size]
        y = train_Y[i * batch_size:i * batch_size + batch_size]

        x = torch.from_numpy(x)   # (batch_size, 3, 224, 224)
        y = torch.from_numpy(y)   # (batch_size,) integer class labels; CrossEntropyLoss expects indices, not one-hot
        x = x.cuda()
        y = y.long().cuda()

        out = net(x)

        loss = criterion(out, y)  # compute the loss between prediction and label
        optimizer.zero_grad()     # clear gradients left over from the previous step
        loss.backward()           # backpropagate to compute parameter gradients
        optimizer.step()          # apply the update to the parameters of net
        train_loss.append(loss.item())

        # train_loss is reset below, so this effectively prints the loss of the current batch
        print(epoch, i * batch_size, np.mean(train_loss))
        train_loss = []

# Check accuracy over the same 2000 images the network was trained on
# (so this is training accuracy rather than a true test accuracy).
# Evaluation runs in eval mode and without gradient tracking, so BatchNorm
# uses its running statistics and no autograd graph is built.
net.eval()
total_correct = 0
with torch.no_grad():
    for i in range(2000):
        x = train_XX[i].reshape(1, 3, 224, 224)
        y = train_Y[i]
        x = torch.from_numpy(x)

        x = x.cuda()
        out = net(x).cpu()
        out = out.detach().numpy()
        pred = np.argmax(out)
        if pred == y:
            total_correct += 1
        print(total_correct)

acc = total_correct / 2000.0
print('test acc:', acc)

torch.cuda.empty_cache()
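For comparison, torchvision also ships a reference VGG16. A minimal sketch of adapting it to the same two-class task, assuming a torchvision version whose vgg16 constructor still accepts the pretrained argument, might look like this:

from torchvision import models

# Reference VGG16; swap the final 1000-way classifier layer for a 2-way one.
vgg = models.vgg16(pretrained=False)
vgg.classifier[6] = nn.Linear(4096, 2)
vgg = vgg.to(device)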

Changing batch_size in the training script above to 32 and training for 100 epochs instead of 10 gives the accuracy shown below.

(Screenshot of the resulting accuracy omitted.)

The model has overfit.
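Since the accuracy above is measured on the very images the network was trained on, one way to confirm the overfitting is to hold out part of the 2000 shuffled images as a validation set. A minimal sketch, assuming the training loop is changed to iterate only over the first 1800 images (range(1800 // batch_size)) so the last 200 stay unseen:

# Assumes training used only the first 1800 shuffled images, so these 200 were never seen.
val_X, val_Y = train_XX[1800:], train_Y[1800:]

net.eval()
correct = 0
with torch.no_grad():
    for i in range(200):
        x = torch.from_numpy(val_X[i].reshape(1, 3, 224, 224)).cuda()
        pred = net(x).argmax(dim=1).item()  # index of the larger of the two logits
        correct += int(pred == val_Y[i])
print('val acc:', correct / 200.0)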

That concludes this tutorial on implementing VGG16 with PyTorch. I hope it gives you a useful reference, and I hope you will continue to support 三水点靠木.
