利用PyTorch实现VGG16教程


Posted in Python on June 24, 2020

我就废话不多说了,大家还是直接看代码吧~

import torch
import torch.nn as nn
import torch.nn.functional as F
class VGG16(nn.Module):
 
 def __init__(self):
  super(VGG16, self).__init__()
  
  # 3 * 224 * 224
  self.conv1_1 = nn.Conv2d(3, 64, 3) # 64 * 222 * 222
  self.conv1_2 = nn.Conv2d(64, 64, 3, padding=(1, 1)) # 64 * 222* 222
  self.maxpool1 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 64 * 112 * 112
  
  self.conv2_1 = nn.Conv2d(64, 128, 3) # 128 * 110 * 110
  self.conv2_2 = nn.Conv2d(128, 128, 3, padding=(1, 1)) # 128 * 110 * 110
  self.maxpool2 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 128 * 56 * 56
  
  self.conv3_1 = nn.Conv2d(128, 256, 3) # 256 * 54 * 54
  self.conv3_2 = nn.Conv2d(256, 256, 3, padding=(1, 1)) # 256 * 54 * 54
  self.conv3_3 = nn.Conv2d(256, 256, 3, padding=(1, 1)) # 256 * 54 * 54
  self.maxpool3 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 256 * 28 * 28
  
  self.conv4_1 = nn.Conv2d(256, 512, 3) # 512 * 26 * 26
  self.conv4_2 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 26 * 26
  self.conv4_3 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 26 * 26
  self.maxpool4 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 512 * 14 * 14
  
  self.conv5_1 = nn.Conv2d(512, 512, 3) # 512 * 12 * 12
  self.conv5_2 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 12 * 12
  self.conv5_3 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 12 * 12
  self.maxpool5 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 512 * 7 * 7
  # view
  
  self.fc1 = nn.Linear(512 * 7 * 7, 4096)
  self.fc2 = nn.Linear(4096, 4096)
  self.fc3 = nn.Linear(4096, 1000)
  # softmax 1 * 1 * 1000
  
 def forward(self, x):
  
  # x.size(0)即为batch_size
  in_size = x.size(0)
  
  out = self.conv1_1(x) # 222
  out = F.relu(out)
  out = self.conv1_2(out) # 222
  out = F.relu(out)
  out = self.maxpool1(out) # 112
  
  out = self.conv2_1(out) # 110
  out = F.relu(out)
  out = self.conv2_2(out) # 110
  out = F.relu(out)
  out = self.maxpool2(out) # 56
  
  out = self.conv3_1(out) # 54
  out = F.relu(out)
  out = self.conv3_2(out) # 54
  out = F.relu(out)
  out = self.conv3_3(out) # 54
  out = F.relu(out)
  out = self.maxpool3(out) # 28
  
  out = self.conv4_1(out) # 26
  out = F.relu(out)
  out = self.conv4_2(out) # 26
  out = F.relu(out)
  out = self.conv4_3(out) # 26
  out = F.relu(out)
  out = self.maxpool4(out) # 14
  
  out = self.conv5_1(out) # 12
  out = F.relu(out)
  out = self.conv5_2(out) # 12
  out = F.relu(out)
  out = self.conv5_3(out) # 12
  out = F.relu(out)
  out = self.maxpool5(out) # 7
  
  # 展平
  out = out.view(in_size, -1)
  
  out = self.fc1(out)
  out = F.relu(out)
  out = self.fc2(out)
  out = F.relu(out)
  out = self.fc3(out)
  
  out = F.log_softmax(out, dim=1)
  return out

补充知识:Pytorch实现VGG(GPU版)

看代码吧~

import torch
from torch import nn
from torch import optim
from PIL import Image
import numpy as np

print(torch.cuda.is_available())
device = torch.device('cuda:0')
path="/content/drive/My Drive/Colab Notebooks/data/dog_vs_cat/"

# 2000 images total: 1000 cats (label 0) followed by 1000 dogs (label 1).
train_X = np.empty((2000, 224, 224, 3), dtype="float32")   # NHWC, as loaded from PIL
train_Y = np.empty((2000,), dtype="int")


def _load_image(file_path):
    """Load one image, force 3-channel RGB, resize to 224x224, return an HWC array.

    convert("RGB") guards against grayscale/palette JPEGs, which would otherwise
    break the (224, 224, 3) assignment below.
    """
    with Image.open(file_path) as image:
        # Image.LANCZOS is the modern name for ANTIALIAS (removed in Pillow 10).
        resized = image.convert("RGB").resize((224, 224), Image.LANCZOS)
        return np.array(resized)


for i in range(1000):
    train_X[i] = _load_image(path + "cat." + str(i) + ".jpg")
    train_Y[i] = 0  # cat

for i in range(1000):
    train_X[i + 1000] = _load_image(path + "dog." + str(i) + ".jpg")
    train_Y[i + 1000] = 1  # dog

train_X /= 255  # scale pixel values to [0, 1]

# Shuffle images and labels with the same permutation.
index = np.arange(2000)
np.random.shuffle(index)
train_X = train_X[index]
train_Y = train_Y[index]

# NHWC -> NCHW for PyTorch; make it contiguous so torch.from_numpy gets a dense array.
train_XX = np.ascontiguousarray(train_X.transpose(0, 3, 1, 2))
# 创建网络

class Net(nn.Module):

 def __init__(self):
  super(Net, self).__init__()
  self.conv1 = nn.Sequential(
   nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.BatchNorm2d(num_features=64, eps=1e-05, momentum=0.1, affine=True),
   nn.MaxPool2d(kernel_size=2,stride=2)
  )
  self.conv2 = nn.Sequential(
   nn.Conv2d(in_channels=64,out_channels=128,kernel_size=3,stride=1,padding=1),
   nn.ReLU(),
   nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.BatchNorm2d(128,eps=1e-5,momentum=0.1,affine=True),
   nn.MaxPool2d(kernel_size=2,stride=2)
  )
  self.conv3 = nn.Sequential(
   nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.BatchNorm2d(256,eps=1e-5, momentum=0.1, affine=True),
   nn.MaxPool2d(kernel_size=2, stride=2)
  )
  self.conv4 = nn.Sequential(
   nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.BatchNorm2d(512, eps=1e-5, momentum=0.1, affine=True),
   nn.MaxPool2d(kernel_size=2, stride=2)
  )
  self.conv5 = nn.Sequential(
   nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
   nn.ReLU(),
   nn.BatchNorm2d(512, eps=1e-5, momentum=0.1, affine=True),
   nn.MaxPool2d(kernel_size=2, stride=2)
  )
  self.dense1 = nn.Sequential(
   nn.Linear(7*7*512,4096),
   nn.ReLU(),
   nn.Linear(4096,4096),
   nn.ReLU(),
   nn.Linear(4096,2)
  )

 def forward(self, x):
  x=self.conv1(x)
  x=self.conv2(x)
  x=self.conv3(x)
  x=self.conv4(x)
  x=self.conv5(x)
  x=x.view(-1,7*7*512)
  x=self.dense1(x)
  return x

batch_size = 16
net = Net().to(device)
criterion = nn.CrossEntropyLoss()       # expects raw logits + integer class labels
optimizer = optim.Adam(net.parameters(), lr=0.0005)

train_loss = []                         # per-batch losses, kept for later inspection
for epoch in range(10):
    for i in range(2000 // batch_size):
        start = i * batch_size
        # Slice a mini-batch and move it to the same device as the model
        # (.to(device) instead of .cuda() keeps the device choice in one place).
        x = torch.from_numpy(train_XX[start:start + batch_size]).to(device)
        y = torch.from_numpy(train_Y[start:start + batch_size]).long().to(device)

        out = net(x)
        loss = criterion(out, y)        # cross-entropy between logits and labels
        optimizer.zero_grad()           # clear gradients from the previous step
        loss.backward()                 # backpropagate
        optimizer.step()                # apply the parameter update
        train_loss.append(loss.item())

        # The original printed np.mean of a single-element list, i.e. just the
        # current batch loss — print it directly.
        print(epoch, start, loss.item())

total_correct = 0
net.eval()  # use BatchNorm running stats and stop mutating them during evaluation
with torch.no_grad():  # no autograd graph needed — saves memory and time
    for i in range(2000):
        x = torch.from_numpy(train_XX[i:i + 1]).to(device)  # batch of one, NCHW
        out = net(x)
        pred = int(out.argmax(dim=1).item())
        if pred == int(train_Y[i]):
            total_correct += 1

# NOTE(review): this measures accuracy on the *training* images — there is no
# held-out test split in this script.
acc = total_correct / 2000.0
print('test acc:', acc)

torch.cuda.empty_cache()

将上面代码中batch_size改为32,训练次数改为100轮,得到如下准确率

利用PyTorch实现VGG16教程

过拟合了~

以上这篇利用PyTorch实现VGG16教程就是小编分享给大家的全部内容了,希望能给大家一个参考,也希望大家多多支持三水点靠木。

Python 相关文章推荐
pymssql ntext字段调用问题解决方法
Dec 17 Python
python通过zlib实现压缩与解压字符串的方法
Nov 19 Python
Python判断文件和文件夹是否存在的方法
May 21 Python
浅谈插入排序算法在Python程序中的实现及简单改进
May 04 Python
TensorFlow入门使用 tf.train.Saver()保存模型
Apr 24 Python
Python  unittest单元测试框架的使用
Sep 08 Python
详解Python中is和==的区别
Mar 21 Python
Django Rest framework频率原理与限制
Jul 26 Python
Pytorch Tensor基本数学运算详解
Dec 30 Python
利用python绘制数据曲线图的实现
Apr 09 Python
pyecharts动态轨迹图的实现示例
Apr 17 Python
django Model层常用验证器及自定义验证器详解
Jul 15 Python
python安装读取grib库总结(推荐)
Jun 24 #Python
Pytorch mask-rcnn 实现细节分享
Jun 24 #Python
在Pytorch中使用Mask R-CNN进行实例分割操作
Jun 24 #Python
OpenCV+python实现实时目标检测功能
Jun 24 #Python
基于Python下载网络图片方法汇总代码实例
Jun 24 #Python
Python 分布式缓存之Redis数据类型操作详解
Jun 24 #Python
PyTorch中model.zero_grad()和optimizer.zero_grad()用法
Jun 24 #Python
You might like
[原创]PHP中通过ADODB库实现调用Access数据库之修正版本
2006/12/31 PHP
php获取post中的json数据的实现方法
2011/06/08 PHP
php计算title标题相似比的方法
2015/07/29 PHP
PHP实现批量删除(封装)
2017/04/28 PHP
基于jQuery的仿flash的广告轮播
2010/11/05 Javascript
ejs v9 javascript模板系统
2012/03/21 Javascript
js unicode 编码解析关于数据转换为中文的两种方法
2014/04/21 Javascript
jquery隔行换色效果实现方法
2015/01/15 Javascript
JavaScript实现点击单选按钮改变输入框中文本域内容的方法
2015/08/12 Javascript
基于jQuery实现带动画效果超炫酷的弹出对话框(附源码下载)
2016/02/22 Javascript
jquery实现简单的banner轮播效果【实例】
2016/03/30 Javascript
深入理解JavaScript 函数
2016/06/06 Javascript
Bootstrap Metronic完全响应式管理模板学习笔记
2016/07/08 Javascript
浅谈Nodejs应用主文件index.js
2016/08/28 NodeJs
详解javascript常用工具类的封装
2018/01/30 Javascript
快速解决angularJS中用post方法时后台拿不到值的问题
2018/08/14 Javascript
详解React中传入组件的props改变时更新组件的几种实现方法
2018/09/13 Javascript
JavaScript循环遍历你会用哪些之小结篇
2018/09/28 Javascript
浅谈Vue使用Cascader级联选择器数据回显中的坑
2020/10/31 Javascript
浅谈Vue使用Elementui修改默认的最快方法
2020/12/05 Vue.js
[37:45]完美世界DOTA2联赛PWL S3 LBZS vs Phoenix 第二场 12.09
2020/12/11 DOTA
python处理文本文件并生成指定格式的文件
2014/07/31 Python
详解Python中映射类型的内建函数和工厂函数
2015/08/19 Python
pygame游戏之旅 计算游戏中躲过的障碍数量
2018/11/20 Python
Python *args和**kwargs用法实例解析
2020/03/02 Python
韩国三大免税店之一:THE GRAND 中文免税店
2016/07/21 全球购物
Debenhams爱尔兰:英国知名的百货公司
2017/01/02 全球购物
法拉利英国精品店:Ferraris Boutique UK
2019/07/20 全球购物
大学学习个人的自我评价
2014/02/18 职场文书
感恩母亲节演讲稿
2014/05/07 职场文书
旷课检讨书范文
2014/10/30 职场文书
蜗居观后感
2015/06/11 职场文书
土木工程生产实习心得体会
2016/01/22 职场文书
SQL Server2019数据库之简单子查询的具有方法
2021/04/27 SQL Server
在K8s上部署Redis集群的方法步骤
2021/04/27 Redis
使用Ajax实现进度条的绘制
2022/04/07 Javascript