Training VGG16 on the CIFAR10 dataset with Keras


Posted in Python on July 07, 2020

Without further ado, here is the code!

import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.optimizers import SGD
from keras import regularizers
 
#import data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255.0  #scale pixel values to [0, 1]
x_test = x_test.astype('float32') / 255.0
y_train = keras.utils.to_categorical(y_train, 10)  #one-hot encode the 10 classes
y_test = keras.utils.to_categorical(y_test, 10)
 
weight_decay = 0.0005
nb_epoch=100
batch_size=32
 
#layer1 32*32*3
model = Sequential()
model.add(Conv2D(64, (3, 3), padding='same', input_shape=(32, 32, 3),
          kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
#layer2 32*32*64
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer3 16*16*64
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer4 16*16*128
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer5 8*8*128
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer6 8*8*256
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer7 8*8*256
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer8 4*4*256
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer9 4*4*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer10 4*4*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer11 2*2*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer12 2*2*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer13 2*2*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
#layer14 1*1*512
model.add(Flatten())
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
#layer15 512
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
#layer16 512
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
#output: 10 class probabilities
 
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
 
model.fit(x_train, y_train, epochs=nb_epoch, batch_size=batch_size,
          validation_split=0.1, verbose=1)
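
The script imports ImageDataGenerator but never uses it. As a hedged sketch (the augmentation settings below are illustrative assumptions, not part of the original), training could instead run on augmented batches:

datagen = ImageDataGenerator(
    rotation_range=15,       #random rotations up to 15 degrees
    width_shift_range=0.1,   #random horizontal shifts
    height_shift_range=0.1,  #random vertical shifts
    horizontal_flip=True)    #random left-right flips
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                    steps_per_epoch=len(x_train) // batch_size,
                    epochs=nb_epoch, validation_data=(x_test, y_test))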

Bonus knowledge: training VGG16 on your own dataset in PyTorch, step by step

Preparing and loading the dataset with ImageFolder

In many machine learning and deep learning tasks we have to supply our own images. That is, the dataset is not preprocessed for us the way MNIST or CIFAR10 are; it is just raw pictures. Take cat/dog classification as an example: under a data directory there are two folders, train and val. Inside train there are cat and dog folders holding your own images, and val mirrors train. With that, the dataset is ready; the layout is sketched below.
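
The layout, sketched (the folder names follow the cat/dog example above; ImageFolder itself imposes no particular names):

data/
├── train/
│   ├── cat/   #your cat images, e.g. 1.jpg, 2.jpg, ...
│   └── dog/   #your dog images
└── val/       #mirrors train/
    ├── cat/
    └── dog/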


ImageFolder splits a dataset by treating each subdirectory name as a class label; the PyTorch documentation describes it in detail.

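A minimal sketch of what ImageFolder infers from that layout (the path is the illustrative one from above):

from torchvision import datasets

dataset = datasets.ImageFolder("data/train")  #no transform: samples stay PIL images
print(dataset.classes)       #['cat', 'dog'] -- subdirectory names, sorted
print(dataset.class_to_idx)  #{'cat': 0, 'dog': 1}
img, label = dataset[0]      #one (image, integer label) pair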

#transforms for the training set
train_transforms = transforms.Compose([
  transforms.RandomResizedCrop(224),  #random scaled crop to 224x224
  transforms.RandomHorizontalFlip(),  #random horizontal flip
  transforms.ToTensor(),   #convert to a tensor
  transforms.Normalize((.5, .5, .5), (.5, .5, .5))  #normalize each channel
])
#transforms for the validation set
val_transforms = transforms.Compose([
  transforms.Resize(256),
  transforms.CenterCrop(224),  #deterministic crop for evaluation
  transforms.ToTensor(),
  transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])

train_dir = "G:/data/train"      #训练集路径
#定义数据集
train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)
#加载数据集
train_dataloader = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)

val_dir = "G:/datat/val" 
val_datasets = datasets.ImageFolder(val_dir, transform=val_transforms)
val_dataloader = torch.utils.data.DataLoader(val_datasets, batch_size=batch_size, shuffle=True)

Transfer learning, with VGG16 as the example

Here is the transfer-learning code:

class VGGNet(nn.Module):
  def __init__(self, num_classes=2):  #num_classes is 2 for this binary task
    super(VGGNet, self).__init__()
    net = models.vgg16(pretrained=True)  #load VGG16 with pretrained weights
    net.classifier = nn.Sequential()  #empty out the classifier; we define our own below
    self.features = net  #keep VGG16's feature layers
    self.classifier = nn.Sequential(  #our own classification head
        nn.Linear(512 * 7 * 7, 512),  #512*7*7 is fixed by VGG16; the second argument (hidden units) can be tuned
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(512, 128),
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(128, num_classes),
    )

  def forward(self, x):
    x = self.features(x)
    x = x.view(x.size(0), -1)
    x = self.classifier(x)
    return x
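
One common refinement, not in the original code: freeze the pretrained feature layers so that only the new classifier head is trained. A minimal sketch (reusing the VGGNet class above and torch.optim as optim from the complete script below):

model = VGGNet(num_classes=2)
for param in model.features.parameters():
  param.requires_grad = False  #keep the pretrained convolutional weights fixed
optimizer = optim.Adam(model.classifier.parameters(), lr=0.0002)  #train only the new head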

The complete code:

from __future__ import print_function, division

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt  #needed for the accuracy/loss plots below

batch_size = 16
learning_rate = 0.0002
num_epochs = 10

train_transforms = transforms.Compose([
  transforms.RandomResizedCrop(224),
  transforms.RandomHorizontalFlip(),
  transforms.ToTensor(),
  transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
val_transforms = transforms.Compose([
  transforms.Resize(256),
  transforms.CenterCrop(224),  #deterministic crop for evaluation
  transforms.ToTensor(),
  transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])

train_dir = './VGGDataSet/train'
train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)
train_dataloader = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)

val_dir = './VGGDataSet/val'
val_datasets = datasets.ImageFolder(val_dir, transform=val_transforms)
val_dataloader = torch.utils.data.DataLoader(val_datasets, batch_size=batch_size, shuffle=True)

class VGGNet(nn.Module):
  def __init__(self, num_classes=3):
    super(VGGNet, self).__init__()
    net = models.vgg16(pretrained=True)
    net.classifier = nn.Sequential()
    self.features = net
    self.classifier = nn.Sequential(
        nn.Linear(512 * 7 * 7, 512),
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(512, 128),
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(128, num_classes),
    )

  def forward(self, x):
    x = self.features(x)
    x = x.view(x.size(0), -1)
    x = self.classifier(x)
    return x

#-------------------- training ---------------------------------
model = VGGNet()
if torch.cuda.is_available():
  model.cuda()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
loss_func = nn.CrossEntropyLoss()

Loss_list = []
Accuracy_list = []

for epoch in range(num_epochs):
  print('epoch {}'.format(epoch + 1))
  # training-----------------------------
  model.train()
  train_loss = 0.
  train_acc = 0.
  for batch_x, batch_y in train_dataloader:
    if torch.cuda.is_available():
      batch_x, batch_y = batch_x.cuda(), batch_y.cuda()
    out = model(batch_x)
    loss = loss_func(out, batch_y)
    train_loss += loss.item()
    pred = torch.max(out, 1)[1]
    train_acc += (pred == batch_y).sum().item()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
  print('Train Loss: {:.6f}, Acc: {:.6f}'.format(
    train_loss / len(train_datasets), train_acc / len(train_datasets)))

  # evaluation--------------------------------
  model.eval()
  eval_loss = 0.
  eval_acc = 0.
  with torch.no_grad():  #no gradients needed for evaluation
    for batch_x, batch_y in val_dataloader:
      if torch.cuda.is_available():
        batch_x, batch_y = batch_x.cuda(), batch_y.cuda()
      out = model(batch_x)
      loss = loss_func(out, batch_y)
      eval_loss += loss.item()
      pred = torch.max(out, 1)[1]
      eval_acc += (pred == batch_y).sum().item()
  print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
    eval_loss / len(val_datasets), eval_acc / len(val_datasets)))

  Loss_list.append(eval_loss / len(val_datasets))
  Accuracy_list.append(100 * eval_acc / len(val_datasets))

x1 = range(num_epochs)
x2 = range(num_epochs)
y1 = Accuracy_list
y2 = Loss_list
plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'o-')
plt.title('Test accuracy vs. epochs')
plt.ylabel('Test accuracy')
plt.subplot(2, 1, 2)
plt.plot(x2, y2, '.-')
plt.title('Test loss vs. epochs')
plt.xlabel('epoch')
plt.ylabel('Test loss')
plt.show()
# plt.savefig("accuracy_loss.jpg")
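
The script plots the curves but never saves the fine-tuned weights; a common final step (the filename here is a hypothetical choice, not from the original) would be:

torch.save(model.state_dict(), "vgg16_finetuned.pth")  #persist the fine-tuned weights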

That is everything on training VGG16 on CIFAR10 with Keras; I hope it gives you a useful reference.
