Implementing VGG16 on the CIFAR10 dataset with Keras


Posted in Python on July 07, 2020

Without further ado, let's go straight to the code!

import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.optimizers import SGD
from keras import regularizers
 
# load the data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255.0  # scale pixels to [0, 1]
x_test = x_test.astype('float32') / 255.0
y_train = keras.utils.to_categorical(y_train, 10)  # one-hot encode the labels
y_test = keras.utils.to_categorical(y_test, 10)
 
weight_decay = 0.0005
nb_epoch=100
batch_size=32
 
#layer1 32*32*3
model = Sequential()
model.add(Conv2D(64, (3, 3), padding='same',
input_shape=(32,32,3),kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
#layer2 32*32*64
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer3 16*16*64
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer4 16*16*128
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer5 8*8*128
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer6 8*8*256
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer7 8*8*256
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer8 4*4*256
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer9 4*4*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer10 4*4*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer11 2*2*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer12 2*2*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer13 2*2*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
#layer14 1*1*512
model.add(Flatten())
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
#layer15 512
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
#layer16 512
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
#output 10
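
Optionally, model.summary() prints every layer with its output shape, which makes it easy to verify the 1*1*512 bottleneck before the dense head:

model.summary()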
 
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
 
model.fit(x_train,y_train,epochs=nb_epoch, batch_size=batch_size,
       validation_split=0.1, verbose=1)
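
The ImageDataGenerator imported above is never actually used. As a minimal sketch (my addition, with illustrative parameter values), it could supply on-the-fly augmentation in place of the plain model.fit call:

datagen = ImageDataGenerator(
  width_shift_range=0.1,   # random horizontal shifts
  height_shift_range=0.1,  # random vertical shifts
  horizontal_flip=True)    # random mirroring
# standalone Keras 2.x uses fit_generator; newer tf.keras also accepts
# generators directly in model.fit
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
      steps_per_epoch=len(x_train) // batch_size,
      epochs=nb_epoch, validation_data=(x_test, y_test), verbose=1)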

Bonus: training your own dataset on VGG16 with PyTorch, step by step

Preparing and loading the dataset: ImageFolder

In many machine learning and deep learning tasks we have to supply our own images. Unlike MNIST or CIFAR10, which come preprocessed, our data starts out as raw pictures. Take cat-vs-dog classification as an example: under a data folder there are two subfolders, train and val. Inside train are cat and dog folders holding the image files, and val mirrors train. With that, the dataset is ready (see the sketch below).
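
A sketch of that layout (file names here are only illustrative):

data/
  train/
    cat/  cat001.jpg, cat002.jpg, ...
    dog/  dog001.jpg, dog002.jpg, ...
  val/
    cat/  ...
    dog/  ...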


ImageFolder splits a dataset into classes using the directory names as labels; see the torchvision.datasets.ImageFolder entry in the PyTorch documentation for the full description.


#transforms for the training set
train_transforms = transforms.Compose([
  transforms.RandomResizedCrop(224), #random scaled crop to 224x224
  transforms.RandomHorizontalFlip(), #random horizontal flip
  transforms.ToTensor(),   #convert to a tensor
  transforms.Normalize((.5, .5, .5), (.5, .5, .5)) #normalize
])
#transforms for the validation set
val_transforms = transforms.Compose([
  transforms.Resize(256),
  transforms.RandomResizedCrop(224),
  transforms.ToTensor(),
  transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
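
Note that RandomResizedCrop on the validation set makes evaluation non-deterministic; a common alternative (my suggestion, not the original author's choice) is a center crop:

val_transforms = transforms.Compose([
  transforms.Resize(256),
  transforms.CenterCrop(224),  # deterministic crop for evaluation
  transforms.ToTensor(),
  transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])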

train_dir = "G:/data/train"      #训练集路径
#定义数据集
train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)
#加载数据集
train_dataloader = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)

val_dir = "G:/datat/val" 
val_datasets = datasets.ImageFolder(val_dir, transform=val_transforms)
val_dataloader = torch.utils.data.DataLoader(val_datasets, batch_size=batch_size, shuffle=True)
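
A quick sanity check (my addition) that ImageFolder really did infer the labels from the folder names:

print(train_datasets.classes)      # e.g. ['cat', 'dog']
print(train_datasets.class_to_idx) # e.g. {'cat': 0, 'dog': 1}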

Transfer learning, with VGG16 as the example

Here is the transfer-learning implementation:

class VGGNet(nn.Module):
  def __init__(self, num_classes=2):  #num_classes is 2 here, for binary classification
    super(VGGNet, self).__init__()
    net = models.vgg16(pretrained=True)  #load the pretrained VGG16 weights
    net.classifier = nn.Sequential() #empty the classifier; we define our own below
    self.features = net #keep VGG16's feature layers
    self.classifier = nn.Sequential(  #our own classifier
        nn.Linear(512 * 7 * 7, 512), #512 * 7 * 7 is fixed by the VGG16 backbone; the 512 output units can be tuned
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(512, 128),
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(128, num_classes),
    )

  def forward(self, x):
    x = self.features(x)
    x = x.view(x.size(0), -1)
    x = self.classifier(x)
    return x
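
Why 512 * 7 * 7? VGG16's five max-pool stages shrink a 224x224 input to a 7x7 feature map with 512 channels. A quick throwaway shape check (my addition, not part of the original):

dummy = torch.randn(1, 3, 224, 224)
feats = models.vgg16(pretrained=True).features(dummy)  # conv layers only
print(feats.shape)  # torch.Size([1, 512, 7, 7]) -> 512 * 7 * 7 = 25088 inputs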

The complete code is as follows:

from __future__ import print_function, division

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torchvision import models
import matplotlib.pyplot as plt

batch_size = 16
learning_rate = 0.0002
num_epochs = 100

train_transforms = transforms.Compose([
  transforms.RandomResizedCrop(224),
  transforms.RandomHorizontalFlip(),
  transforms.ToTensor(),
  transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
val_transforms = transforms.Compose([
  transforms.Resize(256),
  transforms.RandomResizedCrop(224),
  transforms.ToTensor(),
  transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])

train_dir = './VGGDataSet/train'
train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)
train_dataloader = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)

val_dir = './VGGDataSet/val'
val_datasets = datasets.ImageFolder(val_dir, transform=val_transforms)
val_dataloader = torch.utils.data.DataLoader(val_datasets, batch_size=batch_size, shuffle=True)

class VGGNet(nn.Module):
  def __init__(self, num_classes=3):
    super(VGGNet, self).__init__()
    net = models.vgg16(pretrained=True)
    net.classifier = nn.Sequential()
    self.features = net
    self.classifier = nn.Sequential(
        nn.Linear(512 * 7 * 7, 512),
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(512, 128),
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(128, num_classes),
    )

  def forward(self, x):
    x = self.features(x)
    x = x.view(x.size(0), -1)
    x = self.classifier(x)
    return x

#-------------------- training ---------------------------------
model = VGGNet()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
# the whole network is fine-tuned below; to train only the new
# classifier, see the freezing sketch after this block
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
loss_func = nn.CrossEntropyLoss()
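
For the classic transfer-learning setup where the pretrained backbone stays frozen and only the new head learns (a variant; the original code fine-tunes everything), a sketch:

for p in model.features.parameters():
  p.requires_grad = False  # freeze the pretrained VGG16 backbone
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)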

Loss_list = []
Accuracy_list = []

for epoch in range(num_epochs):
  print('epoch {}'.format(epoch + 1))
  # training-----------------------------
  model.train()  # put BatchNorm/Dropout back into training mode
  train_loss = 0.
  train_acc = 0.
  for batch_x, batch_y in train_dataloader:
    batch_x, batch_y = batch_x.to(device), batch_y.to(device)
    out = model(batch_x)
    loss = loss_func(out, batch_y)
    train_loss += loss.item()
    pred = torch.max(out, 1)[1]
    train_acc += (pred == batch_y).sum().item()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
  print('Train Loss: {:.6f}, Acc: {:.6f}'.format(
    train_loss / len(train_datasets), train_acc / len(train_datasets)))

  # evaluation--------------------------------
  model.eval()  # BatchNorm/Dropout in eval mode
  eval_loss = 0.
  eval_acc = 0.
  with torch.no_grad():  # no gradients needed during evaluation
    for batch_x, batch_y in val_dataloader:
      batch_x, batch_y = batch_x.to(device), batch_y.to(device)
      out = model(batch_x)
      loss = loss_func(out, batch_y)
      eval_loss += loss.item()
      pred = torch.max(out, 1)[1]
      eval_acc += (pred == batch_y).sum().item()
  print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
    eval_loss / len(val_datasets), eval_acc / len(val_datasets)))

  Loss_list.append(eval_loss / len(val_datasets))
  Accuracy_list.append(100 * eval_acc / len(val_datasets))

x1 = range(num_epochs)
x2 = range(num_epochs)
y1 = Accuracy_list
y2 = Loss_list
plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'o-')
plt.title('Test accuracy vs. epochs')
plt.ylabel('Test accuracy')
plt.subplot(2, 1, 2)
plt.plot(x2, y2, '.-')
plt.xlabel('Test loss vs. epochs')
plt.ylabel('Test loss')
plt.show()
# plt.savefig("accuracy_loss.jpg")

That is everything in this piece on implementing VGG16 on the CIFAR10 dataset with Keras. I hope it gives you a useful reference, and please keep supporting 三水点靠木.
