Ways to implement multiple classification networks in Keras


Posted in Python on June 11, 2020

Keras is probably the simplest deep learning framework there is; getting started with it is very easy.

This is a quick note on implementing several classification networks in Keras, such as AlexNet, VGG, and ResNet.

The Kaggle Dogs vs. Cats data is used as the dataset.

Since AlexNet uses LRN (local response normalization) and Keras has no built-in layer for it, BatchNormalization is used here instead.
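
If the original LRN behavior is ever needed, it can be approximated with a Lambda layer wrapping TensorFlow's LRN op. This is only a sketch of my own (it assumes the TensorFlow backend and uses the AlexNet paper's parameters); the code in this post sticks with BatchNormalization.

import tensorflow as tf
from keras.layers import Lambda

# Hypothetical LRN replacement layer (assumes the TensorFlow backend); not used in this post.
def lrn_layer():
    return Lambda(lambda t: tf.nn.local_response_normalization(
        t, depth_radius=5, bias=2.0, alpha=1e-4, beta=0.75))

# usage inside a Sequential model: model.add(lrn_layer())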

First, create a model.py file that holds the AlexNet and VGG models; the training script can then simply import it.

#coding=utf-8
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, MaxPool2D, ZeroPadding2D, BatchNormalization
from keras.layers.advanced_activations import LeakyReLU, PReLU
 
def keras_batchnormalization_relu(layer):
 BN = BatchNormalization()(layer)
 ac = PReLU()(BN)
 return ac
 
def AlexNet(resize=227, classes=2):
 model = Sequential()
 # Block 1
 model.add(Conv2D(filters=96, kernel_size=(11, 11),
      strides=(4, 4), padding='valid',
      input_shape=(resize, resize, 3),
      activation='relu'))
 model.add(BatchNormalization())
 model.add(MaxPooling2D(pool_size=(3, 3),
       strides=(2, 2),
       padding='valid'))
 # Block 2
 model.add(Conv2D(filters=256, kernel_size=(5, 5),
      strides=(1, 1), padding='same',
      activation='relu'))
 model.add(BatchNormalization())
 model.add(MaxPooling2D(pool_size=(3, 3),
       strides=(2, 2),
       padding='valid'))
 # Block 3
 model.add(Conv2D(filters=384, kernel_size=(3, 3),
      strides=(1, 1), padding='same',
      activation='relu'))
 model.add(Conv2D(filters=384, kernel_size=(3, 3),
      strides=(1, 1), padding='same',
      activation='relu'))
 model.add(Conv2D(filters=256, kernel_size=(3, 3),
      strides=(1, 1), padding='same',
      activation='relu'))
 model.add(MaxPooling2D(pool_size=(3, 3),
       strides=(2, 2), padding='valid'))
 # Block 4: fully connected layers
 model.add(Flatten())
 model.add(Dense(4096, activation='relu'))
 model.add(Dropout(0.5))
 
 model.add(Dense(4096, activation='relu'))
 model.add(Dropout(0.5))
 
 model.add(Dense(1000, activation='relu'))
 model.add(Dropout(0.5))
 
 # Output Layer
 model.add(Dense(classes,activation='softmax'))
 # model.add(Activation('softmax'))
 
 return model
 
def AlexNet2(inputs, classes=2, prob=0.5):
 '''
 My own function, trying out Keras's functional API style.
 :param inputs: the input tensor
 :param classes: number of classes
 :param prob: dropout probability
 :return: the model
 '''
 # Conv2D(32, (3, 3), dilation_rate=(2, 2), padding='same')(inputs)
 print("input shape:", inputs.shape)
 
 conv1 = Conv2D(filters=96, kernel_size=(11, 11), strides=(4, 4), padding='valid')(inputs)
 conv1 = keras_batchnormalization_relu(conv1)
 print("conv1 shape:", conv1.shape)
 pool1 = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(conv1)
 print("pool1 shape:", pool1.shape)
 
 conv2 = Conv2D(filters=256, kernel_size=(5, 5), padding='same')(pool1)
 conv2 = keras_batchnormalization_relu(conv2)
 print("conv2 shape:", conv2.shape)
 pool2 = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(conv2)
 print("pool2 shape:", pool2.shape)
 
 conv3 = Conv2D(filters=384, kernel_size=(3, 3), padding='same')(pool2)
 conv3 = PReLU()(conv3)
 print("conv3 shape:", conv3.shape)
 
 conv4 = Conv2D(filters=384, kernel_size=(3, 3), padding='same')(conv3)
 conv4 = PReLU()(conv4)
 print("conv4 shape:", conv4.shape)
 
 conv5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(conv4)
 conv5 = PReLU()(conv5)
 print("conv5 shape:", conv5.shape)
 
 pool3 = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(conv5)
 print("pool3 shape:", pool3.shape)
 
 dense1 = Flatten()(pool3)
 dense1 = Dense(4096, activation='relu')(dense1)
 print("dense1 shape:", dense1.shape)
 dense1 = Dropout(prob)(dense1)
 # print "dense1 shape:", dense1
 
 dense2 = Dense(4096, activation='relu')(dense1)
 print("dense2 shape:", dense2.shape)
 dense2 = Dropout(prob)(dense2)
 # print "dense2 shape:", dense2
 
 predict = Dense(classes, activation='softmax')(dense2)
 
 model = Model(inputs=inputs, outputs=predict)
 return model
 
def vgg13(resize=224, classes=2, prob=0.5):
 model = Sequential()
 model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(resize, resize, 3), padding='same', activation='relu',
      kernel_initializer='uniform'))
 model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Flatten())
 model.add(Dense(4096, activation='relu'))
 model.add(Dropout(prob))
 model.add(Dense(4096, activation='relu'))
 model.add(Dropout(prob))
 model.add(Dense(classes, activation='softmax'))
 return model
 
def vgg16(resize=224, classes=2, prob=0.5):
 model = Sequential()
 model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(resize, resize, 3), padding='same', activation='relu',
      kernel_initializer='uniform'))
 model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Flatten())
 model.add(Dense(4096, activation='relu'))
 model.add(Dropout(prob))
 model.add(Dense(4096, activation='relu'))
 model.add(Dropout(prob))
 model.add(Dense(classes, activation='softmax'))
 return model
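
With model.py saved, a quick sanity check could look like the following (my own sketch, not from the original post): it just builds each network and prints the layer summary.

# Hypothetical quick check of model.py
from model import AlexNet, vgg16

net = AlexNet(resize=227, classes=2)
net.summary()   # prints layer output shapes and parameter counts

net = vgg16(resize=224, classes=2)
net.summary()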

Next, create a train.py file that loads the data and trains the model.

#coding=utf-8
import keras
import cv2
import os
import numpy as np
import model
# import modelResNet  # ResNet implementation in a separate file (not shown in this post)
import tensorflow as tf
from keras.layers import Input, Dense
from keras.preprocessing.image import ImageDataGenerator
 
resize = 224
batch_size = 128
path = "/home/hjxu/PycharmProjects/01_cats_vs_dogs/data"
 
trainDirectory = '/home/hjxu/PycharmProjects/01_cats_vs_dogs/data/train/'
def load_data():
 imgs = os.listdir(path + "/train/")
 num = len(imgs)
 train_data = np.empty((5000, resize, resize, 3), dtype="int32")
 train_label = np.empty((5000, ), dtype="int32")
 test_data = np.empty((5000, resize, resize, 3), dtype="int32")
 test_label = np.empty((5000, ), dtype="int32")
 for i in range(5000):
  if i % 2:
   train_data[i] = cv2.resize(cv2.imread(path + '/train/' + 'dog.' + str(i) + '.jpg'), (resize, resize))
   train_label[i] = 1
  else:
   train_data[i] = cv2.resize(cv2.imread(path + '/train/' + 'cat.' + str(i) + '.jpg'), (resize, resize))
   train_label[i] = 0
 for i in range(5000, 10000):
  if i % 2:
   test_data[i-5000] = cv2.resize(cv2.imread(path + '/train/' + 'dog.' + str(i) + '.jpg'), (resize, resize))
   test_label[i-5000] = 1
  else:
   test_data[i-5000] = cv2.resize(cv2.imread(path + '/train/' + 'cat.' + str(i) + '.jpg'), (resize, resize))
   test_label[i-5000] = 0
 return train_data, train_label, test_data, test_label
 
def main():
 
 train_data, train_label, test_data, test_label = load_data()
 train_data, test_data = train_data.astype('float32'), test_data.astype('float32')
 train_data, test_data = train_data/255, test_data/255
 
 train_label = keras.utils.to_categorical(train_label, 2)
 '''
 One-hot encoding: when the categorical_crossentropy loss is used,
 the labels must be converted with to_categorical.
 '''
 test_label = keras.utils.to_categorical(test_label, 2)
 
 inputs = Input(shape=(224, 224, 3))
 
 modelAlex = model.AlexNet2(inputs, classes=2)
 '''
 Build the model defined in model.py.
 '''
 modelAlex.compile(loss='categorical_crossentropy',
     optimizer='sgd',
     metrics=['accuracy'])
 '''
 def compile(self, optimizer, loss, metrics=None, loss_weights=None,
     sample_weight_mode=None, **kwargs):
  optimizer: the optimizer, given as the name of a predefined optimizer or an optimizer object
  loss: the loss function, given as the name of a predefined loss or a custom objective function
  metrics: a list of metrics to evaluate during training and testing; a typical usage is metrics=['accuracy']
  sample_weight_mode: set to "temporal" if samples should be weighted per timestep
  To use a custom metric:
   def mean_pred(y_true, y_pred):
    return K.mean(y_pred)
   model.compile(loss='binary_crossentropy', metrics=['accuracy', mean_pred], ...)
  Custom loss functions work the same way. The losses built into Keras include:
   mean_squared_error
   mean_absolute_error
   mean_absolute_percentage_error
   mean_squared_logarithmic_error
   squared_hinge
   hinge
   categorical_hinge
   logcosh
   categorical_crossentropy
   sparse_categorical_crossentropy
   binary_crossentropy
   kullback_leibler_divergence
   poisson
   cosine_proximity
 '''
 modelAlex.summary()
 '''
 Print a summary of the model.
 '''
 modelAlex.fit(train_data, train_label,
    batch_size=batch_size,
    epochs=50,
    validation_split=0.2,
    shuffle=True)
 '''
 def fit(self, x=None,   # x: input data
   y=None,     # y: labels (Numpy array)
   batch_size=32,   # batch_size: number of samples per gradient update
   epochs=1,    # epochs: number of passes over the full training set
   verbose=1,    # 0 = silent, 1 = progress bar, 2 = one line per epoch
   callbacks=None,   # list of callback functions
   validation_split=0.,  # float in [0, 1]: fraction of the training data held out for validation (never trained on)
   validation_data=None, # explicit validation set as an (x, y) tuple
   shuffle=True,   # whether to shuffle; "batch" is a special case for HDF5 data, shuffling within each batch
   class_weight=None,  # dict mapping classes to weights, used to rescale the loss during training
   sample_weight=None,  # Numpy array of per-sample weights, used to rescale the loss during training
   initial_epoch=0,   # epoch at which to start, useful for resuming a previous run
   **kwargs):
 Returns a History object; History.history records how the loss and other metrics change with each epoch.
 '''
 scores = modelAlex.evaluate(train_data, train_label, verbose=1)
 print(scores)
 
 scores = modelAlex.evaluate(test_data, test_label, verbose=1)
 print(scores)
 modelAlex.save('my_model_weights2.h5')
 
def main2():
 train_datagen = ImageDataGenerator(rescale=1. / 255,
          shear_range=0.2,
          zoom_range=0.2,
          horizontal_flip=True)
 test_datagen = ImageDataGenerator(rescale=1. / 255)
 train_generator = train_datagen.flow_from_directory(trainDirectory,
              target_size=(224, 224),
              batch_size=32,
              class_mode='binary')
 
 validation_generator = test_datagen.flow_from_directory(trainDirectory,
               target_size=(224, 224),
               batch_size=32,
               class_mode='binary')
 
 inputs = Input(shape=(224, 224, 3))
 # modelAlex = model.AlexNet2(inputs, classes=2)
 modelAlex = model.vgg13(resize=224, classes=2, prob=0.5)
 # modelAlex = modelResNet.ResNet50(shape=224, classes=2)
 modelAlex.compile(loss='sparse_categorical_crossentropy',
      optimizer='sgd',
      metrics=['accuracy'])
 modelAlex.summary()
 
 modelAlex.fit_generator(train_generator,
      steps_per_epoch=1000,
      epochs=60,
      validation_data=validation_generator,
      validation_steps=200)
 
 modelAlex.save('model32.hdf5')
 #
if __name__ == "__main__":
 '''
 If the data is organized like the raw Dogs vs. Cats download, with all images in a single folder, use main().
 If the data is split into separate cat and dog subfolders, use main2().
 '''
 main2()
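
As the fit() notes above mention, fit() returns a History object. Here is a minimal sketch (my own addition, assuming matplotlib is installed) of capturing it in main() and plotting the loss curves:

import matplotlib.pyplot as plt

history = modelAlex.fit(train_data, train_label,
      batch_size=batch_size,
      epochs=50,
      validation_split=0.2,
      shuffle=True)

# History.history maps each metric name to its per-epoch values
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.legend()
plt.savefig('loss_curve.png')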

Once the model has been trained, how do we test it on a single image?

Create a testOneImg.py script with the following code:

#coding=utf-8
from keras.preprocessing.image import load_img # load_img loads an image from disk
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
import numpy as np
import cv2
import model
from keras.models import Sequential
 
pats = '/home/hjxu/tf_study/catVsDogsWithKeras/my_model_weights.h5'
modelAlex = model.AlexNet(resize=224, classes=2)
# AlexModel = model.AlexNet(weightPath='/home/hjxu/tf_study/catVsDogsWithKeras/my_model_weights.h5')
 
modelAlex.load_weights(pats)
#
img = cv2.imread('/home/hjxu/tf_study/catVsDogsWithKeras/111.jpg')
img = cv2.resize(img, (224, 224))
x = img_to_array(img) / 255.0 # 3-D array of shape (224, 224, 3)
 
x = np.expand_dims(x, axis=0) # 4-D array of shape (1, 224, 224, 3); Keras expects a batch dimension, so one is added
# x = preprocess_input(x) # optional preprocessing
print(x.shape)
y_pred = modelAlex.predict(x) # predicted probability for each class
# print("test image:", decode_predictions(y_pred)) # decode_predictions would show the top-5 (class, description, probability), but it only applies to 1000-class ImageNet models
print(y_pred)
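
y_pred is a 1x2 array of softmax probabilities. A small sketch (my own, not from the post) of turning it into a readable label, assuming the same class order as load_data() above (0 = cat, 1 = dog):

class_names = ['cat', 'dog']          # index 0 = cat, 1 = dog, matching load_data()
pred_index = int(np.argmax(y_pred, axis=1)[0]) # class with the highest probability
print("predicted:", class_names[pred_index], "probability:", float(y_pred[0][pred_index]))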

I have to say, Keras really is simple and convenient.

Additional notes: understanding residual connections and weight sharing in the Keras functional API

1. Residual connections

# coding: utf-8
"""Residual connection:
  A common graph-like network pattern that addresses two problems shared by all
  large-scale deep learning models:
   1. vanishing gradients
   2. representational bottlenecks
  (Adding residual connections to any network deeper than about 10 layers is likely to help.)

  A residual connection feeds the output of an earlier layer into a later layer,
  effectively creating a shortcut in an otherwise sequential network.
"""
from keras import layers

x = ...
y = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)

y = layers.add([y, x]) # add the original x back to the output features

# --------------- if the feature-map sizes differ, use a linear residual connection ---------------
x = ...
y = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
y = layers.MaxPooling2D(2, strides=2)(y)

residual = layers.Conv2D(128, 1, strides=2, padding='same')(x) # a 1x1 convolution that linearly downsamples x to the same shape as y

y = layers.add([y, residual]) # add the projected x to the output features
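
To make the snippet above runnable (x = ... is only a placeholder), here is a small self-contained sketch of my own that wires the downsampling residual block into a tiny model on 32x32 RGB inputs:

from keras import layers, Input
from keras.models import Model

inp = Input(shape=(32, 32, 3))
y = layers.Conv2D(128, 3, activation='relu', padding='same')(inp)
y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
y = layers.MaxPooling2D(2, strides=2)(y)

residual = layers.Conv2D(128, 1, strides=2, padding='same')(inp) # project the input to y's shape (16, 16, 128)
y = layers.add([y, residual])

out = layers.GlobalAveragePooling2D()(y)
out = layers.Dense(1, activation='sigmoid')(out)
mini_model = Model(inp, out)
mini_model.summary() # the add layer merges two (16, 16, 128) tensors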

2. Weight sharing

That is, calling the same layer instance multiple times.

# coding: utf-8
"""Functional API: weight sharing
  The same layer instance can be reused; this reuses that layer's weights rather than defining a new layer each time."""
from keras import layers
from keras import Input
from keras.models import Model


lstm = layers.LSTM(32) # instantiate one LSTM layer; it will be called several times below

# ------------------------ left branch ------------------------
left_input = Input(shape=(None, 128))
left_output = lstm(left_input) # call the lstm instance

# ------------------------ right branch ------------------------
right_input = Input(shape=(None, 128))
right_output = lstm(right_input) # call the same lstm instance again (shared weights)

# ------------------------ concatenate the two branches ------------------------
merged = layers.concatenate([left_output, right_output], axis=-1)

# ------------------------ build a classifier on top ------------------------
predictions = layers.Dense(1, activation='sigmoid')(merged)

# ------------------------ build the model and fit it ------------------------
model = Model([left_input, right_input], predictions)
model.fit([left_data, right_data], targets) # left_data, right_data, targets must be prepared elsewhere; see the sketch below
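
For completeness, a small sketch of my own (with made-up random data) showing how this shared-LSTM model could actually be compiled and fitted:

import numpy as np

# Hypothetical data: 1000 pairs of sequences, 20 timesteps of 128 features, binary targets
left_data = np.random.random((1000, 20, 128))
right_data = np.random.random((1000, 20, 128))
targets = np.random.randint(0, 2, size=(1000, 1))

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
model.fit([left_data, right_data], targets, epochs=2, batch_size=64)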

That is everything in this post on implementing multiple classification networks in Keras. I hope it serves as a useful reference, and thank you for your support.
