
A Keras-Based Convolutional Neural Network: Multi-Class Classification with 2D Convolutions

Building the Network

from tensorflow.keras import layers, models

model = models.Sequential()  # define the model
# First conv layer: set the activation function and the input image size
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(300, 300, 3)))  # -> 298x298x32
#model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))  # alternative for 32x32 inputs such as CIFAR-10
model.add(layers.MaxPooling2D((2, 2)))  # max pooling -> 149x149x32
# Later conv layers only adjust the number of filters; they take their shape
# directly from the previous layer, so no input size needs to be set
model.add(layers.Conv2D(64, (3, 3), activation='relu'))   # -> 147x147x64
model.add(layers.MaxPooling2D((2, 2)))  # -> 73x73x64
model.add(layers.Conv2D(128, (3, 3), activation='relu'))  # -> 71x71x128
#one more conv/pool block could be added here
model.add(layers.MaxPooling2D((2, 2)))  # -> 35x35x128
model.add(layers.Conv2D(256, (2, 2), activation='relu'))  # -> 34x34x256

model.add(layers.Flatten())  # flatten the feature maps into one dimension
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(100, activation='softmax'))   # map to 100 output classes
model.summary()   # print the model architecture
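For reference, each 'valid' convolution shrinks the spatial size by kernel_size - 1 and each 2x2 max pooling halves it (rounding down). A minimal sketch, in plain Python, to verify the shape arithmetic in the comments above:

def after_conv(size, kernel):      # 'valid' padding, stride 1
    return size - kernel + 1

def after_pool(size, pool=2):      # pooling with stride = pool size
    return size // pool

size = 300
size = after_pool(after_conv(size, 3))   # 149
size = after_pool(after_conv(size, 3))   # 73
size = after_pool(after_conv(size, 3))   # 35
size = after_conv(size, 2)               # 34
print(size * size * 256)                 # 295936 units feeding the Flatten layer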
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# train_generator / validation_generator
train_dir = ''
validation_dir = ''
# Binary classification: each directory holds two subdirectories, one per class
# (for the 100-class softmax model above, 100 class subdirectories with
# class_mode='sparse' would be the matching setup)

train_datagen = ImageDataGenerator(rescale=1./255)  # rescale pixel values by 1/255, into [0, 1]
test_datagen = ImageDataGenerator(rescale=1./255)
# Generate batches of preprocessed data from the given directories
# target_size: the size every image is resized to
# Each batch is a 20x300x300x3 tensor plus binary labels (shape (20,))
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(300, 300),
                                                    batch_size=20,
                                                    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        target_size=(300, 300),
                                                        batch_size=20,
                                                        class_mode='binary')

# There are, of course, simpler ways to load data (see below)
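For reference, flow_from_directory infers one class per subdirectory, with labels assigned in alphabetical order. A sketch of the expected layout, with hypothetical directory and file names:

train_dir/
    cats/          # hypothetical class name -> label 0
        cat001.jpg
        cat002.jpg
    dogs/          # hypothetical class name -> label 1
        dog001.jpg
        dog002.jpg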
# Loading a dataset
from tensorflow.keras import datasets

# Use a built-in dataset; it comes already curated, so getting training
# running is much quicker
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()

train_images, test_images = train_images / 255.0, test_images / 255.0  # map pixel values into [0, 1]
# Note: CIFAR-10 images are 32x32x3, so use the commented-out 32x32 first
# layer above, and compile the model (see below) before fitting
model.fit(train_images, train_labels, epochs=100)  # train for the given number of epochs
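A quick sanity check on the loaded arrays; the shapes below are CIFAR-10's documented sizes:

print(train_images.shape)   # (50000, 32, 32, 3)
print(train_labels.shape)   # (50000, 1), integer class ids 0-9
print(test_images.shape)    # (10000, 32, 32, 3)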
# Load previously saved weights
model.load_weights("./cats_and_dogs_small_1.h5")
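load_weights only restores parameters into an already-built model with a matching architecture. To restore architecture and weights together, Keras also provides load_model; a minimal sketch, using the file saved later in this article:

from tensorflow.keras.models import load_model

# Restores architecture, weights, and (if saved) optimizer state in one call
model = load_model('vin.h5')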
model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
'''
The optimizer is adam, in practice a variant of gradient descent.
The loss is defined as (sparse categorical) cross-entropy.
'''

# Note: fit_generator is deprecated in newer versions of Keras, where
# model.fit accepts generators directly
history = model.fit_generator(train_generator,
                              steps_per_epoch=100,
                              epochs=10,
                              validation_data=validation_generator,
                              validation_steps=50)
model.save('vin.h5')
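The History object returned by training records per-epoch metrics, which is handy for spotting overfitting. A hedged sketch for plotting them (assumes matplotlib is installed; older Keras versions use the keys 'acc'/'val_acc' instead of 'accuracy'/'val_accuracy'):

import matplotlib.pyplot as plt

plt.plot(history.history['accuracy'], label='train acc')      # per-epoch training accuracy
plt.plot(history.history['val_accuracy'], label='val acc')    # per-epoch validation accuracy
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()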

Evaluation

score = model.evaluate_generator(validation_generator, steps=1)
print("Sample accuracy, %s: %.2f%%" % (model.metrics_names[1], score[1] * 100))

######################################################################
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(test_acc)
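In newer versions of tf.keras, evaluate_generator is likewise deprecated and model.evaluate accepts generators directly; a minimal equivalent:

loss, acc = model.evaluate(validation_generator, steps=1)
print("validation accuracy: %.2f%%" % (acc * 100))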

Complete code for training a binary classification problem with the VGG16 network:

import numpy as np
from keras import Model
from keras.layers import Conv2D, Dense, Flatten, Input, MaxPooling2D
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.data_utils import get_file  # needed if restoring the pretrained weights below
#WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
#WEIGHTS_PATH = "./vgg16_weights_tf_dim_ordering_tf_kernels.h5"


def VGG16(num_classes):
    image_input = Input(shape = (300,300,3))

    # First convolutional block
    # 300,300,3 -> 150,150,64
    x = Conv2D(64,(3,3),activation = 'relu',padding = 'same',name = 'block1_conv1')(image_input)
    x = Conv2D(64,(3,3),activation = 'relu',padding = 'same', name = 'block1_conv2')(x)
    x = MaxPooling2D((2,2), strides = (2,2), name = 'block1_pool')(x)

    # Second convolutional block
    # 150,150,64 -> 75,75,128
    x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv1')(x)
    x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv2')(x)
    x = MaxPooling2D((2,2),strides = (2,2),name = 'block2_pool')(x)

    # Third convolutional block
    # 75,75,128 -> 37,37,256
    x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv1')(x)
    x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv2')(x)
    x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv3')(x)
    x = MaxPooling2D((2,2),strides = (2,2),name = 'block3_pool')(x)

    # Fourth convolutional block
    # 37,37,256 -> 18,18,512
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv1')(x)
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv2')(x)
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv3')(x)
    x = MaxPooling2D((2,2),strides = (2,2),name = 'block4_pool')(x)

    # Fifth convolutional block
    # 18,18,512 -> 9,9,512
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block5_conv1')(x)
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block5_conv2')(x)
    x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block5_conv3')(x)    
    x = MaxPooling2D((2,2),strides = (2,2),name = 'block5_pool')(x)

    # Classification head
    # 9,9,512 -> 41472 -> 4096 -> 4096 -> num_classes
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(num_classes, activation='softmax', name='predictions')(x)
    
    model = Model(image_input,x,name = 'vgg16')
    return model

if __name__ == '__main__':
    model = VGG16(2)
    #weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models')
    #model.load_weights(WEIGHTS_PATH)
    train_dir = 'D:/program/keras/vin_'
    validation_dir = 'D:/program/keras/vin_'

    train_datagen = ImageDataGenerator(rescale=1. / 255)  # rescale pixel values by 1/255, into [0, 1]
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    # Generate batches of preprocessed data from the given directories
    # target_size: the size every image is resized to
    # Each batch is a 20x300x300x3 tensor plus binary labels (shape (20,))
    train_generator = train_datagen.flow_from_directory(train_dir,
                                                        target_size=(300, 300),
                                                        batch_size=20,
                                                        class_mode='binary')
    validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                            target_size=(300, 300),
                                                            batch_size=20,
                                                            class_mode='binary')
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    '''
    The optimizer is adam, in practice a variant of gradient descent.
    The loss is defined as (sparse categorical) cross-entropy.
    '''

    history = model.fit_generator(train_generator,
                                  steps_per_epoch=100,
                                  epochs=10,
                                  validation_data=validation_generator,
                                  validation_steps=50)
    model.save('vin.h5')


    # Test on a single image; note that decode_predictions is specific to
    # ImageNet-pretrained models and does not apply to this 2-class model
    # (see the sketch after this listing)
    #img_path = 'elephant.jpg'
    #img = image.load_img(img_path, target_size=(300, 300))
    #x = image.img_to_array(img)
    #x = np.expand_dims(x, axis=0)
    #x = x / 255.0  # match the 1/255 rescaling used in training
    #print('Input image shape:', x.shape)

    #preds = model.predict(x)
    #print('Predicted class index:', np.argmax(preds[0]))
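To run the commented test above against this 2-class model, the predicted index has to be mapped back through the generator's class_indices rather than decode_predictions. A hedged sketch, with a hypothetical image file name:

import numpy as np
from keras.preprocessing import image

img = image.load_img('sample.jpg', target_size=(300, 300))      # hypothetical file
x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)     # rescale + batch dim

preds = model.predict(x)                                        # shape (1, 2)
idx_to_class = {v: k for k, v in train_generator.class_indices.items()}
print('Predicted:', idx_to_class[int(np.argmax(preds[0]))])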
