当前位置:   article > 正文

【Python深度学习】Tensorflow+CNN进行人脸识别实战(附源码和数据集)

tensorflow 实现人脸识别所有过程

需要源码和数据集请点赞关注收藏后评论区留言私信~~~

下面利用tensorflow平台进行人脸识别实战,使用的是Olivetti Faces人脸图像 部分数据集展示如下

 程序训练过程如下

 接下来训练CNN模型 可以看到训练进度和损失值变化

接下来展示人脸识别结果

 

程序会根据一张给定的图片,自动去图片集中寻找相似的人脸,如上图所示。

部分代码如下 需要全部源码和数据集请点赞关注收藏后评论区留言私信~~~

  1. from os import listdir
  2. import numpy as np
  3. from PIL import Image
  4. import cv2
  5. from tensorflow.keras.models import Sequential, load_model
  6. from tensorflow.keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten
  7. from sklearn.model_selection import train_test_split
  8. from tensorflow.python.keras.utils import np_utils
  9. # 读取人脸图片数据
  10. def img2vector(fileNamestr):
  11. # 创建向量
  12. returnVect = np.zeros((57,47))
  13. image = Image.open(fileNamestr).convert('L')
  14. img = np.asarray(image).reshape(57,47)
  15. return img
  16. # 制作人脸数据集
  17. def GetDataset(imgDataDir):
  18. print('| Step1 |: Get dataset...')
  19. imgDataDir='faces_4/'
  20. FileDir = listdir(imgDataDir)
  21. m = len(FileDir)
  22. imgarray=[]
  23. hwLabels=[]
  24. hwdata=[]
  25. # 逐个读取图片文件
  26. for i in range(m):
  27. # 提取子目录
  28. className=i
  29. subdirName='faces_4/'+str(FileDir[i])+'/'
  30. fileNames = listdir(subdirName)
  31. lenFiles=len(fileNames)
  32. # 提取文件名
  33. for j in range(lenFiles):
  34. fileNamestr = subdirName+fileNames[j]
  35. hwLabels.append(className)
  36. imgarray=img2vector(fileNamestr)
  37. hwdata.append(imgarray)
  38. hwdata = np.array(hwdata)
  39. return hwdata,hwLabels,6
  40. # CNN模型类
  41. class MyCNN(object):
  42. FILE_PATH = "face_recognition.h5" # 模型存储/读取目录
  43. picHeight = 57 # 模型的人脸图片长47,宽57
  44. picWidth = 47
  45. def __init__(self):
  46. self.model = None
  47. # 获取训练数据集
  48. def read_trainData(self, dataset):
  49. self.dataset = dataset
  50. # 建立Sequential模型,并赋予参数
  51. def build_model(self):
  52. print('| Step2 |: Init CNN model...')
  53. self.model = Sequential()
  54. print('self.dataset.X_train.shape[1:]',self.dataset.X_train.shape[1:])
  55. self.model.add( Convolution2D( filters=32,
  56. kernel_size=(5, 5),
  57. padding='same',
  58. #dim_ordering='th',
  59. input_shape=self.dataset.X_train.shape[1:]))
  60. self.model.add(Activation('relu'))
  61. self.model.add( MaxPooling2D(pool_size=(2, 2),
  62. strides=(2, 2),
  63. padding='same' ) )
  64. self.model.add(Convolution2D(filters=64,
  65. kernel_size=(5, 5),
  66. padding='same') )
  67. self.model.add(Activation('relu'))
  68. self.model.add(MaxPooling2D(pool_size=(2, 2),
  69. strides=(2, 2),
  70. padding='same') )
  71. self.model.add(Flatten())
  72. self.model.add(Dense(512))
  73. self.model.add(Activation('relu'))
  74. self.model.add(Dense(self.dataset.num_classes))
  75. self.model.add(Activation('softmax'))
  76. self.model.summary()
  77. # 模型训练
  78. def train_model(self):
  79. print('| Step3 |: Train CNN model...')
  80. self.model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
  81. # epochs:训练代次、batch_size:每次训练样本数
  82. self.model.fit(self.dataset.X_train, self.dataset.Y_train, epochs=10, batch_size=20)
  83. def evaluate_model(self):
  84. loss, accuracy = self.model.evaluate(self.dataset.X_test, self.dataset.Y_test)
  85. print('| Step4 |: Evaluate performance...')
  86. print('===================================')
  87. print('Loss Value is :', loss)
  88. print('Accuracy Value is :', accuracy)
  89. def save(self, file_path=FILE_PATH):
  90. print('| Step5 |: Save model...')
  91. self.model.save(file_path)
  92. print('Model ',file_path,'is succeesfuly saved.')
  93. # 建立一个用于存储和格式化读取训练数据的类
  94. class DataSet(object):
  95. def __init__(self, path):
  96. self.num_classes = None
  97. self.X_train = None
  98. self.X_test = None
  99. self.Y_train = None
  100. self.Y_test = None
  101. self.picWidth = 47
  102. self.picHeight = 57
  103. self.makeDataSet(path) # 在这个类初始化的过程中读取path下的训练数据
  104. def makeDataSet(self, path):
  105. # 根据指定路径读取出图片、标签和类别数
  106. imgs, labels, clasNum = GetDataset(path)
  107. # 将数据集打乱随机分组
  108. X_train, X_test, y_train, y_test = train_test_split(imgs, labels, test_size=0.2,random_state=1)
  109. # 重新格式化和标准化
  110. X_train = X_train.reshape(X_train.shape[0], 1, self.picHeight, self.picWidth) / 255.0
  111. X_test = X_test.reshape(X_test.shape[0], 1, self.picHeight, self.picWidth) / 255.0
  112. X_train = X_train.astype('float32')
  113. X_test = X_test.astype('float32')
  114. # 将labels转成 binary class matrices
  115. Y_train = np_utils.to_categorical(y_train, num_classes=clasNum)
  116. Y_test = np_utils.to_categorical(y_test, num_classes=clasNum)
  117. # 将格式化后的数据赋值给类的属性上
  118. self.X_train = X_train
  119. self.X_test = X_test
  120. self.Y_train = Y_train
  121. self.Y_test = Y_test
  122. self.num_classes = clasNum
  123. # 人脸图片目录
  124. dataset = DataSet('faces_4/')
  125. model = MyCNN()
  126. model.read_trainData(dataset)
  127. model.build_model()
  128. model.train_model()
  129. model.evaluate_model()
  130. model.save()

 创作不易 觉得有帮助请点赞关注收藏~~~

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/weixin_40725706/article/detail/726859
推荐阅读
相关标签
  

闽ICP备14008679号