
Sentiment Analysis Based on a Convolutional Neural Network (CNN)

1. Sentiment Analysis Based on a Convolutional Neural Network (CNN)
The project directory structure is shown below.
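The original figure is not reproduced here; the layout below is inferred from the paths referenced in the code and is only an approximation:

project/
├── images/                    # training image dataset (dataset_path)
├── models/
│   └── face_expression.hdf5   # trained model written by the training script
├── plots/
│   └── face_expression.png    # training-progress plot
├── tests/
│   └── room_04.avi            # sample test video
└── oldcare/                   # project package
    ├── preprocessing/         # AspectAwarePreprocessor, ImageToArrayPreprocessor
    ├── datasets/              # SimpleDatasetLoader
    ├── conv/                  # MiniVGGNet
    ├── callbacks/             # TrainingMonitor
    └── facial/                # FaceUtil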
1.1 Building the Expression Recognition Model
1) Import libraries
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
from tensorflow.keras.utils import plot_model
2) Build the neural network
Build a custom CNN network:
class CustomNet:
    @staticmethod
    def build(width, height, depth, classes):
        # initialize the model, assuming channels-last ordering by default
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1

        # if the backend uses channels-first ordering, swap the input shape
        # and the channel axis accordingly
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # first CONV => RELU => CONV => RELU => POOL layer set
        model.add(Conv2D(32, (3, 3), padding="same",
                         input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(32, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # second CONV => RELU => CONV => RELU => POOL layer set
        model.add(Conv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # first (and only) set of FC => RELU layers
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        # return the constructed network architecture
        return model
Initialize the model:
model = CustomNet.build(28, 28, 1, 2)
3) Print model information
Print the model structure.
Note: install the following system and Python packages first:
1. apt-get install graphviz graphviz-doc
2. pip install graphviz
3. pip install pydot
plot_model(model, show_shapes=True)  # draw the network structure diagram
Print the model parameter summary:
model.summary()
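If you also want to save the architecture diagram to disk, plot_model accepts a to_file argument; the output file name below is only an example:

plot_model(model, to_file='plots/custom_net.png',
           show_shapes=True, show_layer_names=True)  # example output path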
1.2 Training the Expression Recognition Model
1) Set hyperparameters and paths
TARGET_WIDTH = 28
TARGET_HEIGHT = 28
BATCH_SIZE = 64
EPOCHS = 15
LR_INIT = 0.1
DECAY = LR_INIT/EPOCHS
MOMENTUM = 0.6
dataset_path = 'images'
output_model_path = 'models/face_expression.hdf5'
output_plot_path = 'plots/face_expression.png'
2) Initialize the data preprocessors
from oldcare.preprocessing import AspectAwarePreprocessor
aap = AspectAwarePreprocessor(TARGET_WIDTH, TARGET_HEIGHT)
from oldcare.preprocessing import ImageToArrayPreprocessor
iap = ImageToArrayPreprocessor()
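The AspectAwarePreprocessor and ImageToArrayPreprocessor classes come from the project's own oldcare package and are not listed in this article. A minimal, assumed sketch of what such preprocessors typically do (not the project's actual implementation):

# Assumed, simplified sketch of the two preprocessors used above.
import cv2
import imutils
from tensorflow.keras.preprocessing.image import img_to_array

class AspectAwarePreprocessor:
    def __init__(self, width, height, inter=cv2.INTER_AREA):
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        # resize along the shorter side, then center-crop to the target size,
        # so the aspect ratio is preserved instead of being distorted
        (h, w) = image.shape[:2]
        if w < h:
            image = imutils.resize(image, width=self.width, inter=self.inter)
        else:
            image = imutils.resize(image, height=self.height, inter=self.inter)
        (h, w) = image.shape[:2]
        dH = (h - self.height) // 2
        dW = (w - self.width) // 2
        image = image[dH:dH + self.height, dW:dW + self.width]
        # a final resize guards against off-by-one rounding
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)

class ImageToArrayPreprocessor:
    def __init__(self, data_format=None):
        self.data_format = data_format

    def preprocess(self, image):
        # convert the image to a Keras-compatible NumPy array
        # (handles channels-first/last ordering)
        return img_to_array(image, data_format=self.data_format)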
3) Load the dataset
print("[INFO] loading images...")
from imutils import paths
imagePaths = list(paths.list_images(dataset_path))
# create the dataset loader
from oldcare.datasets import SimpleDatasetLoader
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, 500, True)
4) Data preprocessing
Feature scaling:
data = data.astype("float") / 255.0  # feature scaling is an essential step
One-hot encode the labels:
# one-hot encode the labels
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder().fit(labels)
from tensorflow.keras.utils import to_categorical
labels = to_categorical(le.transform(labels), 2)
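As a small illustration of the encoding above (assuming the two class folders are named 'neutral' and 'smile', which matches the labels used by the inference script later):

# Hypothetical example: what LabelEncoder + to_categorical produce for two classes.
# The class names 'neutral' and 'smile' are an assumption about the folder names.
sample_labels = ['neutral', 'smile', 'smile']
encoded = le.transform(sample_labels)   # -> array([0, 1, 1])
one_hot = to_categorical(encoded, 2)    # -> [[1., 0.], [0., 1.], [0., 1.]]
print(one_hot)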
Split the data into training and test sets:
from sklearn.model_selection import train_test_split
(trainX, testX, trainY, testY) = train_test_split(data,
    labels, test_size=0.20, stratify=labels, random_state=42)
5) Create the model
print("[INFO] compiling model...")
# create an instance of the project's MiniVGGNet (defined in the oldcare package)
from oldcare.conv import MiniVGGNet
model = MiniVGGNet.build(width=TARGET_WIDTH,
                         height=TARGET_HEIGHT, depth=1, classes=2)
# create the optimizer
from tensorflow.keras.optimizers import SGD
opt = SGD(learning_rate=LR_INIT, decay=DECAY, momentum=MOMENTUM, nesterov=True)
# compile the model
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
6) Add a training monitor
The TrainingMonitor callback from the oldcare package is used to monitor the training process in real time and write a training-progress plot to output_plot_path.
from oldcare.callbacks import TrainingMonitor
callbacks = [TrainingMonitor(output_plot_path)]
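If you also want the TensorBoard monitoring mentioned in the original heading, Keras's built-in TensorBoard callback can be added alongside TrainingMonitor. A minimal sketch, where the log directory name is an assumption:

# Optional: also log training to TensorBoard (the log directory is an assumption).
from tensorflow.keras.callbacks import TensorBoard

tensorboard_cb = TensorBoard(log_dir='logs/face_expression', histogram_freq=1)
callbacks = [TrainingMonitor(output_plot_path), tensorboard_cb]

# Then watch training in real time from a shell:
#   tensorboard --logdir logs/face_expression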
7) Train the model
print("[INFO] training network...")
H = model.fit(trainX, trainY, validation_data=(testX, testY),
              batch_size=BATCH_SIZE, epochs=EPOCHS,
              callbacks=callbacks, verbose=1)
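Optionally, the loss and accuracy curves recorded in H.history can be plotted once training finishes. A sketch; the key names 'accuracy'/'val_accuracy' assume TensorFlow 2.x (older versions use 'acc'/'val_acc'), and the output path is only an example:

# Plot the training curves stored in the History object returned by model.fit.
import numpy as np
import matplotlib.pyplot as plt

epochs = np.arange(0, EPOCHS)
plt.figure()
plt.plot(epochs, H.history['loss'], label='train_loss')
plt.plot(epochs, H.history['val_loss'], label='val_loss')
plt.plot(epochs, H.history['accuracy'], label='train_acc')
plt.plot(epochs, H.history['val_accuracy'], label='val_acc')
plt.xlabel('Epoch')
plt.ylabel('Loss / Accuracy')
plt.legend()
plt.savefig('plots/training_curves.png')  # example output path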
1.3 Evaluating the Model
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=64)
# print the classification report
from sklearn.metrics import classification_report
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=le.classes_))
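A confusion matrix complements the classification report with a per-class error breakdown, for example:

# Print a 2x2 confusion matrix (rows: true classes, columns: predicted classes).
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(testY.argmax(axis=1), predictions.argmax(axis=1))
print(cm)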
1.4 Saving the Model
print("[INFO] serializing network...")
model.save(output_model_path)
2. Optimizing the Sentiment Analysis Model
This optimization focuses on the optimizer.
The current optimizer is SGD (stochastic gradient descent), which has four important parameters: lr, decay, momentum, and nesterov.
Guided by what each of these parameters does, different values were tried repeatedly.
The tuned values are:
LR_INIT = 0.01
DECAY = LR_INIT/EPOCHS
MOMENTUM = 0.9
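To apply the tuned values, rebuild the optimizer and recompile the model; nesterov=True is simply carried over from the original configuration:

# Recompile the model with the tuned SGD hyperparameters.
from tensorflow.keras.optimizers import SGD

LR_INIT = 0.01
DECAY = LR_INIT / EPOCHS
MOMENTUM = 0.9

opt = SGD(learning_rate=LR_INIT, decay=DECAY, momentum=MOMENTUM, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])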
3. Sentiment Analysis for the Elderly
Pass in a video file (or capture video from the computer's webcam) and run expression recognition on every frame.
#!/usr/bin/env python
# coding: utf-8
# Apply the Expression Recognition Model

# ## 1. Import libraries
from keras.preprocessing.image import img_to_array
from keras.models import load_model
from oldcare.facial import FaceUtil
import numpy as np
import imutils
import cv2
import time

# ## 2. Set paths
model_path = 'models/face_expression.hdf5'
input_video = 'tests/room_04.avi'

# ## 3. Set hyperparameters
FACIAL_EXPRESSION_TARGET_WIDTH = 28
FACIAL_EXPRESSION_TARGET_HEIGHT = 28

# ## 4. Load the model
model = load_model(model_path)

# ## 5. Set up the video source
# if no video file is provided, fall back to the webcam
if not input_video:
    camera = cv2.VideoCapture(0)
    time.sleep(2)
else:
    camera = cv2.VideoCapture(input_video)

# ## 6. Create the face detection utility
faceutil = FaceUtil()

# ## 7. Read the video frame by frame and run recognition
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if the frame is empty, the video has ended; stop the loop
    if input_video and not grabbed:
        break
    if not input_video:
        frame = cv2.flip(frame, 1)

    # resize the frame and convert it to grayscale
    frame = imutils.resize(frame, width=600)
    face_location_list = faceutil.get_face_location(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # loop over the face bounding boxes
    for (left, top, right, bottom) in face_location_list:
        # extract the ROI from the grayscale image, resize it to 28x28,
        # then classify it with the CNN
        roi = gray[top:bottom, left:right]
        roi = cv2.resize(roi, (FACIAL_EXPRESSION_TARGET_WIDTH,
                               FACIAL_EXPRESSION_TARGET_HEIGHT))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)

        # determine the facial expression
        (neutral, smile) = model.predict(roi)[0]
        label = "Neutral" if neutral > smile else "Smile"

        # draw the expression label and the face bounding box on the output frame
        cv2.putText(frame, label, (left, top - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        cv2.rectangle(frame, (left, top), (right, bottom),
                      (0, 0, 255), 2)

    # show the recognition result
    cv2.imshow("Facial Expression Detect", frame)

    # press 'ESC' to quit
    k = cv2.waitKey(100) & 0xff
    if k == 27:
        break

# ## 8. Release resources
# release the camera and close all windows
camera.release()
cv2.destroyAllWindows()
