
BERT Source Code Analysis (Part III)


Preface

Continuing with the Pre-training part left unfinished earlier: in the previous post (BERT Source Code Analysis (Part II)) we completed the processing of the input data, so now let's look at how BERT trains on the two tasks, "Masked LM" and "Next Sentence Prediction".

  • run_pretraining[1]

Besides the commentary outside the code blocks, there are comments inside them as well. The black background used for code in earlier posts seemed a bit hard on the eyes, so I'm trying a white one this time.

I've also compiled the BERT source code analysis series into a PDF for easier reading; if you want it, you can find it at the end of this post (don't scroll down just yet, finish reading this one first).

Task #1: Masked LM

The get_masked_lm_output function computes the training loss for Task #1. Its input is the sequence_output of BertModel's last layer ([batch_size, seq_length, hidden_size]): predicting the MASK tokens of a sequence is a tagging-style problem, so the output states of the whole sequence are needed.

def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
                         label_ids, label_weights):
  """Get loss and log probs for the masked LM."""
  # Gather the encoder outputs at the masked positions
  input_tensor = gather_indexes(input_tensor, positions)

  with tf.variable_scope("cls/predictions"):
    # Apply one extra non-linear transform before the output layer;
    # it is only used during pre-training
    with tf.variable_scope("transform"):
      input_tensor = tf.layers.dense(
          input_tensor,
          units=bert_config.hidden_size,
          activation=modeling.get_activation(bert_config.hidden_act),
          kernel_initializer=modeling.create_initializer(
              bert_config.initializer_range))
      input_tensor = modeling.layer_norm(input_tensor)

    # output_weights is the same matrix as the input word embedding table,
    # so only an extra output bias is added here
    output_bias = tf.get_variable(
        "output_bias",
        shape=[bert_config.vocab_size],
        initializer=tf.zeros_initializer())
    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    # label_ids are the ids of the masked tokens
    label_ids = tf.reshape(label_ids, [-1])
    label_weights = tf.reshape(label_weights, [-1])

    one_hot_labels = tf.one_hot(
        label_ids, depth=bert_config.vocab_size, dtype=tf.float32)

    # The actual number of masked positions may be fewer than the maximum
    # of 20 (e.g. only 18 are masked), in which case label_ids is padded
    # up to 20 and label_weights = [1, 1, ..., 0, 0]; the padded positions
    # must be excluded from the loss.
    per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
    numerator = tf.reduce_sum(label_weights * per_example_loss)
    denominator = tf.reduce_sum(label_weights) + 1e-5
    loss = numerator / denominator

  return (loss, per_example_loss, log_probs)
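
The function above calls gather_indexes(input_tensor, positions) to pull out only the hidden states at the masked positions. That helper also lives in run_pretraining.py but is not reproduced above; the sketch below shows roughly how it works (flatten the batch, then tf.gather on flattened indices). It is paraphrased from memory, so treat it as a sketch rather than a verbatim copy.

def gather_indexes(sequence_tensor, positions):
  """Gathers the vectors at the specified positions over a minibatch."""
  sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
  batch_size = sequence_shape[0]
  seq_length = sequence_shape[1]
  width = sequence_shape[2]

  # Turn per-example positions into indices into the flattened
  # [batch_size * seq_length, width] tensor, then gather them.
  flat_offsets = tf.reshape(
      tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
  flat_positions = tf.reshape(positions + flat_offsets, [-1])
  flat_sequence_tensor = tf.reshape(sequence_tensor,
                                    [batch_size * seq_length, width])
  output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
  return output_tensor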

Task #2: Next Sentence Prediction

The get_next_sentence_output function computes the training loss for Task #2. Its input is the pooled_output of BertModel's last layer ([batch_size, hidden_size]): this task is a binary classification problem, so only the first token [CLS] of each sequence is needed.

def get_next_sentence_output(bert_config, input_tensor, labels):
  """Get loss and log probs for the next sentence prediction."""
  # Label 0 means the second sentence really does follow the first;
  # label 1 means it does not.
  # The parameters of this classifier are discarded during fine-tuning.
  with tf.variable_scope("cls/seq_relationship"):
    output_weights = tf.get_variable(
        "output_weights",
        shape=[2, bert_config.hidden_size],
        initializer=modeling.create_initializer(bert_config.initializer_range))
    output_bias = tf.get_variable(
        "output_bias", shape=[2], initializer=tf.zeros_initializer())

    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    labels = tf.reshape(labels, [-1])
    one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs)
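
Since output_weights has shape [2, hidden_size] and pooled_output has shape [batch_size, hidden_size], the matmul with transpose_b=True yields [batch_size, 2] logits. A tiny NumPy sketch with made-up sizes (not part of the BERT code) makes the shapes explicit:

import numpy as np

batch_size, hidden_size = 8, 768  # hypothetical sizes for illustration
pooled_output = np.random.randn(batch_size, hidden_size).astype(np.float32)  # [CLS] vectors
output_weights = np.random.randn(2, hidden_size).astype(np.float32)

# Equivalent of tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = pooled_output @ output_weights.T
# Equivalent of tf.nn.log_softmax(logits, axis=-1)
log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
print(logits.shape, log_probs.shape)  # (8, 2) (8, 2)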

Custom Model

The model_fn_builder function constructs the model_fn used by the Estimator. With the two training tasks defined above, the training procedure can be written out, and the training set can then be fed in for automatic training.

def model_fn_builder(bert_config, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):

  def model_fn(features, labels, mode, params):
    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    masked_lm_positions = features["masked_lm_positions"]
    masked_lm_ids = features["masked_lm_ids"]
    masked_lm_weights = features["masked_lm_weights"]
    next_sentence_labels = features["next_sentence_labels"]

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    # Create the Transformer (BertModel) instance
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings)

    # Batch loss, per-example loss and log-prob matrix of the masked LM task
    (masked_lm_loss,
     masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
         bert_config, model.get_sequence_output(), model.get_embedding_table(),
         masked_lm_positions, masked_lm_ids, masked_lm_weights)

    # Batch loss, per-example loss and log-prob matrix of the next sentence prediction task
    (next_sentence_loss, next_sentence_example_loss,
     next_sentence_log_probs) = get_next_sentence_output(
         bert_config, model.get_pooled_output(), next_sentence_labels)

    # The total loss is the sum of the two
    total_loss = masked_lm_loss + next_sentence_loss

    # Collect all trainable variables
    tvars = tf.trainable_variables()

    initialized_variable_names = {}
    scaffold_fn = None
    # Restore from a previously saved checkpoint if one is given
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    # Training: build the train spec
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    # Evaluation: build the eval spec
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
                    masked_lm_weights, next_sentence_example_loss,
                    next_sentence_log_probs, next_sentence_labels):
        """Computes the loss and accuracy of the model."""
        masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
                                         [-1, masked_lm_log_probs.shape[-1]])
        masked_lm_predictions = tf.argmax(
            masked_lm_log_probs, axis=-1, output_type=tf.int32)
        masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
        masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
        masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
        masked_lm_accuracy = tf.metrics.accuracy(
            labels=masked_lm_ids,
            predictions=masked_lm_predictions,
            weights=masked_lm_weights)
        masked_lm_mean_loss = tf.metrics.mean(
            values=masked_lm_example_loss, weights=masked_lm_weights)

        next_sentence_log_probs = tf.reshape(
            next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
        next_sentence_predictions = tf.argmax(
            next_sentence_log_probs, axis=-1, output_type=tf.int32)
        next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
        next_sentence_accuracy = tf.metrics.accuracy(
            labels=next_sentence_labels, predictions=next_sentence_predictions)
        next_sentence_mean_loss = tf.metrics.mean(
            values=next_sentence_example_loss)

        return {
            "masked_lm_accuracy": masked_lm_accuracy,
            "masked_lm_loss": masked_lm_mean_loss,
            "next_sentence_accuracy": next_sentence_accuracy,
            "next_sentence_loss": next_sentence_mean_loss,
        }

      eval_metrics = (metric_fn, [
          masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
          masked_lm_weights, next_sentence_example_loss,
          next_sentence_log_probs, next_sentence_labels
      ])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))

    return output_spec

  return model_fn

Main Function

The training process is implemented on top of the functions above.

def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  if not FLAGS.do_train and not FLAGS.do_eval:
    raise ValueError("At least one of `do_train` or `do_eval` must be True.")

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  tf.gfile.MakeDirs(FLAGS.output_dir)

  input_files = []
  for input_pattern in FLAGS.input_file.split(","):
    input_files.extend(tf.gfile.Glob(input_pattern))

  tf.logging.info("*** Input Files ***")
  for input_file in input_files:
    tf.logging.info("  %s" % input_file)

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  # Custom model_fn for the Estimator
  model_fn = model_fn_builder(
      bert_config=bert_config,
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=FLAGS.num_train_steps,
      num_warmup_steps=FLAGS.num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If no TPU is available, this falls back to a normal CPU/GPU Estimator
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size)

  if FLAGS.do_train:
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    train_input_fn = input_fn_builder(
        input_files=input_files,
        max_seq_length=FLAGS.max_seq_length,
        max_predictions_per_seq=FLAGS.max_predictions_per_seq,
        is_training=True)
    estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)

  if FLAGS.do_eval:
    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
    eval_input_fn = input_fn_builder(
        input_files=input_files,
        max_seq_length=FLAGS.max_seq_length,
        max_predictions_per_seq=FLAGS.max_predictions_per_seq,
        is_training=False)
    result = estimator.evaluate(
        input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))
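
The input_fn_builder function (not listed in this post) builds the input_fn passed to estimator.train and estimator.evaluate; it reads the TFRecord files produced in Part II and parses exactly the feature keys that model_fn consumes above. The sketch below is condensed and paraphrased (make_name_to_features is just a wrapper invented for this illustration, not a function in the real file), so treat names and details as approximate:

def make_name_to_features(max_seq_length, max_predictions_per_seq):
  # Fixed-length features matching the keys read in model_fn
  return {
      "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
      "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
      "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
      "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
      "next_sentence_labels": tf.FixedLenFeature([1], tf.int64),
  }

def decode_record(record, name_to_features):
  """Decodes one serialized tf.Example into a dict of tensors."""
  example = tf.parse_single_example(record, name_to_features)
  # TPUs only support int32, so cast all int64 features down
  for name in list(example.keys()):
    t = example[name]
    if t.dtype == tf.int64:
      t = tf.to_int32(t)
    example[name] = t
  return example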

Running the Code

Pre-training launch script:

python run_pretraining.py \
  --input_file=/tmp/tf_examples.tfrecord \
  --output_dir=/tmp/pretraining_output \
  --do_train=True \
  --do_eval=True \
  --bert_config_file=$BERT_BASE_DIR/bert_config.json \
  --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
  --train_batch_size=32 \
  --max_seq_length=128 \
  --max_predictions_per_seq=20 \
  --num_train_steps=20 \
  --num_warmup_steps=10 \
  --learning_rate=2e-5

You should then get an output log similar to the following:

***** Eval results *****
  global_step = 20
  loss = 0.0979674
  masked_lm_accuracy = 0.985479
  masked_lm_loss = 0.0979328
  next_sentence_accuracy = 1.0
  next_sentence_loss = 3.45724e-05

Finally, a few tips on the pre-training process [I can't run it at that scale myself anyway, so just take a look =.=]


Over~ The BERT source code series ends here.

P.S. By now BERT has seen quite a few updates, such as Whole Word Masking, so if there are mistakes in the earlier posts, please do point them out and I'll fix them promptly~

References

[1] run_pretraining: https://github.com/google-research/bert/blob/master/run_pretraining.py
