Reposted from https://www.cnblogs.com/cxq1126/p/14277134.html
As shown in the figure above, the goal is to convert the TensorFlow checkpoint bert_model.ckpt into a pytorch_model.bin file.
You can create a file named convert.py yourself, copy the code below into it, and simply adjust the path names to match your own directories.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import logging

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert

logging.basicConfig(level=logging.INFO)


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the BERT config file
    config = BertConfig.from_json_file(bert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = BertForPreTraining(config)

    # Load weights from the TensorFlow checkpoint (requires TensorFlow to be installed)
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save the PyTorch model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--tf_checkpoint_path",
                        default='./chinese_L-12_H-768_A-12_improve1/bert_model.ckpt',
                        type=str,
                        help="Path to the TensorFlow checkpoint path.")
    parser.add_argument("--bert_config_file",
                        default='./chinese_L-12_H-768_A-12_improve1/config.json',
                        type=str,
                        help="The config json file corresponding to the pre-trained BERT model. \n"
                             "This specifies the model architecture.")
    parser.add_argument("--pytorch_dump_path",
                        default='./chinese_L-12_H-768_A-12_improve1/pytorch_model.bin',
                        type=str,
                        help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
                                     args.bert_config_file,
                                     args.pytorch_dump_path)
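Since the arguments above already default to the checkpoint directory, running "python convert.py" with no arguments is enough to write pytorch_model.bin next to config.json. After the conversion, the directory can usually be loaded directly with transformers. The snippet below is only a minimal sketch, assuming ./chinese_L-12_H-768_A-12_improve1/ now contains config.json, the new pytorch_model.bin, and the vocab.txt shipped with the original TensorFlow release:

# Minimal sketch: load the converted checkpoint with transformers.
# Assumes config.json, pytorch_model.bin and vocab.txt all sit in model_dir
# (vocab.txt is copied unchanged from the original TensorFlow release).
import torch
from transformers import BertModel, BertTokenizer

model_dir = './chinese_L-12_H-768_A-12_improve1'
tokenizer = BertTokenizer.from_pretrained(model_dir)
# Loading a checkpoint saved from BertForPreTraining into BertModel only warns
# about the unused pre-training heads (cls.*); the encoder weights load normally.
model = BertModel.from_pretrained(model_dir)
model.eval()

inputs = tokenizer("这是一个测试句子", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# On recent transformers versions the forward pass returns a ModelOutput object:
print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 768])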