
CPM-main Deployment in Practice: Generating Novels with AI (Part 3) - CPM Model Environment Deployment Tutorial


CPM-main was originally built to generate Chinese essays; swap in a different training set and it will generate novels instead.

1. Download the project

Open the repository below to get the code and the pretrained model. Download the model, unzip it, and place it in the model directory.

GitHub - yangjianxin1/CPM: Easy-to-use CPM for Chinese text generation (基于CPM的中文文本生成): https://github.com/yangjianxin1/CPM
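Before moving on, it is worth checking that the working directory matches what the scripts below expect. The snippet is only a minimal sanity check; the paths are the defaults used later in this post (vocab/chinese_vocab.model, config/cpm-small.json, data/zuowen, and a model directory for the downloaded weights), so adjust them if your checkout differs.

import os

# default locations assumed by preprocess.py / train.py / generate.py in this post
expected = [
    "vocab/chinese_vocab.model",   # sentencepiece vocab shipped with the repository
    "config/cpm-small.json",       # config used when training a model from scratch
    "data/zuowen",                 # directory that will hold the raw text files
    "model",                       # put the downloaded, unzipped pretrained model here
]

for path in expected:
    status = "ok" if os.path.exists(path) else "MISSING"
    print(f"{status:8s}{path}")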

2. Install other dependencies

  1. Install SentencePiece: pip install -i https://pypi.tuna.tsinghua.edu.cn/simple SentencePiece
  2. Update pip first if needed: python -m pip install --upgrade pip -i https://pypi.douban.com/simple
  3. Install jieba: pip install -i https://pypi.tuna.tsinghua.edu.cn/simple jieba
  4. The scripts below also import torch and transformers, so make sure PyTorch and the transformers library are installed as well.
  5. If you hit AttributeError: 'CpmTokenizer' object has no attribute 'convert_tokens_to_ids', restart and check whether the error is still there; if it is, change the CpmTokenizer import in the script that raised it to: from transformers.models.cpm.tokenization_cpm import CpmTokenizer
  6. A quick tokenizer smoke test is shown right after this list.
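If the dependencies installed cleanly, a short smoke test like the sketch below (run from the CPM-main root so that vocab/chinese_vocab.model resolves) should load the tokenizer without raising the AttributeError mentioned above:

from transformers.models.cpm.tokenization_cpm import CpmTokenizer

# load the sentencepiece vocab shipped with the repository
tokenizer = CpmTokenizer(vocab_file="vocab/chinese_vocab.model")
print(tokenizer.convert_tokens_to_ids("<eod>"))                      # end-of-document token id
print(tokenizer.encode("罗峰刚修炼完毕", add_special_tokens=False))  # token ids of a sample sentence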

 

3. preprocess.py: preprocess the dataset

Put the novel text files into the '\CPM-main\data\zuowen' directory, saved as UTF-8. Note that the script below reads the title from line 2 of each file (lines[1][3:]) and treats everything from line 8 onwards (lines[7:]) as the body, so the novel files either need to follow that layout or those two offsets need to be adjusted.

import argparse
from utils import set_logger
from transformers import CpmTokenizer
import os
import pickle
from tqdm import tqdm


def preprocess():
    """
    Preprocess the story dataset
    """
    # set up arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--vocab_file', default='vocab/chinese_vocab.model', type=str, required=False,
                        help='path of the sentencepiece vocab model')
    parser.add_argument('--log_path', default='log/preprocess.log', type=str, required=False, help='where to store the log')
    parser.add_argument('--data_path', default='data/zuowen', type=str, required=False, help='directory of the raw dataset')
    parser.add_argument('--save_path', default='data/train.pkl', type=str, required=False, help='where to store the tokenized training data')
    parser.add_argument('--win_size', default=200, type=int, required=False, help='sliding window size, i.e. the maximum length of each sample')
    parser.add_argument('--step', default=200, type=int, required=False, help='stride of the sliding window')
    args = parser.parse_args()

    # initialize the logger
    logger = set_logger(args.log_path)

    # initialize the tokenizer
    tokenizer = CpmTokenizer(vocab_file="vocab/chinese_vocab.model")
    eod_id = tokenizer.convert_tokens_to_ids("<eod>")  # end-of-document token
    sep_id = tokenizer.sep_token_id

    # read every file in the dataset directory
    train_list = []
    logger.info("start tokenizing data")
    for file in tqdm(os.listdir(args.data_path)):
        file = os.path.join(args.data_path, file)
        with open(file, "r", encoding="utf8") as reader:
            lines = reader.readlines()
            title = lines[1][3:].strip()  # extract the title
            lines = lines[7:]  # extract the body
            article = ""
            for line in lines:
                if line.strip() != "":  # skip empty lines
                    article += line
            title_ids = tokenizer.encode(title, add_special_tokens=False)
            article_ids = tokenizer.encode(article, add_special_tokens=False)
            token_ids = title_ids + [sep_id] + article_ids + [eod_id]
            # train_list.append(token_ids)

            # truncate each sample with a sliding window
            win_size = args.win_size
            step = args.step
            start_index = 0
            end_index = win_size
            data = token_ids[start_index:end_index]
            train_list.append(data)
            start_index += step
            end_index += step
            while end_index + 50 < len(token_ids):  # only keep a remaining chunk if it is at least 50 tokens long
                data = token_ids[start_index:end_index]
                train_list.append(data)
                start_index += step
                end_index += step

    # serialize the training data
    with open(args.save_path, "wb") as f:
        pickle.dump(train_list, f)


if __name__ == '__main__':
    preprocess()
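Once preprocess.py has run, the samples in data/train.pkl can be spot-checked before training. A minimal sketch, assuming the default save path and vocab path used above:

import pickle
from transformers import CpmTokenizer

# decode one preprocessed sample to confirm the sliding-window output looks sane
tokenizer = CpmTokenizer(vocab_file="vocab/chinese_vocab.model")
with open("data/train.pkl", "rb") as f:
    train_list = pickle.load(f)

print("number of samples:", len(train_list))
print("length of first sample:", len(train_list[0]))  # at most win_size tokens
print(tokenizer.decode(train_list[0])[:200])           # first part of the decoded text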

4. train.py: fine-tune the model on the novel data

import argparse
import math
import time
import torch
import torch.nn.functional as F
import torch.optim as optim
import logging
from datetime import datetime
import os
from torch.utils.data import Dataset, DataLoader
from os.path import join, exists
from torch.nn import CrossEntropyLoss
from tqdm import tqdm
from torch.nn import DataParallel
import transformers
import pickle
import sys
from utils import set_logger, set_random_seed
from data_parallel import BalancedDataParallel
from transformers import GPT2LMHeadModel, GPT2Config, CpmTokenizer
import pandas as pd
import torch.nn.utils.rnn as rnn_utils
import numpy as np
from dataset import CPMDataset


def set_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0,1', type=str, required=False, help='which GPUs to use')
    parser.add_argument('--no_cuda', action='store_true', help='train without a GPU')
    parser.add_argument('--vocab_path', default='vocab/chinese_vocab.model', type=str, required=False,
                        help='path of the sentencepiece model')
    parser.add_argument('--model_config', default='config/cpm-small.json', type=str, required=False,
                        help='model config file used when training a model from scratch')
    parser.add_argument('--train_path', default='data/train.pkl', type=str, required=False, help='path of the preprocessed training data')
    parser.add_argument('--max_len', default=200, type=int, required=False, help='maximum input length during training')
    parser.add_argument('--log_path', default='log/train.log', type=str, required=False, help='where to store the training log')
    parser.add_argument('--ignore_index', default=-100, type=int, required=False, help='label tokens equal to ignore_index do not contribute to the loss')
    parser.add_argument('--epochs', default=100, type=int, required=False, help='maximum number of training epochs')
    parser.add_argument('--batch_size', default=1, type=int, required=False, help='training batch size')
    parser.add_argument('--gpu0_bsz', default=6, type=int, required=False, help='batch size on GPU 0')
    parser.add_argument('--lr', default=1.5e-4, type=float, required=False, help='learning rate')
    parser.add_argument('--eps', default=1.0e-09, type=float, required=False, help='epsilon of the AdamW optimizer')
    parser.add_argument('--log_step', default=1, type=int, required=False, help='report the loss every this many steps')
    parser.add_argument('--gradient_accumulation_steps', default=6, type=int, required=False, help='number of gradient accumulation steps')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)
    parser.add_argument('--save_model_path', default='model/zuowen_epoch40', type=str, required=False,
                        help='output path of the model')
    parser.add_argument('--pretrained_model', default='model/zuowen_epoch40', type=str, required=False,
                        help='path of the pretrained model')
    parser.add_argument('--seed', type=int, default=1234, help='random seed')
    parser.add_argument('--num_workers', type=int, default=0, help="number of dataloader worker threads")
    # parser.add_argument('--patience', type=int, default=0, help="used for early stopping; 0 disables it. An early-stopped model does not necessarily generate better text.")
    parser.add_argument('--warmup_steps', type=int, default=4000, help='number of warm-up steps')
    # parser.add_argument('--label_smoothing', default=True, action='store_true', help='whether to apply label smoothing')
    args = parser.parse_args()
    return args


def collate_fn(batch):
    input_ids = rnn_utils.pad_sequence(batch, batch_first=True, padding_value=5)
    labels = rnn_utils.pad_sequence(batch, batch_first=True, padding_value=-100)
    return input_ids, labels


def load_dataset(logger, args):
    """
    Load the training set
    """
    logger.info("loading training dataset")
    train_path = args.train_path

    with open(train_path, "rb") as f:
        train_list = pickle.load(f)

    # test
    # train_list = train_list[:24]

    train_dataset = CPMDataset(train_list, args.max_len)
    return train_dataset


def train_epoch(model, train_dataloader, optimizer, scheduler, logger,
                epoch, args):
    model.train()
    device = args.device
    ignore_index = args.ignore_index
    epoch_start_time = datetime.now()

    total_loss = 0  # total loss over the whole epoch

    epoch_correct_num = 0  # number of correctly predicted tokens in this epoch
    epoch_total_num = 0  # total number of predicted tokens in this epoch

    for batch_idx, (input_ids, labels) in enumerate(train_dataloader):
        # catch cuda out of memory exceptions
        try:
            input_ids = input_ids.to(device)
            labels = labels.to(device)
            outputs = model.forward(input_ids, labels=labels)
            logits = outputs.logits
            loss = outputs.loss
            loss = loss.mean()

            # count correct and total predicted tokens in this batch
            batch_correct_num, batch_total_num = calculate_acc(logits, labels, ignore_index=ignore_index)
            # accumulate the per-epoch counts
            epoch_correct_num += batch_correct_num
            epoch_total_num += batch_total_num
            # accuracy of this batch
            batch_acc = batch_correct_num / batch_total_num

            total_loss += loss.item()
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            loss.backward()
            # gradient clipping
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            # update parameters after the configured number of accumulation steps
            if (batch_idx + 1) % args.gradient_accumulation_steps == 0:
                # update parameters
                optimizer.step()
                # update the learning rate
                scheduler.step()
                # clear gradients
                optimizer.zero_grad()

            if (batch_idx + 1) % args.log_step == 0:
                logger.info(
                    "batch {} of epoch {}, loss {}, batch_acc {}, lr {}".format(
                        batch_idx + 1, epoch + 1, loss.item() * args.gradient_accumulation_steps, batch_acc, scheduler.get_lr()))

            if batch_idx % 1000 == 0:
                model.save_pretrained(args.save_model_path)

            del input_ids, outputs

        except RuntimeError as exception:
            if "out of memory" in str(exception):
                logger.info("WARNING: ran out of memory")
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
            else:
                logger.info(str(exception))
                raise exception

    # average loss and accuracy of this epoch
    epoch_mean_loss = total_loss / len(train_dataloader)
    epoch_mean_acc = epoch_correct_num / epoch_total_num
    logger.info(
        "epoch {}: loss {}, predict_acc {}".format(epoch + 1, epoch_mean_loss, epoch_mean_acc))

    # save model
    logger.info('saving model for epoch {}'.format(epoch + 1))
    model_path = join(args.save_model_path, 'epoch{}'.format(epoch + 1))
    if not os.path.exists(model_path):
        os.mkdir(model_path)
    model_to_save = model.module if hasattr(model, 'module') else model
    model_to_save.save_pretrained(model_path)
    logger.info('epoch {} finished'.format(epoch + 1))
    epoch_finish_time = datetime.now()
    logger.info('time for one epoch: {}'.format(epoch_finish_time - epoch_start_time))

    return epoch_mean_loss


def train(model, logger, train_dataset, args):
    train_dataloader = DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=collate_fn,
        drop_last=True
    )
    t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.epochs
    optimizer = transformers.AdamW(model.parameters(), lr=args.lr, eps=args.eps)
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )

    logger.info('start training')

    train_losses = []  # average loss of each epoch

    # ========== start training ========== #
    for epoch in range(args.epochs):
        train_loss = train_epoch(
            model=model, train_dataloader=train_dataloader,
            optimizer=optimizer, scheduler=scheduler,
            logger=logger, epoch=epoch, args=args)
        train_losses.append(round(train_loss, 4))
        logger.info("train loss list:{}".format(train_losses))

    logger.info('training finished')
    logger.info("train_losses:{}".format(train_losses))


def caculate_loss(logit, target, pad_idx, smoothing=True):
    if smoothing:
        logit = logit[..., :-1, :].contiguous().view(-1, logit.size(2))
        target = target[..., 1:].contiguous().view(-1)
        eps = 0.1
        n_class = logit.size(-1)
        one_hot = torch.zeros_like(logit).scatter(1, target.view(-1, 1), 1)
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
        log_prb = F.log_softmax(logit, dim=1)
        non_pad_mask = target.ne(pad_idx)
        loss = -(one_hot * log_prb).sum(dim=1)
        loss = loss.masked_select(non_pad_mask).mean()  # average later
    else:
        # loss = F.cross_entropy(predict_logit, target, ignore_index=pad_idx)
        logit = logit[..., :-1, :].contiguous().view(-1, logit.size(-1))
        labels = target[..., 1:].contiguous().view(-1)
        loss = F.cross_entropy(logit, labels, ignore_index=pad_idx)
    return loss


def calculate_acc(logit, labels, ignore_index=-100):
    logit = logit[..., :-1, :].contiguous().view(-1, logit.size(-1))
    labels = labels[..., 1:].contiguous().view(-1)

    _, logit = logit.max(dim=-1)  # for each position, take the index of the largest logit
    # mask that is 0 where labels equal the ignore id and 1 everywhere else
    non_pad_mask = labels.ne(ignore_index)
    n_correct = logit.eq(labels).masked_select(non_pad_mask).sum().item()
    n_word = non_pad_mask.sum().item()
    return n_correct, n_word


def main():
    # initialize arguments
    args = set_args()

    # select which GPUs to use
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    args.cuda = not args.no_cuda

    # if args.batch_size < 2048 and args.warmup_steps <= 4000:
    #     print('[Warning] The warmup steps may be not enough.\n' \
    #           '(sz_b, warmup) = (2048, 4000) is the official setting.\n' \
    #           'Using smaller batch w/o longer warmup may cause ' \
    #           'the warmup stage ends with only little data trained.')

    # create the logger
    logger = set_logger(args.log_path)

    # use the GPU only when it is requested and available
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    device = 'cuda:0' if args.cuda else 'cpu'
    args.device = device
    logger.info('using device:{}'.format(device))

    # set the random seed
    set_random_seed(args.seed, args.cuda)

    # initialize the tokenizer
    tokenizer = CpmTokenizer(vocab_file="vocab/chinese_vocab.model")
    args.eod_id = tokenizer.convert_tokens_to_ids("<eod>")  # end-of-document token
    args.pad_id = tokenizer.pad_token_id

    # create the model output directory
    if not os.path.exists(args.save_model_path):
        os.mkdir(args.save_model_path)

    # create the model
    if args.pretrained_model:  # load a pretrained model
        model = GPT2LMHeadModel.from_pretrained(args.pretrained_model)
    else:  # initialize a model from scratch
        model_config = GPT2Config.from_json_file(args.model_config)
        model = GPT2LMHeadModel(config=model_config)
    model = model.to(device)
    logger.info('model config:\n{}'.format(model.config.to_json_string()))
    assert model.config.vocab_size == tokenizer.vocab_size

    # multi-GPU training
    if args.cuda and torch.cuda.device_count() > 1:
        # model = DataParallel(model).cuda()
        model = BalancedDataParallel(args.gpu0_bsz, model, dim=0).cuda()
        logger.info("use GPU {} to train".format(args.device))

    # count the model parameters
    num_parameters = 0
    parameters = model.parameters()
    for parameter in parameters:
        num_parameters += parameter.numel()
    logger.info('number of model parameters: {}'.format(num_parameters))

    # log the argument settings
    logger.info("args:{}".format(args))

    # ========= Loading Dataset ========= #
    train_dataset = load_dataset(logger, args)

    train(model, logger, train_dataset, args)


if __name__ == '__main__':
    main()
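dataset.py is imported by train.py but not reproduced in this post. Based on how CPMDataset is used above (built from train_list and max_len, and consumed by collate_fn, which pads variable-length tensors), it presumably looks roughly like the sketch below; treat this as an assumption rather than the repository's exact code.

import torch
from torch.utils.data import Dataset


class CPMDataset(Dataset):
    """Sketch of the dataset class, inferred from its usage in train.py."""

    def __init__(self, input_list, max_len):
        self.input_list = input_list   # list of token-id lists produced by preprocess.py
        self.max_len = max_len

    def __len__(self):
        return len(self.input_list)

    def __getitem__(self, index):
        input_ids = self.input_list[index][:self.max_len]  # truncate to max_len
        return torch.tensor(input_ids, dtype=torch.long)    # collate_fn pads these tensors

Note also that collate_fn pads input_ids with 5 (presumably the pad id of this vocab) and pads labels with -100, which matches ignore_index, so padded positions contribute neither to the loss nor to the accuracy count.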

5. generate.py: generate the novel

import torch
import torch.nn.functional as F
import os
import argparse
from tqdm import trange
from transformers import GPT2LMHeadModel, GPT2Config
from transformers.models.cpm.tokenization_cpm import CpmTokenizer
from utils import top_k_top_p_filtering, set_logger
from os.path import join, exists


def generate_next_token(input_ids):
    """
    Generate the next token for the given context
    """
    outputs = model(input_ids=input_ids)
    logits = outputs.logits
    # next_token_logits are the prediction scores of the last token's hidden state,
    # i.e. the distribution over the next token the model wants to predict
    next_token_logits = logits[0, -1, :]
    next_token_logits = next_token_logits / args.temperature
    # set the probability of <unk> to -inf so the model can never predict the [UNK] token
    next_token_logits[unk_id] = -float('Inf')
    filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=args.topk, top_p=args.topp)
    # torch.multinomial samples num_samples elements from the candidate set without replacement;
    # higher weights are more likely to be drawn; the indices of the drawn elements are returned
    next_token_id = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
    return next_token_id


def generate(max_len):
    # tokenize the title and the context
    title_ids = tokenizer.encode(title, add_special_tokens=False)
    context_ids = tokenizer.encode(context, add_special_tokens=False)
    input_ids = title_ids + [sep_id] + context_ids
    cur_len = len(input_ids)
    last_token_id = input_ids[-1]  # last token of the content generated so far
    input_ids = torch.tensor([input_ids], dtype=torch.long, device=device)

    while True:
        next_token_id = generate_next_token(input_ids[:, -args.context_len:])
        input_ids = torch.cat((input_ids, next_token_id.unsqueeze(0)), dim=1)
        cur_len += 1
        word = tokenizer.convert_ids_to_tokens(next_token_id.item())
        # if cur_len >= max_len:
        #     break

        # maximum length exceeded and a line break was generated
        if cur_len >= max_len and last_token_id == 8 and next_token_id == 3:
            break
        # maximum length exceeded and a punctuation mark was generated
        if cur_len >= max_len and word in [".", "。", "!", "!", "?", "?", ",", ","]:
            break
        # end-of-document token was generated
        if next_token_id == eod_id:
            break
    result = tokenizer.decode(input_ids.squeeze(0))
    return result


if __name__ == '__main__':
    # set up arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, required=False, help='device used for generation')
    parser.add_argument('--temperature', default=1, type=float, required=False, help='sampling temperature')
    parser.add_argument('--topk', default=0, type=int, required=False, help='top-k: sample only from the k highest-probability tokens (0 disables)')
    parser.add_argument('--topp', default=0.85, type=float, required=False, help='top-p (nucleus) cumulative probability')
    parser.add_argument('--repetition_penalty', default=1.0, type=float, required=False, help='repetition penalty')
    parser.add_argument('--context_len', default=800, type=int, required=False, help='length of the context attended to at each generation step')
    parser.add_argument('--max_len', default=300, type=int, required=False, help='maximum length of the generated text')
    parser.add_argument('--log_path', default='log/generate.log', type=str, required=False, help='where to store the log')
    parser.add_argument('--no_cuda', action='store_true', help='do not use the GPU for inference')
    parser.add_argument('--model_path', type=str, default='model/zuowen_epoch40', help='path of the model')
    # parser.add_argument('--title', type=str, default='徜徉在书籍的阳光世界', help='essay title')
    # parser.add_argument('--context', type=str, default='一本书是一个人的眼睛,它可以让你看到另一个世界的奇妙', help='essay opening')
    parser.add_argument('--title', type=str, default='罗峰', help='title')
    parser.add_argument('--context', type=str, default='罗峰刚修炼完毕', help='opening context')

    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.device  # select which GPUs the program uses
    args.cuda = torch.cuda.is_available() and not args.no_cuda  # use the GPU only when it is requested and available
    device = 'cuda:0' if args.cuda else 'cpu'
    # device = 'cpu'

    # create the logger
    logger = set_logger(args.log_path)

    # initialize the tokenizer
    tokenizer = CpmTokenizer(vocab_file="vocab/chinese_vocab.model")
    eod_id = tokenizer.convert_tokens_to_ids("<eod>")  # end-of-document token
    sep_id = tokenizer.sep_token_id
    unk_id = tokenizer.unk_token_id

    # load the model
    model = GPT2LMHeadModel.from_pretrained(args.model_path)
    model.eval()
    model = model.to(device)

    title = args.title
    context = args.context
    logger.info("title:{}".format(title))
    logger.info("context:{}".format(context))

    # start generation
    result = generate(args.max_len)
    result = result.split("<sep>")[1]
    logger.info("result:{}\n".format(result))

    # generate in a loop from the console
    # print('开始生成,输入CTRL + Z以退出')
    # while True:
    #     try:
    #         # read title and context from the user
    #         title = input("请输入作文标题:")
    #         context = input("请输入作文起始句子:")
    #
    #         logger.info("title:{}".format(title))
    #         logger.info("context:{}".format(context))
    #
    #         # start generation
    #         result = generate(args.max_len)
    #         result = result.split("<sep>")[1]
    #         logger.info("result:{}\n".format(result))
    #         break
    #
    #     except KeyboardInterrupt:
    #         break
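utils.py is not reproduced here either. top_k_top_p_filtering is the widely circulated sampling helper from the Hugging Face example scripts; the sketch below shows roughly what it does for a 1-D logits vector (an approximation, not necessarily the repository's exact code). With the defaults above (topk=0, topp=0.85), only the nucleus/top-p branch is active.

import torch
import torch.nn.functional as F


def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Keep only top-k and/or nucleus (top-p) candidates; everything else gets filter_value."""
    if top_k > 0:
        # drop tokens whose logit is below the k-th largest logit
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # mark tokens once the cumulative probability exceeds top_p ...
        sorted_indices_to_remove = cumulative_probs > top_p
        # ... but always keep at least the single most probable token
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
    return logits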

Generation results after about 5 hours of training (title: 罗峰, opening: 罗峰刚修炼完毕). The sample is reproduced verbatim below; the later lines show the model degenerating into repetition.

  1. 罗峰刚修炼完毕,正端着果酒微笑看着眼前这个老者。
  2. “银河,你说这是什么意思?”混沌城主连追问。
  3. “这是《九劫秘典》,我已经在第十五劫第三劫第四劫第三劫的第四劫核心。”罗峰恭敬道,“接下来的日子,你可不能乱来。”
  4. “这《九劫秘典》我还是有些疑惑的,《九劫秘典》,你可得认真观看。”混沌城主微笑道。
  5. 罗峰摇头:“没兴趣,没兴趣。”
  6. “哈哈哈......”混沌城主也忍不住笑起来,“哈哈......”
  7. 罗峰也笑了。。
  8. 那般自然无比此时境界主根本没了,他也笑。,《九劫秘典御柯南龙行,这一套陨落在他的神力也笑着罗峰都笑着笑了。。,完全是。“哈哈哈..........................................这第七劫完了!
  9. 都笑声色的笑声讨了
  10. 虚拟宇宙中正透过虚拟宇宙也是你们自己。
