
A Comprehensive Summary of Pretrained Models with transformers (Part 3)

This is the final part, covering model fine-tuning for language modeling, multiple choice, question answering, text classification, named entity recognition, and similar tasks. Like the earlier parts, it is summarized directly from the official example notebooks. Work through all three parts and you should have a solid working grasp of the library.

Language Modeling

There are two main ways to frame language modeling:

  • Causal language modeling: the model must predict the next token in the sentence, so the labels are the inputs shifted one position to the right. To make sure the model cannot cheat, it receives an attention mask that blocks access to the tokens after token i when it tries to predict token i+1 (see the sketch just after this list).
  • Masked language modeling: the model must predict some tokens that have been masked in the input. It still sees the whole sentence, so it can use the tokens both before and after each masked position to predict its value.
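
The shift in the first point happens inside the model, which is why the preprocessing below can simply set the labels to a copy of the input ids. A minimal sketch of that internal shift (illustrative only, not part of the original notebooks; the tensors are made up):

```python
import torch
import torch.nn.functional as F

# Made-up tensors standing in for a causal LM's inputs and output logits.
vocab_size = 100
input_ids = torch.tensor([[5, 8, 2, 9]])
logits = torch.randn(1, 4, vocab_size)   # (batch, seq_len, vocab_size)
labels = input_ids.clone()               # labels == input_ids, as in group_texts below

# Position i's logits are scored against the token at position i+1.
shift_logits = logits[:, :-1, :].reshape(-1, vocab_size)
shift_labels = labels[:, 1:].reshape(-1)
loss = F.cross_entropy(shift_logits, shift_labels)
```
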
Data loading:

```python
from datasets import load_dataset

datasets = load_dataset('wikitext', 'wikitext-2-raw-v1')
# To use your own text files instead, uncomment the following line and
# replace the placeholders with your local paths:
# datasets = load_dataset("text", data_files={"train": path_to_train.txt, "validation": path_to_validation.txt})
datasets["train"][10]

from datasets import ClassLabel
import random
import pandas as pd
from IPython.display import display, HTML

def show_random_elements(dataset, num_examples=10):
    assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
    picks = []
    for _ in range(num_examples):
        pick = random.randint(0, len(dataset)-1)
        while pick in picks:
            pick = random.randint(0, len(dataset)-1)
        picks.append(pick)
    df = pd.DataFrame(dataset[picks])
    for column, typ in dataset.features.items():
        if isinstance(typ, ClassLabel):
            df[column] = df[column].transform(lambda i: typ.names[i])
    display(HTML(df.to_html()))

show_random_elements(datasets["train"])
```

Causal Language Modeling

```python
model_path = "H:\\code\\Model\\distilgpt2\\"
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)

def tokenize_function(examples):
    return tokenizer(examples["text"])

tokenized_datasets = datasets.map(tokenize_function, batched=True, num_proc=4, remove_columns=["text"])
tokenized_datasets["train"][1]

# block_size = tokenizer.model_max_length
block_size = 128

# Data preprocessing
def group_texts(examples):
    # Concatenate all texts.
    concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated_examples[list(examples.keys())[0]])
    # We drop the small remainder; we could add padding instead if the model
    # supported it. You can customize this part to your needs.
    total_length = (total_length // block_size) * block_size
    # Split by chunks of block_size.
    result = {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated_examples.items()
    }
    result["labels"] = result["input_ids"].copy()
    return result

lm_datasets = tokenized_datasets.map(
    group_texts,
    batched=True,
    batch_size=1000,
    num_proc=4,
)
tokenizer.decode(lm_datasets["train"][1]["input_ids"])

from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(model_path)

from transformers import Trainer, TrainingArguments
training_args = TrainingArguments(
    "test-clm",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    weight_decay=0.01,
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=lm_datasets["train"],
    eval_dataset=lm_datasets["validation"],
)
trainer.train()

import math
eval_results = trainer.evaluate()
print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
```
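
Once training finishes, a quick qualitative check is to sample a continuation from the fine-tuned model. A minimal sketch, assuming the `trainer` and `tokenizer` from the block above (the prompt is arbitrary):

```python
# Sample a short continuation from the fine-tuned causal LM.
inputs = tokenizer("The history of natural language processing", return_tensors="pt")
inputs = {k: v.to(trainer.model.device) for k, v in inputs.items()}
outputs = trainer.model.generate(**inputs, max_new_tokens=40, do_sample=True, top_k=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
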


Masked Language Modeling

```python
model_path = "H:\\code\\Model\\distilroberta-base\\"
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
tokenized_datasets = datasets.map(tokenize_function, batched=True, num_proc=4, remove_columns=["text"])
lm_datasets = tokenized_datasets.map(
    group_texts,
    batched=True,
    batch_size=1000,
    num_proc=4,
)

from transformers import AutoModelForMaskedLM
model = AutoModelForMaskedLM.from_pretrained(model_path)

from transformers import DataCollatorForLanguageModeling
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=lm_datasets["train"],
    eval_dataset=lm_datasets["validation"],
    data_collator=data_collator,
)
trainer.train()

eval_results = trainer.evaluate()
print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
```
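
Perplexity alone is abstract, so it can help to probe the fine-tuned masked LM directly. A minimal sketch, assuming the objects defined above (the sentence is arbitrary; distilroberta's mask token is exposed as `tokenizer.mask_token`):

```python
import torch

# Ask the model for its top 5 fillers for a masked position.
text = f"The capital of France is {tokenizer.mask_token}."
inputs = tokenizer(text, return_tensors="pt").to(trainer.model.device)
with torch.no_grad():
    logits = trainer.model(**inputs).logits
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
top5 = logits[0, mask_pos].topk(5, dim=-1).indices[0]
print([tokenizer.decode([token_id]) for token_id in top5])
```
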

Multiple Choice

```python
model_checkpoint = "H:\\code\\Model\\bert-base-uncased\\"
batch_size = 16

from datasets import load_dataset, load_metric
datasets = load_dataset("swag", "regular")
datasets
datasets["train"][0]

from datasets import ClassLabel
import random
import pandas as pd
from IPython.display import display, HTML

def show_random_elements(dataset, num_examples=10):
    assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
    picks = []
    for _ in range(num_examples):
        pick = random.randint(0, len(dataset)-1)
        while pick in picks:
            pick = random.randint(0, len(dataset)-1)
        picks.append(pick)
    df = pd.DataFrame(dataset[picks])
    for column, typ in dataset.features.items():
        if isinstance(typ, ClassLabel):
            df[column] = df[column].transform(lambda i: typ.names[i])
    display(HTML(df.to_html()))

show_random_elements(datasets["train"])

def show_one(example):
    print(f"Context: {example['sent1']}")
    print(f"  A - {example['sent2']} {example['ending0']}")
    print(f"  B - {example['sent2']} {example['ending1']}")
    print(f"  C - {example['sent2']} {example['ending2']}")
    print(f"  D - {example['sent2']} {example['ending3']}")
    print(f"\nGround truth: option {['A', 'B', 'C', 'D'][example['label']]}")

# Preprocess the data
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)

ending_names = ["ending0", "ending1", "ending2", "ending3"]

def preprocess_function(examples):
    # Repeat each first sentence four times to go with the four possibilities of second sentences.
    first_sentences = [[context] * 4 for context in examples["sent1"]]
    # Grab all second sentences possible for each context.
    question_headers = examples["sent2"]
    second_sentences = [[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)]
    # Flatten everything
    first_sentences = sum(first_sentences, [])
    second_sentences = sum(second_sentences, [])
    # Tokenize
    tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True)
    # Un-flatten
    return {k: [v[i:i+4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

examples = datasets["train"][:5]
features = preprocess_function(examples)
print(len(features["input_ids"]), len(features["input_ids"][0]), [len(x) for x in features["input_ids"][0]])
idx = 3
[tokenizer.decode(features["input_ids"][idx][i]) for i in range(4)]
show_one(datasets["train"][3])
encoded_datasets = datasets.map(preprocess_function, batched=True)

# Fine-tune the model
from transformers import AutoModelForMultipleChoice, TrainingArguments, Trainer
model = AutoModelForMultipleChoice.from_pretrained(model_checkpoint)

args = TrainingArguments(
    "test-glue",
    evaluation_strategy="epoch",
    learning_rate=5e-5,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=3,
    weight_decay=0.01,
)

from dataclasses import dataclass
from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy
from typing import Optional, Union
import torch

@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [[{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features]
        flattened_features = sum(flattened_features, [])
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch

accepted_keys = ["input_ids", "attention_mask", "label"]
features = [{k: v for k, v in encoded_datasets["train"][i].items() if k in accepted_keys} for i in range(10)]
batch = DataCollatorForMultipleChoice(tokenizer)(features)

import numpy as np

def compute_metrics(eval_predictions):
    predictions, label_ids = eval_predictions
    preds = np.argmax(predictions, axis=1)
    return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

trainer = Trainer(
    model,
    args,
    train_dataset=encoded_datasets["train"],
    eval_dataset=encoded_datasets["validation"],
    tokenizer=tokenizer,
    data_collator=DataCollatorForMultipleChoice(tokenizer),
    compute_metrics=compute_metrics,
)
trainer.train()
```
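
To see what the fine-tuned multiple-choice model does at inference time, here is a minimal sketch, assuming the `trainer`, `tokenizer`, `ending_names`, and `datasets` from above: the four (context, ending) pairs are tokenized together and the model returns one logit per choice.

```python
import torch

# Score the four candidate endings for one validation example.
example = datasets["validation"][0]
firsts = [example["sent1"]] * 4
seconds = [f"{example['sent2']} {example[end]}" for end in ending_names]
inputs = tokenizer(firsts, seconds, return_tensors="pt", padding=True)
# AutoModelForMultipleChoice expects shape (batch, num_choices, seq_len).
inputs = {k: v.unsqueeze(0).to(trainer.model.device) for k, v in inputs.items()}
with torch.no_grad():
    logits = trainer.model(**inputs).logits
print("Predicted option:", ["A", "B", "C", "D"][logits.argmax(dim=-1).item()])
```
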

Question Answering

```python
# This flag is the difference between SQuAD v1 and v2 (if you're using another dataset,
# it indicates whether impossible answers are allowed or not).
squad_v2 = False
model_checkpoint = "H:\\code\\Model\\distilbert-base-cased\\"
batch_size = 16

from datasets import load_dataset, load_metric
datasets = load_dataset("squad_v2" if squad_v2 else "squad")
datasets

from datasets import ClassLabel, Sequence
import random
import pandas as pd
from IPython.display import display, HTML

def show_random_elements(dataset, num_examples=10):
    assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
    picks = []
    for _ in range(num_examples):
        pick = random.randint(0, len(dataset)-1)
        while pick in picks:
            pick = random.randint(0, len(dataset)-1)
        picks.append(pick)
    df = pd.DataFrame(dataset[picks])
    for column, typ in dataset.features.items():
        if isinstance(typ, ClassLabel):
            df[column] = df[column].transform(lambda i: typ.names[i])
        elif isinstance(typ, Sequence) and isinstance(typ.feature, ClassLabel):
            df[column] = df[column].transform(lambda x: [typ.feature.names[i] for i in x])
    display(HTML(df.to_html()))

show_random_elements(datasets["train"])
```
```python
# Preprocess the training data
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)

import transformers
assert isinstance(tokenizer, transformers.PreTrainedTokenizerFast)

tokenizer("What is your name?", "My name is Sylvain.")

max_length = 384  # The maximum length of a feature (question and context)
doc_stride = 128  # The authorized overlap between two parts of the context when splitting is needed.

# Find the first training example whose question + context exceed max_length.
for i, example in enumerate(datasets["train"]):
    if len(tokenizer(example["question"], example["context"])["input_ids"]) > 384:
        break
example = datasets["train"][i]

len(tokenizer(example["question"], example["context"])["input_ids"])
len(tokenizer(example["question"], example["context"], max_length=max_length, truncation="only_second")["input_ids"])

tokenized_example = tokenizer(
    example["question"],
    example["context"],
    max_length=max_length,
    truncation="only_second",
    return_overflowing_tokens=True,
    return_offsets_mapping=True,
    stride=doc_stride
)
print(tokenized_example["offset_mapping"][0][:100])

first_token_id = tokenized_example["input_ids"][0][1]
offsets = tokenized_example["offset_mapping"][0][1]
print(tokenizer.convert_ids_to_tokens([first_token_id])[0], example["question"][offsets[0]:offsets[1]])

sequence_ids = tokenized_example.sequence_ids()
print(sequence_ids)

answers = example["answers"]
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])

# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != 1:
    token_start_index += 1

# End token index of the current span in the text.
token_end_index = len(tokenized_example["input_ids"][0]) - 1
while sequence_ids[token_end_index] != 1:
    token_end_index -= 1

# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
offsets = tokenized_example["offset_mapping"][0]
if (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
    # Move the token_start_index and token_end_index to the two ends of the answer.
    # Note: we could go after the last offset if the answer is the last word (edge case).
    while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
        token_start_index += 1
    start_position = token_start_index - 1
    while offsets[token_end_index][1] >= end_char:
        token_end_index -= 1
    end_position = token_end_index + 1
    print(start_position, end_position)
else:
    print("The answer is not in this feature.")

print(tokenizer.decode(tokenized_example["input_ids"][0][start_position: end_position+1]))
print(answers["text"][0])
```
```python
pad_on_right = tokenizer.padding_side == "right"

def prepare_train_features(examples):
    # Tokenize our examples with truncation and padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples["question" if pad_on_right else "context"],
        examples["context" if pad_on_right else "question"],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_length,
        stride=doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # The offset mappings will give us a map from token to character position in the original context. This will
    # help us compute the start_positions and end_positions.
    offset_mapping = tokenized_examples.pop("offset_mapping")

    # Let's label those examples!
    tokenized_examples["start_positions"] = []
    tokenized_examples["end_positions"] = []

    for i, offsets in enumerate(offset_mapping):
        # We will label impossible answers with the index of the CLS token.
        input_ids = tokenized_examples["input_ids"][i]
        cls_index = input_ids.index(tokenizer.cls_token_id)
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        answers = examples["answers"][sample_index]
        # If no answers are given, set the cls_index as answer.
        if len(answers["answer_start"]) == 0:
            tokenized_examples["start_positions"].append(cls_index)
            tokenized_examples["end_positions"].append(cls_index)
        else:
            # Start/end character index of the answer in the text.
            start_char = answers["answer_start"][0]
            end_char = start_char + len(answers["text"][0])
            # Start token index of the current span in the text.
            token_start_index = 0
            while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
                token_start_index += 1
            # End token index of the current span in the text.
            token_end_index = len(input_ids) - 1
            while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
                token_end_index -= 1
            # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
            if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
            else:
                # Otherwise move the token_start_index and token_end_index to the two ends of the answer.
                # Note: we could go after the last offset if the answer is the last word (edge case).
                while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
                    token_start_index += 1
                tokenized_examples["start_positions"].append(token_start_index - 1)
                while offsets[token_end_index][1] >= end_char:
                    token_end_index -= 1
                tokenized_examples["end_positions"].append(token_end_index + 1)

    return tokenized_examples

features = prepare_train_features(datasets['train'][:5])
tokenized_datasets = datasets.map(prepare_train_features, batched=True, remove_columns=datasets["train"].column_names)
```
```python
# Fine-tune the model
from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer
model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint)

args = TrainingArguments(
    "test-squad",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=3,
    weight_decay=0.01,
)

from transformers import default_data_collator
data_collator = default_data_collator

trainer = Trainer(
    model,
    args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    tokenizer=tokenizer,
)
trainer.train()
trainer.save_model("test-squad-trained")
```
```python
# Evaluation
import torch

# Grab the first evaluation batch and run the model on it.
for batch in trainer.get_eval_dataloader():
    break
batch = {k: v.to(trainer.args.device) for k, v in batch.items()}
with torch.no_grad():
    output = trainer.model(**batch)
output.keys()

output.start_logits.shape, output.end_logits.shape
output.start_logits.argmax(dim=-1), output.end_logits.argmax(dim=-1)

n_best_size = 20

import numpy as np
start_logits = output.start_logits[0].cpu().numpy()
end_logits = output.end_logits[0].cpu().numpy()
# Gather the indices of the best start/end logits:
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
valid_answers = []
for start_index in start_indexes:
    for end_index in end_indexes:
        if start_index <= end_index:  # We need to refine that test to check the answer is inside the context
            valid_answers.append(
                {
                    "score": start_logits[start_index] + end_logits[end_index],
                    "text": ""  # We need to find a way to get back the original substring corresponding to the answer in the context
                }
            )
```
```python
def prepare_validation_features(examples):
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples["question" if pad_on_right else "context"],
        examples["context" if pad_on_right else "question"],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_length,
        stride=doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # We keep the example_id that gave us this feature and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples

validation_features = datasets["validation"].map(
    prepare_validation_features,
    batched=True,
    remove_columns=datasets["validation"].column_names
)
raw_predictions = trainer.predict(validation_features)
validation_features.set_format(type=validation_features.format["type"], columns=list(validation_features.features.keys()))

max_answer_length = 30

start_logits = output.start_logits[0].cpu().numpy()
end_logits = output.end_logits[0].cpu().numpy()
offset_mapping = validation_features[0]["offset_mapping"]
# The first feature comes from the first example. For the more general case, we will need to match the
# example_id to an example index.
context = datasets["validation"][0]["context"]

# Gather the indices of the best start/end logits:
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
valid_answers = []
for start_index in start_indexes:
    for end_index in end_indexes:
        # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
        # to part of the input_ids that are not in the context.
        if (
            start_index >= len(offset_mapping)
            or end_index >= len(offset_mapping)
            or offset_mapping[start_index] is None
            or offset_mapping[end_index] is None
        ):
            continue
        # Don't consider answers with a length that is either < 0 or > max_answer_length.
        if end_index < start_index or end_index - start_index + 1 > max_answer_length:
            continue
        start_char = offset_mapping[start_index][0]
        end_char = offset_mapping[end_index][1]
        valid_answers.append(
            {
                "score": start_logits[start_index] + end_logits[end_index],
                "text": context[start_char: end_char]
            }
        )

valid_answers = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[:n_best_size]
valid_answers
```
```python
import collections

examples = datasets["validation"]
features = validation_features

example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
    features_per_example[example_id_to_index[feature["example_id"]]].append(i)

from tqdm.auto import tqdm

def postprocess_qa_predictions(examples, features, raw_predictions, n_best_size=20, max_answer_length=30):
    all_start_logits, all_end_logits = raw_predictions
    # Build a map from example to its corresponding features.
    example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
    features_per_example = collections.defaultdict(list)
    for i, feature in enumerate(features):
        features_per_example[example_id_to_index[feature["example_id"]]].append(i)

    # The dictionaries we have to fill.
    predictions = collections.OrderedDict()

    # Logging.
    print(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")

    # Let's loop over all the examples!
    for example_index, example in enumerate(tqdm(examples)):
        # Those are the indices of the features associated to the current example.
        feature_indices = features_per_example[example_index]

        min_null_score = None  # Only used if squad_v2 is True.
        valid_answers = []

        context = example["context"]
        # Looping through all the features associated to the current example.
        for feature_index in feature_indices:
            # We grab the predictions of the model for this feature.
            start_logits = all_start_logits[feature_index]
            end_logits = all_end_logits[feature_index]
            # This is what will allow us to map some of the positions in our logits to spans of text in the
            # original context.
            offset_mapping = features[feature_index]["offset_mapping"]

            # Update minimum null prediction.
            cls_index = features[feature_index]["input_ids"].index(tokenizer.cls_token_id)
            feature_null_score = start_logits[cls_index] + end_logits[cls_index]
            if min_null_score is None or min_null_score < feature_null_score:
                min_null_score = feature_null_score

            # Go through all possibilities for the `n_best_size` greater start and end logits.
            start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
            end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Don't consider out-of-scope answers, either because the indices are out of bounds or
                    # correspond to part of the input_ids that are not in the context.
                    if (
                        start_index >= len(offset_mapping)
                        or end_index >= len(offset_mapping)
                        or offset_mapping[start_index] is None
                        or offset_mapping[end_index] is None
                    ):
                        continue
                    # Don't consider answers with a length that is either < 0 or > max_answer_length.
                    if end_index < start_index or end_index - start_index + 1 > max_answer_length:
                        continue

                    start_char = offset_mapping[start_index][0]
                    end_char = offset_mapping[end_index][1]
                    valid_answers.append(
                        {
                            "score": start_logits[start_index] + end_logits[end_index],
                            "text": context[start_char: end_char]
                        }
                    )

        if len(valid_answers) > 0:
            best_answer = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[0]
        else:
            # In the very rare edge case we have not a single non-null prediction, we create a fake prediction
            # to avoid failure.
            best_answer = {"text": "", "score": 0.0}

        # Let's pick our final answer: the best one or the null answer (only for squad_v2).
        if not squad_v2:
            predictions[example["id"]] = best_answer["text"]
        else:
            answer = best_answer["text"] if best_answer["score"] > min_null_score else ""
            predictions[example["id"]] = answer

    return predictions

final_predictions = postprocess_qa_predictions(datasets["validation"], validation_features, raw_predictions.predictions)

metric = load_metric("squad_v2" if squad_v2 else "squad")
if squad_v2:
    formatted_predictions = [{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in final_predictions.items()]
else:
    formatted_predictions = [{"id": k, "prediction_text": v} for k, v in final_predictions.items()]
references = [{"id": ex["id"], "answers": ex["answers"]} for ex in datasets["validation"]]
metric.compute(predictions=formatted_predictions, references=references)
```
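
For ad-hoc questions, the pipeline API wraps all of the above striding and span decoding. A minimal sketch, assuming the fine-tuned `trainer.model` and `tokenizer` (the question and context are made up for illustration):

```python
from transformers import pipeline

# The pipeline handles tokenization, striding, and span post-processing internally.
qa = pipeline("question-answering", model=trainer.model, tokenizer=tokenizer)
qa(question="Where is the Eiffel Tower?",
   context="The Eiffel Tower is a wrought-iron lattice tower located in Paris, France.")
```
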

Text Classification

```python
GLUE_TASKS = ["cola", "mnli", "mnli-mm", "mrpc", "qnli", "qqp", "rte", "sst2", "stsb", "wnli"]
task = "cola"
model_checkpoint = "H:\\code\\Model\\distilbert-base-cased\\"
batch_size = 16

from datasets import load_dataset, load_metric
actual_task = "mnli" if task == "mnli-mm" else task
dataset = load_dataset("glue", actual_task)
metric = load_metric('glue', actual_task)

import datasets
import random
import pandas as pd
from IPython.display import display, HTML

def show_random_elements(dataset, num_examples=10):
    assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
    picks = []
    for _ in range(num_examples):
        pick = random.randint(0, len(dataset)-1)
        while pick in picks:
            pick = random.randint(0, len(dataset)-1)
        picks.append(pick)
    df = pd.DataFrame(dataset[picks])
    for column, typ in dataset.features.items():
        if isinstance(typ, datasets.ClassLabel):
            df[column] = df[column].transform(lambda i: typ.names[i])
    display(HTML(df.to_html()))

show_random_elements(dataset["train"])

import numpy as np

fake_preds = np.random.randint(0, 2, size=(64,))
fake_labels = np.random.randint(0, 2, size=(64,))
metric.compute(predictions=fake_preds, references=fake_labels)

# Preprocess the data
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)
tokenizer("Hello, this one sentence!", "And this sentence goes with it.")

task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mnli-mm": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}
sentence1_key, sentence2_key = task_to_keys[task]
if sentence2_key is None:
    print(f"Sentence: {dataset['train'][0][sentence1_key]}")
else:
    print(f"Sentence 1: {dataset['train'][0][sentence1_key]}")
    print(f"Sentence 2: {dataset['train'][0][sentence2_key]}")

def preprocess_function(examples):
    if sentence2_key is None:
        return tokenizer(examples[sentence1_key], truncation=True)
    return tokenizer(examples[sentence1_key], examples[sentence2_key], truncation=True)

preprocess_function(dataset['train'][:5])
encoded_dataset = dataset.map(preprocess_function, batched=True)

# Fine-tune the model
from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer

num_labels = 3 if task.startswith("mnli") else 1 if task == "stsb" else 2
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels)

metric_name = "pearson" if task == "stsb" else "matthews_correlation" if task == "cola" else "accuracy"
args = TrainingArguments(
    "test-glue",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=5,
    weight_decay=0.01,
    load_best_model_at_end=True,
    metric_for_best_model=metric_name,
)

def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    if task != "stsb":
        predictions = np.argmax(predictions, axis=1)
    else:
        predictions = predictions[:, 0]
    return metric.compute(predictions=predictions, references=labels)

validation_key = "validation_mismatched" if task == "mnli-mm" else "validation_matched" if task == "mnli" else "validation"
trainer = Trainer(
    model,
    args,
    train_dataset=encoded_dataset["train"],
    eval_dataset=encoded_dataset[validation_key],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)
trainer.train()
trainer.evaluate()
```
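
Before moving on to hyperparameter search, here is a minimal inference sketch, assuming the fine-tuned `trainer` and `tokenizer` above (the sentence is arbitrary; for CoLA, label 1 means linguistically acceptable and 0 unacceptable):

```python
import torch

# Classify one new sentence with the fine-tuned model.
inputs = tokenizer("This sentence reads perfectly well.", return_tensors="pt")
inputs = {k: v.to(trainer.model.device) for k, v in inputs.items()}
with torch.no_grad():
    logits = trainer.model(**inputs).logits
print(logits.argmax(dim=-1).item())  # CoLA: 1 = acceptable, 0 = unacceptable
```
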
```python
# Hyperparameter search (requires a backend such as optuna or Ray Tune to be installed)
def model_init():
    return AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels)

trainer = Trainer(
    model_init=model_init,
    args=args,
    train_dataset=encoded_dataset["train"],
    eval_dataset=encoded_dataset[validation_key],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)
best_run = trainer.hyperparameter_search(n_trials=10, direction="maximize")
best_run

# Apply the best hyperparameters back to the trainer and retrain.
for n, v in best_run.hyperparameters.items():
    setattr(trainer.args, n, v)
trainer.train()
```
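
By default, `hyperparameter_search` explores a built-in space over learning rate, number of epochs, seed, and batch size. If you want to constrain it, you can pass a custom space; a sketch for the optuna backend (the ranges below are illustrative assumptions, not values from this post):

```python
# Custom optuna search space (requires `pip install optuna`).
def hp_space(trial):
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-5, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 2, 5),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [8, 16, 32]),
    }

best_run = trainer.hyperparameter_search(hp_space=hp_space, n_trials=10, direction="maximize")
```
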

Named Entity Recognition

```python
task = "ner"  # Should be one of "ner", "pos" or "chunk"
model_checkpoint = "H:\\code\\Model\\distilbert-base-cased\\"
batch_size = 16

from datasets import load_dataset, load_metric
datasets = load_dataset("conll2003")
datasets

label_list = datasets["train"].features[f"{task}_tags"].feature.names
label_list

from datasets import ClassLabel, Sequence
import random
import pandas as pd
from IPython.display import display, HTML

def show_random_elements(dataset, num_examples=10):
    assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
    picks = []
    for _ in range(num_examples):
        pick = random.randint(0, len(dataset)-1)
        while pick in picks:
            pick = random.randint(0, len(dataset)-1)
        picks.append(pick)
    df = pd.DataFrame(dataset[picks])
    for column, typ in dataset.features.items():
        if isinstance(typ, ClassLabel):
            df[column] = df[column].transform(lambda i: typ.names[i])
        elif isinstance(typ, Sequence) and isinstance(typ.feature, ClassLabel):
            df[column] = df[column].transform(lambda x: [typ.feature.names[i] for i in x])
    display(HTML(df.to_html()))

show_random_elements(datasets["train"])

# Preprocess the data
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)

import transformers
assert isinstance(tokenizer, transformers.PreTrainedTokenizerFast)

tokenizer("Hello, this is one sentence!")

example = datasets["train"][4]
print(example["tokens"])

tokenized_input = tokenizer(example["tokens"], is_split_into_words=True)
tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"])
print(tokens)

word_ids = tokenized_input.word_ids()
aligned_labels = [-100 if i is None else example[f"{task}_tags"][i] for i in word_ids]
print(len(aligned_labels), len(tokenized_input["input_ids"]))

label_all_tokens = True

def tokenize_and_align_labels(examples):
    tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True)

    labels = []
    for i, label in enumerate(examples[f"{task}_tags"]):
        word_ids = tokenized_inputs.word_ids(batch_index=i)
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:
            # Special tokens have a word id that is None. We set the label to -100 so they are automatically
            # ignored in the loss function.
            if word_idx is None:
                label_ids.append(-100)
            # We set the label for the first token of each word.
            elif word_idx != previous_word_idx:
                label_ids.append(label[word_idx])
            # For the other tokens in a word, we set the label to either the current label or -100, depending on
            # the label_all_tokens flag.
            else:
                label_ids.append(label[word_idx] if label_all_tokens else -100)
            previous_word_idx = word_idx
        labels.append(label_ids)

    tokenized_inputs["labels"] = labels
    return tokenized_inputs

tokenize_and_align_labels(datasets['train'][:5])
tokenized_datasets = datasets.map(tokenize_and_align_labels, batched=True)

# Fine-tune the model
from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer
model = AutoModelForTokenClassification.from_pretrained(model_checkpoint, num_labels=len(label_list))

args = TrainingArguments(
    f"test-{task}",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=3,
    weight_decay=0.01,
)

from transformers import DataCollatorForTokenClassification
data_collator = DataCollatorForTokenClassification(tokenizer)

metric = load_metric("seqeval")
labels = [label_list[i] for i in example[f"{task}_tags"]]
metric.compute(predictions=[labels], references=[labels])

import numpy as np

def compute_metrics(p):
    predictions, labels = p
    predictions = np.argmax(predictions, axis=2)

    # Remove ignored index (special tokens)
    true_predictions = [
        [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    true_labels = [
        [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]

    results = metric.compute(predictions=true_predictions, references=true_labels)
    return {
        "precision": results["overall_precision"],
        "recall": results["overall_recall"],
        "f1": results["overall_f1"],
        "accuracy": results["overall_accuracy"],
    }

trainer = Trainer(
    model,
    args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)
trainer.train()
trainer.evaluate()

predictions, labels, _ = trainer.predict(tokenized_datasets["validation"])
predictions = np.argmax(predictions, axis=2)

# Remove ignored index (special tokens)
true_predictions = [
    [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
    for prediction, label in zip(predictions, labels)
]
true_labels = [
    [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
    for prediction, label in zip(predictions, labels)
]
results = metric.compute(predictions=true_predictions, references=true_labels)
results
```
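
Finally, a minimal tagging sketch, assuming the fine-tuned `trainer`, `tokenizer`, and `label_list` above (the sentence is arbitrary):

```python
import torch

# Tag each token of a new sentence with the fine-tuned model.
sentence = "Hugging Face is based in New York City."
inputs = tokenizer(sentence, return_tensors="pt").to(trainer.model.device)
with torch.no_grad():
    logits = trainer.model(**inputs).logits
pred_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, pred in zip(tokens, pred_ids):
    print(f"{token}\t{label_list[pred]}")
```
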

 
