当前位置:   article > 正文

(4-7)文本分类与情感分析算法:递归神经网络(2)_使用神经网络实现文本情感分类

使用神经网络实现文本情感分类

(3)编写文件Continuous-RvNN-main/classifier/models/encoders/FOCN_LSTM.py,定义了一个名为 FOCN_LSTM 的 PyTorch 模型,这是一个基于注意力机制和循环神经网络的连续递归序列编码模型。它通过对序列数据进行递归组合和注意力机制来捕捉序列中的信息,并且可以通过对惩罚项的优化来控制模型的递归过程。这是一个比较复杂的模型,用于处理序列编码等任务,具体的用途和效果可能需要根据具体的应用场景和数据进行调整和评估。文件FOCN_LSTM.py的具体实现流程如下所示。

  1. 构造函数 __init__ 初始化了模型的各种参数和模块。这些参数包括隐藏状态的大小、窗口大小、阈值等。具体实现代码如下所示:
def __init__(self, config):
    """Build the FOCN-LSTM encoder.

    Args:
        config (dict): hyper-parameter dictionary; required keys include
            "hidden_size", "cell_hidden_size", "window_size",
            "stop_threshold", "entropy_gamma", "speed_gamma",
            "in_dropout", "hidden_dropout", "recurrent_momentum",
            "small_d" (and "no_modulation", read later in encoder_block).
    """
    super(FOCN_LSTM, self).__init__()
    self.config = config
    self.hidden_size = config["hidden_size"]  # size D of per-token hidden states
    self.cell_hidden_size = config["cell_hidden_size"]
    self.window_size = config["window_size"]  # neighborhood width fed to the scorer
    self.stop_threshold = config["stop_threshold"]  # active-prob cutoff used in the halting check
    # self.switch_threshold = config["switch_threshold"]
    self.entropy_gamma = config["entropy_gamma"]  # weight of the entropy penalty
    self.structure_gamma = 0.01  # config["structure_gamma"]  -- hard-coded; config value ignored
    self.speed_gamma = config["speed_gamma"]  # weight of the speed penalty
    self.in_dropout = config["in_dropout"]
    self.hidden_dropout = config["hidden_dropout"]  # dropout rate applied inside composer()
    self.recurrent_momentum = config["recurrent_momentum"]  # if True, feed past-transition features to the scorer
    self.small_d = config["small_d"]  # dim of the transition-feature embeddings
    # Learned START/END sentinel embeddings, spliced in by augment_sequence().
    self.START = nn.Parameter(T.randn(self.hidden_size))
    self.END = nn.Parameter(T.randn(self.hidden_size))
    if self.recurrent_momentum:
        # Extra learned features appended to the scorer input when momentum is on.
        self.past_transition_features = nn.Parameter(T.randn(self.small_d))
        self.past_non_transition_features = nn.Parameter(T.randn(self.small_d))
        self.conv_layer = Linear(self.window_size * self.hidden_size + self.small_d, self.hidden_size)
    else:
        self.conv_layer = Linear(self.window_size * self.hidden_size, self.hidden_size)
    self.scorer = Linear(self.hidden_size, 1)  # maps conv features to a scalar transition score
    # wcell0: initial (leaf) transform; wcell1: binary composition cell (5 gate slots).
    self.wcell0 = Linear(self.hidden_size, 2 * self.hidden_size,
                         true_fan_in=self.hidden_size,
                         true_fan_out=self.hidden_size)
    self.wcell1 = Linear(2 * self.hidden_size, 5 * self.hidden_size,
                         true_fan_in=self.hidden_size,
                         true_fan_out=self.hidden_size)
    # self.LN = nn.LayerNorm(self.hidden_size)
    self.eps = 1e-8  # numerical-stability constant used by sum_normalize()
  33. # %%
  34. def sum_normalize(self, logits, dim=-1):
  35. return logits / T.sum(logits + self.eps, keepdim=True, dim=dim)
  1. 方法augment_sequence()用于向输入序列添加起始和结束标记,以处理文本序列的开始和结束。具体实现代码如下所示。
  1. def augment_sequence(self, sequence, input_mask):
  2. N, S, D = sequence.size()
  3. assert input_mask.size() == (N, S, 1)
  4. """
  5. AUGMENT SEQUENCE WITH START AND END TOKENS
  6. """
  7. # ADD START TOKEN
  8. START = self.START.view(1, 1, D).repeat(N, 1, 1)
  9. sequence = T.cat([START, sequence], dim=1)
  10. assert sequence.size() == (N, S + 1, D)
  11. input_mask = T.cat([T.ones(N, 1, 1).float().to(input_mask.device), input_mask], dim=1)
  12. assert input_mask.size() == (N, S + 1, 1)
  13. # ADD END TOKEN
  14. input_mask_no_end = T.cat([input_mask.clone(), T.zeros(N, 1, 1).float().to(input_mask.device)], dim=1)
  15. input_mask_yes_end = T.cat([T.ones(N, 1, 1).float().to(input_mask.device), input_mask.clone()], dim=1)
  16. END_mask = input_mask_yes_end - input_mask_no_end
  17. assert END_mask.size() == (N, S + 2, 1)
  18. END = self.END.view(1, 1, D).repeat(N, S + 2, 1)
  19. sequence = T.cat([sequence, T.zeros(N, 1, D).float().to(sequence.device)], dim=1)
  20. sequence = END_mask * END + (1 - END_mask) * sequence
  21. input_mask = input_mask_yes_end
  22. input_mask_no_start = T.cat([T.zeros(N, 1, 1).float().to(input_mask.device),
  23. input_mask[:, 1:, :]], dim=1)
  24. return sequence, input_mask, END_mask, input_mask_no_start, input_mask_no_end
  1. 方法compute_neighbor_probs()用于计算相邻单词之间的概率,该概率用于生成窗口。具体实现代码如下所示。
def compute_neighbor_probs(self, active_probs, input_mask):
    """For every position, compute a soft distribution over which other
    position is its nearest *active* neighbor to the left / right.

    The right-neighbor distribution is built directly; the left-neighbor
    one reuses the same rightward machinery on a flipped copy (both
    directions are processed together via a stacked dim of size 2).

    Args:
        active_probs: (N, S, 1) probability each position is still active.
        input_mask: (N, S, 1) 1/0 mask of valid positions.

    Returns:
        left_neighbor_probs, right_neighbor_probs: (N, S, S) matrices,
        usable with T.matmul to gather expected neighbor states.
    """
    N, S, _ = input_mask.size()
    assert input_mask.size() == (N, S, 1)
    input_mask = input_mask.permute(0, 2, 1).contiguous()
    assert input_mask.size() == (N, 1, S)
    assert active_probs.size() == (N, S, 1)
    active_probs = active_probs.permute(0, 2, 1).contiguous()
    assert active_probs.size() == (N, 1, S)
    # Flipped copies turn "nearest left neighbor" into the same problem as
    # "nearest right neighbor" for the cumulative-sum logic below.
    input_mask_flipped = T.flip(input_mask.clone(), dims=[2])
    active_probs_flipped = T.flip(active_probs.clone(), dims=[2])
    input_mask = T.stack([input_mask_flipped, input_mask], dim=1)
    active_probs = T.stack([active_probs_flipped, active_probs], dim=1)
    assert input_mask.size() == (N, 2, 1, S)
    assert active_probs.size() == (N, 2, 1, S)
    active_probs_matrix = active_probs.repeat(1, 1, S, 1) * input_mask
    assert active_probs_matrix.size() == (N, 2, S, S)
    right_probs_matrix = T.triu(active_probs_matrix, diagonal=1)  # mask self and left
    right_probs_matrix_cumsum = T.cumsum(right_probs_matrix, dim=-1)
    assert right_probs_matrix_cumsum.size() == (N, 2, S, S)
    # Stick-breaking style truncation: once the cumulative active mass to
    # the right exceeds 1, each further position only claims the leftover
    # mass (clamped at 0), so each row stays a bounded distribution.
    remainders = 1.0 - right_probs_matrix_cumsum
    remainders_from_left = T.cat([T.ones(N, 2, S, 1).float().to(remainders.device), remainders[:, :, :, 0:-1]],
                                 dim=-1)
    assert remainders_from_left.size() == (N, 2, S, S)
    remainders_from_left = T.max(T.zeros(N, 2, S, 1).float().to(remainders.device), remainders_from_left)
    assert remainders_from_left.size() == (N, 2, S, S)
    right_neighbor_probs = T.where(right_probs_matrix_cumsum > 1.0,
                                   remainders_from_left,
                                   right_probs_matrix)
    right_neighbor_probs = right_neighbor_probs * input_mask
    # Index 0 of the stacked dim holds the flipped (left-direction) result;
    # flip it back to the original orientation.
    left_neighbor_probs = right_neighbor_probs[:, 0, :, :]
    left_neighbor_probs = T.flip(left_neighbor_probs, dims=[1, 2])
    right_neighbor_probs = right_neighbor_probs[:, 1, :, :]
    return left_neighbor_probs, right_neighbor_probs
  1. 方法make_window()用于生成一个窗口,包括了相邻单词的信息。具体实现代码如下所示。
  1. def make_window(self, sequence, left_child_probs, right_child_probs):
  2. N, S, D = sequence.size()
  3. left_children_list = []
  4. right_children_list = []
  5. left_children_k = sequence.clone()
  6. right_children_k = sequence.clone()
  7. for k in range(self.window_size // 2):
  8. left_children_k = T.matmul(left_child_probs, left_children_k)
  9. left_children_list = [left_children_k.clone()] + left_children_list
  10. right_children_k = T.matmul(right_child_probs, right_children_k)
  11. right_children_list = right_children_list + [right_children_k.clone()]
  12. windowed_sequence = left_children_list + [sequence] + right_children_list
  13. windowed_sequence = T.stack(windowed_sequence, dim=-2)
  14. assert windowed_sequence.size() == (N, S, self.window_size, D)
  15. return windowed_sequence
  1. 方法initial_transform()进行初始变换,准备用于模型的初始输入。具体实现代码如下所示。
  1. # %%
  2. def initial_transform(self, sequence):
  3. N, S, D = sequence.size()
  4. contents = self.wcell0(sequence)
  5. contents = contents.view(N, S, 2, D)
  6. o = T.sigmoid(contents[:, :, 0, :])
  7. cell = T.tanh(contents[:, :, 1, :])
  8. transition = o * T.tanh(cell)
  9. return transition, cell
  1. 方法score_fn()用于计算窗口内各个位置的分数,具体实现代码如下所示。
  1. def score_fn(self, windowed_sequence, transition_feats):
  2. N, S, W, D = windowed_sequence.size()
  3. windowed_sequence = windowed_sequence.view(N, S, W * D)
  4. if self.recurrent_momentum:
  5. windowed_sequence = T.cat([windowed_sequence, transition_feats], dim=-1)
  6. scores = self.scorer(gelu(self.conv_layer(windowed_sequence)))
  7. transition_scores = scores[:, :, 0].unsqueeze(-1)
  8. # reduce_probs = T.sigmoid(scores[:,:,1].unsqueeze(-1))
  9. no_op_scores = T.zeros_like(transition_scores).float().to(transition_scores.device)
  10. scores = T.cat([transition_scores, no_op_scores], dim=-1)
  11. scores = scores / self.temperature
  12. max_score = T.max(scores)
  13. exp_scores = T.exp(scores - max_score)
  14. return exp_scores
  1. 方法composer()用于将两个子节点的信息组合成一个新的节点信息,具体实现代码如下所示。
  1. def composer(self, child1, child2, cell_child1, cell_child2):
  2. N, S, D = child1.size()
  3. concated = T.cat([child1, child2], dim=-1)
  4. assert concated.size() == (N, S, 2 * D)
  5. contents = F.dropout(self.wcell1(concated), p=self.hidden_dropout, training=self.training)
  6. contents = contents.view(N, S, 5, D)
  7. gates = T.sigmoid(contents[:, :, 0:4, :])
  8. u = T.tanh(contents[:, :, 4, :])
  9. f1 = gates[..., 0, :]
  10. f2 = gates[..., 1, :]
  11. i = gates[..., 2, :]
  12. o = gates[..., 3, :]
  13. cell = f1 * cell_child1 + f2 * cell_child2 + i * u
  14. transition = o * T.tanh(cell)
  15. return transition, cell
  1. 方法compute_entropy_penalty()用于计算熵惩罚,以鼓励模型把剩余的激活概率集中到序列末尾,从而正确地终止递归。具体实现代码如下所示。
  1. def compute_entropy_penalty(self, active_probs, last_token_mask):
  2. N, S = active_probs.size()
  3. active_prob_dist = self.sum_normalize(active_probs, dim=-1)
  4. nll_loss = - T.log(T.sum(last_token_mask * active_prob_dist, dim=1) + self.eps)
  5. nll_loss = nll_loss.view(N)
  6. return nll_loss
  1. 方法compute_speed_penalty()用于计算速度惩罚,以鼓励模型更快地停止生成。具体实现代码如下所示。
  1.     def compute_speed_penalty(self, steps, input_mask):
  2.         steps = T.max(steps, dim=1)[0]
  3.         speed_penalty = steps.squeeze(-1) / (T.sum(input_mask.squeeze(-1), dim=1) - 2.0)
  4.         return speed_penalty
  1. 方法encoder_block()实现了编码器的主要逻辑,包括了循环的生成和停止条件的判定。具体实现代码如下所示。
def encoder_block(self, sequence, input_mask):
    """Run the continuous-recursion encoder over one padded batch.

    Loop (at most S-2 times): estimate, per position, the probability of
    composing with its nearest active left neighbor, apply the soft
    composition, and decay the positions' active probabilities. A batch
    element freezes (via update_mask) once few enough positions remain
    discretely active or its length budget is exhausted.

    Args:
        sequence: (N, S, D) input token states.
        input_mask: (N, S, 1) validity mask.

    Returns:
        sequence: (N, S, D) encoded states (START/END stripped).
        global_state: (N, D) state gathered at each sequence's last token.
        penalty: (N,) entropy + speed regularization term.
    """
    sequence, input_mask, END_mask, \
    input_mask_no_start, input_mask_no_end = self.augment_sequence(sequence, input_mask)
    N, S, D = sequence.size()
    """
    Initial Preparations
    """
    # Every real position starts fully active.
    active_probs = T.ones(N, S, 1).float().to(sequence.device) * input_mask
    steps = T.zeros(N, S, 1).float().to(sequence.device)
    zeros_sequence = T.zeros(N, 1, 1).float().to(sequence.device)
    # Mask of each sequence's last *content* token (END mask shifted left).
    last_token_mask = T.cat([END_mask[:, 1:, :], zeros_sequence], dim=1)
    # Positions allowed to transition: not START, END, last token, or pad.
    START_END_LAST_PAD_mask = input_mask_no_start * input_mask_no_end * (1.0 - last_token_mask)
    self.START_END_LAST_PAD_mask = START_END_LAST_PAD_mask
    halt_ones = T.ones(N).float().to(sequence.device)
    halt_zeros = T.zeros(N).float().to(sequence.device)
    improperly_terminated_mask = halt_ones.clone()
    update_mask = T.ones(N).float().to(sequence.device)  # 1 while a batch element still updates
    left_transition_probs = T.zeros(N, S, 1).float().to(sequence.device)
    """
    Initial Transform
    """
    sequence, cell_sequence = self.initial_transform(sequence)
    sequence = sequence * input_mask
    cell_sequence = cell_sequence * input_mask
    """
    Start Recursion
    """
    t = 0
    while t < (S - 2):
        # Snapshot state so frozen batch elements can be rolled back below.
        original_active_probs = active_probs.clone()
        original_sequence = sequence.clone()
        residual_sequence = sequence.clone()
        residual_cell_sequence = cell_sequence.clone()
        original_steps = steps.clone()
        original_cell_sequence = cell_sequence.clone()
        left_neighbor_probs, right_neighbor_probs \
            = self.compute_neighbor_probs(active_probs=active_probs.clone(),
                                          input_mask=input_mask.clone())
        windowed_sequence = self.make_window(sequence=sequence,
                                             left_child_probs=left_neighbor_probs,
                                             right_child_probs=right_neighbor_probs)
        if self.recurrent_momentum:
            # Blend learned features by last round's transition probability.
            transition_feats = left_transition_probs * self.past_transition_features.view(1, 1, -1) \
                + (1 - left_transition_probs) * self.past_non_transition_features.view(1, 1, -1)
        else:
            transition_feats = None
        exp_scores = self.score_fn(windowed_sequence, transition_feats)
        exp_transition_scores = exp_scores[:, :, 0].unsqueeze(-1)
        exp_no_op_scores = exp_scores[:, :, 1].unsqueeze(-1)
        # Forbid transitions at START/END/last/pad positions.
        exp_transition_scores = exp_transition_scores * START_END_LAST_PAD_mask
        if self.config["no_modulation"] is True:
            exp_scores = T.cat([exp_transition_scores,
                                exp_no_op_scores], dim=-1)
        else:
            # Modulation: normalize each position's score against its
            # neighbors' transition scores as competing alternatives.
            exp_left_transition_scores = T.matmul(left_neighbor_probs, exp_transition_scores)
            exp_right_transition_scores = T.matmul(right_neighbor_probs, exp_transition_scores)
            exp_scores = T.cat([exp_transition_scores,
                                exp_no_op_scores,
                                exp_left_transition_scores,
                                exp_right_transition_scores], dim=-1)
        normalized_scores = self.sum_normalize(exp_scores, dim=-1)
        transition_probs = normalized_scores[:, :, 0].unsqueeze(-1)
        transition_probs = transition_probs * START_END_LAST_PAD_mask
        # Probability that this position's *right* neighbor transitions
        # into it (gathered through the left-neighbor matrix).
        left_transition_probs = T.matmul(left_neighbor_probs, transition_probs)
        left_transition_probs = left_transition_probs * input_mask_no_start * input_mask_no_end
        # Nearest-left state comes straight out of the window's center-1 slot.
        left_sequence = windowed_sequence[:, :, self.window_size // 2 - 1, 0:self.hidden_size]
        left_cell_sequence = T.matmul(left_neighbor_probs, cell_sequence)
        transition_sequence, transition_cell_sequence = self.composer(child1=left_sequence,
                                                                     child2=sequence,
                                                                     cell_child1=left_cell_sequence,
                                                                     cell_child2=cell_sequence)
        transition_sequence = transition_sequence * input_mask
        transition_cell_sequence = transition_cell_sequence * input_mask
        # Soft update: interpolate composed vs. unchanged state.
        tp = left_transition_probs
        sequence = tp * transition_sequence + (1 - tp) * residual_sequence
        sequence = sequence * input_mask
        cell_sequence = tp * transition_cell_sequence + (1 - tp) * residual_cell_sequence
        cell_sequence = cell_sequence * input_mask
        steps = steps + active_probs
        bounded_probs = transition_probs
        # Positions that transitioned lose active probability.
        active_probs = active_probs * (1.0 - bounded_probs) * input_mask
        # Roll back all state for batch elements whose update_mask is 0.
        active_probs = T.where(update_mask.view(N, 1, 1).expand(N, S, 1) == 1.0,
                               active_probs,
                               original_active_probs)
        steps = T.where(update_mask.view(N, 1, 1).expand(N, S, 1) == 1.0,
                        steps,
                        original_steps)
        sequence = T.where(update_mask.view(N, 1, 1).expand(N, S, D) == 1.0,
                           sequence,
                           original_sequence)
        cell_sequence = T.where(update_mask.view(N, 1, 1).expand(N, S, D) == 1.0,
                                cell_sequence,
                                original_cell_sequence)
        t += 1
        # Discretize activity to decide halting.
        discrete_active_status = T.where(active_probs > self.stop_threshold,
                                         T.ones_like(active_probs).to(active_probs.device),
                                         T.zeros_like(active_probs).to(active_probs.device))
        # -2.0 discounts the always-active START and END sentinels.
        halt_condition_component = T.sum(discrete_active_status.squeeze(-1), dim=1) - 2.0
        update_mask = T.where((halt_condition_component <= 1) | (T.sum(input_mask.squeeze(-1), dim=-1) - 2.0 < t),
                              halt_zeros,
                              halt_ones)
        # Proper termination: exactly one active position left AND it is
        # the last real token.
        proper_termination_condition = T.sum(discrete_active_status * last_token_mask, dim=1).squeeze(-1)
        improperly_terminated_mask_ = T.where((halt_condition_component == 1) & (proper_termination_condition == 1),
                                              halt_zeros,
                                              halt_ones)
        improperly_terminated_mask = improperly_terminated_mask * improperly_terminated_mask_
        if T.sum(update_mask) == 0.0:
            break
    steps = steps * START_END_LAST_PAD_mask
    sequence = sequence * (1 - END_mask)
    active_probs = active_probs * (1 - END_mask)
    sequence = sequence[:, 1:-1, :]  # remove START and END
    active_probs = active_probs[:, 1:-1, :]  # remove START and END
    last_token_mask = END_mask[:, 2:, :]
    # Global sentence state = state at the last real token.
    global_state = T.sum(sequence * last_token_mask, dim=1)
    assert active_probs.size(1) == sequence.size(1)
    entropy_penalty = self.compute_entropy_penalty(active_probs.squeeze(-1),
                                                   last_token_mask.squeeze(-1))
    speed_penalty = self.compute_speed_penalty(steps, input_mask)
    # Only improperly terminated elements pay the entropy penalty.
    entropy_penalty = entropy_penalty * improperly_terminated_mask
    penalty = self.entropy_gamma * entropy_penalty + self.speed_gamma * speed_penalty
    return sequence, global_state, penalty
  1. 方法forward()定义了前向传播,将输入的序列和输入掩码传递给编码器并返回编码后的序列、惩罚和全局状态。具体实现代码如下所示。
  1. def forward(self, sequence, input_mask, **kwargs):
  2. if "temperature" in kwargs:
  3. self.temperature = kwargs["temperature"]
  4. else:
  5. self.temperature = 1.0
  6. self.temperature = 1.0 if self.temperature is None else self.temperature
  7. input_mask = input_mask.unsqueeze(-1)
  8. sequence = sequence * input_mask
  9. sequence, global_state, penalty = self.encoder_block(sequence, input_mask)
  10. sequence = sequence * input_mask
  11. return {"sequence": sequence, "penalty": penalty, "global_state": global_state}

(4)编写文件Continuous-RvNN-main/classifier/hypertrain.py,功能是使用 Hyperopt 库进行超参数搜索,在给定的搜索空间内,通过超参数搜索来寻找模型的最佳配置,以提高模型性能。超参数是机器学习模型的配置参数,它们不是通过训练得到的,而需要手动调整以获得最佳性能。文件hypertrain.py的具体实现代码如下所示。

def blockPrint():
    """Silence stdout by redirecting it to os.devnull.

    NOTE(review): each call opens a new devnull handle that is never
    closed — harmless at this call frequency, but a leak in a tight loop.
    """
    sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
    """Restore stdout to the interpreter's original stream."""
    sys.stdout = sys.__stdout__
# Parse CLI arguments and load the hyper-parameter search space.
parser = get_args()
args = parser.parse_args()
search_space, config_processor = load_hyperconfig(args)
print(search_space)
# Wrap every candidate list in a hyperopt categorical choice.
hp_search_space = {}
for key, val in search_space.items():
    hp_search_space[key] = hp.choice(key, val)
space_keys = [k for k in search_space]
# Paths for the best-config text dump and the resumable trials pickle.
hyperopt_config_path = Path("hypertune/tuned_configs/{}_{}.txt".format(args.model, args.dataset))
hyperopt_checkpoint_path = Path("hypertune/checkpoints/{}_{}.pkl".format(args.model, args.dataset))
Path('hypertune/checkpoints/').mkdir(parents=True, exist_ok=True)
Path('hypertune/tuned_configs/').mkdir(parents=True, exist_ok=True)
if args.hypercheckpoint:
    # Resume a previous search from its pickled state.
    with open(hyperopt_checkpoint_path, "rb") as fp:
        data = pickle.load(fp)
    trials = data["trials"]
    tried_configs = data["tried_configs"]
    true_total_trials = data["true_total_trials"]
    print("\n\nCheckpoint Loaded\n\n")
else:
    # Fresh search state.
    trials = Trials()
    tried_configs = {}  # maps config-hash string -> recorded loss
    true_total_trials = 0
  29. def generate_args_hash(args):
  30. hash = ""
  31. for key in args:
  32. hash += "{}".format(args[key])
  33. return hash
# Count of consecutive duplicate (already-tried) draws.
successive_failures = 0
max_successive_failures = 10  # NOTE(review): appears unused here; the loop below hard-codes 100000
failure_flag = False  # set by run_wrapper: True when a draw was a duplicate
  37. def run_wrapper(space):
  38. global args
  39. global tried_configs
  40. global failure_flag
  41. config = load_config(args)
  42. config["epochs"] = args.epochs
  43. hash = generate_args_hash(space)
  44. if hash not in tried_configs:
  45. print("Exploring: {}".format(space))
  46. for key in space:
  47. config[key] = space[key]
  48. config = config_processor(config)
  49. blockPrint()
  50. _, best_metric, _ = run(args, config)
  51. enablePrint()
  52. dev_score = compose_dev_metric(best_metric, args, config)
  53. tried_configs[hash] = -dev_score
  54. print("loss: {}".format(tried_configs[hash]))
  55. failure_flag = False
  56. return {'loss': -dev_score, 'status': STATUS_OK}
  57. else:
  58. #print("loss: {} (Skipped Trial)".format(tried_configs[hash]))
  59. failure_flag = True
  60. return {'loss': tried_configs[hash], 'status': STATUS_OK}
# Cap trials at the size of the finite categorical search space.
max_trials = min(args.max_trials, np.prod([len(choices) for key, choices in search_space.items()]))
save_intervals = 1  # run fmin one new evaluation at a time so we can checkpoint after each
i = len(trials.trials)  # NOTE(review): appears unused below
successive_failures = 0
while True:
    # Random search: each fmin call appends `save_intervals` evaluations
    # to the persistent `trials` object.
    best = fmin(run_wrapper,
                space=hp_search_space,
                algo=hyperopt.rand.suggest,
                trials=trials,
                max_evals=len(trials.trials) + save_intervals)
    # fmin returns choice *indices*; map them back to actual values.
    found_config = {}
    for key in best:
        found_config[key] = search_space[key][best[key]]
    if not failure_flag:
        # Genuinely new configuration: report and checkpoint everything.
        true_total_trials += 1
        print("Best Config so far: ", found_config)
        print("Total Trials: {} out of {}".format(true_total_trials, max_trials))
        print("\n\n")
        successive_failures = 0
        display_string = ""
        for key, value in found_config.items():
            display_string += "{}: {}\n".format(key, value)
        with open(hyperopt_config_path, "w") as fp:
            fp.write(display_string)
        with open(hyperopt_checkpoint_path, "wb") as fp:
            pickle.dump({"trials": trials,
                         "tried_configs": tried_configs,
                         "true_total_trials": true_total_trials}, fp)
    else:
        # Duplicate draw: count it and report occasionally.
        successive_failures += 1
        if successive_failures % 1000 == 0:
            print("Successive failures: ", successive_failures)
    if true_total_trials >= max_trials:
        break
    if successive_failures > 100000:
        print("\n\nDiscontinuing due to too many successive failures.\n\n")
        break

对上述代码的具体说明如下所示:

  1. 定义了一些辅助函数,如 blockPrint 和 enablePrint,用于禁止和启用标准输出。
  2. 从命令行参数获取配置,包括超参数搜索空间、模型和数据集等信息。
  3. 定义了一个搜索空间 hp_search_space,以及超参数搜索的配置和路径。
  4. 根据是否启用超参数搜索的检查点功能,加载先前的搜索结果或创建新的搜索记录。
  5. 定义函数generate_args_hash,用于生成超参数组合的哈希值。
  6. 设置了一些超参数搜索的参数,如最大尝试次数、保存间隔、连续失败次数等。
  7. 进入一个循环,循环中使用 Hyperopt 函数fmin来执行超参数搜索。在每次迭代中,调用 函数run_wrapper来评估当前超参数组合。
  8. 函数run_wrapper根据当前的超参数组合,加载模型配置,运行模型训练,并计算评估指标。
  9. 更新搜索结果,将找到的最佳超参数组合和性能输出到文件,并保存当前的搜索记录。
  10. 如果连续失败次数过多,或者达到最大尝试次数,结束超参数搜索。

本《文本分类与情感分析算法》专题已完结:

(4-1)文本分类与情感分析算法:朴素贝叶斯分类器-CSDN博客

(4-2)文本分类与情感分析算法:支持向量机(SVM)-CSDN博客

(4-3)文本分类与情感分析算法:随机森林(Random Forest)-CSDN博客

(4-4)文本分类与情感分析算法:卷积神经网络(CNN)-CSDN博客

(4-5)文本分类与情感分析算法:循环神经网络(RNN)-CSDN博客

(4-6)文本分类与情感分析算法:递归神经网络(1)-CSDN博客

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/煮酒与君饮/article/detail/782572
推荐阅读
  

闽ICP备14008679号