from torch.utils.tensorboard import SummaryWriter

# Create a TensorBoard writer that logs under runs/mnist_experiment_1
writer = SummaryWriter('runs/mnist_experiment_1')
...
if i % 100 == 99:  # every 100 batches, print and log to TensorBoard
    print('[%d, %5d] loss: %.3f' %
          (epoch + 1, i + 1, running_loss / 100))
    writer.add_scalar('Training Loss',
                      running_loss / 100,
                      epoch * len(trainloader) + i)
    writer.add_scalar('Accuracy',
                      correct / total,
                      epoch * len(trainloader) + i)
    running_loss = 0.0
    total = 0
    correct = 0
# Don't forget to close the SummaryWriter
writer.close()
Then start TensorBoard from a terminal: tensorboard --logdir=runs
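For context, here is a minimal self-contained sketch of the kind of training loop the snippet above is taken from. The model, dataset setup, and hyperparameters are hypothetical stand-ins (the original loop is elided above); only the names trainloader, running_loss, correct, and total are kept consistent with the snippet:

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter

# Hypothetical setup: a small MLP on MNIST
trainset = torchvision.datasets.MNIST('./data', train=True, download=True,
                                      transform=transforms.ToTensor())
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

net = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 128), nn.ReLU(),
                    nn.Linear(128, 10))
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

writer = SummaryWriter('runs/mnist_experiment_1')
running_loss, total, correct = 0.0, 0, 0

for epoch in range(2):
    for i, (inputs, labels) in enumerate(trainloader):
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Accumulate statistics for the next logging window
        running_loss += loss.item()
        total += labels.size(0)
        correct += (outputs.argmax(dim=1) == labels).sum().item()

        if i % 100 == 99:  # log every 100 batches, as in the snippet above
            step = epoch * len(trainloader) + i
            writer.add_scalar('Training Loss', running_loss / 100, step)
            writer.add_scalar('Accuracy', correct / total, step)
            running_loss, total, correct = 0.0, 0, 0

writer.close()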
The two compute the same loss: applying F.nll_loss to a log_softmax output is equivalent to calling F.cross_entropy directly on the raw logits, because cross_entropy is exactly log_softmax followed by nll_loss.
import torch
import torch.nn.functional as F

# Define an input tensor of shape (B, C): batch size 3, 5 classes
input_tensor = torch.randn(3, 5)
target = torch.LongTensor([1, 0, 2])

# Compute log-softmax over the class dimension
output_tensor = F.log_softmax(input_tensor, dim=1)
# print("Input Tensor:")
# print(input_tensor)
print("\nOutput Tensor (Log-Softmax):")
print(output_tensor)

# NLL loss on the log-probabilities
loss = F.nll_loss(output_tensor, target)
print("NLL Loss:", loss.item())

# Cross-entropy loss on the raw logits; prints the same value
loss = F.cross_entropy(input_tensor, target)
print("Cross-Entropy Loss:", loss.item())