import torch

# model and loss are assumed to be defined earlier in the article.
optimizer = torch.optim.AdamW(model.parameters(),
                              lr=1e-4, weight_decay=1e-3)

optimizer.zero_grad()  # clear gradients from the previous step
loss.backward()        # backpropagation: compute gradients
optimizer.step()       # update the model parameters
optimizer = torch.optim.AdamW(model.parameters(),
                              lr=1e-4, weight_decay=1e-3)
# Halve the learning rate when the validation Dice score stops improving
# for 5 consecutive epochs (mode="max" because a higher Dice is better).
lr_step = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode="max", factor=0.5, patience=5)
lr_step.step(val_dice)  # call once per epoch with the validation metric
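
As a rough sketch of how the two snippets above fit together in a training loop (the model, data loaders, loss, and Dice computation below are hypothetical placeholders, not part of the original code):

import torch
import torch.nn as nn

# Hypothetical placeholders so the sketch runs end to end.
model = nn.Linear(16, 1)
criterion = nn.BCEWithLogitsLoss()
train_loader = [(torch.randn(8, 16), torch.randint(0, 2, (8, 1)).float()) for _ in range(4)]
val_loader = [(torch.randn(8, 16), torch.randint(0, 2, (8, 1)).float()) for _ in range(2)]

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-3)
lr_step = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode="max", factor=0.5, patience=5)

for epoch in range(3):
    model.train()
    for x, y in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()   # backpropagation
        optimizer.step()  # parameter update

    # Validation: a simple Dice score (higher is better).
    model.eval()
    dices = []
    with torch.no_grad():
        for x, y in val_loader:
            pred = (torch.sigmoid(model(x)) > 0.5).float()
            inter = (pred * y).sum()
            dices.append((2 * inter + 1e-6) / (pred.sum() + y.sum() + 1e-6))
    val_dice = torch.stack(dices).mean().item()

    lr_step.step(val_dice)  # reduce the lr if val_dice plateaus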