赞
踩
# %matplotlib inline  -- notebook magic; commented out so this runs as a plain .py
import math
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

# Mini-batch size and length (in tokens) of each training subsequence.
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

# Demo: a token index turns into a vocab-sized one-hot row vector.
F.one_hot(torch.tensor([0, 2]), len(vocab))

# Demo: a (batch, steps) index matrix is transposed to (steps, batch) first,
# so one-hot encoding yields shape (steps, batch, vocab_size) — here 28 is a
# stand-in vocabulary size.
X = torch.arange(10).reshape((2, 5))
F.one_hot(X.T, 28).shape
def get_params(vocab_size, num_hiddens, device):
    """Allocate the learnable parameters of a from-scratch RNN language model.

    Input and output widths both equal ``vocab_size`` because tokens enter as
    one-hot vectors and leave as per-token logits.

    Args:
        vocab_size: size of the vocabulary (input and output dimension).
        num_hiddens: width of the hidden state.
        device: torch device on which to allocate the tensors.

    Returns:
        list of tensors ``[W_xh, W_hh, b_h, W_hq, b_q]``, each with
        ``requires_grad`` enabled.
    """
    num_inputs = num_outputs = vocab_size

    def scaled_gaussian(shape):
        # Small (std 0.01) random init keeps early activations well-behaved.
        return torch.randn(size=shape, device=device) * 0.01

    # Allocation order matters only for RNG reproducibility; it matches the
    # conventional listing: hidden-layer weights/bias, then output layer.
    tensors = (
        scaled_gaussian((num_inputs, num_hiddens)),    # W_xh: input -> hidden
        scaled_gaussian((num_hiddens, num_hiddens)),   # W_hh: hidden -> hidden
        torch.zeros(num_hiddens, device=device),       # b_h:  hidden bias
        scaled_gaussian((num_hiddens, num_outputs)),   # W_hq: hidden -> output
        torch.zeros(num_outputs, device=device),       # b_q:  output bias
    )
    # Mark every parameter as trainable; requires_grad_ returns the tensor,
    # so the comprehension both flags and collects them.
    return [t.requires_grad_(True) for t in tensors]
These notes will be supplemented over time as study progresses.
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。