# TensorFlow syntax:
out = tf.clip_by_value(input, clip_value_min, clip_value_max)
# PyTorch syntax:
out = torch.clamp(input, min, max, out=None)
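# A minimal check (assuming TensorFlow 2.x and PyTorch; the tensor values
# are made up for illustration) showing both calls clip into [0, 1]:
import tensorflow as tf
import torch

t = tf.constant([-1.0, 0.5, 2.0])
print(tf.clip_by_value(t, 0.0, 1.0))      # [0.  0.5 1. ]

p = torch.tensor([-1.0, 0.5, 2.0])
print(torch.clamp(p, min=0.0, max=1.0))   # tensor([0.0000, 0.5000, 1.0000])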
# TensorFlow syntax:
grad = tf.gradients(ys, xs)[0]
# PyTorch syntax:
grad = torch.autograd.grad(ys, xs)[0]
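# Hedged sketch: in TF 2.x, tf.gradients only works in graph mode (wrapped in
# @tf.function here), while torch.autograd.grad works on eager tensors directly.
import tensorflow as tf
import torch

@tf.function
def tf_grad(x):
    y = x ** 2
    return tf.gradients(y, x)[0]

print(tf_grad(tf.constant(3.0)))          # tf.Tensor(6.0, shape=(), dtype=float32)

x = torch.tensor(3.0, requires_grad=True)
y = x ** 2
print(torch.autograd.grad(y, x)[0])       # tensor(6.)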
# TensorFlow syntax: temporarily stops recording operations on this tape.
# Operations executed while this context manager is active are not recorded,
# which is useful for reducing the memory used by tracing all computations.
x = tf.constant(4.0)
with tf.GradientTape() as tape:
    with tape.stop_recording():
        y = x ** 2
dy_dx = tape.gradient(y, x)
print(dy_dx)
# None
# PyTorch syntax:
>>> x = torch.tensor([1.], requires_grad=True)
>>> with torch.no_grad():
...     y = x * 2
>>> y.requires_grad
False
>>> @torch.no_grad()
... def doubler(x):
...     return x * 2
>>> z = doubler(x)
>>> z.requires_grad
False
# TensorFlow syntax:
max_ = tf.reduce_max(input, axis=1)
# PyTorch syntax (with dim, torch.max returns a (values, indices) namedtuple):
max_ = torch.max(input, dim=1).values
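# A small example of the differing return types: tf.reduce_max returns only
# the maxima, while torch.max(input, dim) also returns the argmax indices.
import torch

x = torch.tensor([[1.0, 3.0], [2.0, 0.0]])
values, indices = torch.max(x, dim=1)
print(values)   # tensor([3., 2.])
print(indices)  # tensor([1, 0])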
# TensorFlow syntax:
out = tf.norm(input, ord=1, axis=1)
# PyTorch syntax:
out = torch.norm(input, p=1, dim=1)
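# Quick check of the L1 norm along dim/axis 1 (values chosen for illustration):
import torch

x = torch.tensor([[3.0, -4.0], [1.0, 2.0]])
print(torch.norm(x, p=1, dim=1))  # tensor([7., 3.])
# torch.linalg.vector_norm(x, ord=1, dim=1) is the newer equivalent API.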
# TensorFlow syntax:
out = tf.concat(values, axis=0)
# PyTorch syntax:
out = torch.cat(values, dim=0)
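# Both concatenate along an existing dimension without adding a new one:
import torch

a = torch.zeros(2, 3)
b = torch.ones(2, 3)
print(torch.cat([a, b], dim=0).shape)  # torch.Size([4, 3])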
# TensorFlow syntax:
tf.saved_model.save(obj, export_dir)
# PyTorch syntax:
torch.save(obj, f)
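# Note the scope differs: tf.saved_model.save exports a whole SavedModel
# directory, while torch.save serializes a single Python object to one file.
# A common PyTorch pattern (the path "model.pt" is a placeholder):
import torch

model = torch.nn.Linear(4, 2)
torch.save(model.state_dict(), "model.pt")     # save weights only
model.load_state_dict(torch.load("model.pt"))  # restore into a matching module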
# TensorFlow syntax:
tf.zeros_like(input, dtype=None)
# PyTorch syntax:
torch.zeros_like(input, dtype=None)
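# Same semantics in both frameworks: a zero tensor matching the input's shape,
# keeping its dtype unless overridden:
import torch

x = torch.rand(2, 3)
z = torch.zeros_like(x, dtype=torch.int64)
print(z.shape, z.dtype)  # torch.Size([2, 3]) torch.int64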
# TensorFlow syntax:
tf.random.uniform(shape=[])
# PyTorch syntax (torch.rand takes the size positionally, not as `shape=`):
torch.rand([])
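# An empty size gives a 0-dim scalar tensor, mirroring tf.random.uniform(shape=[]):
import torch

s = torch.rand([])       # scalar drawn from [0, 1)
m = torch.rand(2, 3)     # 2x3 matrix drawn from [0, 1)
print(s.shape, m.shape)  # torch.Size([]) torch.Size([2, 3])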
# TensorFlow syntax:
tf.losses.mean_squared_error(x, y)
# PyTorch syntax:
mse_loss = torch.nn.MSELoss()
mse_loss(x, y)
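# A minimal check that both reduce to the mean of squared differences
# (values made up for illustration):
import torch

x = torch.tensor([1.0, 2.0, 3.0])
y = torch.tensor([1.0, 2.0, 5.0])
print(torch.nn.MSELoss()(x, y))  # tensor(1.3333) = (0 + 0 + 4) / 3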
# TensorFlow syntax:
flatten = tf.keras.layers.Flatten()
flatten(input)
# PyTorch syntax:
flatten = nn.Flatten()
flatten(input)
>>> input = torch.randn(32, 1, 5, 5)
>>> # With default parameters
>>> m = nn.Flatten()
>>> output = m(input)
>>> output.size()
torch.Size([32, 25])
>>> # With non-default parameters
>>> m = nn.Flatten(0, 2)
>>> output = m(input)
>>> output.size()
torch.Size([160, 5])
# TensorFlow syntax:
tf.math.tanh(x)
# PyTorch syntax:
torch.tanh(x)
# TensorFlow syntax:
tf.math.log(x)
# PyTorch syntax:
torch.log(x)
# TensorFlow syntax:
tf.eye(num_rows)
# Construct one identity matrix.
tf.eye(2)
# ==> [[1., 0.],
#      [0., 1.]]
# Construct a batch of 3 identity matrices, each 2 x 2.
# batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
batch_identity = tf.eye(2, batch_shape=[3])
# Construct one 2 x 3 "identity" matrix.
tf.eye(2, num_columns=3)
# ==> [[1., 0., 0.],
#      [0., 1., 0.]]
# PyTorch syntax:
torch.eye(num_rows)
>>> torch.eye(3)
tensor([[ 1.,  0.,  0.],
        [ 0.,  1.,  0.],
        [ 0.,  0.,  1.]])
# TensorFlow syntax:
tf.boolean_mask(tensor, mask, axis=None, name='boolean_mask')
tensor = [0, 1, 2, 3]  # 1-D example
mask = np.array([True, False, True, False])
out = tf.boolean_mask(tensor, mask)
# out: tf.Tensor([0 2], shape=(2,), dtype=int32)
tensor = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]  # 2-D example
mask = np.array([[True, False, False],
                 [False, True, False],
                 [False, False, True]])
out = tf.boolean_mask(tensor, mask)
# out: tf.Tensor([0 4 8], shape=(3,), dtype=int32)
# PyTorch syntax:
torch.masked_select(input, mask)
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.3552, -2.3825, -0.8297, 0.3477],
[-1.2035, 1.2252, 0.5002, 0.6248],
[ 0.1307, -2.0608, 0.1244, 2.0139]])
>>> mask = x.ge(0.5)
>>> mask
tensor([[False, False, False, False],
[False, True, True, True],
[False, False, False, True]])
>>> torch.masked_select(x, mask)
tensor([ 1.2252, 0.5002, 0.6248, 2.0139])
# TensorFlow syntax:
tf.math.abs(x)
# PyTorch syntax:
torch.abs(x)
# TensorFlow syntax:
tf.stack(values, axis=0, name='stack')
# PyTorch syntax:
torch.stack(tensors, dim=0)
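# Unlike cat/concat, stack inserts a new dimension; all inputs must share
# one shape. A quick shape comparison:
import torch

a = torch.zeros(2, 3)
b = torch.ones(2, 3)
print(torch.stack([a, b], dim=0).shape)  # torch.Size([2, 2, 3])
print(torch.cat([a, b], dim=0).shape)    # torch.Size([4, 3])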
# TensorFlow syntax:
tf.math.reduce_mean(x, axis=None)
# PyTorch syntax (omit dim to average over all elements):
torch.mean(x)
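# Calling torch.mean without dim reduces over all elements; pass dim to
# reduce along one axis:
import torch

x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
print(torch.mean(x))         # tensor(2.5000)
print(torch.mean(x, dim=0))  # tensor([2., 3.])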
# TensorFlow syntax (tf.raw_ops accepts keyword arguments only):
tf.raw_ops.PadV2(input=input, paddings=[[pad_top, pad_bottom], [pad_left, pad_right]], constant_values=0)
# PyTorch syntax:
torch.nn.functional.pad(input, [pad_left, pad_right, pad_top, pad_bottom], mode='constant', value=0)
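# The padding orders differ: TensorFlow lists [[before, after], ...] per
# dimension starting from the first axis, while F.pad takes a flat list
# starting from the LAST dimension ([left, right, top, bottom] for 2-D):
import torch
import torch.nn.functional as F

x = torch.ones(2, 2)
out = F.pad(x, [1, 1, 2, 2], mode='constant', value=0)  # left, right, top, bottom
print(out.shape)  # torch.Size([6, 4]): rows 2+2+2, cols 1+2+1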
# TensorFlow syntax:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(label, logit)
loss_ = tf.reduce_mean(loss)
# logit: tf.Tensor([[-1.6456  0.0097 -1.0953  1.1578  0.917 ]
#                   [ 1.8985  0.3284  0.7734  0.551   0.5097]
#                   [ 0.7643  2.2743  1.376   1.3695  0.931 ]], shape=(3, 5), dtype=float32)
# label: tf.Tensor([0 1 2], shape=(3,), dtype=int32)
# loss:  tf.Tensor([3.6227016 2.2839847 1.7284999], shape=(3,), dtype=float32)
# loss_: tf.Tensor(2.545062, shape=(), dtype=float32)
# PyTorch syntax:
torch.nn.CrossEntropyLoss()
>>> # Example of target with class indices
>>> loss = nn.CrossEntropyLoss()
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.empty(3, dtype=torch.long).random_(5)
>>> output = loss(input, target)
>>> output.backward()
# input:  tensor([[-1.6456,  0.0097, -1.0953,  1.1578,  0.9170],
#                 [ 1.8985,  0.3284,  0.7734,  0.5510,  0.5097],
#                 [ 0.7643,  2.2743,  1.3760,  1.3695,  0.9310]], requires_grad=True)
# target: tensor([0, 1, 2])
# output: tensor(2.5451, grad_fn=<NllLossBackward0>)