import tensorflow as tf

sess = tf.Session()

x_vals = tf.linspace(-1., 1., 500)  # sequence of predicted values
target = tf.constant(0.)            # target value

# L2 loss (least-squares loss)
# L = (pred - actual)^2, the squared difference
l2_y_vals = tf.square(target - x_vals)
l2_y_out = sess.run(l2_y_vals)

# L1 loss
# L = abs(pred - actual), the absolute difference
l1_y_vals = tf.abs(target - x_vals)
l1_y_out = sess.run(l1_y_vals)

# Pseudo-Huber loss
# L = delta^2 * (sqrt(1 + ((pred - actual)/delta)^2) - 1)
delta1 = tf.constant(0.25)
phuber1_y_vals = tf.multiply(tf.square(delta1),
                             tf.sqrt(1. + tf.square((target - x_vals) / delta1)) - 1.)
phuber1_y_out = sess.run(phuber1_y_vals)

delta2 = tf.constant(5.)
phuber2_y_vals = tf.multiply(tf.square(delta2),
                             tf.sqrt(1. + tf.square((target - x_vals) / delta2)) - 1.)
phuber2_y_out = sess.run(phuber2_y_vals)
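To compare the three regression losses, it helps to plot them against the prediction error. A minimal sketch, assuming matplotlib is installed (the line styles, labels, and axis limits are my own choices, not from the original):

import matplotlib.pyplot as plt

x_array = sess.run(x_vals)  # evaluate the x axis once
plt.plot(x_array, l2_y_out, 'b-', label='L2 loss')
plt.plot(x_array, l1_y_out, 'r--', label='L1 loss')
plt.plot(x_array, phuber1_y_out, 'k-.', label='Pseudo-Huber (delta=0.25)')
plt.plot(x_array, phuber2_y_out, 'g:', label='Pseudo-Huber (delta=5.0)')
plt.ylim(-0.05, 0.4)
plt.legend(loc='lower right')
plt.show()

The plot makes the trade-off visible: L2 penalizes large errors heavily, L1 grows only linearly, and Pseudo-Huber interpolates between the two, with delta controlling where the transition happens.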
x_vals = tf.linspace(-3., 5., 500)
target = tf.constant(1.)
targets = tf.fill([500], 1.)

# Hinge loss
# L = max(0, 1 - (pred * actual))
hinge_y_vals = tf.maximum(0., 1. - tf.multiply(target, x_vals))
hinge_y_out = sess.run(hinge_y_vals)

# Cross-entropy loss (valid only for predictions in (0, 1))
# L = -actual * log(pred) - (1 - actual) * log(1 - pred)
xentropy_y_vals = - tf.multiply(target, tf.log(x_vals)) \
                  - tf.multiply((1. - target), tf.log(1. - x_vals))
xentropy_y_out = sess.run(xentropy_y_vals)

# Sigmoid cross-entropy loss (x_vals are first passed through a sigmoid,
# then the cross entropy is computed)
# L = -actual * log(sigmoid(pred)) - (1 - actual) * log(1 - sigmoid(pred))
# or, in the numerically stable form TensorFlow uses,
# L = max(pred, 0) - pred * actual + log(1 + exp(-abs(pred)))
xentropy_sigmoid_y_vals = tf.nn.sigmoid_cross_entropy_with_logits(
    logits=x_vals, labels=targets)
xentropy_sigmoid_y_out = sess.run(xentropy_sigmoid_y_vals)

# Weighted cross-entropy loss (sigmoid cross entropy with a weight on the
# positive targets)
# L = -actual * log(pred) * weight - (1 - actual) * log(1 - pred)
# or, on logits,
# L = (1 - actual) * pred + (1 + (weight - 1) * actual) * log(1 + exp(-pred))
weight = tf.constant(0.5)
xentropy_weighted_y_vals = tf.nn.weighted_cross_entropy_with_logits(
    logits=x_vals, targets=targets, pos_weight=weight)
xentropy_weighted_y_out = sess.run(xentropy_weighted_y_vals)

# Softmax cross-entropy loss (the unnormalized logits are turned into a
# probability distribution by softmax before the cross entropy is computed)
# L = -sum(actual * log(softmax(pred)))
unscaled_logits = tf.constant([[1., -3., 10.]])
target_dist = tf.constant([[0.1, 0.02, 0.88]])
softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=unscaled_logits, labels=target_dist)
print(sess.run(softmax_xentropy))

# Sparse softmax cross-entropy loss
# Use when each example belongs to exactly one class, given as a class index
# L = -log(softmax(pred)[actual_class])
unscaled_logits = tf.constant([[1., -3., 10.]])
sparse_target_dist = tf.constant([2])
sparse_xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=unscaled_logits, labels=sparse_target_dist)
print(sess.run(sparse_xentropy))
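A quick way to see how the dense and sparse softmax variants relate: when the dense label distribution is exactly one-hot, both ops should return the same loss. A minimal sketch under that assumption, reusing unscaled_logits and sparse_xentropy from above (the variable names here are mine):

one_hot_labels = tf.constant([[0., 0., 1.]])  # one-hot encoding of class index 2
dense_xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=unscaled_logits, labels=one_hot_labels)
print(sess.run([dense_xentropy, sparse_xentropy]))  # both values match (~1.26e-4)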