All snippets below assume the usual TensorFlow/Keras imports:

```python
import tensorflow as tf
from tensorflow.keras import backend as K
```

Mean squared error (MSE) loss

Here y_pred.shape = [num,], where num is the number of outputs; the first dimension is really batch_size and is ignored, since the reduction runs over the last axis.

```python
def mse(y_true, y_pred):
    return K.mean(K.square(y_pred - y_true), axis=-1)
```

Mean absolute error (MAE) loss

```python
def mae(y_true, y_pred):
    return K.mean(K.abs(y_pred - y_true), axis=-1)
```

Multi-class cross-entropy loss

Labels are one-hot encoded, so output.shape = [None, num_class].

```python
def categorical_crossentropy(y_true, y_pred):
    # normalize predictions so each row sums to 1
    y_pred /= tf.reduce_sum(y_pred, axis=-1, keepdims=True)
    _epsilon = 1e-4
    # clip to avoid log(0)
    y_pred = tf.clip_by_value(y_pred, _epsilon, 1. - _epsilon)
    return -tf.reduce_sum(y_true * tf.math.log(y_pred), axis=-1)
```
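To sanity-check the custom implementation against the Keras built-in, you can evaluate both on a small batch (a minimal sketch; the tensor values below are made up for illustration):

```python
y_true = tf.constant([[1., 0., 0.], [0., 1., 0.]])        # hypothetical one-hot labels
y_pred = tf.constant([[0.8, 0.1, 0.1], [0.2, 0.7, 0.1]])  # hypothetical predictions

print(categorical_crossentropy(y_true, y_pred))                  # custom version above
print(tf.keras.losses.categorical_crossentropy(y_true, y_pred))  # Keras reference
```

The two should agree closely; any tiny difference comes from the larger clipping epsilon used here.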
KL divergence loss

```python
def KLD(y_true, y_pred):
    _epsilon = 1e-4
    # clip both distributions away from 0 so the log stays finite
    y_true = K.clip(y_true, _epsilon, 1)
    y_pred = K.clip(y_pred, _epsilon, 1)
    return K.sum(y_true * K.log(y_true / y_pred), axis=-1)
```
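As a quick check (a minimal sketch; the probability vectors are made up for illustration), the result can be compared with the Keras built-in, which computes the same sum:

```python
p = tf.constant([[0.6, 0.3, 0.1]])  # hypothetical target distribution
q = tf.constant([[0.5, 0.4, 0.1]])  # hypothetical predicted distribution

print(KLD(p, q))                                          # custom version above
print(tf.keras.losses.kullback_leibler_divergence(p, q))  # Keras reference
```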
Binary cross-entropy loss

```python
def binary_crossentropy(y_true, y_pred):
    _epsilon = 1e-4
    # epsilon inside the log avoids log(0)
    final = y_true * tf.math.log(y_pred + _epsilon) \
            + (1 - y_true) * tf.math.log(1 - y_pred + _epsilon)
    return -tf.reduce_mean(final, axis=-1)
```

smooth_L1 loss

```python
def smooth_l1_loss(y_true, y_pred):
    absolute_loss = tf.abs(y_true - y_pred)
    square_loss = 0.5 * tf.square(y_true - y_pred)
    # quadratic for |error| < 1, linear beyond (Huber loss with delta = 1)
    return tf.reduce_sum(tf.where(tf.less(absolute_loss, 1.0),
                                  square_loss,
                                  absolute_loss - 0.5), axis=-1)
```
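Each of these functions can be passed straight to model.compile, since Keras accepts any callable with the (y_true, y_pred) -> loss signature. A minimal sketch (the architecture and shapes below are made up for illustration), using smooth L1 as the loss for a 4-value box-regression head:

```python
from tensorflow import keras

# hypothetical regression head: 10 input features -> 4 box offsets
model = keras.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(10,)),
    keras.layers.Dense(4),
])
# smooth L1 is commonly used for bounding-box regression (e.g. SSD, Faster R-CNN)
model.compile(optimizer='adam', loss=smooth_l1_loss)
```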