赞
踩
在caffe中摸爬滚打了一个多月了,修改caffe源代码,早就想练练手了,loss层是一个比较独立的一个层,而且可以仿照caffe给的样例进行添加,难度会稍微小点。caffe自带了十种loss层(contrastive、euclidean、hinge、multinomial_logistic、sigmoid_cross_entropy、smooth_L1、smooth_L1_ohem、softmax、softmax_ohem、infogain)
详细见:http://blog.csdn.net/sihailongwang/article/details/72657637
公式含义推荐:http://blog.csdn.net/u012177034/article/details/52144325
接下来,就是自己添加一个新的loss(层)函数了,我打算添加:Absolute loss
// Add this field inside message LayerParameter (151 must be an unused
// field ID in your caffe.proto — check the "next available layer-specific
// ID" comment there).  Field renamed from "Absolute_loss_param" to the
// protobuf/Caffe lower_snake_case convention; this also matches the
// accessor the layer code calls: layer_param_.absolute_loss_param().
optional AbsoluteLossParameter absolute_loss_param = 151;

// Parameters for AbsoluteLossLayer.
message AbsoluteLossParameter
{
  // Scale factor applied to the summed absolute difference.
  optional float dis = 1 [default = 1];
}
#ifndef CAFFE_ABSOLUTE_LOSS_LAYER_HPP_
#define CAFFE_ABSOLUTE_LOSS_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/layers/loss_layer.hpp"

namespace caffe {

/**
 * @brief Computes the absolute (L1) loss
 *        @f$ E = \frac{dis}{N} \sum_n |x_n - y_n| @f$
 *        between the two bottom blobs, where @c dis is the scale taken
 *        from AbsoluteLossParameter.
 */
template <typename Dtype>
class AbsoluteLossLayer : public LossLayer<Dtype> {
 public:
  explicit AbsoluteLossLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param), dis_() {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "AbsoluteLoss"; }
  // Unlike most loss layers, the gradient w.r.t. either input (including
  // the "label" bottom) is computable, so force_backward is allowed on both.
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return true;
  }

 protected:
  /// @copydoc AbsoluteLossLayer
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  // Element-wise difference bottom[0] - bottom[1], cached by Forward for
  // reuse in Backward.
  Blob<Dtype> dis_;
};

}  // namespace caffe

// Original snippet had TWO #endif lines for a single #ifndef (a compile
// error) and both carried the EUCLIDEAN guard name from the file it was
// copied from; one #endif with the matching guard name is correct.
#endif  // CAFFE_ABSOLUTE_LOSS_LAYER_HPP_
说明:因为AbsoluteLoss需要绝对值求和,所以在math_functions.cpp中需要增加一个“绝对值求和”模板函数(与此同时,我惊喜地发现了BLAS、CBLAS)
/********************************************************************************************************************
TIPS:科普一下什么是BLAS/CBLAS
Basic Linear Algebra Subprograms,即基础线性代数子程序库,里边拥有大量的已经编好的关于线性代数运算的程序,主要用于向量和矩阵的计算的高性能数学库,本身是由Fortran编写的,为了方便C/C++程序使用,就有了BLAS的C接口库CBLAS,详细列表:http://www.netlib.org/blas/
********************************************************************************************************************/
//--------------------------add------------------------------------------
template <>
float caffe_cpu_asum<float>(const int n, const float* x) {
return cblas_sasum(n, x, 1); //sum of absolute values
}
template <>
double caffe_cpu_asum<double>(const int n, const double* x) {
return cblas_dasum(n, x, 1); //sum of absolute values
}
template <typename Dtype>
Dtype caffe_cpu_abs_sum(const int n, const Dtype* x) {
return caffe_cpu_asum(n, x);
}
template
float caffe_cpu_asum<float>(const int n, const float* x);
template
double caffe_cpu_asum<double>(const int n, const double* x);
//-------------------------add-------------------------------------------
CPU版本(absolute_loss_layer.cpp):
#include <vector>
#include "caffe/layers/absolute_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Shapes the loss output and the internal difference buffer.
// Called whenever the bottom blobs change shape.
template <typename Dtype>
void AbsoluteLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);  // base class shapes the scalar loss top
  CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1))  // per-item dims must match
      << "Inputs must have the same dimension.";
  // dis_ caches the element-wise difference bottom[0] - bottom[1];
  // it must mirror the bottom shape.
  dis_.ReshapeLike(*bottom[0]);
}
// Forward pass: loss = dis * sum(|bottom[0] - bottom[1]|) / num,
// where dis is the scale from AbsoluteLossParameter.
template <typename Dtype>
void AbsoluteLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  // Cache the element-wise difference; Backward reuses dis_.
  caffe_sub(count,
            bottom[0]->cpu_data(),
            bottom[1]->cpu_data(),
            dis_.mutable_cpu_data());
  const Dtype scale = this->layer_param_.absolute_loss_param().dis();
  const Dtype abs_sum = caffe_cpu_abs_sum(count, dis_.cpu_data());
  // Normalize by the batch size, matching EuclideanLoss conventions.
  top[0]->mutable_cpu_data()[0] = scale * abs_sum / bottom[0]->num();
}
// Backward pass for the absolute (L1) loss.
//
// d|x|/dx = sign(x), so the gradient w.r.t. bottom[0] is
//   scale * top_diff * sign(bottom[0] - bottom[1]) / num
// and the negative of that for bottom[1].
//
// FIX: the original propagated dis_ itself (the raw difference), which is
// the EUCLIDEAN-loss gradient, not the absolute-loss gradient; it also
// omitted the `dis` scale that Forward applies, making the gradient
// inconsistent with the reported loss.
template <typename Dtype>
void AbsoluteLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype scale = this->layer_param_.absolute_loss_param().dis();
  for (int i = 0; i < 2; ++i) {
    if (!propagate_down[i]) { continue; }  // e.g. the label bottom
    const int count = bottom[i]->count();
    // Write sign(dis_) into the bottom diff, then scale it in place.
    caffe_cpu_sign(count, dis_.cpu_data(), bottom[i]->mutable_cpu_diff());
    const Dtype sign = (i == 0) ? 1 : -1;
    const Dtype alpha = sign * scale * top[0]->cpu_diff()[0] / bottom[i]->num();
    caffe_scal(count, alpha, bottom[i]->mutable_cpu_diff());
  }
}
#ifdef CPU_ONLY
// CPU-only builds: stub out Forward_gpu/Backward_gpu so they abort if called.
STUB_GPU(AbsoluteLossLayer);
#endif

INSTANTIATE_CLASS(AbsoluteLossLayer);  // instantiate float and double versions
REGISTER_LAYER_CLASS(AbsoluteLoss);    // register under layer type "AbsoluteLoss"
} // namespace caffe
GPU版本(absolute_loss_layer.cu):
#include <vector>
#include "caffe/layers/absolute_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// GPU forward pass: loss = dis * sum(|bottom[0] - bottom[1]|) / num.
// Mirrors Forward_cpu using the GPU math wrappers.
template <typename Dtype>
void AbsoluteLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  // Cache the element-wise difference; Backward reuses dis_.
  caffe_gpu_sub(count,
                bottom[0]->gpu_data(),
                bottom[1]->gpu_data(),
                dis_.mutable_gpu_data());
  // FIX: the original line was truncated ("this->layer_param_.") and did
  // not compile; restore the accessor for the dis scale.
  const Dtype scale = this->layer_param_.absolute_loss_param().dis();
  Dtype abs_sum;
  caffe_gpu_asum(count, dis_.gpu_data(), &abs_sum);
  // The scalar loss lives in host memory, hence mutable_cpu_data here.
  top[0]->mutable_cpu_data()[0] = scale * abs_sum / bottom[0]->num();
}
// GPU backward pass for the absolute (L1) loss; mirrors Backward_cpu.
//
// FIX: as in the CPU path, the original propagated dis_ (the raw
// difference — the Euclidean-loss gradient) instead of sign(dis_), and
// omitted the `dis` scale applied in Forward.
template <typename Dtype>
void AbsoluteLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype scale = this->layer_param_.absolute_loss_param().dis();
  for (int i = 0; i < 2; ++i) {
    if (!propagate_down[i]) { continue; }  // e.g. the label bottom
    const int count = bottom[i]->count();
    // Write sign(dis_) into the bottom diff, then scale it in place.
    caffe_gpu_sign(count, dis_.gpu_data(), bottom[i]->mutable_gpu_diff());
    const Dtype sign = (i == 0) ? 1 : -1;
    // top diff is a host-side scalar, so cpu_diff() is correct here.
    const Dtype alpha = sign * scale * top[0]->cpu_diff()[0] / bottom[i]->num();
    caffe_gpu_scal(count, alpha, bottom[i]->mutable_gpu_diff());
  }
}
// Instantiate float/double Forward_gpu and Backward_gpu for this layer.
INSTANTIATE_LAYER_GPU_FUNCS(AbsoluteLossLayer);
} // namespace caffe
1.修改../windows/libcaffe下的两个文件:libcaffe.vcxproj和libcaffe.vcxproj.filters
libcaffe.vcxproj增加:
<ClCompile Include="..\..\src\caffe\layers\absolute_loss_layer.cpp" />
<ClInclude Include="..\..\include\caffe\layers\absolute_loss_layer.hpp" />
<CudaCompile Include="..\..\src\caffe\layers\absolute_loss_layer.cu" />
libcaffe.vcxproj.filters增加:
<ClInclude Include="..\..\include\caffe\layers\absolute_loss_layer.hpp">
<Filter>include\layers</Filter>
</ClInclude>
<CudaCompile Include="..\..\src\caffe\layers\absolute_loss_layer.cu">
<Filter>cu\layers</Filter>
</CudaCompile>
<ClCompile Include="..\..\src\caffe\layers\absolute_loss_layer.cpp">
<Filter>src\layers</Filter>
</ClCompile>
2.打开caffe.sln,再进行重新生成新的解决方案,编译通过后,恭喜你功力又升一级!
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。