To reproduce MTCNN, the loss layer needs to be modified. The new layer is based on EuclideanLoss: a reference implementation on GitHub provides the GPU version, and here I implement the CPU version.
Open the project caffe-windows/scripts/build/Caffe.sln in Visual Studio.
Under the Header Files folder of the solution, add a new header file euclidean_loss_mine_layer.hpp:
// euclidean_loss_mine_layer.hpp
#ifndef CAFFE_EUCLIDEAN_LOSS_MINE_LAYER_HPP_
#define CAFFE_EUCLIDEAN_LOSS_MINE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/layers/loss_layer.hpp"

namespace caffe {

template <typename Dtype>
class EuclideanLossMineLayer : public LossLayer<Dtype> {
 public:
  explicit EuclideanLossMineLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param), diff_() {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "EuclideanLossMine"; }
  // Euclidean loss can compute gradients with respect to both inputs,
  // so force_backward is allowed on either bottom.
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return true;
  }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /// Returns the divisor implied by the chosen normalization mode.
  virtual Dtype get_normalizer(
      LossParameter_NormalizationMode normalization_mode, int valid_count);

  /// Whether to ignore instances with a certain label.
  bool has_ignore_label_;
  /// The label indicating that an instance should be ignored.
  int ignore_label_;
  /// How to normalize the output loss.
  LossParameter_NormalizationMode normalization_;
  int outer_num_, inner_num_;
  /// Number of non-ignored elements found in the last forward pass.
  Dtype valid_count_;
  /// Cached element-wise difference bottom[0] - bottom[1].
  Blob<Dtype> diff_;
};

}  // namespace caffe

#endif  // CAFFE_EUCLIDEAN_LOSS_MINE_LAYER_HPP_
Under the Source/Layers folder, add a new .cpp file euclidean_loss_mine_layer.cpp:
// euclidean_loss_mine_layer.cpp
#include <algorithm>  // for std::max
#include <vector>

#include "caffe/layers/euclidean_loss_mine_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {

template <typename Dtype>
void EuclideanLossMineLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::LayerSetUp(bottom, top);
  valid_count_ = -1;
  // Items that should not be trained are marked with the ignore label
  // (here -1) and are skipped when computing the loss.
  has_ignore_label_ = this->layer_param_.loss_param().has_ignore_label();
  if (has_ignore_label_) {
    ignore_label_ = this->layer_param_.loss_param().ignore_label();
  }
  // Resolve the normalization mode, honoring the deprecated normalize flag.
  if (!this->layer_param_.loss_param().has_normalization() &&
      this->layer_param_.loss_param().has_normalize()) {
    normalization_ = this->layer_param_.loss_param().normalize() ?
                     LossParameter_NormalizationMode_VALID :
                     LossParameter_NormalizationMode_BATCH_SIZE;
  } else {
    normalization_ = this->layer_param_.loss_param().normalization();
  }
}
template <typename Dtype>
void EuclideanLossMineLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  outer_num_ = bottom[0]->num();
  inner_num_ = bottom[0]->channels();
  CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1))
      << "Inputs must have the same dimension.";
  diff_.ReshapeLike(*bottom[0]);
}
template <typename Dtype>
Dtype EuclideanLossMineLayer<Dtype>::get_normalizer(
    LossParameter_NormalizationMode normalization_mode, int valid_count) {
  Dtype normalizer;
  switch (normalization_mode) {
    case LossParameter_NormalizationMode_FULL:
      normalizer = Dtype(outer_num_ * inner_num_);
      break;
    case LossParameter_NormalizationMode_VALID:
      if (valid_count == -1) {
        normalizer = Dtype(outer_num_);
      } else {
        // counts are accumulated per element in Forward, so divide by
        // inner_num_ to obtain the number of valid instances.
        normalizer = Dtype(valid_count / inner_num_);
      }
      break;
    case LossParameter_NormalizationMode_BATCH_SIZE:
      normalizer = Dtype(outer_num_);
      break;
    case LossParameter_NormalizationMode_NONE:
      normalizer = Dtype(1);
      break;
    default:
      LOG(FATAL) << "Unknown normalization mode: "
          << LossParameter_NormalizationMode_Name(normalization_mode);
  }
  // Some users will have no labels for some examples in order to 'turn off' a
  // particular loss in a multi-task setup. The max prevents NaNs in that case.
  return std::max(Dtype(1.0), normalizer);
}
template <typename Dtype>
void EuclideanLossMineLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  if (!has_ignore_label_) {
    LOG(FATAL) << "Use EuclideanLossLayer instead.";
  }
  const Dtype* b0 = bottom[0]->cpu_data();
  const Dtype* b1 = bottom[1]->cpu_data();
  Dtype* diff = diff_.mutable_cpu_data();
  const int nthreads = outer_num_ * inner_num_;
  // Reuse the bottom diff buffers as scratch space for per-element losses
  // and valid-element counts; Backward overwrites them afterwards.
  Dtype* loss_data = bottom[0]->mutable_cpu_diff();
  Dtype* counts = bottom[1]->mutable_cpu_diff();
  // Key step: zero the loss (and the cached diff) for every element whose
  // label equals ignore_label_, so ignored items contribute no gradient.
  for (int index = 0; index < nthreads; index++) {
    const int label_value = static_cast<int>(b1[index]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      diff[index] = 0;
      loss_data[index] = 0;
      counts[index] = 0;
    } else {
      diff[index] = b0[index] - b1[index];
      loss_data[index] = Dtype(0.5) * diff[index] * diff[index];
      counts[index] = 1;
    }
  }
  Dtype loss = caffe_cpu_asum(nthreads, loss_data);
  if (normalization_ == LossParameter_NormalizationMode_VALID) {
    valid_count_ = caffe_cpu_asum(nthreads, counts);
  }
  // Scale the summed loss by the normalizer chosen via loss_param.
  top[0]->mutable_cpu_data()[0] =
      loss / get_normalizer(normalization_, valid_count_);
}
template <typename Dtype>
void EuclideanLossMineLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!has_ignore_label_) {
    LOG(FATAL) << "Use EuclideanLossLayer instead.";
  }
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      // Apply the same normalizer as in Forward so gradients match the loss.
      const Dtype alpha = sign * top[0]->cpu_diff()[0] /
          get_normalizer(normalization_, valid_count_);
      caffe_cpu_axpby(
          bottom[i]->count(),              // count
          alpha,                           // alpha
          diff_.cpu_data(),                // x
          Dtype(0),                        // beta
          bottom[i]->mutable_cpu_diff());  // y
    }
  }
}
#ifdef CPU_ONLY
STUB_GPU(EuclideanLossMineLayer);
#endif
INSTANTIATE_CLASS(EuclideanLossMineLayer);
REGISTER_LAYER_CLASS(EuclideanLossMine);
} // namespace caffe
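Once the layer is built and registered, it is referenced from a network definition by the string returned by type(). A minimal sketch of such a declaration; the blob names pred and label and the loss name are hypothetical, and the label blob is assumed to mark ignored targets with -1:
layer {
  name: "landmark_loss"
  type: "EuclideanLossMine"
  bottom: "pred"        # predictions
  bottom: "label"       # regression targets; ignored entries are set to -1
  top: "landmark_loss"
  loss_param {
    ignore_label: -1
    normalization: VALID
  }
}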
To implement the GPU version, additionally add a .cu file with the corresponding implementation in the same directory.
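For reference, below is a minimal sketch of what euclidean_loss_mine_layer.cu might look like; it simply mirrors the CPU path above and is my own reconstruction, not the code from the referenced GitHub repository:
// euclidean_loss_mine_layer.cu -- sketch mirroring the CPU implementation
#include <vector>

#include "caffe/layers/euclidean_loss_mine_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void EuclideanLossMineForwardGPU(const int nthreads,
    const Dtype* b0, const Dtype* b1, Dtype* diff, Dtype* loss_data,
    Dtype* counts, const bool has_ignore_label, const int ignore_label) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int label_value = static_cast<int>(b1[index]);
    if (has_ignore_label && label_value == ignore_label) {
      diff[index] = 0;
      loss_data[index] = 0;
      counts[index] = 0;
    } else {
      diff[index] = b0[index] - b1[index];
      loss_data[index] = Dtype(0.5) * diff[index] * diff[index];
      counts[index] = 1;
    }
  }
}

template <typename Dtype>
void EuclideanLossMineLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int nthreads = outer_num_ * inner_num_;
  // As in the CPU path, reuse the bottom diff buffers as scratch space.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  Dtype* counts = bottom[1]->mutable_gpu_diff();
  // NOLINT_NEXT_LINE(whitespace/operators)
  EuclideanLossMineForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
      CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom[0]->gpu_data(),
      bottom[1]->gpu_data(), diff_.mutable_gpu_data(), loss_data, counts,
      has_ignore_label_, ignore_label_);
  CUDA_POST_KERNEL_CHECK;
  Dtype loss;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  if (normalization_ == LossParameter_NormalizationMode_VALID) {
    caffe_gpu_asum(nthreads, counts, &valid_count_);
  }
  // The top blob is tiny, so writing through cpu_data is fine here.
  top[0]->mutable_cpu_data()[0] =
      loss / get_normalizer(normalization_, valid_count_);
}

template <typename Dtype>
void EuclideanLossMineLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] /
          get_normalizer(normalization_, valid_count_);
      caffe_gpu_axpby(bottom[i]->count(), alpha, diff_.gpu_data(),
          Dtype(0), bottom[i]->mutable_gpu_diff());
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(EuclideanLossMineLayer);

}  // namespace caffe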
Next, following the referenced blog post, modify src\caffe\proto\caffe.proto; there are four places that need changes in total.
Pay special attention to capitalization and naming conventions: imitate the style of the Caffe source code as closely as possible, e.g.:
EuclideanLossMineParameter
EUCLIDEAN_LOSS_MINE
EuclideanLossMine
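The four edit sites are not reproduced from the post, but for a layer added in this style they are typically the following. This is only a sketch: the IDs shown are placeholders, and the real ones must be the next unused IDs noted in the comments inside caffe.proto. Since this layer reads its settings from the shared loss_param, the new parameter message can remain empty:
// 1) Update the "next available layer-specific ID" comment above
//    message LayerParameter.
// 2) Inside message LayerParameter, declare the new parameter field:
optional EuclideanLossMineParameter euclidean_loss_mine_param = 150;
// 3) Define the (here empty) parameter message:
message EuclideanLossMineParameter {
}
// 4) Inside message V1LayerParameter, enum LayerType, add the upper-case name:
EUCLIDEAN_LOSS_MINE = 45;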