33 changes: 17 additions & 16 deletions include/caffe/layer.hpp
@@ -37,9 +37,9 @@ class Layer {
   // Forward and backward wrappers. You should implement the cpu and
   // gpu specific implementations instead, and should not change these
   // functions.
-  inline void Forward(const vector<Blob<Dtype>*>& bottom,
+  inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
-  inline Dtype Backward(const vector<Blob<Dtype>*>& top,
+  inline void Backward(const vector<Blob<Dtype>*>& top,
       const bool propagate_down,
       vector<Blob<Dtype>*>* bottom);
 
@@ -60,26 +60,26 @@ class Layer {
   vector<shared_ptr<Blob<Dtype> > > blobs_;
 
   // Forward functions
-  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) = 0;
   // If no gpu code is provided, we will simply use cpu code.
-  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
     // LOG(WARNING) << "Using CPU code as backup.";
-    Forward_cpu(bottom, top);
+    return Forward_cpu(bottom, top);
   }
 
   // Backward functions: the backward function will compute the gradients for
   // any parameters and also for the bottom blobs if propagate_down is true.
   // It will return the loss produced from this layer.
-  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
+  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down,
       vector<Blob<Dtype>*>* bottom) = 0;
-  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
+  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down,
       vector<Blob<Dtype>*>* bottom) {
     // LOG(WARNING) << "Using CPU code as backup.";
-    return Backward_cpu(top, propagate_down, bottom);
+    Backward_cpu(top, propagate_down, bottom);
   }
 
   DISABLE_COPY_AND_ASSIGN(Layer);
@@ -89,29 +89,30 @@ class Layer {
 // gpu specific implementations instead, and should not change these
 // functions.
 template <typename Dtype>
-inline void Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
+inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
   switch (Caffe::mode()) {
   case Caffe::CPU:
-    Forward_cpu(bottom, top);
-    break;
+    return Forward_cpu(bottom, top);
   case Caffe::GPU:
-    Forward_gpu(bottom, top);
-    break;
+    return Forward_gpu(bottom, top);
   default:
     LOG(FATAL) << "Unknown caffe mode.";
+    return Dtype(0);
   }
 }
 
 template <typename Dtype>
-inline Dtype Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
+inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
     const bool propagate_down,
     vector<Blob<Dtype>*>* bottom) {
   switch (Caffe::mode()) {
   case Caffe::CPU:
-    return Backward_cpu(top, propagate_down, bottom);
+    Backward_cpu(top, propagate_down, bottom);
+    break;
   case Caffe::GPU:
-    return Backward_gpu(top, propagate_down, bottom);
+    Backward_gpu(top, propagate_down, bottom);
+    break;
   default:
     LOG(FATAL) << "Unknown caffe mode.";
   }
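For orientation only (this sketch is not part of the diff): under the new signatures, a layer's Forward_cpu returns the loss it computes and Backward_cpu returns nothing. The following free-standing member definitions assume a hypothetical ExampleLossLayer whose class declaration, SetUp, and GPU variants are omitted; the squared-error math is purely illustrative.

// Illustrative sketch only -- not code from this pull request.
#include <vector>
using std::vector;

template <typename Dtype>
Dtype ExampleLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const int count = bottom[0]->count();
  const Dtype* data = bottom[0]->cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  Dtype loss = 0;
  for (int i = 0; i < count; ++i) {
    Dtype d = data[i] - label[i];
    loss += d * d;
  }
  // Layers that do not produce a loss would simply return Dtype(0).
  return loss / (Dtype(2) * bottom[0]->num());
}

template <typename Dtype>
void ExampleLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  // Gradients still go into the bottom blobs' diffs; Backward no longer
  // returns a loss value.
}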
15 changes: 9 additions & 6 deletions include/caffe/net.hpp
@@ -31,21 +31,24 @@ class Net {
 
   // Run forward with the input blobs already fed separately. You can get the
   // input blobs using input_blobs().
-  const vector<Blob<Dtype>*>& ForwardPrefilled();
+  const vector<Blob<Dtype>*>& ForwardPrefilled(Dtype* loss = NULL);
   // Run forward using a set of bottom blobs, and return the result.
-  const vector<Blob<Dtype>*>& Forward(const vector<Blob<Dtype>* > & bottom);
+  const vector<Blob<Dtype>*>& Forward(const vector<Blob<Dtype>* > & bottom,
+      Dtype* loss = NULL);
   // Run forward using a serialized BlobProtoVector and return the result
   // as a serialized BlobProtoVector
-  string Forward(const string& input_blob_protos);
+  string Forward(const string& input_blob_protos, Dtype* loss = NULL);
 
   // The network backward should take no input and output, since it solely
   // computes the gradient w.r.t the parameters, and the data has already
   // been provided during the forward pass.
-  Dtype Backward();
+  void Backward();
 
   Dtype ForwardBackward(const vector<Blob<Dtype>* > & bottom) {
-    Forward(bottom);
-    return Backward();
+    Dtype loss;
+    Forward(bottom, &loss);
+    Backward();
+    return loss;
   }
 
   // Updates the network weights based on the diff values computed.
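Again purely as illustration (not part of the diff): caller code that previously obtained the loss from Backward() would now read it through the new out-parameter. A minimal sketch, assuming an existing Net<float> instance named net whose input blobs are already filled:

// Hypothetical caller-side sketch -- not code from this pull request.
float loss;
const vector<Blob<float>*>& output = net.ForwardPrefilled(&loss);
net.Backward();  // computes gradients only; the loss came from the forward pass
// Equivalently, net.ForwardBackward(bottom) still returns the loss directly.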