Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions include/caffe/vision_layers.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,7 @@ class DataLayer : public Layer<Dtype> {
shared_ptr<Blob<Dtype> > prefetch_label_;
Blob<Dtype> data_mean_;
bool output_labels_;
Caffe::Phase phase_;
};

template <typename Dtype>
Expand Down Expand Up @@ -476,6 +477,7 @@ class ImageDataLayer : public Layer<Dtype> {
shared_ptr<Blob<Dtype> > prefetch_data_;
shared_ptr<Blob<Dtype> > prefetch_label_;
Blob<Dtype> data_mean_;
Caffe::Phase phase_;
};

template <typename Dtype>
Expand Down
4 changes: 3 additions & 1 deletion src/caffe/layers/data_layer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ void* DataLayerPrefetch(void* layer_pointer) {
CHECK(data.size()) << "Image cropping only support uint8 data";
int h_off, w_off;
// We only do random crop when we do training.
if (Caffe::phase() == Caffe::TRAIN) {
if (layer->phase_ == Caffe::TRAIN) {
// NOLINT_NEXT_LINE(runtime/threadsafe_fn)
h_off = rand() % (height - crop_size);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure whether this is related to this change, but rand() is not thread-safe.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

true - I had planned to follow up (either tonight or tomorrow, probably) with another PR which removes all uses of rand throughout the codebase (by giving the prefetch thread its own private RNG object). Thanks for the comment.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for the upcoming rand() follow-up. I remember it coming up now and then, so it'll be nice to have it squared away.

// NOLINT_NEXT_LINE(runtime/threadsafe_fn)
Expand Down Expand Up @@ -227,6 +227,7 @@ void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
}
data_mean_.cpu_data();
DLOG(INFO) << "Initializing prefetch";
phase_ = Caffe::phase();
CHECK(!pthread_create(&thread_, NULL, DataLayerPrefetch<Dtype>,
reinterpret_cast<void*>(this))) << "Pthread execution failed.";
DLOG(INFO) << "Prefetch initialized.";
Expand All @@ -245,6 +246,7 @@ Dtype DataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
(*top)[1]->mutable_cpu_data());
}
// Start a new prefetch thread
phase_ = Caffe::phase();
CHECK(!pthread_create(&thread_, NULL, DataLayerPrefetch<Dtype>,
reinterpret_cast<void*>(this))) << "Pthread execution failed.";
return Dtype(0.);
Expand Down
1 change: 1 addition & 0 deletions src/caffe/layers/data_layer.cu
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ Dtype DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
cudaMemcpyHostToDevice));
}
// Start a new prefetch thread
phase_ = Caffe::phase();
CHECK(!pthread_create(&thread_, NULL, DataLayerPrefetch<Dtype>,
reinterpret_cast<void*>(this))) << "Pthread execution failed.";
return Dtype(0.);
Expand Down
4 changes: 3 additions & 1 deletion src/caffe/layers/image_data_layer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ void* ImageDataLayerPrefetch(void* layer_pointer) {
CHECK(data.size()) << "Image cropping only support uint8 data";
int h_off, w_off;
// We only do random crop when we do training.
if (Caffe::phase() == Caffe::TRAIN) {
if (layer->phase_ == Caffe::TRAIN) {
// NOLINT_NEXT_LINE(runtime/threadsafe_fn)
h_off = rand() % (height - crop_size);
// NOLINT_NEXT_LINE(runtime/threadsafe_fn)
Expand Down Expand Up @@ -228,6 +228,7 @@ void ImageDataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
prefetch_label_->mutable_cpu_data();
data_mean_.cpu_data();
DLOG(INFO) << "Initializing prefetch";
phase_ = Caffe::phase();
CHECK(!pthread_create(&thread_, NULL, ImageDataLayerPrefetch<Dtype>,
reinterpret_cast<void*>(this))) << "Pthread execution failed.";
DLOG(INFO) << "Prefetch initialized.";
Expand All @@ -244,6 +245,7 @@ Dtype ImageDataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
memcpy((*top)[1]->mutable_cpu_data(), prefetch_label_->cpu_data(),
sizeof(Dtype) * prefetch_label_->count());
// Start a new prefetch thread
phase_ = Caffe::phase();
CHECK(!pthread_create(&thread_, NULL, ImageDataLayerPrefetch<Dtype>,
reinterpret_cast<void*>(this))) << "Pthread execution failed.";
return Dtype(0.);
Expand Down
1 change: 1 addition & 0 deletions src/caffe/layers/image_data_layer.cu
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ Dtype ImageDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
prefetch_label_->cpu_data(), sizeof(Dtype) * prefetch_label_->count(),
cudaMemcpyHostToDevice));
// Start a new prefetch thread
phase_ = Caffe::phase();
CHECK(!pthread_create(&thread_, NULL, ImageDataLayerPrefetch<Dtype>,
reinterpret_cast<void*>(this))) << "Pthread execution failed.";
return Dtype(0.);
Expand Down