From 46a2d4d4b8fcca20392f668a1654940c379ea5fd Mon Sep 17 00:00:00 2001 From: Emmanouil Stergiadis Date: Fri, 22 Jun 2018 17:13:58 +0200 Subject: [PATCH 1/4] Simplified the APIs and reduced code duplication in `ConvLayer` and `MaxPoolingLayer` * Pooling is now a subclass of Convolutional Layer. As a result common functions and fields are not replicated. * Constructor arguments that can be internally computed are eliminated. --- tmva/tmva/inc/TMVA/DNN/CNN/ConvLayer.h | 88 +++++++++++++++++------ tmva/tmva/inc/TMVA/DNN/CNN/MaxPoolLayer.h | 78 +++++++------------- 2 files changed, 92 insertions(+), 74 deletions(-) diff --git a/tmva/tmva/inc/TMVA/DNN/CNN/ConvLayer.h b/tmva/tmva/inc/TMVA/DNN/CNN/ConvLayer.h index 73b6eda106636..4eb6ef7048eb3 100644 --- a/tmva/tmva/inc/TMVA/DNN/CNN/ConvLayer.h +++ b/tmva/tmva/inc/TMVA/DNN/CNN/ConvLayer.h @@ -46,6 +46,19 @@ class TConvLayer : public VGeneralLayer { using Scalar_t = typename Architecture_t::Scalar_t; private: + bool inline isInteger(Scalar_t x) const { return x == floor(x); } + + /* Calculate the output dimension of the convolutional layer */ + size_t calculateDimension(size_t imgDim, size_t fltDim, size_t padding, size_t stride); + + /* Calculate the number of pixels in a single receptive field */ + size_t inline calculateNLocalViewPixels(size_t depth, size_t height, size_t width) { return depth * height * width; } + + /* Calculate the number of receptive fields in an image given the filter and image sizes */ + size_t calculateNLocalViews(size_t inputHeight, size_t filterHeight, size_t paddingHeight, size_t strideRows, + size_t inputWidth, size_t filterWidth, size_t paddingWidth, size_t strideCols); + +protected: size_t fFilterDepth; ///< The depth of the filter. size_t fFilterHeight; ///< The height of the filter. size_t fFilterWidth; ///< The width of the filter. @@ -53,14 +66,15 @@ class TConvLayer : public VGeneralLayer { size_t fStrideRows; ///< The number of row pixels to slid the filter each step. 
size_t fStrideCols; ///< The number of column pixels to slid the filter each step. - size_t fPaddingHeight; ///< The number of zero layers added top and bottom of the input. - size_t fPaddingWidth; ///< The number of zero layers left and right of the input. - size_t fNLocalViewPixels; ///< The number of pixels in one local image view. size_t fNLocalViews; ///< The number of local views in one image. Scalar_t fDropoutProbability; ///< Probability that an input is active. +private: + size_t fPaddingHeight; ///< The number of zero layers added top and bottom of the input. + size_t fPaddingWidth; ///< The number of zero layers left and right of the input. + std::vector fDerivatives; ///< First fDerivatives of the activations of this layer. std::vector fForwardIndices; ///< Vector of indices used for a fast Im2Col in forward pass @@ -71,11 +85,10 @@ class TConvLayer : public VGeneralLayer { ERegularization fReg; ///< The regularization method. Scalar_t fWeightDecay; ///< The weight decay. + public: /*! Constructor. 
*/ - TConvLayer(size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Depth, size_t Height, - size_t Width, size_t WeightsNRows, size_t WeightsNCols, size_t BiasesNRows, size_t BiasesNCols, - size_t OutputNSlices, size_t OutputNRows, size_t OutputNCols, EInitialization Init, size_t FilterDepth, + TConvLayer(size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Depth, EInitialization Init, size_t FilterHeight, size_t FilterWidth, size_t StrideRows, size_t StrideCols, size_t PaddingHeight, size_t PaddingWidth, Scalar_t DropoutProbability, EActivationFunction f, ERegularization Reg, Scalar_t WeightDecay); @@ -145,23 +158,32 @@ class TConvLayer : public VGeneralLayer { //______________________________________________________________________________ template TConvLayer::TConvLayer(size_t batchSize, size_t inputDepth, size_t inputHeight, size_t inputWidth, - size_t depth, size_t height, size_t width, size_t weightsNRows, - size_t weightsNCols, size_t biasesNRows, size_t biasesNCols, - size_t outputNSlices, size_t outputNRows, size_t outputNCols, - EInitialization init, size_t filterDepth, size_t filterHeight, - size_t filterWidth, size_t strideRows, size_t strideCols, size_t paddingHeight, - size_t paddingWidth, Scalar_t dropoutProbability, EActivationFunction f, - ERegularization reg, Scalar_t weightDecay) - : VGeneralLayer(batchSize, inputDepth, inputHeight, inputWidth, depth, height, width, 1, - weightsNRows, weightsNCols, 1, biasesNRows, biasesNCols, outputNSlices, outputNRows, - outputNCols, init), - fFilterDepth(filterDepth), fFilterHeight(filterHeight), fFilterWidth(filterWidth), fStrideRows(strideRows), - fStrideCols(strideCols), fPaddingHeight(paddingHeight), fPaddingWidth(paddingWidth), - fNLocalViewPixels(filterDepth * filterHeight * filterWidth), fNLocalViews(height * width), - fDropoutProbability(dropoutProbability), fDerivatives(), fF(f), fReg(reg), fWeightDecay(weightDecay) + size_t depth, 
EInitialization init, size_t filterHeight, size_t filterWidth, + size_t strideRows, size_t strideCols, size_t paddingHeight, size_t paddingWidth, + Scalar_t dropoutProbability, EActivationFunction f, ERegularization reg, + Scalar_t weightDecay) + : VGeneralLayer(batchSize, inputDepth, inputHeight, inputWidth, depth, + calculateDimension(inputHeight, filterHeight, paddingHeight, strideRows), + calculateDimension(inputWidth, filterWidth, paddingWidth, strideCols), + 1, depth, calculateNLocalViewPixels(inputDepth, filterHeight, filterWidth), + 1, depth, 1, batchSize, depth, + calculateNLocalViews(inputHeight, filterHeight, paddingHeight, strideRows, + inputWidth, filterWidth, paddingWidth, strideCols), + init), + fFilterDepth(inputDepth), fFilterHeight(filterHeight), fFilterWidth(filterWidth), fStrideRows(strideRows), + fStrideCols(strideCols), fNLocalViewPixels(calculateNLocalViewPixels(inputDepth, filterHeight, filterWidth)), + fNLocalViews(calculateNLocalViews(inputHeight, filterHeight, paddingHeight, strideRows, + inputWidth, filterWidth, paddingWidth, strideCols)), + fDropoutProbability(dropoutProbability), fPaddingHeight(paddingHeight), fPaddingWidth(paddingWidth), + fDerivatives(), fF(f), fReg(reg), fWeightDecay(weightDecay) { - for (size_t i = 0; i < outputNSlices; i++) { - fDerivatives.emplace_back(outputNRows, outputNCols); + /** Each element in the vector is a `T_Matrix` representing an event, therefore `vec.size() == batchSize`. + * Cells in these matrices are distributed in the following manner: + * Each row represents a single feature map, therefore we have `nRows == depth`. + * Each column represents a single pixel in that feature map, therefore we have `nCols == nLocalViews`. 
+ **/ + for (size_t i = 0; i < batchSize; i++) { + fDerivatives.emplace_back(depth, fNLocalViews); } } @@ -351,6 +373,28 @@ void TConvLayer::ReadWeightsFromXML(void *parent) this->ReadMatrixXML(parent,"Biases", this -> GetBiasesAt(0)); } +template +size_t TConvLayer::calculateDimension(int imgDim, int fltDim, int padding, int stride) +{ + Scalar_t dimension = ((imgDim - fltDim + 2 * padding) / stride) + 1; + if (!isInteger(dimension) || dimension <= 0) { + Fatal("calculateDimension", "Not compatible hyper parameters for layer - (imageDim, filterDim, padding, stride) %d , %d , %d , %d", + imgDim, fltDim, padding, stride); + } + + return (size_t)dimension; +} + +template +size_t TConvLayer::calculateNLocalViews(int inputHeight, int filterHeight, int paddingHeight, + int strideRows, int inputWidth, int filterWidth, + int paddingWidth, int strideCols) +{ + int height = calculateDimension(inputHeight, filterHeight, paddingHeight, strideRows); + int width = calculateDimension(inputWidth, filterWidth, paddingWidth, strideCols); + + return height * width; +} } // namespace CNN } // namespace DNN diff --git a/tmva/tmva/inc/TMVA/DNN/CNN/MaxPoolLayer.h b/tmva/tmva/inc/TMVA/DNN/CNN/MaxPoolLayer.h index 98ad5843e6bb1..6678a8da8525f 100644 --- a/tmva/tmva/inc/TMVA/DNN/CNN/MaxPoolLayer.h +++ b/tmva/tmva/inc/TMVA/DNN/CNN/MaxPoolLayer.h @@ -29,7 +29,7 @@ #include "TMatrix.h" -#include "TMVA/DNN/GeneralLayer.h" +#include "TMVA/DNN/CNN/ConvLayer.h" #include "TMVA/DNN/Functions.h" #include @@ -43,38 +43,30 @@ namespace CNN { Generic Max Pooling Layer class. This generic Max Pooling Layer Class represents a pooling layer of - a CNN. It inherits all of the properties of the generic virtual base class - VGeneralLayer. In addition to that, it contains a matrix of winning units. + a CNN. It inherits all of the properties of the convolutional layer + TConvLayer, but it overrides the propagation methods. 
In a sense, max pooling + can be seen as non-linear convolution: a filter slides over the input and produces + one element as a function of the elements within the receptive field. + In addition to that, it contains a matrix of winning units. The height and width of the weights and biases is set to 0, since this layer does not contain any weights. */ template -class TMaxPoolLayer : public VGeneralLayer { +class TMaxPoolLayer : public TConvLayer { + public: - using Matrix_t = typename Architecture_t::Matrix_t; - using Scalar_t = typename Architecture_t::Scalar_t; + using Matrix_t = typename Architecture_t::Matrix_t; + using Scalar_t = typename Architecture_t::Scalar_t; private: std::vector indexMatrix; ///< Matrix of indices for the backward pass. - size_t fFrameHeight; ///< The height of the frame. - size_t fFrameWidth; ///< The width of the frame. - - size_t fStrideRows; ///< The number of row pixels to slid the filter each step. - size_t fStrideCols; ///< The number of column pixels to slid the filter each step. - - size_t fNLocalViewPixels; ///< The number of pixels in one local image view. - size_t fNLocalViews; ///< The number of local views in one image. - - Scalar_t fDropoutProbability; ///< Probability that an input is active. - public: /*! Constructor. */ - TMaxPoolLayer(size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Height, - size_t Width, size_t OutputNSlices, size_t OutputNRows, size_t OutputNCols, size_t FrameHeight, - size_t FrameWidth, size_t StrideRows, size_t StrideCols, Scalar_t DropoutProbability); + TMaxPoolLayer(size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t FilterHeight, + size_t FilterWidth, size_t StrideRows, size_t StrideCols, Scalar_t DropoutProbability); /*! Copy the max pooling layer provided as a pointer */ TMaxPoolLayer(TMaxPoolLayer *layer); @@ -104,7 +96,6 @@ class TMaxPoolLayer : public VGeneralLayer { /*! 
Read the information and the weights about the layer from XML node. */ virtual void ReadWeightsFromXML(void *parent); - /*! Prints the info about the layer. */ void Print() const; @@ -112,29 +103,18 @@ class TMaxPoolLayer : public VGeneralLayer { const std::vector &GetIndexMatrix() const { return indexMatrix; } std::vector &GetIndexMatrix() { return indexMatrix; } - size_t GetFrameHeight() const { return fFrameHeight; } - size_t GetFrameWidth() const { return fFrameWidth; } - - size_t GetStrideRows() const { return fStrideRows; } - size_t GetStrideCols() const { return fStrideCols; } - - size_t GetNLocalViewPixels() const { return fNLocalViewPixels; } - size_t GetNLocalViews() const { return fNLocalViews; } - - Scalar_t GetDropoutProbability() const { return fDropoutProbability; } }; //______________________________________________________________________________ template TMaxPoolLayer::TMaxPoolLayer(size_t batchSize, size_t inputDepth, size_t inputHeight, size_t inputWidth, - size_t height, size_t width, size_t outputNSlices, size_t outputNRows, - size_t outputNCols, size_t frameHeight, size_t frameWidth, - size_t strideRows, size_t strideCols, Scalar_t dropoutProbability) - : VGeneralLayer(batchSize, inputDepth, inputHeight, inputWidth, inputDepth, height, width, 0, 0, 0, - 0, 0, 0, outputNSlices, outputNRows, outputNCols, EInitialization::kZero), - indexMatrix(), fFrameHeight(frameHeight), fFrameWidth(frameWidth), fStrideRows(strideRows), - fStrideCols(strideCols), fNLocalViewPixels(inputDepth * frameHeight * frameWidth), fNLocalViews(height * width), - fDropoutProbability(dropoutProbability) + size_t filterHeight, size_t filterWidth, size_t strideRows, + size_t strideCols, Scalar_t dropoutProbability) + + : TConvLayer(batchSize, inputDepth, inputHeight, inputWidth, inputDepth, EInitialization::kZero, + filterHeight, filterWidth, strideRows, strideCols, 0, 0, dropoutProbability, + EActivationFunction::kIdentity, ERegularization::kNone, 0), + indexMatrix() { for 
(size_t i = 0; i < this->GetBatchSize(); i++) { indexMatrix.emplace_back(this->GetDepth(), this->GetNLocalViews()); @@ -144,10 +124,7 @@ TMaxPoolLayer::TMaxPoolLayer(size_t batchSize, size_t inputDepth //______________________________________________________________________________ template TMaxPoolLayer::TMaxPoolLayer(TMaxPoolLayer *layer) - : VGeneralLayer(layer), indexMatrix(), fFrameHeight(layer->GetFrameHeight()), - fFrameWidth(layer->GetFrameWidth()), fStrideRows(layer->GetStrideRows()), fStrideCols(layer->GetStrideCols()), - fNLocalViewPixels(layer->GetNLocalViewPixels()), fNLocalViews(layer->GetNLocalViews()), - fDropoutProbability(layer->GetDropoutProbability()) + : TConvLayer(layer), indexMatrix() { for (size_t i = 0; i < layer->GetBatchSize(); i++) { indexMatrix.emplace_back(layer->GetDepth(), layer->GetNLocalViews()); @@ -157,10 +134,7 @@ TMaxPoolLayer::TMaxPoolLayer(TMaxPoolLayer *laye //______________________________________________________________________________ template TMaxPoolLayer::TMaxPoolLayer(const TMaxPoolLayer &layer) - : VGeneralLayer(layer), indexMatrix(), fFrameHeight(layer.fFrameHeight), - fFrameWidth(layer.fFrameWidth), fStrideRows(layer.fStrideRows), fStrideCols(layer.fStrideCols), - fNLocalViewPixels(layer.fNLocalViewPixels), fNLocalViews(layer.fNLocalViews), - fDropoutProbability(layer.fDropoutProbability) + : TConvLayer(layer), indexMatrix() { for (size_t i = 0; i < layer.fBatchSize; i++) { indexMatrix.emplace_back(layer.fDepth, layer.fNLocalViews); @@ -184,7 +158,7 @@ auto TMaxPoolLayer::Forward(std::vector &input, bool a } Architecture_t::Downsample(this->GetOutputAt(i), indexMatrix[i], input[i], this->GetInputHeight(), - this->GetInputWidth(), this->GetFrameHeight(), this->GetFrameWidth(), + this->GetInputWidth(), this->GetFilterHeight(), this->GetFilterWidth(), this->GetStrideRows(), this->GetStrideCols()); } } @@ -209,8 +183,8 @@ auto TMaxPoolLayer::Print() const -> void std::cout << " H = " << this->GetHeight() << " , "; 
std::cout << " D = " << this->GetDepth() << " ) "; - std::cout << "\t Frame ( W = " << this->GetFrameWidth() << " , "; - std::cout << " H = " << this->GetFrameHeight() << " ) "; + std::cout << "\t Filter ( W = " << this->GetFilterWidth() << " , "; + std::cout << " H = " << this->GetFilterHeight() << " ) "; if (this->GetOutput().size() > 0) { std::cout << "\tOutput = ( " << this->GetOutput().size() << " , " << this->GetOutput()[0].GetNrows() << " , " << this->GetOutput()[0].GetNcols() << " ) "; @@ -225,8 +199,8 @@ void TMaxPoolLayer::AddWeightsXMLTo(void *parent) auto layerxml = gTools().xmlengine().NewChild(parent, 0, "MaxPoolLayer"); // write maxpool layer info - gTools().xmlengine().NewAttr(layerxml, 0, "FrameHeight", gTools().StringFromInt(this->GetFrameHeight())); - gTools().xmlengine().NewAttr(layerxml, 0, "FrameWidth", gTools().StringFromInt(this->GetFrameWidth())); + gTools().xmlengine().NewAttr(layerxml, 0, "FilterHeight", gTools().StringFromInt(this->GetFilterHeight())); + gTools().xmlengine().NewAttr(layerxml, 0, "FilterWidth", gTools().StringFromInt(this->GetFilterWidth())); gTools().xmlengine().NewAttr(layerxml, 0, "StrideRows", gTools().StringFromInt(this->GetStrideRows())); gTools().xmlengine().NewAttr(layerxml, 0, "StrideCols", gTools().StringFromInt(this->GetStrideCols())); From ed03a27761282af042a80d9d113c7734ed0828de Mon Sep 17 00:00:00 2001 From: Emmanouil Stergiadis Date: Fri, 22 Jun 2018 17:16:37 +0200 Subject: [PATCH 2/4] Adapted `DeepNet` to the new API --- tmva/tmva/inc/TMVA/DNN/DeepNet.h | 40 +++++--------------------------- 1 file changed, 6 insertions(+), 34 deletions(-) diff --git a/tmva/tmva/inc/TMVA/DNN/DeepNet.h b/tmva/tmva/inc/TMVA/DNN/DeepNet.h index 400546ce343f3..884a90568a3d0 100644 --- a/tmva/tmva/inc/TMVA/DNN/DeepNet.h +++ b/tmva/tmva/inc/TMVA/DNN/DeepNet.h @@ -380,8 +380,8 @@ auto TDeepNet::calculateDimension(int imgDim, int fltDi { Scalar_t dimension = ((imgDim - fltDim + 2 * padding) / stride) + 1; if (!isInteger(dimension) 
|| dimension <= 0) { - this->Print(); - int iLayer = fLayers.size(); + this->Print(); + int iLayer = fLayers.size(); Fatal("calculateDimension","Not compatible hyper parameters for layer %d - (imageDim, filterDim, padding, stride) %d , %d , %d , %d", iLayer, imgDim, fltDim, padding, stride); // std::cout << " calculateDimension - Not compatible hyper parameters (imgDim, fltDim, padding, stride)" @@ -405,16 +405,6 @@ TConvLayer *TDeepNet::AddConvLayer(size size_t inputDepth; size_t inputHeight; size_t inputWidth; - size_t height; - size_t width; - size_t filterDepth; - size_t weightsNRows = depth; - size_t weightsNCols; - size_t biasesNRows = depth; - size_t biasesNCols = 1; - size_t outputNSlices = this->GetBatchSize(); - size_t outputNRows = depth; - size_t outputNCols; EInitialization init = this->GetInitialization(); ERegularization reg = this->GetRegularization(); Scalar_t decay = this->GetWeightDecay(); @@ -430,19 +420,12 @@ TConvLayer *TDeepNet::AddConvLayer(size inputWidth = lastLayer->GetWidth(); } - height = calculateDimension(inputHeight, filterHeight, paddingHeight, strideRows); - width = calculateDimension(inputWidth, filterWidth, paddingWidth, strideCols); - - filterDepth = inputDepth; - weightsNCols = filterDepth * filterHeight * filterWidth; - outputNCols = height * width; // Create the conv layer TConvLayer *convLayer = new TConvLayer( - batchSize, inputDepth, inputHeight, inputWidth, depth, height, width, weightsNRows, weightsNCols, biasesNRows, - biasesNCols, outputNSlices, outputNRows, outputNCols, init, filterDepth, filterHeight, filterWidth, strideRows, - strideCols, paddingHeight, paddingWidth, dropoutProbability, f, reg, decay); + batchSize, inputDepth, inputHeight, inputWidth, depth, init, filterHeight, filterWidth, strideRows, + strideCols, paddingHeight, paddingWidth, dropoutProbability, f, reg, decay); fLayers.push_back(convLayer); return convLayer; @@ -465,11 +448,6 @@ TMaxPoolLayer *TDeepNet::AddMaxPoolLaye size_t inputDepth; size_t 
inputHeight; size_t inputWidth; - size_t height; - size_t width; - size_t outputNSlices = this->GetBatchSize(); - size_t outputNRows; - size_t outputNCols; if (fLayers.size() == 0) { inputDepth = this->GetInputDepth(); @@ -482,15 +460,9 @@ TMaxPoolLayer *TDeepNet::AddMaxPoolLaye inputWidth = lastLayer->GetWidth(); } - height = calculateDimension(inputHeight, frameHeight, 0, strideRows); - width = calculateDimension(inputWidth, frameWidth, 0, strideCols); - - outputNRows = inputDepth; - outputNCols = height * width; - TMaxPoolLayer *maxPoolLayer = new TMaxPoolLayer( - batchSize, inputDepth, inputHeight, inputWidth, height, width, outputNSlices, outputNRows, outputNCols, - frameHeight, frameWidth, strideRows, strideCols, dropoutProbability); + batchSize, inputDepth, inputHeight, inputWidth, frameHeight, frameWidth, + strideRows, strideCols, dropoutProbability); // But this creates a copy or what? fLayers.push_back(maxPoolLayer); From 29249ff95ba6d30a256e1127543c452d05a01fc8 Mon Sep 17 00:00:00 2001 From: Emmanouil Stergiadis Date: Fri, 22 Jun 2018 17:18:22 +0200 Subject: [PATCH 3/4] Renamed kernel/filter fields to use the prefix "filter" rather than "frame". This is important to have the same naming convention everywhere. 
--- tmva/tmva/src/MethodDL.cxx | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/tmva/tmva/src/MethodDL.cxx b/tmva/tmva/src/MethodDL.cxx index dac6332e00381..2bcfb57faa773 100644 --- a/tmva/tmva/src/MethodDL.cxx +++ b/tmva/tmva/src/MethodDL.cxx @@ -684,8 +684,8 @@ void MethodDL::ParseMaxPoolLayer(DNN::TDeepNet &deepNet TString delim) { - int frameHeight = 0; - int frameWidth = 0; + int filterHeight = 0; + int filterWidth = 0; int strideRows = 0; int strideCols = 0; @@ -697,15 +697,15 @@ void MethodDL::ParseMaxPoolLayer(DNN::TDeepNet &deepNet for (; token != nullptr; token = (TObjString *)nextToken()) { switch (idxToken) { - case 1: // frame height + case 1: // filter height { TString strFrmHeight(token->GetString()); - frameHeight = strFrmHeight.Atoi(); + filterHeight = strFrmHeight.Atoi(); } break; - case 2: // frame width + case 2: // filter width { TString strFrmWidth(token->GetString()); - frameWidth = strFrmWidth.Atoi(); + filterWidth = strFrmWidth.Atoi(); } break; case 3: // stride in rows { @@ -723,10 +723,11 @@ void MethodDL::ParseMaxPoolLayer(DNN::TDeepNet &deepNet // Add the Max pooling layer // TMaxPoolLayer *maxPoolLayer = - deepNet.AddMaxPoolLayer(frameHeight, frameWidth, strideRows, strideCols); + deepNet.AddMaxPoolLayer(filterHeight, filterWidth, strideRows, strideCols); // Add the same layer to fNet - if (fBuildNet) fNet->AddMaxPoolLayer(frameHeight, frameWidth, strideRows, strideCols); + if (fBuildNet) fNet->AddMaxPoolLayer(filterHeight, filterWidth, strideRows, strideCols); + //TMaxPoolLayer *copyMaxPoolLayer = new TMaxPoolLayer(*maxPoolLayer); @@ -1603,14 +1604,14 @@ void MethodDL::ReadWeightsFromXML(void * rootXML) else if (layerName == "MaxPoolLayer") { // read maxpool layer info - size_t frameHeight, frameWidth = 0; + size_t filterHeight, filterWidth = 0; size_t strideRows, strideCols = 0; - gTools().ReadAttr(layerXML, "FrameHeight", frameHeight); - gTools().ReadAttr(layerXML, "FrameWidth", 
frameWidth); + gTools().ReadAttr(layerXML, "FilterHeight", filterHeight); + gTools().ReadAttr(layerXML, "FilterWidth", filterWidth); gTools().ReadAttr(layerXML, "StrideRows", strideRows); gTools().ReadAttr(layerXML, "StrideCols", strideCols); - fNet->AddMaxPoolLayer(frameHeight, frameWidth, strideRows, strideCols); + fNet->AddMaxPoolLayer(filterHeight, filterWidth, strideRows, strideCols); } else if (layerName == "ReshapeLayer") { From 09cb78252d40d38fb9c73884803b1dd6fdedda3d Mon Sep 17 00:00:00 2001 From: Emmanouil Stergiadis Date: Fri, 22 Jun 2018 18:51:57 +0200 Subject: [PATCH 4/4] Code review corrections --- tmva/tmva/inc/TMVA/DNN/CNN/ConvLayer.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tmva/tmva/inc/TMVA/DNN/CNN/ConvLayer.h b/tmva/tmva/inc/TMVA/DNN/CNN/ConvLayer.h index 4eb6ef7048eb3..48810ffc97270 100644 --- a/tmva/tmva/inc/TMVA/DNN/CNN/ConvLayer.h +++ b/tmva/tmva/inc/TMVA/DNN/CNN/ConvLayer.h @@ -374,7 +374,7 @@ void TConvLayer::ReadWeightsFromXML(void *parent) } template -size_t TConvLayer::calculateDimension(int imgDim, int fltDim, int padding, int stride) +size_t TConvLayer::calculateDimension(size_t imgDim, size_t fltDim, size_t padding, size_t stride) { Scalar_t dimension = ((imgDim - fltDim + 2 * padding) / stride) + 1; if (!isInteger(dimension) || dimension <= 0) { @@ -386,9 +386,9 @@ size_t TConvLayer::calculateDimension(int imgDim, int fltDim, in } template -size_t TConvLayer::calculateNLocalViews(int inputHeight, int filterHeight, int paddingHeight, - int strideRows, int inputWidth, int filterWidth, - int paddingWidth, int strideCols) +size_t TConvLayer::calculateNLocalViews(size_t inputHeight, size_t filterHeight, size_t paddingHeight, + size_t strideRows, size_t inputWidth, size_t filterWidth, + size_t paddingWidth, size_t strideCols) { int height = calculateDimension(inputHeight, filterHeight, paddingHeight, strideRows); int width = calculateDimension(inputWidth, filterWidth, paddingWidth, strideCols);