diff --git a/src/optimizers/neuralnetworkoptimizer.cpp b/src/optimizers/neuralnetworkoptimizer.cpp index be63db3c..7a4da3e6 100644 --- a/src/optimizers/neuralnetworkoptimizer.cpp +++ b/src/optimizers/neuralnetworkoptimizer.cpp @@ -70,7 +70,7 @@ NeuralNetworkOptimizer::NeuralNetworkOptimizer( Config* settings ) : OptimizerBa _tfModel = new cppflow::model( tfModelPath ); // load model unsigned servingSize = _settings->GetNCells(); if( _settings->GetEnforceNeuralRotationalSymmetry() ) { - if( _settings->GetMaxMomentDegree() > 2 ) { + if( _settings->GetMaxMomentDegree() > 3 ) { - ErrorMessages::Error( "This postprocessing step is currently only for M1 and M2 models available.", CURRENT_FUNCTION ); + ErrorMessages::Error( "This postprocessing step is currently only for M1, M2 and M3 models available.", CURRENT_FUNCTION ); } servingSize *= 2; // Double number of vectors, since we mirror the rotated vector @@ -78,12 +78,11 @@ NeuralNetworkOptimizer::NeuralNetworkOptimizer( Config* settings ) : OptimizerBa _rotationMatsT.resize( _settings->GetNCells() ); } - if (_settings->GetMaxMomentDegree() == 1 && _settings->GetDim() ==2 && _settings->GetEnforceNeuralRotationalSymmetry()){ + if( _settings->GetMaxMomentDegree() == 1 && _settings->GetDim() == 2 && _settings->GetEnforceNeuralRotationalSymmetry() ) { _modelServingVectorU.resize( servingSize * ( _nSystem - 2 ) ); // reserve size for model servitor } - else - { - _modelServingVectorU.resize( servingSize * ( _nSystem - 1 ) ); // reserve size for model servitor + else { + _modelServingVectorU.resize( servingSize * ( _nSystem - 1 ) ); // reserve size for model servitor } // Specify model input name // Call Model (change call depending on model mk) (Seems to be randomly assigned by tensorflow) @@ -367,7 +366,7 @@ void NeuralNetworkOptimizer::InferenceMonomial( VectorVector& alpha, const Vecto } servingSize *= 2; } - else { // No Preprocessing + else { // No Postprocessing #pragma omp parallel for for( unsigned idx_cell = 0; idx_cell < _settings->GetNCells(); idx_cell++ ) { for( unsigned idx_sys = 0; idx_sys < _nSystem - 1; idx_sys++ ) { @@ -402,6 +401,7 
@@ void NeuralNetworkOptimizer::InferenceMonomial( VectorVector& alpha, const Vecto // Mirror order 1 Moments alphaRedMirror[idx_sys] = -1 * (double)_modelServingVectorAlpha[( _settings->GetNCells() + idx_cell ) * ( _nSystem - 1 ) + idx_sys]; + alphaRed[idx_sys] = ( alphaRed[idx_sys] + alphaRedMirror[idx_sys] ) / 2; // average (and store in alphaRed) // alphaCorr[idx_sys+1] = alphaRed[idx_sys]; } @@ -460,6 +460,7 @@ void NeuralNetworkOptimizer::InferenceMonomial( VectorVector& alpha, const Vecto } alphaRed[idx_sys] = ( alphaRed[idx_sys] + alphaRedMirror[idx_sys] ) / 2; // average (and store in alphaRed) } + alpha_norms[idx_cell] = norm( alphaRed ) * norm( alphaRed ); // Rotate Back Vector alpha1{ alphaRed[0], alphaRed[1] }; @@ -594,7 +595,7 @@ void NeuralNetworkOptimizer::InferenceSphericalHarmonics2D( VectorVector& alpha, Vector alphaTempFull = Vector( _nSystem, 0.0 ); // local reduced mirrored alpha (with dummy entry at 0) Vector alphaTempMirror = Vector( _nSystem, 0.0 ); // local reduced mirrored alpha (with dummy entry at 0) - if( _settings->GetMaxMomentDegree() == 1 ) { // Using this + if( _settings->GetMaxMomentDegree() == 1 ) { // Using this for( unsigned idx_sys = 0; idx_sys < _nSystem - 2; idx_sys++ ) { alphaTempFull[idx_sys + 1] = (double)_modelServingVectorAlpha[idx_cell * ( _nSystem - 2 ) + idx_sys]; alphaTempMirror[idx_sys + 1] = @@ -754,7 +755,7 @@ void NeuralNetworkOptimizer::InferenceSphericalHarmonics( VectorVector& alpha, alpha_P_Mirror[idx_sys + 1] = (double)_modelServingVectorAlpha[( _settings->GetNCells() + idx_cell ) * ( _nSystem - 1 ) + idx_sys]; } alpha_P_Mirror = rot180 * alpha_P_Mirror; - alpha[idx_cell] = ( alphaP + alpha_P_Mirror ) / 2; // average (and store in alpha) + alpha[idx_cell] = ( alphaP + alpha_P_Mirror ) / 2; // average (and store in alpha) alpha[idx_cell] = _rotationMatsT[idx_cell] * alpha[idx_cell]; // Rotate back // alpha[idx_cell][2] = 0.0; //manually enforce slab geometry diff --git 
a/tools/docker/docker_run_interactive_specify_cores.sh b/tools/docker/docker_run_interactive_specify_cores.sh index bea10910..a288e322 100644 --- a/tools/docker/docker_run_interactive_specify_cores.sh +++ b/tools/docker/docker_run_interactive_specify_cores.sh @@ -1 +1 @@ -docker run -i -t --rm --cpuset-cpus=0-12 -v $(pwd)/../..:/mnt kitrt/test_ml:latest /bin/bash +docker run -i -t --rm --cpuset-cpus=0-23 -v $(pwd)/../..:/mnt kitrt/test_ml:latest /bin/bash