Commit 01f7ac4a authored by Pawel Sznajder's avatar Pawel Sznajder
Browse files

add documentation and members to evaluate mean and sigma for DVCSCFFNN

parent 69502a07
......@@ -8,6 +8,7 @@
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "../../../beans/automation/BaseObjectData.h"
#include "../../../beans/gpd/GPDType.h"
......@@ -15,18 +16,42 @@
namespace PARTONS {
/**
* @class DVCSCFFNN
*
* @brief DVCS CFFs based on neural network analysis.
*
 * This module returns CFFs as estimated from the fit to world data published in: Eur.Phys.J. C79 (2019) no.7, 614.
*
* Mean values and uncertainties should be estimated from a set of values returned by 101 replicas. To run the evaluation for
* a given replica one should use either DVCSCFFNN::loadParameters() function, or DVCSCFFNN::configure() with
* DVCSCFFNN::PARAMETER_NAME_REPLICA parameter (to be used also in xml).
*
* Mean values and uncertainties can be evaluated from a set of values using DVCSCFFNN::getMeanAndUncertainty().
* Please note the procedure to remove the outliers.
*/
class DVCSCFFNN: public DVCSConvolCoeffFunctionModule {
public:
static const std::string PARAMETER_NAME_REPLICA;
static const std::string PARAMETER_NAME_REPLICA; ///< Name of parameter to set replica index via xml file.
static const unsigned int classId;
static const unsigned int classId; ///< Unique ID to automatically register the class in the registry.
/**
* Constructor.
* See BaseObject::BaseObject and ModuleObject::ModuleObject for more details.
* @param className Name of last child class.
*/
DVCSCFFNN(const std::string &className);
virtual DVCSCFFNN* clone() const;
/**
* Destructor.
*/
virtual ~DVCSCFFNN();
virtual DVCSCFFNN* clone() const;
virtual void configure(const ElemUtils::Parameters &parameters);
virtual void resolveObjectDependencies();
......@@ -35,13 +60,29 @@ public:
virtual std::complex<double> computeCFF();
void loadParameters(size_t replica);
/**
* Load parameters for a given replica index.
*/
void loadParameters(size_t replica, bool printInfo = true);
/**
* Get neural networks.
*/
const std::map<PARTONS::GPDType::Type,
std::pair<NumA::NeuralNetwork*, NumA::NeuralNetwork*> >& getNeuralNetworks() const;
/**
 * Evaluate mean and uncertainty for a given vector of numbers. The procedure includes the removal of outliers.
*/
void getMeanAndUncertainty(const std::vector<double>& v, double& mean,
double& unc) const;
protected:
/**
* Copy constructor.
* @param other Object to be copied.
*/
DVCSCFFNN(const DVCSCFFNN &other);
virtual void initModule();
......@@ -49,20 +90,42 @@ protected:
private:
/**
* Build all neural networks.
*/
void buildNeuralNetworks();
/**
 * Build a single neural network for a given GPD type and either the real or the imaginary part of the CFF.
*/
NumA::NeuralNetwork* buildAndConfigureSingleNeuralNetwork(
PARTONS::GPDType::Type gpdType, bool isReal);
/**
* Evaluate mean from a given vector.
*/
double getMean(const std::vector<double>& v) const;
/**
* Evaluate sigma from a given vector.
*/
double getSigma(const std::vector<double>& v) const;
/**
 * Remove outliers from a given vector using the 3-sigma rule.
*/
size_t removeOutliers(std::vector<double>& v) const;
std::map<PARTONS::GPDType::Type,
std::pair<NumA::NeuralNetwork*, NumA::NeuralNetwork*> > m_neuralNetworks;
std::pair<NumA::NeuralNetwork*, NumA::NeuralNetwork*> > m_neuralNetworks; ///< Neural networks
std::pair<double, double> m_rangeLog10Xi;
std::pair<double, double> m_rangeT;
std::pair<double, double> m_rangeQ2;
std::map<PARTONS::GPDType::Type, std::pair<double, double> > m_rangeXiReCFF;
std::map<PARTONS::GPDType::Type, std::pair<double, double> > m_rangeXiImCFF;
std::pair<double, double> m_rangeLog10Xi; ///< Normalization range: log10xi
std::pair<double, double> m_rangeT; ///< Normalization range: t
std::pair<double, double> m_rangeLog10Q2; ///< Normalization range: log10Q2
std::map<PARTONS::GPDType::Type, std::pair<double, double> > m_rangeXiReCFF; ///< Normalization range: xi*ReCFF
std::map<PARTONS::GPDType::Type, std::pair<double, double> > m_rangeXiImCFF; ///< Normalization range: xi*ImCFF
size_t m_replica;
size_t m_replica; ///< Replica index.
};
}
......
......@@ -14,7 +14,6 @@
#include <NumA/neural_network/neural_network_neuron/NeuralNetworkNeuron.h>
#include <cmath>
#include <iterator>
#include <vector>
#include "../../../../../include/partons/BaseObjectRegistry.h"
#include "../../../../../include/partons/modules/convol_coeff_function/ConvolCoeffFunctionModule.h"
......@@ -64,7 +63,7 @@ DVCSCFFNN::DVCSCFFNN(const std::string &className) :
m_rangeLog10Xi = std::make_pair(-6., 1.);
m_rangeT = std::make_pair(-1., 0.5);
m_rangeQ2 = std::make_pair(-1., 2.);
m_rangeLog10Q2 = std::make_pair(-1., 2.);
m_rangeXiReCFF.insert(std::make_pair(GPDType::H, std::make_pair(-1., 1.5)));
m_rangeXiReCFF.insert(std::make_pair(GPDType::E, std::make_pair(-1.5, 1.)));
......@@ -82,7 +81,7 @@ DVCSCFFNN::DVCSCFFNN(const std::string &className) :
m_replica = 0;
buildNeuralNetworks();
loadParameters(m_replica);
loadParameters(m_replica, false);
}
DVCSCFFNN::DVCSCFFNN(const DVCSCFFNN &other) :
......@@ -108,7 +107,7 @@ DVCSCFFNN::DVCSCFFNN(const DVCSCFFNN &other) :
m_rangeLog10Xi = other.m_rangeLog10Xi;
m_rangeT = other.m_rangeT;
m_rangeQ2 = other.m_rangeQ2;
m_rangeLog10Q2 = other.m_rangeLog10Q2;
m_rangeXiReCFF = other.m_rangeXiReCFF;
m_rangeXiImCFF = other.m_rangeXiImCFF;
......@@ -228,7 +227,7 @@ NumA::NeuralNetwork* DVCSCFFNN::buildAndConfigureSingleNeuralNetwork(
"No scaling cell type");
}
pNNCell = static_cast<NumA::ScalingCell*>(pNNLayer->getCells().at(2));
pNNCell->setScalingParameters(m_rangeQ2);
pNNCell->setScalingParameters(m_rangeLog10Q2);
//output
pNNLayer = pNN->getNeuralNetworkLayers().at(
......@@ -252,7 +251,7 @@ NumA::NeuralNetwork* DVCSCFFNN::buildAndConfigureSingleNeuralNetwork(
return pNN;
}
void DVCSCFFNN::loadParameters(size_t replica) {
void DVCSCFFNN::loadParameters(size_t replica, bool printInfo) {
if (replica >= c_nDVCSCFFNNReplicas) {
throw ElemUtils::CustomException(getClassName(), __func__,
......@@ -326,8 +325,11 @@ void DVCSCFFNN::loadParameters(size_t replica) {
}
}
info(__func__,
ElemUtils::Formatter() << "Parameters set for replica " << replica);
if (printInfo) {
info(__func__,
ElemUtils::Formatter() << "Parameters set for replica "
<< replica);
}
}
void DVCSCFFNN::prepareSubModules(
......@@ -385,4 +387,85 @@ const std::map<GPDType::Type,
return m_neuralNetworks;
}
/**
 * Evaluate the arithmetic mean of a given vector.
 * @param v Input values; must be non-empty.
 * @return Mean of the elements of v.
 * @throws ElemUtils::CustomException if v is empty.
 */
double DVCSCFFNN::getMean(const std::vector<double>& v) const {

    if (v.empty()) {
        throw ElemUtils::CustomException(getClassName(), __func__,
                "vector size is 0");
    }

    // Range-based loop avoids the signed/unsigned comparison of the
    // original "int i < v.size()" index loop.
    double sum = 0.;

    for (const double value : v) {
        sum += value;
    }

    return sum / double(v.size());
}
/**
 * Evaluate the standard deviation (population sigma, i.e. normalized by N)
 * of a given vector.
 * @param v Input values; must be non-empty.
 * @return Standard deviation of the elements of v.
 * @throws ElemUtils::CustomException if v is empty (thrown via getMean() as
 * well).
 */
double DVCSCFFNN::getSigma(const std::vector<double>& v) const {

    if (v.empty()) {
        throw ElemUtils::CustomException(getClassName(), __func__,
                "vector size is 0");
    }

    const double mean = getMean(v);

    // Range-based loop avoids the signed/unsigned comparison of the
    // original "int i < v.size()" index loop; d * d is cheaper and exact
    // compared to pow(d, 2).
    double sumOfSquares = 0.;

    for (const double value : v) {
        const double d = value - mean;
        sumOfSquares += d * d;
    }

    return sqrt(sumOfSquares / double(v.size()));
}
/**
 * Remove outliers from a given vector in place using the 3-sigma rule.
 * Passes are repeated (mean and sigma recomputed on the reduced sample)
 * until a pass removes no element, matching the original recursive scheme.
 * @param v Vector to be filtered in place; must be non-empty.
 * @return Total number of removed elements.
 * @throws ElemUtils::CustomException if v is empty.
 */
size_t DVCSCFFNN::removeOutliers(std::vector<double>& v) const {

    if (v.empty()) {
        throw ElemUtils::CustomException(getClassName(), __func__,
                "vector size is 0");
    }

    size_t nRemovedTotal = 0;

    // Fixed-point iteration replaces the original tail recursion.
    for (;;) {

        const double meanData = getMean(v);
        const double sigmaData = getSigma(v);

        // Degenerate sample (all values identical): nothing can be
        // classified as an outlier, stop here.
        if (sigmaData == 0.) {
            warn(__func__, "sigma is 0");
            return nRemovedTotal;
        }

        // Compact the vector in place (no per-pass temporary copy as in
        // the original): keep only values within the 3-sigma band.
        size_t writeIdx = 0;

        for (size_t readIdx = 0; readIdx < v.size(); readIdx++) {
            if (fabs(v.at(readIdx) - meanData) / sigmaData <= 3.) {
                v.at(writeIdx++) = v.at(readIdx);
            }
        }

        const size_t nRemoved = v.size() - writeIdx;

        v.resize(writeIdx);

        // Converged: last pass removed nothing.
        if (nRemoved == 0) {
            return nRemovedTotal;
        }

        nRemovedTotal += nRemoved;
    }
}
/**
 * Evaluate mean and uncertainty (sigma) for a given vector of numbers,
 * after removing 3-sigma outliers. The input vector is left untouched.
 * @param v Input values.
 * @param mean Output: mean of the outlier-free sample.
 * @param unc Output: standard deviation of the outlier-free sample.
 */
void DVCSCFFNN::getMeanAndUncertainty(const std::vector<double>& v,
        double& mean, double& unc) const {

    // Work on a copy so the caller's data is not modified.
    std::vector<double> filtered(v);

    removeOutliers(filtered);

    // Statistics are evaluated on the outlier-free sample only.
    mean = getMean(filtered);
    unc = getSigma(filtered);
}
}
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment