diff --git a/NerlnetBuild.sh b/NerlnetBuild.sh index 1854d4fa..b41565d7 100755 --- a/NerlnetBuild.sh +++ b/NerlnetBuild.sh @@ -166,6 +166,8 @@ if command -v python3 >/dev/null 2>&1; then AUTOGENERATED_DC_DEFINITIONS_PATH_HRL="`pwd`/src_erl/NerlnetApp/src/dc_definitions_ag.hrl" AUTOGENERATED_SOURCE_DEFINITIONS_PATH_HRL="`pwd`/src_erl/NerlnetApp/src/source_definitions_ag.hrl" AUTOGENERATED_ROUTER_DEFINITIONS_PATH_HRL="`pwd`/src_erl/NerlnetApp/src/router_definitions_ag.hrl" + AUTOGENERATED_LAYERS_TYPE_INDEX_DEFINITIONS_PATH_HRL="`pwd`/src_erl/NerlnetApp/src/Bridge/layers_types_ag.hrl" + AUTOGENERATED_MODELS_TYPES_INDEX_DEFINITIONS_PATH_HRL="`pwd`/src_erl/NerlnetApp/src/Bridge/models_types_ag.hrl" echo "$NERLNET_BUILD_PREFIX Generate auto-generated files" python3 src_py/nerlPlanner/CppHeadersExporter.py --output $AUTOGENERATED_WORKER_DEFINITIONS_PATH #--debug @@ -173,6 +175,8 @@ if command -v python3 >/dev/null 2>&1; then python3 src_py/nerlPlanner/ErlHeadersExporter.py --gen_dc_fields_hrl --output $AUTOGENERATED_DC_DEFINITIONS_PATH_HRL #--debug python3 src_py/nerlPlanner/ErlHeadersExporter.py --gen_source_fields_hrl --output $AUTOGENERATED_SOURCE_DEFINITIONS_PATH_HRL #--debug python3 src_py/nerlPlanner/ErlHeadersExporter.py --gen_router_fields_hrl --output $AUTOGENERATED_ROUTER_DEFINITIONS_PATH_HRL #--debug + python3 src_py/nerlPlanner/ErlHeadersExporter.py --gen_layers_type_hrl --output $AUTOGENERATED_LAYERS_TYPE_INDEX_DEFINITIONS_PATH_HRL #--debug + python3 src_py/nerlPlanner/ErlHeadersExporter.py --gen_models_types_hrl --output $AUTOGENERATED_MODELS_TYPES_INDEX_DEFINITIONS_PATH_HRL #--debug set +e else echo "$NERLNET_BUILD_PREFIX Python 3 is not installed" diff --git a/src_cpp/common/CMakeLists.txt b/src_cpp/common/CMakeLists.txt index 8ede71c6..4d419fe4 100644 --- a/src_cpp/common/CMakeLists.txt +++ b/src_cpp/common/CMakeLists.txt @@ -1,6 +1,7 @@ project(common) set(NIFPP_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../nifpp/") +set(SIMPLE_LOGGER_PATH 
"${CMAKE_CURRENT_SOURCE_DIR}/../simple-cpp-logger/include") set(SRC_CODE "common_definitions.h" @@ -19,4 +20,5 @@ add_library(common SHARED ${SRC_CODE}) target_include_directories(common PUBLIC . ${NIFPP_PATH} + ${SIMPLE_LOGGER_PATH} ) \ No newline at end of file diff --git a/src_cpp/common/common_definitions.h b/src_cpp/common/common_definitions.h index a2f5ca41..fe45e5fd 100644 --- a/src_cpp/common/common_definitions.h +++ b/src_cpp/common/common_definitions.h @@ -5,7 +5,7 @@ namespace nerlnet #define DIM_X_IDX 0 #define DIM_Y_IDX 1 #define DIM_Z_IDX 2 - +#define DIM_W_IDX 3 #define NERLNIF_ATOM_STR "nerlnif" #define NERLNIF_NAN_ATOM_STR "nan" diff --git a/src_cpp/common/nerlLayer.cpp b/src_cpp/common/nerlLayer.cpp index f5ea3d4f..c303a849 100644 --- a/src_cpp/common/nerlLayer.cpp +++ b/src_cpp/common/nerlLayer.cpp @@ -16,15 +16,33 @@ NerlLayer::~NerlLayer() { } - // ----- CNN Layer ----- NerlLayerCNN::NerlLayerCNN(int layer_type, std::vector &layers_dims, int layer_functionality, - std::vector kernel_size, std::vector &stride_dims, std::vector padding_size) : + std::vector kernel_size, std::vector &stride_dims, std::vector padding_size, std::vector type_conv) : NerlLayer(layer_type, layers_dims, layer_functionality) { _kernel_size = kernel_size; _stride_dims = stride_dims; _padding_size = padding_size; + _type_conv = type_conv; } +NerlLayerCNN::~NerlLayerCNN() +{ +} + +NerlLayerPooling::NerlLayerPooling(int layer_type, std::vector &layers_dims, int layer_functionality, +std::vector &pooling_dims, std::vector &stride_dims,std::vector &padding_dims) : + NerlLayer(layer_type, layers_dims, layer_functionality) +{ + + _pooling_dims = pooling_dims; + _stride_dims = stride_dims; + _padding_dims = padding_dims; +}; + + +NerlLayerPooling::~NerlLayerPooling() +{ +} } \ No newline at end of file diff --git a/src_cpp/common/nerlLayer.h b/src_cpp/common/nerlLayer.h index 37f2392a..7b64af0c 100644 --- a/src_cpp/common/nerlLayer.h +++ b/src_cpp/common/nerlLayer.h @@ -13,7 
+13,7 @@ class NerlLayer { public: NerlLayer(int layer_type, std::vector &layers_dims, int layer_functionality); - ~NerlLayer(); + virtual ~NerlLayer(); std::shared_ptr get_next_layer_ptr() {return _next_layer;}; std::shared_ptr get_prev_layer_ptr() {return _prev_layer;}; @@ -44,15 +44,17 @@ class NerlLayerPooling : public NerlLayer { public: - NerlLayerPooling(int layer_type, std::vector &layers_dims, int layer_functionality, std::vector &pooling_dims); + NerlLayerPooling(int layer_type, std::vector &layers_dims, int layer_functionality, std::vector &pooling_dims, std::vector &stride_dims,std::vector &padding_dims); ~NerlLayerPooling(); - void get_pooling_dims(std::vector &pooling_dims) {pooling_dims = this->pooling_dims;}; + int get_dim_pooling_size(int dim_idx) {return _pooling_dims[dim_idx];}; // index 0 is the first dim + int get_stride(int dim_idx) {return _stride_dims[dim_idx];}; // index 0 is the first dim + int get_padding_size(int dim_idx) {return _padding_dims[dim_idx];}; // index 0 is the first dim private: - std::vector pooling_dims; //TODO - - + std::vector _pooling_dims; + std::vector _stride_dims; + std::vector _padding_dims; }; class NerlLayerCNN : public NerlLayer @@ -60,13 +62,19 @@ class NerlLayerCNN : public NerlLayer public: NerlLayerCNN(int layer_type, std::vector &layers_dims, int layer_functionality, - std::vector kernel_size, std::vector &stride_dims, std::vector padding_size); - ~NerlLayerCNN(); + std::vector kernel_size, std::vector &stride_dims, std::vector padding_size,std::vector type); + virtual ~NerlLayerCNN(); + + int get_dim_kernel_size(int dim_idx) {return _kernel_size[dim_idx];}; // index 0 is the first dim + const int get_stride(int dim_idx) {return _stride_dims[dim_idx];}; // index 0 is the first dim + const int get_type_conv() {return _type_conv[0];}; // index 0 is the first dim + int get_padding_size(int dim_idx) {return _padding_size[dim_idx];}; // index 0 is the first dim private: std::vector _kernel_size; std::vector 
_stride_dims; std::vector _padding_size; + std::vector _type_conv; }; } // namespace nerlnet \ No newline at end of file diff --git a/src_cpp/common/nerlWorker.cpp b/src_cpp/common/nerlWorker.cpp index 531f928a..ba2ab195 100644 --- a/src_cpp/common/nerlWorker.cpp +++ b/src_cpp/common/nerlWorker.cpp @@ -14,6 +14,7 @@ NerlWorker::NerlWorker(int model_type, std::string &layer_sizes_str, std::string _distributed_system_type = distributed_system_type; _distributed_system_args_str = distributed_system_args_str; _nerl_layers_linked_list = parse_layers_input(layer_sizes_str,layer_types_list,layers_functionality); + // std::cout << "NerlWorker created" << std::endl; } NerlWorker::~NerlWorker() @@ -43,27 +44,40 @@ std::shared_ptr NerlWorker::parse_layers_input(std::string &layer_siz std::vector layer_sizes_params; parse_layer_sizes_str(layer_sizes_str, layer_types_vec, layer_sizes_params); - std::vector> nerl_layers_vec; nerl_layers_vec.resize(layer_sizes_params.size()); for (int i = 0; i < layer_sizes_params.size(); i++) { int layer_type = std::stoi(layer_types_strs_vec[i]); - // TODO Ori and Nadav add CNN extension int layer_size = layer_sizes_params[i].dimx; int layer_functionality = std::stoi(layers_functionality_strs_vec[i]); - std::vector layer_dims = {layer_size}; //TODO + std::vector layer_dims = {layer_sizes_params[i].dimx, + layer_sizes_params[i].dimy,layer_sizes_params[i].dimz}; switch(layer_type) { case LAYER_TYPE_POOLING: { - break; //TODO Ori and Nadav add pooling layer + LayerSizingParams_t params = layer_sizes_params[i]; + std::vectorpooling_dims = params.get_ext_params(params.KERNEL_SIZE); + std::vectorstride_dims = params.get_ext_params(params.STRIDE_SIZE); + std::vectorpadding_dims = params.get_ext_params(params.PADDING_SIZE); + nerl_layers_vec[i] = std::make_shared(layer_type,layer_dims,layer_functionality, + pooling_dims, stride_dims,padding_dims); + break; } case LAYER_TYPE_CNN: { - break; //TODO Ori and Nadav add CNN layer + LayerSizingParams_t params 
= layer_sizes_params[i]; + std::vectorkernel_dims = params.get_ext_params(params.KERNEL_SIZE); + std::vectorstride_dims = params.get_ext_params(params.STRIDE_SIZE); + std::vectorpadding_dims = params.get_ext_params(params.PADDING_SIZE); + std::vectortype_conv = params.get_ext_params(params.IS_VALID); + // std::cout << "type_conv 0: " << type_conv[0] << std::endl; + // std::cout << "type_conv 1: " << type_conv[1] << std::endl; + nerl_layers_vec[i] = std::make_shared(layer_type, layer_dims, layer_functionality, kernel_dims, stride_dims, padding_dims,type_conv); + break; } default: { @@ -72,13 +86,11 @@ std::shared_ptr NerlWorker::parse_layers_input(std::string &layer_siz } } } - for (size_t i = 1; i < nerl_layers_vec.size(); i++) { nerl_layers_vec[i-1]->set_next_layer(nerl_layers_vec[i]); nerl_layers_vec[i]->set_prev_layer(nerl_layers_vec[i-1]); } - return nerl_layers_vec.front(); } diff --git a/src_cpp/common/nerlWorkerFunc.h b/src_cpp/common/nerlWorkerFunc.h index d72ce4ac..8fc7f62a 100644 --- a/src_cpp/common/nerlWorkerFunc.h +++ b/src_cpp/common/nerlWorkerFunc.h @@ -1,32 +1,42 @@ #pragma once #include - - +#include +#include #include "utilities.h" #include "worker_definitions_ag.h" +//TODO:i probably need to move NERLPLANNER_INPUT to utilities.h +#define NERLPLANNER_INPUT_KERNEL_CHAR 'k' +#define NERLPLANNER_INPUT_STRIDE_CHAR 's' +#define NERLPLANNER_SIZE_DIMENSION_SEP "x" +#define NERLPLANNER_INPUT_PADDING_CHAR 'p' + + #define SIMPLE_PARSING -1 #define COMPLEX_PARSING -2 + namespace nerlnet { typedef struct LayerSizingParams { - enum {KERNEL_SIZE = -1, PADDING_SIZE = -2, STRIDE_SIZE = -3, POOLING_SIZE= -4}; +enum {KERNEL_SIZE = -1, PADDING_SIZE = -2,STRIDE_SIZE = -3 ,POOLING_SIZE= -4 , IS_VALID = -5}; int dimx = 1; int dimy = 1; - int dimz = 1; - std::vector _ext_params; + int dimz = 1; + std::vector _ext_params; - std::vector get_ext_params(int param_type) { + int get_maxdim() { return (dimz > 1 ? 3 : dimy > 1 ? 
2 : 1);} // return the maximum dimension of the param; + + std::vector get_ext_params(int param_type) { std::vector res; int i = 0; int param_extracted = false; int param_start = false; - while (!param_extracted){ + while (!param_extracted && i < _ext_params.size()){ if(param_start){ param_extracted = _ext_params[i]<0; if(!param_extracted){ @@ -37,7 +47,11 @@ typedef struct LayerSizingParams param_start = true; } i++; - } + } + if(!param_extracted){ + res.push_back(0); + res.push_back(0); + } return res; } } LayerSizingParams_t; @@ -59,34 +73,85 @@ std::shared_ptr parse_model_params(std::string &model_type_str,s static void parse_layer_sizes_str(std::string &layer_sizes_str, std::vector &layers_types_vec, std::vector &out_layer_sizes_params) { - std::vector layer_sizes_strs_vec = nerlnet_utilities::split_strings_by_comma(layer_sizes_str); - out_layer_sizes_params.resize(layer_sizes_strs_vec.size()); + out_layer_sizes_params.resize(layer_sizes_strs_vec.size()); assert(layer_sizes_strs_vec.size() == out_layer_sizes_params.size()); for (size_t i = 0; i < layer_sizes_strs_vec.size(); i++) //TODO { - switch (layers_types_vec[i]) //TODO Ori and Nadav change to switch case only between simple and complex (if there is chars in type) - { - case LAYER_TYPE_PERCEPTRON: - case LAYER_TYPE_DEFAULT: - case LAYER_TYPE_SCALING: - case LAYER_TYPE_UNSCALING: - case LAYER_TYPE_PROBABILISTIC: - case SIMPLE_PARSING:{ - out_layer_sizes_params[i].dimx = std::stoi(layer_sizes_strs_vec[i]); - break; - } - case COMPLEX_PARSING:{ - //TODO CNN - break; - } - default: - break; - } + int layer_str_type = nerlnet_utilities::is_integer_number(layer_sizes_strs_vec[i]) ? 
SIMPLE_PARSING : COMPLEX_PARSING; + switch (layer_str_type) + { + case SIMPLE_PARSING: + { + out_layer_sizes_params[i].dimx = std::stoi(layer_sizes_strs_vec[i]); + break; + } + case COMPLEX_PARSING: + { + std::unordered_map params; + std::regex rgx_dim("[0-9][^kstpx]*"); + std::smatch matches; //this matches variable is for the layer size + std::smatch param_match; // this matches variable is for the rest of the string + std::smatch dim_match; // this matches variable is for the dimensions + std::unordered_map param_codes = { + {'k', -1}, + {'p', -2}, + {'s', -3}, + {'t',-5} + }; + std::string::const_iterator searchStartDim(layer_sizes_strs_vec[i].cbegin()); + for (size_t k = 0; k < 3; k++){ + std::regex_search(searchStartDim, layer_sizes_strs_vec[i].cend(), dim_match, rgx_dim); + if(k == 0){ + out_layer_sizes_params[i].dimx = std::stoi(dim_match[0]); + }else if(k == 1){ + out_layer_sizes_params[i].dimy = std::stoi(dim_match[0]); + }else{ + out_layer_sizes_params[i].dimz = std::stoi(dim_match[0]); + } + searchStartDim = dim_match.suffix().first; + } + std::regex rgx_rest("[kspt]([0-9]*x?[0-9]*)*"); //search for k, s or p followed by a number and then x and then a number + std::string::const_iterator searchStart(layer_sizes_strs_vec[i].cbegin()); + while (std::regex_search(searchStart, layer_sizes_strs_vec[i].cend(), param_match, rgx_rest)) + { + char param_char = param_match[0].str()[0]; //the first character of the match + std::string dimensions_str = param_match.str();//the second part of the match (the dimensions) + std::string dimensions_str_sub = dimensions_str.substr(1,-1); + // Convert the parameter and dimensions to the desired format and add them to _ext_params + out_layer_sizes_params[i]._ext_params.push_back(param_codes[param_char]); + std::istringstream dimensions_stream(dimensions_str_sub); + std::string dimension; + while (std::getline(dimensions_stream, dimension, 'x')) + { + // std::cout << "param_char: " << param_char << std::endl; + // std::cout 
<< "dimension: " << dimension << std::endl; + // std::cout << "std::stoi(dimension): " << std::stoi(dimension) << std::endl; + out_layer_sizes_params[i]._ext_params.push_back(std::stoi(dimension)); + } + if(dimensions_str_sub.length() == 1 && param_char!='t') out_layer_sizes_params[i]._ext_params.push_back(std::stoi(dimension)); + searchStart = param_match.suffix().first; + } + break; + } + default: + LogError("Error parsing layer size string"); + break; + } + } } + } // Closing brace for namespace nerlnet + // Closing brace for namespace nerlnet + + + + + + + - // "5x5k2x2p1s1", 5,5,KERNEL_SIZE_IDX,2,2,PADDING_SIZE_IDX,1 | + // "5x5k2x2p1x1s1", 5,5,KERNEL_SIZE_IDX,2,2,PADDING_SIZE_IDX,1,1,STRIDE_SIZE_IDX,1,1 // "5k2p1", 5,KERNEL_SIZE_IDX,2,PADDING_SIZE_IDX,1 | // "8", 8 @@ -98,6 +163,4 @@ static void parse_layer_sizes_str(std::string &layer_sizes_str, std::vector // 2. Represent in a 1D vector and using a second vector for layer start // 3. Create class -} - -} \ No newline at end of file +// \ No newline at end of file diff --git a/src_cpp/common/utilities.cpp b/src_cpp/common/utilities.cpp index 9deb370c..66664060 100644 --- a/src_cpp/common/utilities.cpp +++ b/src_cpp/common/utilities.cpp @@ -15,12 +15,20 @@ std::vector split_strings_by_comma(std::string &str) { return result; } -std::vector matchRegex(std::string &input, std::regex re) { +// check if string is only an single integer number +bool is_integer_number(const std::string &input_str) { + std::regex reg_num ("^\\d+$"); + if (std::regex_match(input_str,reg_num)) return true; + return false; +} + +// This function modifies the input string! 
+std::vector matchRegex(std::string &input_str, std::regex re) { std::vector result; std::smatch match; - while (std::regex_search(input, match, re)) { + while (std::regex_search(input_str, match, re)) { result.push_back(match.str()); - input = match.suffix().str(); + input_str = match.suffix().str(); } return result; } diff --git a/src_cpp/common/utilities.h b/src_cpp/common/utilities.h index 8c87f3f4..03c9f175 100644 --- a/src_cpp/common/utilities.h +++ b/src_cpp/common/utilities.h @@ -14,6 +14,7 @@ namespace nerlnet_utilities template bool shared_ptr_uninitialized(std::shared_ptr &in_ptr) {return !in_ptr;}; std::vector split_strings_by_comma(std::string &str); +bool is_integer_number(const std::string &input_str); std::vector matchRegex(std::string &input, std::regex re); } // namespace nerlutils \ No newline at end of file diff --git a/src_cpp/common/worker_definitions_ag.h b/src_cpp/common/worker_definitions_ag.h index e35379bb..101e9032 100644 --- a/src_cpp/common/worker_definitions_ag.h +++ b/src_cpp/common/worker_definitions_ag.h @@ -5,9 +5,10 @@ namespace nerlnet { -enum LayerTypeEnum{LAYER_TYPE_DEFAULT=0,LAYER_TYPE_SCALING=1,LAYER_TYPE_CNN=2,LAYER_TYPE_PERCEPTRON=3,LAYER_TYPE_POOLING=4,LAYER_TYPE_PROBABILISTIC=5,LAYER_TYPE_LSTM=6,LAYER_TYPE_RECCURRENT=7,LAYER_TYPE_UNSCALING=8}; +enum LayerTypeEnum{LAYER_TYPE_DEFAULT=0,LAYER_TYPE_SCALING=1,LAYER_TYPE_CNN=2,LAYER_TYPE_PERCEPTRON=3,LAYER_TYPE_POOLING=4,LAYER_TYPE_PROBABILISTIC=5,LAYER_TYPE_LSTM=6,LAYER_TYPE_RECCURRENT=7,LAYER_TYPE_UNSCALING=8,LAYER_TYPE_BOUNDING=9}; enum ProbabilisticActivationEnum{PROBABILISTIC_ACTIVATION_BINARY=1,PROBABILISTIC_ACTIVATION_LOGISTIC=2,PROBABILISTIC_ACTIVATION_COMPETITIVE=3,PROBABILISTIC_ACTIVATION_SOFTMAX=4}; enum ScalingEnum{SCALING_NONE=1,SCALING_MINMAX=2,SCALING_MEANSTD=3,SCALING_STD=4,SCALING_LOG=5}; +enum BoundingEnum{BOUNDING_NONE=1,BOUNDING_BOUNDING=2}; enum UnscalingEnum{UNSCALING_NONE=1,UNSCALING_MINMAX=2,UNSCALING_MEANSTD=3,UNSCALING_STD=4,UNSCALING_LOG=5}; enum 
PoolingEnum{POOLING_NONE=1,POOLING_MAX=2,POOLING_AVG=3}; enum ActivationEnum{ACTIVATION_THRESHOLD=1,ACTIVATION_SIGN=2,ACTIVATION_LOGISTIC=3,ACTIVATION_TANH=4,ACTIVATION_LINEAR=5,ACTIVATION_RELU=6,ACTIVATION_ELU=7,ACTIVATION_SELU=8,ACTIVATION_SOFT_PLUS=9,ACTIVATION_SOFT_SIGN=10,ACTIVATION_HARD_SIGMOID=11}; diff --git a/src_cpp/opennnBridge/CMakeLists.txt b/src_cpp/opennnBridge/CMakeLists.txt index 6499a4a9..8908d629 100644 --- a/src_cpp/opennnBridge/CMakeLists.txt +++ b/src_cpp/opennnBridge/CMakeLists.txt @@ -35,6 +35,8 @@ set(SRC_CODE "nerlWorkerOpenNN.h" "nerlWorkerOpenNN.cpp" "nerlWorkerNIF.h" + "ae_red.h" + "ae_red.cpp" ) add_library(${PROJECT_NAME} SHARED ${SRC_CODE}) diff --git a/src_cpp/opennnBridge/ae_red.cpp b/src_cpp/opennnBridge/ae_red.cpp new file mode 100644 index 00000000..2ef1a440 --- /dev/null +++ b/src_cpp/opennnBridge/ae_red.cpp @@ -0,0 +1,41 @@ +#include "ae_red.h" + +using namespace nerlnet; + +AeRed::AeRed(float k , float alpha) +{ + _k = k; + _alpha = alpha; +} + +AeRed::~AeRed() +{ +} + +fTensor1DPtr AeRed::update_batch(fTensor1D loss_values) +{ + fTensor1DPtr result = std::make_shared(loss_values.size()); + for(int i = 0; i < loss_values.size() - 1; i++) + { + float val = update_sample(loss_values(i)); + result->data()[i] = val; + } + return result; +} + +float AeRed::update_sample(float loss_value){ + _ema = _alpha * loss_value + (1 - _alpha) * _prev_ema; + _prev_ema = _ema; + _emad = _alpha * abs(loss_value - _ema) + (1 - _alpha) * _prev_emad; + _prev_emad = _emad; + if(_ema + _k * _emad < loss_value){ + _ema_event = loss_value; + } + else{ + _ema_normal = loss_value; + } + _threshold = (_ema_event + _ema_normal) / 2; // New Threshold + + if(loss_value > _threshold) return loss_value; + else return -loss_value; +} \ No newline at end of file diff --git a/src_cpp/opennnBridge/ae_red.h b/src_cpp/opennnBridge/ae_red.h new file mode 100644 index 00000000..fdc08560 --- /dev/null +++ b/src_cpp/opennnBridge/ae_red.h @@ -0,0 +1,33 @@ +#pragma once 
+#define PARAM_K_DEFAULT 1.2f +#define ALPHA_DEFAULT 0.3f +#include "eigenTensorTypes.h" + + +namespace nerlnet +{ + +class AeRed +{ + + public: + AeRed(float k = PARAM_K_DEFAULT , float alpha = ALPHA_DEFAULT); + ~AeRed(); + + fTensor1DPtr update_batch(fTensor1D loss_values); + float update_sample(float loss_value); + + private: + float _k; + float _alpha; + float _threshold; + float _ema = 0; + float _emad = 1; + float _ema_event = 0; + float _ema_normal = 0; + float _prev_ema = 0; + float _prev_emad = 0; + +}; + +} // namespace nerlnet diff --git a/src_cpp/opennnBridge/nerlWorkerNIF.h b/src_cpp/opennnBridge/nerlWorkerNIF.h index 6476489d..c8841184 100644 --- a/src_cpp/opennnBridge/nerlWorkerNIF.h +++ b/src_cpp/opennnBridge/nerlWorkerNIF.h @@ -9,11 +9,12 @@ using namespace nerlnet; -static std::shared_ptr create_nerlworker(std::string model_type_str,std::string learning_rate_str,std::string epochs_str,std::string optimizer_type_str,std::string loss_method_str,std::string distributed_system_type_str, -std::string layer_sizes_str,std::string layer_types_str,std::string layers_functionality_str, -std::string optimizer_args_str,std::string distributed_system_args_str) +static std::shared_ptr create_nerlworker(std::string &model_type_str,std::string &learning_rate_str, + std::string &epochs_str, std::string &optimizer_type_str, std::string &loss_method_str, + std::string &distributed_system_type_str, std::string &layer_sizes_str, std:: string &layer_types_str, + std::string &layers_functionality_str, std::string &optimizer_args_str, std::string &distributed_system_args_str) //all should be const reference { - std::shared_ptr new_worker = parse_model_params(model_type_str,learning_rate_str,epochs_str,optimizer_type_str,loss_method_str,distributed_system_type_str,layer_sizes_str, + std::shared_ptr new_worker = parse_model_params(model_type_str,learning_rate_str,epochs_str,optimizer_type_str,loss_method_str,distributed_system_type_str,layer_sizes_str, 
layer_types_str,layers_functionality_str,optimizer_args_str,distributed_system_args_str); return new_worker; } @@ -76,7 +77,6 @@ static ERL_NIF_TERM test_nerlworker_nif(ErlNifEnv* env, int argc, const ERL_NIF_ std::string loss_method_str; std::string distributed_system_type_str; std::string distributed_system_args_str; - /* std::vector stam = {1,2,3,4,5}; std::shared_ptr stam_tensor_int; std::shared_ptr stam_tensor_index; @@ -101,7 +101,6 @@ static ERL_NIF_TERM test_nerlworker_nif(ErlNifEnv* env, int argc, const ERL_NIF_ nifpp::get_throws(env, argv[ARG_DISTRIBUTED_SYSTEM_ARGS], distributed_system_args_str); std::shared_ptr new_nerl_worker_ptr = create_nerlworker(model_type_str,learning_rate_str,epochs_str,optimizer_type_str,loss_method_str,distributed_system_type_str,layer_sizes_str, layer_types_str,layers_functionality_str,optimizer_args_str,distributed_system_args_str); - // Create the singleton instance BridgeController& onnBrCtrl = BridgeController::GetInstance(); // Put the model record to the map with modelId @@ -127,7 +126,6 @@ static ERL_NIF_TERM remove_nerlworker_nif(ErlNifEnv* env, int argc, const ERL_NI nifpp::get_throws(env,argv[ARG_MODEL_ID],modelId); BridgeController& onnBrCtrl = BridgeController::GetInstance(); onnBrCtrl.deleteModel(modelId); - LogInfo << "remove_worker_nif" << endl; nifpp::str_atom ret_atom = "ok"; return nifpp::make(env, ret_atom); diff --git a/src_cpp/opennnBridge/nerlWorkerOpenNN.cpp b/src_cpp/opennnBridge/nerlWorkerOpenNN.cpp index 5036359a..0bfe8047 100644 --- a/src_cpp/opennnBridge/nerlWorkerOpenNN.cpp +++ b/src_cpp/opennnBridge/nerlWorkerOpenNN.cpp @@ -1,4 +1,5 @@ #include "nerlWorkerOpenNN.h" +#include "ae_red.h" using namespace opennn; @@ -6,7 +7,7 @@ namespace nerlnet { // ----- NerlWorkerOpenNN ----- - NerlWorkerOpenNN::NerlWorkerOpenNN(int model_type, std::string &layer_sizes_str, std::string &layer_types_list, std::string &layers_functionality, + NerlWorkerOpenNN::NerlWorkerOpenNN(int model_type,std::string 
&layer_sizes_str, std::string &layer_types_list, std::string &layers_functionality, float learning_rate, int epochs, int optimizer_type, std::string &optimizer_args_str, int loss_method, int distributed_system_type, std::string &distributed_system_args_str) : NerlWorker(model_type, layer_sizes_str, layer_types_list, layers_functionality, learning_rate, epochs, optimizer_type, optimizer_args_str, @@ -16,7 +17,7 @@ namespace nerlnet generate_opennn_neural_network(); _training_strategy_ptr = std::make_shared(); generate_training_strategy(); - //TODO Ori and Nadav - implement training strategy (loss method, optimizer type, epochs, only sgd and adam) + _ae_red_ptr = std::make_shared(); } NerlWorkerOpenNN::~NerlWorkerOpenNN() @@ -24,23 +25,114 @@ namespace nerlnet } + void NerlWorkerOpenNN::post_training_process(fTensor2DPtr TrainData) + { + switch(_model_type){ + case MODEL_TYPE_NN: + { + break; + } + case MODEL_TYPE_AUTOENCODER: // Get Loss Values by class LossIndexBackPropagationLM + { + // std::shared_ptr training_strategy_ptr = get_training_strategy_ptr(); + // OptimizationAlgorithm* optimizer = training_strategy_ptr->get_optimization_algorithm_pointer(); + // LossIndex* loss_index = optimizer->get_loss_index_pointer(); + // std::shared_ptr _neural_network_pointer = get_neural_network_ptr(); + // Tensor trainable_layers_pointers = _neural_network_pointer->get_trainable_layers_pointers(); + // cout << "Gradient: " << trainable_layers_pointers[0] << endl; + // cout << "LossType: " << loss_index->get_error_type() << endl; + + } + case MODEL_TYPE_AE_CLASSIFIER: // Get Loss Values , Add RED support - David's Thesis + { + std::shared_ptr neural_network = get_neural_network_ptr(); + Index num_of_samples = _aec_data_set->dimension(0); + Index inputs_number = neural_network->get_inputs_number(); + Tensor inputs_dimensions(2); + inputs_dimensions.setValues({num_of_samples, inputs_number}); + fTensor2DPtr calculate_res = std::make_shared(num_of_samples, 
neural_network->get_outputs_number()); + *calculate_res = neural_network->calculate_outputs(TrainData->data(), inputs_dimensions); + // SAV + fTensor2D absoluteDifferences = (*calculate_res - *_aec_data_set).abs(); + fTensor1D loss_values_sav = absoluteDifferences.sum(Eigen::array({1})); + // MSE + fTensor1D loss_values_mse = (float)1/_aec_data_set->dimension(0) * (*calculate_res - *_aec_data_set).pow(2).sum(Eigen::array({1})); + //cout << "Loss Values (MSE):" << endl << loss_values_mse << endl; + fTensor1DPtr res_sav = _ae_red_ptr->update_batch(loss_values_sav); + fTensor1DPtr res_mse = _ae_red_ptr->update_batch(loss_values_mse); + //cout << "AE_RED RESULT VECTOR:" << endl << *res_mse << endl; + } + + + + // case MODEL_TYPE_LSTM: + // { + // break; + // } + // case MODEL_TYPE_RECURRENT: + // { + // break; + // } + } + } + + + void NerlWorkerOpenNN::post_predict_process(fTensor2DPtr result_ptr){ + switch(_model_type){ + case MODEL_TYPE_NN: + { + break; + } + case MODEL_TYPE_AUTOENCODER: + { + break; + } + case MODEL_TYPE_AE_CLASSIFIER: + { + std::shared_ptr neural_network = get_neural_network_ptr(); + Index num_of_samples = _aec_data_set->dimension(0); + Index inputs_number = neural_network->get_inputs_number(); + // SAV + fTensor2D absoluteDifferences = (*result_ptr - *_aec_data_set).abs(); + fTensor1D loss_values_sav = absoluteDifferences.sum(Eigen::array({1})); + // MSE + fTensor1D loss_values_mse = (float)1/_aec_data_set->dimension(0) * (*result_ptr - *_aec_data_set).pow(2).sum(Eigen::array({1})); + //cout << "Loss Values (MSE):" << endl << loss_values_mse << endl; + fTensor1DPtr res_sav = _ae_red_ptr->update_batch(loss_values_sav); + fTensor1DPtr res_mse = _ae_red_ptr->update_batch(loss_values_mse); + //cout << "AE_RED RESULT VECTOR:" << endl << *res_mse << endl; + } + // case MODEL_TYPE_LSTM: + // { + // break; + // } + // case MODEL_TYPE_RECURRENT: + // { + // break; + // } + } + + } + /** * @brief generate the training strategy for the opennn neural network 
* this function doesn't set the data pointer of the training strategy and doesn't call to the train function **/ void NerlWorkerOpenNN::generate_training_strategy() { - _training_strategy_ptr->set_neural_network_pointer(_neural_network_ptr.get()); // Neural network must be defined at this point - _training_strategy_ptr->set_optimization_method((opennn::TrainingStrategy::OptimizationMethod) translate_optimizer_type_int(_optimizer_type)); - _training_strategy_ptr->set_loss_method((opennn::TrainingStrategy::LossMethod) translate_loss_method_int(_loss_method)); - _training_strategy_ptr->set_maximum_epochs_number(_epochs); - _training_strategy_ptr->set_display(TRAINING_STRATEGY_SET_DISPLAY_OFF); // remove opennn training strategy prints + _training_strategy_ptr->set_neural_network_pointer(_neural_network_ptr.get()); // Neural network must be defined at this point + set_optimization_method(_optimizer_type,_learning_rate); + set_loss_method(_loss_method); + _training_strategy_ptr->set_maximum_epochs_number(_epochs); + _training_strategy_ptr->set_display(TRAINING_STRATEGY_SET_DISPLAY_OFF); } void NerlWorkerOpenNN::set_optimization_method(int optimizer_type,int learning_rate){ assert((_training_strategy_ptr->has_neural_network(), "NerlWorkerOpenNN::set_optimization_method - neural network pointer is null")); _optimizer_type = optimizer_type; + //cout << "optimizer_type = " << optimizer_type << endl; _training_strategy_ptr->set_optimization_method(translate_optimizer_type(optimizer_type)); + /* switch(_optimizer_type){ case OPTIMIZER_GD: { @@ -69,6 +161,7 @@ namespace nerlnet break; } } + */ } void NerlWorkerOpenNN::set_loss_method(int loss_method){ @@ -97,14 +190,14 @@ namespace nerlnet generate_custom_model_nn(_neural_network_ptr); break; } - case MODEL_TYPE_AUTOENCODER: //TODO + case MODEL_TYPE_AUTOENCODER: // ! 
Ask David if AE/AEC should be created in "generate_custom_model_nn" (same building process) { - generate_custom_model_ae(_neural_network_ptr); + generate_custom_model_nn(_neural_network_ptr); break; } - case MODEL_TYPE_AE_CLASSIFIER: //TODO + case MODEL_TYPE_AE_CLASSIFIER: // ! Ask David if AE/AEC should be created in "generate_custom_model_nn" (same building process) { - generate_custom_model_aec(_neural_network_ptr); + generate_custom_model_nn(_neural_network_ptr); break; } // case MODEL_TYPE_LSTM: @@ -126,6 +219,89 @@ namespace nerlnet void NerlWorkerOpenNN::generate_opennn_project(std::shared_ptr &neural_network_ptr) { // TODO Ori and Nadav - implement + switch (_model_type) + { + case MODEL_TYPE_APPROXIMATION: + neural_network_ptr->set_project_type(opennn::NeuralNetwork::ProjectType::Approximation); + break; + case MODEL_TYPE_CLASSIFICATION: + neural_network_ptr->set_project_type(opennn::NeuralNetwork::ProjectType::Classification); + break; + case MODEL_TYPE_FORECASTING: + neural_network_ptr->set_project_type(opennn::NeuralNetwork::ProjectType::Forecasting); + break; + default: + break; + } + + } + + + + + void NerlWorkerOpenNN::set_dataset(std::shared_ptr data_set,fTensor2DPtr TrainDataNNptr){ + _data_set = data_set; + int nerlnet_custom_model; // defines if this is a nerlnet custom project or an opennn project + /// find the type of input layer (first layer) and neural network + // if needed take the data set and change it (in cnn change,in ae duplicate) + // in cnn , see the inputs of the user. if the user gave 3 dimensions so the data set will have 3 dimensions + // the last dim will be the samples num and the second will be multiple channels and columns . 
+ std::shared_ptr neural_network_ptr = get_neural_network_ptr(); + switch (_model_type) { + case MODEL_TYPE_AUTOENCODER: + { + + } + case MODEL_TYPE_AE_CLASSIFIER: + { + _aec_data_set = TrainDataNNptr; + Eigen::array bcast({1, 2}); + std::shared_ptr> autoencoder_data = std::make_shared>(TrainDataNNptr->broadcast(bcast)); + int num_of_features = neural_network_ptr->get_inputs_number(); + int num_of_output_neurons = neural_network_ptr->get_outputs_number(); + bool data_set_condition = (num_of_features + num_of_output_neurons) == autoencoder_data->dimension(1); + assert(("issue with data input/output dimensions", data_set_condition)); + _data_set->set_data(*autoencoder_data); + _data_set->set(autoencoder_data->dimension(0) , num_of_features , num_of_output_neurons); // TODO CHECK + break; + } + default: + { + int data_cols = TrainDataNNptr->dimension(1); + int num_of_features = neural_network_ptr->get_inputs_number(); + int num_of_output_neurons = neural_network_ptr->get_outputs_number(); + _data_set->set_data(*(TrainDataNNptr)); + // Data set definitions + bool data_set_condition = (num_of_features + num_of_output_neurons) == data_cols; + assert(("issue with data input/output dimensions", data_set_condition)); + if(neural_network_ptr->has_convolutional_layer()){ + Tensor input_variable_dimension(3); + input_variable_dimension.setValues({this->_nerl_layers_linked_list->get_dim_size(DIM_Z_IDX), this->_nerl_layers_linked_list->get_dim_size(DIM_Y_IDX), this->_nerl_layers_linked_list->get_dim_size(DIM_X_IDX)}); + _data_set->set_input_variables_dimensions(input_variable_dimension); + int samples_num = _data_set->get_samples_number(); + int input_variable = this->_nerl_layers_linked_list->get_dim_size(DIM_Y_IDX)* this->_nerl_layers_linked_list->get_dim_size(DIM_X_IDX); + for(Index sample_indx = 0; sample_indx < samples_num ; sample_indx++) + { + _data_set->set_sample_use(sample_indx, DataSet::SampleUse::Training); + } + + for(Index column_indx = 0; column_indx 
set_column_use(column_indx, DataSet::VariableUse::Input); + _data_set->set_column_type(column_indx, DataSet::ColumnType::Numeric); + } + for(Index column_indx = input_variable; column_indx < input_variable + num_of_output_neurons; column_indx++) + { + _data_set->set_column_type(column_indx, DataSet::ColumnType::Binary); + _data_set->set_column_use(column_indx, DataSet::VariableUse::Target); + } + _data_set->set_columns_scalers(Scaler::NoScaling); + }else{ + _data_set->set(TrainDataNNptr->dimension(0), num_of_features, num_of_output_neurons); + } + break; + } + } } void NerlWorkerOpenNN::generate_custom_model_nn(std::shared_ptr &neural_network_ptr) @@ -138,7 +314,98 @@ namespace nerlnet { case LAYER_TYPE_CNN: { + // std::shared_ptr prev_layer = curr_layer->get_prev_layer_ptr(); + int layer_rows_num = curr_layer->get_dim_size(DIM_X_IDX); + int layer_cols_num = curr_layer->get_dim_size(DIM_Y_IDX); + int layer_channels_num = curr_layer->get_dim_size(DIM_Z_IDX); + std::shared_ptr cnn_curr_layer = std::dynamic_pointer_cast(curr_layer); + // set the number of inputs + Tensor cnn_layer_inputs_dimensions(4); + cnn_layer_inputs_dimensions[Convolutional4dDimensions::sample_index] = 1; + cnn_layer_inputs_dimensions[Convolutional4dDimensions::row_index] = layer_rows_num; + cnn_layer_inputs_dimensions[Convolutional4dDimensions::column_index] = layer_cols_num; + cnn_layer_inputs_dimensions[Convolutional4dDimensions::channel_index] = layer_channels_num; + int kernels_rows_number = cnn_curr_layer->get_dim_kernel_size(DIM_X_IDX); + int kernels_columns_number = cnn_curr_layer->get_dim_kernel_size(DIM_Y_IDX); + int kernels_number = cnn_curr_layer->get_dim_kernel_size(DIM_W_IDX); + int kernels_channels_number = cnn_curr_layer->get_dim_kernel_size(DIM_Z_IDX); + + // set the number of kernel + Tensor cnn_layer_kernels_dimensions(4); + cnn_layer_kernels_dimensions[Kernel4dDimensions::row_index] = kernels_rows_number; //according the opennn example + 
cnn_layer_kernels_dimensions[Kernel4dDimensions::column_index] = kernels_columns_number; //according the opennn example + cnn_layer_kernels_dimensions[Kernel4dDimensions::channel_index] = kernels_channels_number; //change to get_dim_kernel_size z + cnn_layer_kernels_dimensions[Kernel4dDimensions::kernel_index] = kernels_number; //according the opennn example + + ConvolutionalLayer* convolutional_layer = new ConvolutionalLayer(cnn_layer_inputs_dimensions, cnn_layer_kernels_dimensions); + //set stride + int stride_row = cnn_curr_layer->get_stride(DIM_X_IDX); + int stride_col = cnn_curr_layer->get_stride(DIM_Y_IDX); + + convolutional_layer->set_column_stride(stride_col); // set_column_stride + convolutional_layer->set_row_stride(stride_row); // set_row_stride + + //set padding add if to make sure we have padding + // Tensor cnn_layer_padding_dimensions(2); + // Tensor cnn_layer_padding_outputs(layer_rows_num, layer_cols_num, layer_channels_num, 1); + int padding_row = cnn_curr_layer->get_padding_size(DIM_X_IDX); + int padding_col = cnn_curr_layer->get_padding_size(DIM_Y_IDX); + //cnn_layer_padding_dimensions(0) = padding_row; //according the opennn example + // cnn_layer_padding_dimensions(1) = padding_col; //according the opennn example + // set convulution type + if(cnn_curr_layer->get_type_conv()){ + convolutional_layer->set_convolution_type(opennn::ConvolutionalLayer::ConvolutionType::Same); + }else{ + convolutional_layer->set_convolution_type(opennn::ConvolutionalLayer::ConvolutionType::Valid); + } + // insert padding + //convolutional_layer->insert_padding(cnn_layer_padding_dimensions,cnn_layer_padding_outputs); + // set activation function + convolutional_layer->set_activation_function((opennn::ConvolutionalLayer::ActivationFunction)(cnn_curr_layer->get_layer_functionality())); // set activation function + // add layer to the neural network + neural_network_ptr->add_layer(convolutional_layer); // add layer to the neural network + 
if(curr_layer->get_next_layer_ptr()->get_layer_type() == LAYER_TYPE_PERCEPTRON){ // if the next layer is perceptron + FlattenLayer* flatten_layer = new FlattenLayer(convolutional_layer->get_outputs_dimensions()); // create flatten layer + // TODO :Talk with Noa and Ohad about the sizes - make sure the sizes are correct in NerlPlanner + if (flatten_layer->get_outputs_dimensions()[1] != curr_layer->get_next_layer_ptr()->get_dim_size(DIM_X_IDX)) // make sure the dims correct + { + // LogError("NerlWorkerOpenNN::generate_custom_model_nn - wrong dimensions in CNN and Perceptron"); + // throw std::invalid_argument("NerlWorkerOpenNN::generate_custom_model_nn - wrong dimensions in CNN and Perceptron"); + } + neural_network_ptr->add_layer(flatten_layer); + } break; + + } + case LAYER_TYPE_POOLING: + { + // layer type is pooling + int layer_rows_num = curr_layer->get_dim_size(DIM_X_IDX); + int layer_cols_num = curr_layer->get_dim_size(DIM_Y_IDX); + int layer_channels_num = curr_layer->get_dim_size(DIM_Z_IDX); + //dynamic cast to NerlLayerPooling + std::shared_ptr pooling_curr_layer = std::dynamic_pointer_cast(curr_layer); + //get pooling dims + int pooling_row = pooling_curr_layer->get_dim_pooling_size(DIM_X_IDX); + int pooling_col = pooling_curr_layer->get_dim_pooling_size(DIM_Y_IDX); + Tensor pooling_dimension(2); + pooling_dimension.setValues({ + pooling_row, + pooling_col + }); + //create pooling layer + Tensor input_dimensions(4); + input_dimensions(Convolutional4dDimensions::channel_index) = layer_channels_num; // Number of kernels (Channels) + input_dimensions(Convolutional4dDimensions::row_index) = layer_rows_num; // Rows + input_dimensions(Convolutional4dDimensions::column_index) = layer_cols_num; // Cols + + PoolingLayer* pooling_layer = new PoolingLayer(input_dimensions,pooling_dimension); + // set pooling layer parameters + pooling_layer->set_row_stride(pooling_curr_layer->get_stride(DIM_X_IDX)); + 
pooling_layer->set_column_stride(pooling_curr_layer->get_stride(DIM_Y_IDX)); + pooling_layer->set_pooling_method(translate_pooling_method(pooling_curr_layer->get_layer_functionality())); + pooling_layer->set_padding_width(pooling_curr_layer->get_padding_size(DIM_X_IDX)); + neural_network_ptr->add_layer(pooling_layer); // add layer to the neural network } case LAYER_TYPE_LSTM: { @@ -159,8 +426,13 @@ namespace nerlnet LogError("NerlWorkerOpenNN::generate_custom_model_nn - PERCEPTRON cannot be first layer"); throw std::invalid_argument("NerlWorkerOpenNN::generate_custom_model_nn - PERCEPTRON cannot be first layer"); } - std::shared_ptr prev_layer = curr_layer->get_prev_layer_ptr(); - int prev_layer_size = prev_layer->get_dim_size(DIM_X_IDX); + int prev_layer_size; + if(neural_network_ptr->get_layer_pointer(neural_network_ptr->get_layers_number()-1)->get_type_string() == "Flatten"){ + prev_layer_size = ((FlattenLayer*)(neural_network_ptr->get_layer_pointer(neural_network_ptr->get_layers_number()-1)))->get_outputs_dimensions()[1]; + }else{ + std::shared_ptr prev_layer = curr_layer->get_prev_layer_ptr(); + prev_layer_size = prev_layer->get_dim_size(DIM_X_IDX); + } int layer_size_curr = curr_layer->get_dim_size(DIM_X_IDX); int get_layer_functionality = curr_layer->get_layer_functionality(); PerceptronLayer* newLayer = new opennn::PerceptronLayer(prev_layer_size, layer_size_curr); @@ -168,19 +440,16 @@ namespace nerlnet neural_network_ptr->add_layer(newLayer); break; } - case LAYER_TYPE_SCALING: + case LAYER_TYPE_SCALING: // TODO Check this layer implementation { - // std::vector layer_dims_vec; - // curr_layer->get_layer_size(layer_dims_vec); int layer_size_curr = curr_layer->get_dim_size(DIM_X_IDX); int get_layer_functionality = curr_layer->get_layer_functionality(); ScalingLayer* newLayer = new opennn::ScalingLayer(layer_size_curr); newLayer->set_scalers(translate_scaling_method(get_layer_functionality)); neural_network_ptr->add_layer(newLayer); break; - } - case 
LAYER_TYPE_UNSCALING: + case LAYER_TYPE_UNSCALING: // TODO Check this layer implementation { std::vector layer_dims_vec; curr_layer->get_layer_size(layer_dims_vec); @@ -190,28 +459,37 @@ namespace nerlnet newLayer->set_scalers(translate_unscaling_method(get_layer_functionality)); neural_network_ptr->add_layer(newLayer); break; + } + case LAYER_TYPE_BOUNDING: // TODO Check this layer implementation + { + std::vector layer_dims_vec; + curr_layer->get_layer_size(layer_dims_vec); + int layer_size_curr = curr_layer->get_dim_size(DIM_X_IDX); + int get_layer_functionality = curr_layer->get_layer_functionality(); + BoundingLayer* newLayer = new opennn::BoundingLayer(layer_size_curr); + newLayer->set_bounding_method("Bounding"); // ! What this method should be? + neural_network_ptr->add_layer(newLayer); + break; } - case LAYER_TYPE_PROBABILISTIC: + + case LAYER_TYPE_PROBABILISTIC: { - if (curr_layer->is_first()) + if (curr_layer->is_first()) { - LogError("NerlWorkerOpenNN::generate_custom_model_nn - PROBABILISTIC cannot be first layer"); - throw std::invalid_argument("NerlWorkerOpenNN::generate_custom_model_nn - PROBABILISTIC cannot be first layer"); + LogError("NerlWorkerOpenNN::generate_custom_model_nn - PROBABILISTIC cannot be first layer"); + throw std::invalid_argument("NerlWorkerOpenNN::generate_custom_model_nn - PROBABILISTIC cannot be first layer"); } std::shared_ptr prev_layer = curr_layer->get_prev_layer_ptr(); int prev_layer_size = prev_layer->get_dim_size(DIM_X_IDX); - int layer_size_curr = curr_layer->get_dim_size(DIM_X_IDX); - std::vector layer_dims_vec; - curr_layer->get_layer_size(layer_dims_vec); //TODO remove from all layers + int layer_size_curr = curr_layer->get_dim_size(DIM_X_IDX); int get_layer_functionality = curr_layer->get_layer_functionality(); ProbabilisticLayer* newLayer = new opennn::ProbabilisticLayer(prev_layer_size, layer_size_curr); - 
newLayer->set_activation_function((opennn::ProbabilisticLayer::ActivationFunction)translate_activation_function(get_layer_functionality)); + newLayer->set_activation_function(translate_probabilistic_activation_function(get_layer_functionality)); neural_network_ptr->add_layer(newLayer); break; } } - curr_layer = curr_layer->get_next_layer_ptr(); } } @@ -250,6 +528,7 @@ namespace nerlnet case LAYER_TYPE_LSTM: { res = translate_activation_function_int(layer_functionality); break;} case LAYER_TYPE_RECCURRENT: { res = translate_activation_function_int(layer_functionality); break;} case LAYER_TYPE_UNSCALING: { res = translate_unscaling_method_int(layer_functionality); break;} + case LAYER_TYPE_BOUNDING: { res = translate_activation_function_int(layer_functionality); break;} } return res; } @@ -267,6 +546,7 @@ namespace nerlnet case LAYER_TYPE_PROBABILISTIC:{ res = (int)opennn::Layer::Type::Probabilistic; break;} case LAYER_TYPE_LSTM: { res = (int)opennn::Layer::Type::LongShortTermMemory; break;} case LAYER_TYPE_RECCURRENT: { res = (int)opennn::Layer::Type::Recurrent; break;} + case LAYER_TYPE_BOUNDING: { res = (int)opennn::Layer::Type::Bounding; break;} } return res; } @@ -311,6 +591,21 @@ namespace nerlnet return res; } + ProbabilisticLayer::ActivationFunction NerlWorkerOpenNN::translate_probabilistic_activation_function(int activation_function) + { + ProbabilisticLayer::ActivationFunction res; + switch (activation_function) + { + case PROBABILISTIC_ACTIVATION_BINARY: { res = opennn::ProbabilisticLayer::ActivationFunction::Binary; break;} + case PROBABILISTIC_ACTIVATION_LOGISTIC: { res = opennn::ProbabilisticLayer::ActivationFunction::Logistic; break;} + case PROBABILISTIC_ACTIVATION_COMPETITIVE: { res = opennn::ProbabilisticLayer::ActivationFunction::Competitive; break;} + case PROBABILISTIC_ACTIVATION_SOFTMAX: { res = opennn::ProbabilisticLayer::ActivationFunction::Softmax; break;} + } + + return res; + } + + int NerlWorkerOpenNN::translate_loss_method_int(int 
loss_method) { int res; @@ -462,7 +757,7 @@ namespace nerlnet case MODEL_TYPE_CLASSIFICATION: {res = (int)NeuralNetwork::ProjectType::Classification; break;} case MODEL_TYPE_FORECASTING: {res = (int)NeuralNetwork::ProjectType::Forecasting; break;} case MODEL_TYPE_NN: {custom_model = true; break;} - case MODEL_TYPE_AUTOENCODER: {custom_model = true; break;} // TODO Guy consider Autoassociation type + case MODEL_TYPE_AUTOENCODER: {custom_model = true; break;} case MODEL_TYPE_AE_CLASSIFIER: {custom_model = true; break;} } return res; diff --git a/src_cpp/opennnBridge/nerlWorkerOpenNN.h b/src_cpp/opennnBridge/nerlWorkerOpenNN.h index 29c98544..69b8d076 100644 --- a/src_cpp/opennnBridge/nerlWorkerOpenNN.h +++ b/src_cpp/opennnBridge/nerlWorkerOpenNN.h @@ -7,6 +7,7 @@ #include "../common/nerlWorker.h" #include "eigenTensorTypes.h" #include "worker_definitions_ag.h" +#include "ae_red.h" #define TRAINING_STRATEGY_SET_DISPLAY_ON 1 #define TRAINING_STRATEGY_SET_DISPLAY_OFF 0 @@ -28,18 +29,26 @@ class NerlWorkerOpenNN : public NerlWorker std::shared_ptr get_neural_network_ptr() { return _neural_network_ptr; }; std::shared_ptr get_training_strategy_ptr() { return _training_strategy_ptr; }; + std::shared_ptr get_data_set() { return _data_set; }; + void post_training_process(fTensor2DPtr TrainDataNNptr); + void post_predict_process(fTensor2DPtr result_ptr); void set_optimization_method(int optimizer_type ,int learning_rate); void set_loss_method(int loss_method); void set_learning_rate(float learning_rate); void set_epochs(int epochs); + void set_dataset(std::shared_ptr data_set,fTensor2DPtr TrainDataNNptr); + std::shared_ptr get_dataset_ptr() { return _data_set; }; + private: std::shared_ptr _neural_network_ptr; std::shared_ptr _training_strategy_ptr; + std::shared_ptr _data_set; + fTensor2DPtr _aec_data_set; + std::shared_ptr _ae_red_ptr; - // neural network generator functions void generate_opennn_project(std::shared_ptr &neural_network_ptr); void 
generate_custom_model_nn(std::shared_ptr &neural_network_ptr); @@ -47,7 +56,7 @@ class NerlWorkerOpenNN : public NerlWorker void generate_custom_model_ae(std::shared_ptr &neural_network_ptr); void generate_custom_model_lstm(std::shared_ptr &neural_network_ptr); void generate_custom_model_recurrent(std::shared_ptr &neural_network_ptr); - + // translation functions int layer_functionality(int layer_functionality, int layer_type); @@ -63,6 +72,8 @@ class NerlWorkerOpenNN : public NerlWorker int translate_unscaling_method_int(int unscaling_method); opennn::Scaler translate_unscaling_method(int scaling_method); opennn::PoolingLayer::PoolingMethod translate_pooling_method(int pooling_method); + opennn::ProbabilisticLayer::ActivationFunction translate_probabilistic_activation_function(int activation_function); + int translate_pooling_method_int(int pooling_method); int translate_model_type(int model_type, int &custom_model); }; diff --git a/src_cpp/opennnBridge/openNNnif.cpp b/src_cpp/opennnBridge/openNNnif.cpp index ff69ecef..0262d708 100644 --- a/src_cpp/opennnBridge/openNNnif.cpp +++ b/src_cpp/opennnBridge/openNNnif.cpp @@ -9,31 +9,24 @@ void* trainFun(void* arg) double loss_val; ErlNifEnv *env = enif_alloc_env(); - DataSet data_set; - data_set.set_data(*(TrainNNptr->data)); + + //cout << "TrainNNptr->data = " << *(TrainNNptr->data) << endl; + // data_set.set_data(*(TrainNNptr->data)); //get nerlworker from bridge controller BridgeController &bridge_controller = BridgeController::GetInstance(); std::shared_ptr nerlworker = bridge_controller.getModelPtr(TrainNNptr->mid); std::shared_ptr nerlworker_opennn = std::static_pointer_cast(nerlworker); //get neural network from nerlworker + std::shared_ptr data_set_ptr = std::make_shared (); std::shared_ptr neural_network_ptr = nerlworker_opennn->get_neural_network_ptr(); - - int data_cols = TrainNNptr->data->dimension(1); - int num_of_features = neural_network_ptr->get_inputs_number(); - int num_of_output_neurons = 
neural_network_ptr->get_outputs_number(); - - // Data set definitions - bool data_set_condition = (num_of_features + num_of_output_neurons) == TrainNNptr->data->dimension(1); - assert(("issue with data input/output dimensions", data_set_condition)); - data_set.set_data(*(TrainNNptr->data)); - data_set.set(TrainNNptr->data->dimension(0), num_of_features, num_of_output_neurons); - + nerlworker_opennn->set_dataset(data_set_ptr, TrainNNptr->data); + data_set_ptr = nerlworker_opennn->get_data_set(); std::shared_ptr training_strategy_ptr = nerlworker_opennn->get_training_strategy_ptr(); - training_strategy_ptr->set_data_set_pointer(&data_set); + training_strategy_ptr->set_data_set_pointer(nerlworker_opennn->get_dataset_ptr().get()); TrainingResults res = training_strategy_ptr->perform_training(); + nerlworker_opennn->post_training_process(TrainNNptr->data); loss_val = res.get_training_error(); // learn about "get_training_error" of opennn - // Stop the timer and calculate the time took for training high_resolution_clock::time_point stop = high_resolution_clock::now(); auto duration = duration_cast(stop - TrainNNptr->start_time); @@ -72,7 +65,6 @@ void* PredictFun(void* arg) std::shared_ptr* pPredictNNptr = static_cast*>(arg); std::shared_ptr PredictNNptr = *pPredictNNptr; delete pPredictNNptr; - nifpp::TERM prediction; int EAC_prediction; ErlNifEnv *env = enif_alloc_env(); @@ -85,15 +77,23 @@ void* PredictFun(void* arg) Index num_of_samples = PredictNNptr->data->dimension(0); Index inputs_number = neural_network->get_inputs_number(); - fTensor2DPtr calculate_res = std::make_shared(num_of_samples, neural_network->get_outputs_number()); + Tensor input_variable_dimension(4); Tensor inputs_dimensions(2); - inputs_dimensions.setValues({num_of_samples, inputs_number}); - - *calculate_res = neural_network->calculate_outputs(PredictNNptr->data->data(), inputs_dimensions); + if(neural_network->has_convolutional_layer()) + { + ConvolutionalLayer* conv = 
(ConvolutionalLayer*)neural_network->get_layer_pointer(0); + input_variable_dimension.setValues({num_of_samples,conv->get_input_variables_dimensions()(1), conv->get_input_variables_dimensions()(2), conv->get_input_variables_dimensions()(3)}); + *calculate_res = neural_network->calculate_outputs(PredictNNptr->data->data(), input_variable_dimension); + }else{ + inputs_dimensions.setValues({num_of_samples, inputs_number}); + *calculate_res = neural_network->calculate_outputs(PredictNNptr->data->data(), inputs_dimensions); + } + nerlworker_opennn->post_predict_process(calculate_res); nifpp::make_tensor_2d(env, prediction, calculate_res); - + // only for AE and AEC calculate the distance between prediction labels and input data + //std::cout << "*calculate_res.get(): " << (*calculate_res.get()).dimensions() << std::endl; // Stop the timer and calculate the time took for training high_resolution_clock::time_point stop = high_resolution_clock::now(); auto duration = duration_cast(stop - PredictNNptr->start_time); diff --git a/src_cpp/opennnBridge/openNNnif.h b/src_cpp/opennnBridge/openNNnif.h index a50fe917..3834c891 100644 --- a/src_cpp/opennnBridge/openNNnif.h +++ b/src_cpp/opennnBridge/openNNnif.h @@ -81,7 +81,8 @@ static ERL_NIF_TERM predict_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM arg PredictNNptr->return_tensor_type = tensor_type; nifpp::get_throws(env, argv[ARG_ModelID], PredictNNptr->mid); // get model id - + //std::cout << "PredictNNptr->mid: " << PredictNNptr->mid << std::endl; + //std::cout << "argv[ARG_BatchTensor]: " << argv[ARG_BatchTensor] << std::endl; nifpp::get_tensor_2d(env,argv[ARG_BatchTensor],PredictNNptr->data); int res; @@ -118,16 +119,12 @@ static ERL_NIF_TERM train_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[ TrainNNptr->start_time = high_resolution_clock::now(); nifpp::str_atom tensor_type; - enum{ARG_ModelID,ARG_DataTensor,ARG_Type}; nifpp::get_throws(env, argv[ARG_ModelID],TrainNNptr->mid); // model id nifpp::get_throws(env, 
argv[ARG_Type],tensor_type); assert(tensor_type == "float"); TrainNNptr->return_tensor_type = tensor_type; - nifpp::get_tensor_2d(env,argv[ARG_DataTensor],TrainNNptr->data); - // std::cout << *(TrainNNptr->data) << std::endl; - ErlNifPid pid; enif_self(env, &pid); TrainNNptr->pid = pid; diff --git a/src_erl/NerlnetApp/src/Bridge/layers_types_ag.hrl b/src_erl/NerlnetApp/src/Bridge/layers_types_ag.hrl new file mode 100644 index 00000000..bea581cc --- /dev/null +++ b/src_erl/NerlnetApp/src/Bridge/layers_types_ag.hrl @@ -0,0 +1,13 @@ +% This is an auto generated .hrl file +% DC Fields Generated by Nerlplanner version: 1.0.0 + +-define(LAYERS_TYPE_DEFAULT_IDX,"0"). +-define(LAYERS_TYPE_SCALING_IDX,"1"). +-define(LAYERS_TYPE_CNN_IDX,"2"). +-define(LAYERS_TYPE_PERCEPTRON_IDX,"3"). +-define(LAYERS_TYPE_POOLING_IDX,"4"). +-define(LAYERS_TYPE_PROBABILISTIC_IDX,"5"). +-define(LAYERS_TYPE_LSTM_IDX,"6"). +-define(LAYERS_TYPE_RECCURRENT_IDX,"7"). +-define(LAYERS_TYPE_UNSCALING_IDX,"8"). +-define(LAYERS_TYPE_BOUNDING_IDX,"9"). diff --git a/src_erl/NerlnetApp/src/Bridge/models_types_ag.hrl b/src_erl/NerlnetApp/src/Bridge/models_types_ag.hrl new file mode 100644 index 00000000..7e504f95 --- /dev/null +++ b/src_erl/NerlnetApp/src/Bridge/models_types_ag.hrl @@ -0,0 +1,24 @@ +% This is an auto generated .hrl file +% DC Fields Generated by Nerlplanner version: 1.0.0 + +-define(MODEL_TYPE_NN_IDX,"0"). +-define(MODEL_TYPE_APPROXIMATION_IDX,"1"). +-define(MODEL_TYPE_CLASSIFICATION_IDX,"2"). +-define(MODEL_TYPE_FORECASTING_IDX,"3"). +-define(MODEL_TYPE_IMAGE_CLASSIFICATION_IDX,"4"). +-define(MODEL_TYPE_TEXT_CLASSIFICATION_IDX,"5"). +-define(MODEL_TYPE_TEXT_GENERATION_IDX,"6"). +-define(MODEL_TYPE_AUTO_ASSOCIATION_IDX,"7"). +-define(MODEL_TYPE_AUTOENCODER_IDX,"8"). +-define(MODEL_TYPE_AE_CLASSIFIER_IDX,"9"). + +-define(MODEL_TYPE_NN_IDX_INT,0). +-define(MODEL_TYPE_APPROXIMATION_IDX_INT,1). +-define(MODEL_TYPE_CLASSIFICATION_IDX_INT,2). +-define(MODEL_TYPE_FORECASTING_IDX_INT,3). 
+-define(MODEL_TYPE_IMAGE_CLASSIFICATION_IDX_INT,4). +-define(MODEL_TYPE_TEXT_CLASSIFICATION_IDX_INT,5). +-define(MODEL_TYPE_TEXT_GENERATION_IDX_INT,6). +-define(MODEL_TYPE_AUTO_ASSOCIATION_IDX_INT,7). +-define(MODEL_TYPE_AUTOENCODER_IDX_INT,8). +-define(MODEL_TYPE_AE_CLASSIFIER_IDX_INT,9). diff --git a/src_erl/NerlnetApp/src/Bridge/nerlNIF.erl b/src_erl/NerlnetApp/src/Bridge/nerlNIF.erl index 61178eb4..d472eeee 100644 --- a/src_erl/NerlnetApp/src/Bridge/nerlNIF.erl +++ b/src_erl/NerlnetApp/src/Bridge/nerlNIF.erl @@ -24,7 +24,8 @@ init() -> NELNET_LIB_PATH = ?NERLNET_PATH++?BUILD_TYPE_RELEASE++"/"++?NERLNET_LIB, - RES = erlang:load_nif(NELNET_LIB_PATH, 0), + io:format("PATH: ~p~n",[NELNET_LIB_PATH]), + RES = erlang:load_nif(NELNET_LIB_PATH, 0), %% CRASHES HERE RES. %% make sure nif can be loaded (activates on_load) @@ -115,7 +116,7 @@ printTensor(List,_Type) when is_list(List) -> exit(nif_library_not_loaded). -validate_nerltensor_erl(NerlTensorErl) -> +validate_nerltensor_erl(NerlTensorErl) when is_list(NerlTensorErl) -> {[X,Y,Z], NerlTensorRest} = lists:split(?NUMOF_DIMS, NerlTensorErl), TensorExpectedLength = trunc(X*Y*Z), % io:format("{X,Y,Z} = ~p, TensorLen (X*Y*Z)= ~p~n",[{X,Y,Z}, length(NerlTensorRest)]), @@ -178,7 +179,6 @@ nerltensor_conversion({NerlTensor, Type}, ResType) -> {false, true} -> {decode, ResType, Type}; _ -> throw("invalid types combination") end, - BinTypeInteger = lists:member(BinType, ?LIST_BINARY_INT_NERLTENSOR_TYPE), BinTypeFloat = lists:member(BinType, ?LIST_BINARY_FLOAT_NERLTENSOR_TYPE), @@ -196,7 +196,11 @@ nerltensor_conversion({NerlTensor, Type}, ResType) -> true -> io:format("Wrong NerlTensor size!~n"), {<<>>, BinType} % true -> throw(nerl:string_format("encode failure due to incorrect dimension declaring X*Y*Z not equal to tensor data length! 
~p ",[NerlTensor])) end; - decode -> decode_nif(NerlTensor, BinType); + decode -> + if + is_binary(NerlTensor) -> decode_nif(NerlTensor, BinType); + true -> throw("Given non-binary NerlTensor for decoding!") + end; _ -> throw("wrong operation") end. diff --git a/src_erl/NerlnetApp/src/Bridge/nerlTensor.erl b/src_erl/NerlnetApp/src/Bridge/nerlTensor.erl index e7b52b6e..1bdb9bac 100644 --- a/src_erl/NerlnetApp/src/Bridge/nerlTensor.erl +++ b/src_erl/NerlnetApp/src/Bridge/nerlTensor.erl @@ -6,10 +6,34 @@ -import(nerlNIF,[nerltensor_sum_nif/3]). -export([nerltensor_sum_erl/2]). --export([sum_nerltensors_lists/2, sum_nerltensors_lists_erl/2]). +-export([sum_nerltensors_lists/2, sum_nerltensors_lists_erl/2 , split_cols_erl_tensor/3]). get_all_nerltensor_list_types() -> ?LIST_GROUP_NERLTENSOR_TYPE. +split_cols_erl_tensor(Tensor , _DataType , SplitColumnIdx) -> %% DataType should determine the variable type for the dimensions and data + [DimX, DimY, DimZ | Data] = Tensor, + % io:format("DimX: ~p~n", [DimX]), + % io:format("DimY: ~p~n", [DimY]), + % io:format("DimZ: ~p~n", [DimZ]), + {FeaturesSamples , LabelsSamples} = split_data(Data , round(DimY) , SplitColumnIdx , [] , []), + % io:format("FeaturesSamples: ~p~n", [FeaturesSamples]), + {[DimX , float(SplitColumnIdx) , DimZ] ++ FeaturesSamples , [DimX , DimY - SplitColumnIdx , DimZ] ++ LabelsSamples}. + +split_data(Data , NumCols , ColumnIndex , Acc1 , Acc2) when is_list(Data) -> + %% io:format("Acc1: ~p~n", [Acc1]), + %% io:format("Acc2: ~p~n", [Acc2]), + if + length(Data) == 0 -> + {Acc1 , Acc2}; + true -> + {RowPart1 , RestDataTemp} = lists:split(ColumnIndex , Data), + {RowPart2 , RestData} = lists:split(NumCols - ColumnIndex , RestDataTemp), + Split1 = Acc1 ++ RowPart1, + Split2 = Acc2 ++ RowPart2, + split_data(RestData , NumCols , ColumnIndex , Split1 , Split2) + end. 
+ + nerltensor_sum_erl({NerlTensorErlA, Type}, {NerlTensorErlB, Type}) -> ListGroup = lists:member(Type, get_all_nerltensor_list_types()), if ListGroup -> diff --git a/src_erl/NerlnetApp/src/Bridge/nerlTests.erl b/src_erl/NerlnetApp/src/Bridge/nerlTests.erl index 2121884f..59baf9d2 100644 --- a/src_erl/NerlnetApp/src/Bridge/nerlTests.erl +++ b/src_erl/NerlnetApp/src/Bridge/nerlTests.erl @@ -1,6 +1,9 @@ -module(nerlTests). -author("David Leon"). -include("nerlTensor.hrl"). +-include("neural_networks_testing_models.hrl"). +-include("layers_types_ag.hrl"). +-include("models_types_ag.hrl"). -compile(nerlNIF). -export([run_tests/0]). @@ -75,9 +78,10 @@ run_tests()-> SumNerlTensorsListDoubleName = "sum_nerltensors_lists double", test_envelope_nif_performance(SumNerlTensorsListDoubleFunc, SumNerlTensorsListDoubleName, ?NERLTESNORS_SUM_LIST_ROUNDS ), - NerlworkerTestFunc = fun(Rounds) -> Performance = 0, nerlworker_test(Rounds, Performance) end, + NeuralNetworkTestingModelList = ?NEURAL_NETWORK_TESTING_MODELS_LIST, + NerlworkerTestFunc = fun(_Rounds) -> Performance = 0, nerlworker_test(NeuralNetworkTestingModelList, Performance) end, NerlworkerTestName = "nerlworker_test", - test_envelope_nif_performance(NerlworkerTestFunc, NerlworkerTestName, ?NERLWORKER_TEST_ROUNDS ), + test_envelope_nif_performance(NerlworkerTestFunc, NerlworkerTestName, length(NeuralNetworkTestingModelList) ), nerltest_print("Tests Completed"), ok. @@ -93,15 +97,16 @@ generate_nerltensor_rand_dims(Type)-> DimZ = 1, generate_nerltensor(Type,DimX,DimY,DimZ). 
-generate_nerltensor(Type,DimX,DimY,DimZ) -> +generate_nerltensor(BinType,DimX,DimY,DimZ) -> DataLength = DimX * DimY * DimZ, if - (Type == int32) or (Type == int16) -> Data = [rand:uniform(255) || _ <- lists:seq(1, DataLength)], + (BinType == int32) or (BinType == int16) -> Data = [rand:uniform(255) || _ <- lists:seq(1, DataLength)], [DimX,DimY,DimZ] ++ Data; - (Type == double) or (Type == float) -> DimXf = float(DimX), + (BinType == double) or (BinType == float) -> + DimXf = float(DimX), DimYf = float(DimY), DimZf = float(DimZ), - Data = [rand:uniform() * 10 || _ <- lists:seq(1, DataLength)], + Data = [rand:uniform() * 10 || _ <- lists:seq(1, DataLength)], %% Where are the labels generated? [DimXf,DimYf,DimZf] ++ Data; true -> wrong_type end. @@ -225,20 +230,122 @@ nerltensor_conversion_test(Rounds) -> end end. -nerlworker_test(0, _Performance) -> _Performance; -nerlworker_test(Rounds, Performance) -> - ModelId = erlang:unique_integer([positive]), - ModelType = "5", - LayersSizes = "5,10,5,3", - LayersTypes = "1,3,3,3", - LayersFunctionalityCodes = "1,6,11,11", % change scaler functionality to 6 to check exception handling - LearningRate = "0.01", - Epochs = "1", - OptimizerType = "2", - OptimizerArgs = "", - LossMethod = "2", - DistributedSystemType = "0", - DistributedSystemArg = "", - nerlNIF:test_nerlworker_nif(ModelId,ModelType,LayersSizes, LayersTypes, LayersFunctionalityCodes, LearningRate, Epochs, OptimizerType, OptimizerArgs, LossMethod, DistributedSystemType, DistributedSystemArg), + +nerlworker_test_generate_data(LayersSizes, LayerTypes, NumOfSamples) -> %% Ask David about where to split and if a 'if' statement is needed + % extract first and last sizes + % use module re to extract complex layer sizes + [FirstLayerSize | LayerSizesList] = re:split(LayersSizes,",",[{return,list}]), + [LastLayerSize|_] = lists:reverse(LayerSizesList), + [FirstLayerType| _] = re:split(LayerTypes,",",[{return,list}]), + %TODO simple layer types in inline function + {DimX, 
DimY, DimZ} = + case FirstLayerType of + ?LAYERS_TYPE_DEFAULT_IDX -> {FirstLayerSizeInt,_} = string:to_integer(FirstLayerSize), + {LastLayerSizeInt,_} = string:to_integer(LastLayerSize), + {NumOfSamples,LastLayerSizeInt+FirstLayerSizeInt, 1}; + ?LAYERS_TYPE_SCALING_IDX -> {FirstLayerSizeInt,_} = string:to_integer(FirstLayerSize), + {LastLayerSizeInt,_} = string:to_integer(LastLayerSize), + {NumOfSamples,LastLayerSizeInt+FirstLayerSizeInt, 1}; + ?LAYERS_TYPE_CNN_IDX -> [DimXComplex, DimYComplex, DimZComplex | _] = re:split(FirstLayerSize,"x",[{return,list}]), + {DimXComplexInt,_} = string:to_integer(DimXComplex), + {DimYComplexInt,_} = string:to_integer(DimYComplex), + {DimZComplexInt,_} = string:to_integer(DimZComplex), + {LastLayerSizeInt,_} = string:to_integer(LastLayerSize), + {NumOfSamples, DimXComplexInt*DimYComplexInt*DimZComplexInt+LastLayerSizeInt, 1}; + ?LAYERS_TYPE_PERCEPTRON_IDX ->{FirstLayerSizeInt,_} = string:to_integer(FirstLayerSize), + {LastLayerSizeInt,_} = string:to_integer(LastLayerSize), + {NumOfSamples,LastLayerSizeInt+FirstLayerSizeInt, 1}; + ?LAYERS_TYPE_POOLING_IDX -> {FirstLayerSizeInt,_} = string:to_integer(FirstLayerSize), + {LastLayerSizeInt,_} = string:to_integer(LastLayerSize), + {NumOfSamples,LastLayerSizeInt+FirstLayerSizeInt, 1}; + ?LAYERS_TYPE_PROBABILISTIC_IDX -> {FirstLayerSizeInt,_} = string:to_integer(FirstLayerSize), + {LastLayerSizeInt,_} = string:to_integer(LastLayerSize), + {NumOfSamples,LastLayerSizeInt+FirstLayerSizeInt, 1}; + ?LAYERS_TYPE_LSTM_IDX -> {1, 1, 1}; + ?LAYERS_TYPE_RECCURRENT_IDX -> {1, 1, 1}; + ?LAYERS_TYPE_UNSCALING_IDX -> {FirstLayerSizeInt,_} = string:to_integer(FirstLayerSize), + {LastLayerSizeInt,_} = string:to_integer(LastLayerSize), + {NumOfSamples,LastLayerSizeInt+FirstLayerSizeInt, 1} + end, + ErlDataTensor = generate_nerltensor(float, DimX, DimY, DimZ), + %{NumOfFeatures ,_} = string:to_integer(FirstLayerSize), + {NumOfLabels ,_} = string:to_integer(LastLayerSize), + NumOfFeatures = DimY - 
NumOfLabels, + % io:format("ErlDataTensor of length ~p : ~p~n",[length(ErlDataTensor),ErlDataTensor]), + %%{ErlDataTensor , float , NumOfFeatures , NumOfLabels}, + %% {SamplesFeatures , SamplesLabels} = nerlTensor:split_erl_tensor(ErlDataTensor , NumOfFeatures , float), + %% io:format("Splitted SamplesFeatures (Of Length ~p) is ~p~n",[length(SamplesFeatures) , SamplesFeatures]), + %% io:format("Splitted SamplesLabels (Of Length ~p) is ~p~n",[length(SamplesLabels) , SamplesLabels]). + {NerlTensor , Type} = nerlNIF:nerltensor_conversion({ErlDataTensor,erl_float} , float), + {NerlTensor , Type , ErlDataTensor , erl_float , NumOfFeatures , NumOfLabels}. + + + +nerlworker_test([], _Performance) -> _Performance; +nerlworker_test([CurrentModel | Tail], Performance) -> + {ModelId,ModelType,LayersSizes, LayersTypes, LayersFunctionalityCodes, + LearningRate, Epochs, OptimizerType, OptimizerArgs, + LossMethod, DistributedSystemType, DistributedSystemArg} = CurrentModel, + case ModelType of + ?MODEL_TYPE_NN_IDX -> nerltest_print("Testing NN Model"); + ?MODEL_TYPE_AUTOENCODER_IDX -> nerltest_print("Testing AE Model"); + ?MODEL_TYPE_AE_CLASSIFIER_IDX -> nerltest_print("Testing AEC Model"); + _ -> nerltest_print(nerl:string_format("Model Type ~p is being tested~n",[ModelType])) + end, + nerlNIF:test_nerlworker_nif(ModelId,ModelType,LayersSizes, LayersTypes, + LayersFunctionalityCodes, LearningRate, Epochs, OptimizerType, + OptimizerArgs, LossMethod, DistributedSystemType, DistributedSystemArg), + NumOfSamples = 500, + {NerlTensorDataBin , NerlTensorDataBinType , NerlTensorDataErl , NerlTensorDataErlType , NumOfFeatures , _NumOfLabels} = nerlworker_test_generate_data(LayersSizes, LayersTypes, NumOfSamples), + if + (ModelType == ?MODEL_TYPE_AUTOENCODER_IDX) or (ModelType == ?MODEL_TYPE_AE_CLASSIFIER_IDX) -> %% AE or AEC + {DataTensorErlFeatures , _DataTensorErlLabels} = nerlTensor:split_cols_erl_tensor(NerlTensorDataErl , NerlTensorDataErlType , NumOfFeatures), + 
{NerlTensorDataBinTrain , _Type} = nerlNIF:nerltensor_conversion({DataTensorErlFeatures, erl_float}, float), + NerlTensorDataBinPredict = NerlTensorDataBinTrain; + true -> + NerlTensorDataBinTrain = NerlTensorDataBin, + {DataTensorErlPredictFeatures , _DataTensorErlPredictLabels} = nerlTensor:split_cols_erl_tensor(NerlTensorDataErl , NerlTensorDataErlType , NumOfFeatures), + {NerlTensorDataBinPredict , _Type} = nerlNIF:nerltensor_conversion({DataTensorErlPredictFeatures, erl_float}, float) + end, + TicNIF = nerl:tic(), + nerlNIF:train_nif(ModelId , NerlTensorDataBinTrain , NerlTensorDataBinType), % ask Guy about receiver block + + receive + {nerlnif , _LossValue , _TrainTime} -> + % io:format("Got LossValue~n") + ok + after 100000 -> throw("timeout") + end, + %block receive to get loss values from worker + nerlNIF:predict_nif(ModelId , NerlTensorDataBinPredict , NerlTensorDataBinType), + receive + {nerlnif , _PredNerlTensor, _NewType, _TimeTook} -> + % io:format("Got Pred~n") + ok + after 100000 -> throw("timeout") + end, + % TODO remove labels from generated data - ask David if we need to change "generate_tensor" + % TODO Ori - implement predict nerlNIF:remove_nerlworker_nif(ModelId), - nerlworker_test(Rounds - 1, Performance). \ No newline at end of file + {TocNIF, _} = nerl:toc(TicNIF), + PerformanceNIF = TocNIF + Performance, + + nerlworker_test(Tail, PerformanceNIF). 
+ + + +% % neural_network_sample_1() -> +% % { ModelId = erlang:unique_integer([positive]), +% ModelType = "0", +% LayersSizes = "128x128k3x3p1x1s2,64x64k3p1s2,1024,256,128,8,2",%"5,10,5,3", +% LayersTypes = "2,2,3,3,3,3,3", +% LayersFunctionalityCodes = "5,6,11,11,11,11,11", % change scaler functionality to 6 to check exception handling +% LearningRate = "0.01", +% Epochs = "1", +% OptimizerType = "2", +% OptimizerArgs = "", +% LossMethod = "2", +% DistributedSystemType = "0", +% DistributedSystemArg = "", + + diff --git a/src_erl/NerlnetApp/src/Bridge/neural_networks_testing_models.hrl b/src_erl/NerlnetApp/src/Bridge/neural_networks_testing_models.hrl new file mode 100644 index 00000000..8d4b7a3a --- /dev/null +++ b/src_erl/NerlnetApp/src/Bridge/neural_networks_testing_models.hrl @@ -0,0 +1,56 @@ + + +-define(PERCEPTRON_TESTING_NN,{ _ModelId = erlang:unique_integer([positive]), + _ModelType = "0", + _LayersSizes = "5,30,5,3", + _LayersTypes = "1,3,3,3", + _LayersFunctionalityCodes = "1,6,6,6", % change scaler functionality to 6 to check exception handling + _LearningRate = "0.01", + _Epochs = "50", + _OptimizerType = "2", + _OptimizerArgs = "", + _LossMethod = "2", + _DistributedSystemType = "0", + _DistributedSystemArg = ""} ). + + +-define(CNN_TESTING_NN,{ _ModelIdCNN = erlang:unique_integer([positive]), + _ModelTypeCNN = "0", + _LayersSizesCNN = "28x28x1k5x5x1x6p0s1t1,28x28x6k2x2p0s2,14x14x6k4x4x6x12p0s1t0,32,10", + _LayersTypesCNN = "2,4,2,3,5", + _LayersFunctionalityCodesCNN = "6,2,6,6,4", % change scaler functionality to 6 to check exception handling + _LearningRateCNN = "0.01", + _EpochsCNN = "100", + _OptimizerTypeCNN = "5", + _OptimizerArgsCNN = "", + _LossMethodCNN = "2", + _DistributedSystemTypeCNN = "0", + _DistributedSystemArgCNN = ""} ). 
+ +-define(AEC_TESTING_NN,{ _ModelIdAEC = erlang:unique_integer([positive]), + _ModelTypeAEC = "9", + _LayersSizesAEC = "32,16,8,4,8,16,32,32", % last layer (perceptron) should be the same as the input layer , followed by bounding layer + _LayersTypesAEC = "1,3,3,3,3,3,3,9", + _LayersFunctionalityCodesAEC = "1,11,11,11,11,11,11,1", + _LearningRateAEC = "0.01", + _EpochsAEC = "100", + _OptimizerTypeAEC = "5", + _OptimizerArgsAEC = "", + _LossMethodAEC = "2", + _DistributedSystemTypeAEC = "0", + _DistributedSystemArgAEC = ""} ). + +-define(AE_TESTING_NN, { _ModelIdAE = erlang:unique_integer([positive]), + _ModelTypeAE = "8", + _LayersSizesAE = "32,16,8,4,8,16,32,32", % last layer (perceptron) should be the same as the input layer , followed by bounding layer + _LayersTypesAE = "1,3,3,3,3,3,3,9", + _LayersFunctionalityCodesAE = "1,11,11,11,11,11,11,1", + _LearningRateAE = "0.01", + _EpochsAE = "50", + _OptimizerTypeAE = "5", + _OptimizerArgsAE = "", + _LossMethodAE = "2", + _DistributedSystemTypeAE = "0", + _DistributedSystemArgAE = ""} ). + +-define(NEURAL_NETWORK_TESTING_MODELS_LIST, [?PERCEPTRON_TESTING_NN ,?AEC_TESTING_NN , ?CNN_TESTING_NN]). 
diff --git a/src_erl/NerlnetApp/src/Bridge/onnWorkers/workerGeneric.erl b/src_erl/NerlnetApp/src/Bridge/onnWorkers/workerGeneric.erl index b95db586..6aa0bbdf 100644 --- a/src_erl/NerlnetApp/src/Bridge/onnWorkers/workerGeneric.erl +++ b/src_erl/NerlnetApp/src/Bridge/onnWorkers/workerGeneric.erl @@ -168,6 +168,8 @@ wait(cast, {loss , nan , TimeNIF , BatchID , SourceName}, State = #workerGeneric {next_state, NextState, State}; wait(cast, {loss, LossTensor , TimeNIF , BatchID , SourceName}, State = #workerGeneric_state{myName = MyName, nextState = NextState, modelID=_ModelID, distributedBehaviorFunc = DistributedBehaviorFunc, distributedWorkerData = DistributedWorkerData}) -> + % {[_ , _ , _ , LossValue] , _} = LossTensor, + % io:format("Got Loss Value ~p~n",[LossValue]), BatchTimeStamp = erlang:system_time(nanosecond), gen_statem:cast(get(client_pid),{loss, MyName, SourceName ,LossTensor , TimeNIF , BatchID , BatchTimeStamp}), %% TODO Add Time and Time_NIF to the cast ToUpdate = DistributedBehaviorFunc(post_train, {get(generic_worker_ets),DistributedWorkerData}), @@ -177,7 +179,7 @@ wait(cast, {loss, LossTensor , TimeNIF , BatchID , SourceName}, State = #workerG wait(cast, {predictRes,PredNerlTensor, Type, TimeNIF, BatchID , SourceName}, State = #workerGeneric_state{myName = MyName, nextState = NextState, distributedBehaviorFunc = DistributedBehaviorFunc, distributedWorkerData = DistributedWorkerData}) -> BatchTimeStamp = erlang:system_time(nanosecond), - gen_statem:cast(get(client_pid),{predictRes,MyName,SourceName, {PredNerlTensor, Type}, TimeNIF , BatchID , BatchTimeStamp}), %% TODO TODO change csv name and batch id(1) + gen_statem:cast(get(client_pid),{predictRes,MyName,SourceName, {PredNerlTensor, Type}, TimeNIF , BatchID , BatchTimeStamp}), Update = DistributedBehaviorFunc(post_predict, {get(generic_worker_ets),DistributedWorkerData}), if Update -> {next_state, update, State#workerGeneric_state{nextState=NextState}}; @@ -307,7 +309,7 @@ predict(cast, {sample , 
SourceName , BatchID , {PredictBatchTensor, Type}}, Stat %% io:format("Pred Tensor: ~p~n",[nerlNIF:nerltensor_conversion({PredictBatchTensor , Type} , nerlNIF:erl_type_conversion(Type))]), _Pid = spawn(fun()-> nerlNIF:call_to_predict(ModelId , {PredictBatchTensor, Type} , CurrPID , BatchID, SourceName) end), {next_state, wait, State#workerGeneric_state{nextState = predict , currentBatchID = BatchID}}; - + predict(cast, {idle}, State = #workerGeneric_state{myName = MyName}) -> update_client_avilable_worker(MyName), {next_state, idle, State}; diff --git a/src_erl/NerlnetApp/src/MainServer/mainGenserver.erl b/src_erl/NerlnetApp/src/MainServer/mainGenserver.erl index ceb85ea7..e15ac392 100644 --- a/src_erl/NerlnetApp/src/MainServer/mainGenserver.erl +++ b/src_erl/NerlnetApp/src/MainServer/mainGenserver.erl @@ -272,13 +272,11 @@ handle_cast({clientAck,Body}, State = #main_genserver_state{clientsWaitingList = handle_cast({startCasting,SourcesNames}, State = #main_genserver_state{state = idle, sourcesCastingList=CastingList, sourcesWaitingList = [], clientsWaitingList = []}) -> put(curr_phase_ack , start_casting_done), StatsEts = get_entity_stats_ets(?MAIN_SERVER_ATOM), - io:format("@MainServer startCasting Body ~p~n",[binary_to_list(SourcesNames)]), stats:increment_messages_received(StatsEts), SourcesList = re:split(binary_to_list(SourcesNames), "," , [{return, list}]), %% NumOfSampleToSend = lists:last(Splitted), %% Sources = lists:sublist(Splitted,length(Splitted)-1), SourcesAtoms = [list_to_atom(Source_Name) || Source_Name <- SourcesList], - io:format("Sources: ~p~n",[SourcesList]), sources_start_casting(SourcesList), % each source gets a unicast message of start casting action stats:increment_messages_sent(StatsEts, length(SourcesList)), diff --git a/src_erl/NerlnetApp/src/Source/parser.erl b/src_erl/NerlnetApp/src/Source/parser.erl index 169e1250..ebbac6f6 100644 --- a/src_erl/NerlnetApp/src/Source/parser.erl +++ b/src_erl/NerlnetApp/src/Source/parser.erl @@ -62,7 
+62,7 @@ parseCSV(SourceName, BatchSize, CSVData)-> %%this parser takes a CSV folder containing chunked data, parsing into a list of binary. %%each record in the line is a batch of samples -parse_file(SourceName, BatchSize,Data) -> +parse_file(_SourceName, BatchSize,Data) -> % {ok, Data} = file:read_file(File_Address), Lines = re:split(Data, "\r|\n|\r\n", [{return,binary}] ), % CleanLines = [Line || Line <- Lines, Line /= []], @@ -84,7 +84,6 @@ parse_file(SourceName, BatchSize,Data) -> % TestTensor = nerlNIF:nerltensor_conversion(hd(tl(A)), erl_float), % B = encodeListOfListsNerlTensor(ListOfBatches, DataType, SampleSize, 1), % io:format("A = ~p~n~nB = ~p ~n", [A, B]), - % io:format("Test Tensor = ~p ~n", [TestTensor]), A; % erl_int -> encodeListOfListsNerlTensor(ListOfGroupedBatches, UserType, BatchSize,SampleSize,DimZ); _Other -> throw("wrong ErlType") diff --git a/src_py/nerlPlanner/CppHeadersExporter.py b/src_py/nerlPlanner/CppHeadersExporter.py index 1fd2642c..46791ec4 100644 --- a/src_py/nerlPlanner/CppHeadersExporter.py +++ b/src_py/nerlPlanner/CppHeadersExporter.py @@ -29,6 +29,8 @@ def gen_header_worker_parameters_definitions(header_path : str, debug : bool = F gen_header_exporter_logger(probabilistic_activation_enums.generate_code()) scaling_enums = EnumType('ScalingEnum', ScalingMethodMap, True, 'SCALING') gen_header_exporter_logger(scaling_enums.generate_code()) + bounding_enums = EnumType('BoundingEnum', BoundingMethodMap, True, 'BOUNDING') + gen_header_exporter_logger(bounding_enums.generate_code()) unscaling_enums = EnumType('UnscalingEnum', UnScalingMethodMap, True, 'UNSCALING') gen_header_exporter_logger(unscaling_enums.generate_code()) pooling_enums = EnumType('PoolingEnum', PoolingMethodMap, True, 'POOLING') @@ -62,6 +64,7 @@ def gen_header_worker_parameters_definitions(header_path : str, debug : bool = F f.write(layer_type_enums.generate_code()) f.write(probabilistic_activation_enums.generate_code()) f.write(scaling_enums.generate_code()) + 
f.write(bounding_enums.generate_code()) f.write(unscaling_enums.generate_code()) f.write(pooling_enums.generate_code()) f.write(activation_enums.generate_code()) diff --git a/src_py/nerlPlanner/ErlHeadersExporter.py b/src_py/nerlPlanner/ErlHeadersExporter.py index c34cee2b..f9d5281d 100644 --- a/src_py/nerlPlanner/ErlHeadersExporter.py +++ b/src_py/nerlPlanner/ErlHeadersExporter.py @@ -223,7 +223,64 @@ def gen_dc_fields_hrl(header_path : str, debug : bool = False): f.write(EMPTY_LINE) [f.write(x.generate_code()) for x in fields_list_defs_str_bins] +def gen_layers_type_hrl(header_path : str, debug : bool = False): + global DEBUG + DEBUG = debug + + + auto_generated_header = AutoGeneratedHeader() + gen_erlang_exporter_logger(auto_generated_header.generate_code()) + + nerlplanner_version = Comment(f'Layers Types Generated by Nerlplanner version: {NERLPLANNER_VERSION}') + gen_erlang_exporter_logger(nerlplanner_version.generate_code()) + + layers_type_index_defs_list = [] + for key,val in LayerTypeMap.items(): + layer_type_definition = Definition(f'LAYERS_TYPE_{key.upper()}_IDX', f'"{val}"') + layers_type_index_defs_list.append(layer_type_definition) + gen_erlang_exporter_logger(layer_type_definition.generate_code()) + path_validator(header_path) + + with open(header_path, 'w') as f: + f.write(auto_generated_header.generate_code()) + f.write(nerlplanner_version.generate_code()) + f.write(EMPTY_LINE) + [f.write(x.generate_code()) for x in layers_type_index_defs_list] + +def gen_models_types_hrl(header_path : str, debug : bool = False): + global DEBUG + DEBUG = debug + + + auto_generated_header = AutoGeneratedHeader() + gen_erlang_exporter_logger(auto_generated_header.generate_code()) + + nerlplanner_version = Comment(f'Models Types Generated by Nerlplanner version: {NERLPLANNER_VERSION}') + gen_erlang_exporter_logger(nerlplanner_version.generate_code()) + + model_type_index_defs_list = [] + for key,val in ModelTypeMapping.items(): + model_type_definition = 
Definition(f'MODEL_TYPE_{key.upper()}_IDX', f'"{val}"') + model_type_index_defs_list.append(model_type_definition) + gen_erlang_exporter_logger(model_type_definition.generate_code()) + + model_type_index_int_defs_list = [] + for key,val in ModelTypeMapping.items(): + model_type_definition = Definition(f'MODEL_TYPE_{key.upper()}_IDX_INT', f'{val}') + model_type_index_int_defs_list.append(model_type_definition) + gen_erlang_exporter_logger(model_type_definition.generate_code()) + + path_validator(header_path) + + with open(header_path, 'w') as f: + f.write(auto_generated_header.generate_code()) + f.write(nerlplanner_version.generate_code()) + f.write(EMPTY_LINE) + [f.write(x.generate_code()) for x in model_type_index_defs_list] + f.write(EMPTY_LINE) + [f.write(x.generate_code()) for x in model_type_index_int_defs_list] + def main(): parser = argparse.ArgumentParser(description='Generate C++ header file for nerlPlanner') parser.add_argument('-o', '--output', help='output header file path', required=True) @@ -232,6 +289,8 @@ def main(): parser.add_argument('--gen_dc_fields_hrl', help='debug mode', action='store_true') parser.add_argument('--gen_source_fields_hrl', help='debug mode', action='store_true') parser.add_argument('--gen_router_fields_hrl', help='debug mode', action='store_true') + parser.add_argument('--gen_layers_type_hrl', help='debug mode', action='store_true') + parser.add_argument('--gen_models_types_hrl', help='debug mode', action='store_true') args = parser.parse_args() if args.gen_worker_fields_hrl: @@ -242,6 +301,10 @@ def main(): gen_source_fields_hrl(args.output, args.debug) if args.gen_router_fields_hrl: gen_router_fields_hrl(args.output, args.debug) + if args.gen_layers_type_hrl: + gen_layers_type_hrl(args.output, args.debug) + if args.gen_models_types_hrl: + gen_models_types_hrl(args.output, args.debug) if __name__=="__main__": main() diff --git a/src_py/nerlPlanner/JsonElementWorkerDefinitions.py 
b/src_py/nerlPlanner/JsonElementWorkerDefinitions.py index 74b3ed44..fbc7ea78 100644 --- a/src_py/nerlPlanner/JsonElementWorkerDefinitions.py +++ b/src_py/nerlPlanner/JsonElementWorkerDefinitions.py @@ -14,7 +14,8 @@ ("Probabilistic" , "5"), ("LSTM" , "6"), ("Reccurrent" , "7"), - ("Unscaling" , "8")] + ("Unscaling" , "8"), + ("Bounding" , "9")] ) ProbabilisticActivationFunctionMap = OrderedDict( @@ -24,6 +25,11 @@ ("Softmax" , "4")] ) +BoundingMethodMap = OrderedDict( + [("none" , "1"), + ("bounding" , "2")] +) + ScalingMethodMap = OrderedDict( [("none" , "1"), ("MinMax" , "2"), @@ -72,17 +78,18 @@ ("Unscaling" , UnScalingMethodMap)] ) -ModelTypeMapping = OrderedDict([ +# Model type name should be suitable with erlang atoms convention! +ModelTypeMapping = OrderedDict([ ("nn" , "0"), ("approximation" , "1"), ("classification" , "2"), ("forecasting" , "3"), - ("image-classification" , "4"), - ("text-classification" , "5"), - ("text-generation" , "6"), - ("auto-association" , "7"), + ("image_classification" , "4"), + ("text_classification" , "5"), + ("text_generation" , "6"), + ("auto_association" , "7"), ("autoencoder" , "8"), - ("ae-classifier" , "9") + ("ae_classifier" , "9") ]) OptimizerTypeMapping = OrderedDict([ diff --git a/src_py/nerlPlanner/WinWorkerDialog.py b/src_py/nerlPlanner/WinWorkerDialog.py index 2dc2f179..3957eb60 100644 --- a/src_py/nerlPlanner/WinWorkerDialog.py +++ b/src_py/nerlPlanner/WinWorkerDialog.py @@ -155,8 +155,9 @@ def ui_update_all_values(WorkerWindow): ActivationDictStr = f'Activation:\n{pretty_print_dict(ActivationFunctionsMap)}' PoolingDictStr = f'Pooling:\n{pretty_print_dict(PoolingMethodMap)}' ScalerDictStr = f'Scaler:\n{pretty_print_dict(ScalingMethodMap)}' + BoundingDictStr = f'Bounding:\n{pretty_print_dict(BoundingMethodMap)}' ProbabilisticDictStr = f'Probabilistic:\n{pretty_print_dict(ProbabilisticActivationFunctionMap)}' - sg.popup_ok(f"Layer Functions 
Codes:\n{ActivationDictStr}\n{PoolingDictStr}\n{ScalerDictStr}\n{ProbabilisticDictStr}", keep_on_top=True, title="Layer Type Codes") + sg.popup_ok(f"Layer Functions Codes:\n{ActivationDictStr}\n{PoolingDictStr}\n{ScalerDictStr}\n{BoundingDictStr}\n{ProbabilisticDictStr}", keep_on_top=True, title="Layer Type Codes") if event == KEY_LEARNING_RATE_INPUT: LearningRate = values[event] @@ -250,14 +251,15 @@ def ui_update_all_values(WorkerWindow): def LayerMethodSelection(): global global_layer_method_selection_code - layout = [[sg.Text("Activation",expand_x=True), sg.Text('Pooling', expand_x=True), sg.Text('Scaler', expand_x=True), sg.Text('Probabilistic', expand_x=True)], + layout = [[sg.Text("Activation",expand_x=True), sg.Text('Pooling', expand_x=True), sg.Text('Scaler', expand_x=True),sg.Text('Bounding' , expand_x=True), sg.Text('Probabilistic', expand_x=True)], [sg.Listbox(list(ActivationFunctionsMap.keys()), size=(20,15), enable_events=True, key=KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_ACTIVATION), sg.Listbox(list(PoolingMethodMap.keys()),size=(20,15), enable_events=True, key=KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_POOLING), sg.Listbox(list(ScalingMethodMap.keys()),size=(20,15), enable_events=True, key=KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_SCALER), + sg.Listbox(list(BoundingMethodMap.keys()),size=(20,15), enable_events=True, key=KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_BOUNDING), sg.Listbox(list(ProbabilisticActivationFunctionMap.keys()),size=(20,15), enable_events=True, key=KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_PROBABILISTIC)], [sg.Text('Selection', expand_x=True, enable_events=True, key=KEY_LAYER_METHOD_SELECTION_TEXT),sg.Button('Select', expand_x=True, key=KEY_LAYER_METHOD_SELECTION_BUTTON)]] - layer_selection_win = sg.Window(title="Layer Method Selection", layout=layout, modal=True, keep_on_top=True) + layer_selection_win = sg.Window(title="Layer Method Selection", layout=layout, modal=True) while True: @@ -283,6 +285,11 @@ def 
LayerMethodSelection(): global_layer_method_selection_code = ProbabilisticActivationFunctionMap[layer_method_selection] layer_selection_win[KEY_LAYER_METHOD_SELECTION_TEXT].update(f'Selected {layer_method_selection} code: {global_layer_method_selection_code}') + if event == KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_BOUNDING: + layer_method_selection = values[KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_BOUNDING][0] + global_layer_method_selection_code = BoundingMethodMap[layer_method_selection] + layer_selection_win[KEY_LAYER_METHOD_SELECTION_TEXT].update(f'Selected {layer_method_selection} code: {global_layer_method_selection_code}') + if event == KEY_LAYER_METHOD_SELECTION_BUTTON: break diff --git a/src_py/nerlPlanner/WinWorkerDialogDefnitions.py b/src_py/nerlPlanner/WinWorkerDialogDefnitions.py index 3b0347e9..c845b9db 100644 --- a/src_py/nerlPlanner/WinWorkerDialogDefnitions.py +++ b/src_py/nerlPlanner/WinWorkerDialogDefnitions.py @@ -51,6 +51,7 @@ KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_ACTIVATION = '-LAYER-METHOD-SELECTION-DIALOG-LISTBOX-ACTIVATION-' KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_POOLING = '-LAYER-METHOD-SELECTION-DIALOG-LISTBOX-POOLING-' KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_SCALER = '-LAYER-METHOD-SELECTION-DIALOG-LISTBOX-SCALER-' +KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_BOUNDING = '-LAYER-METHOD-SELECTION-DIALOG-LISTBOX-BOUNDING-' KEY_LAYER_METHOD_SELECTION_DIALOG_LISTBOX_PROBABILISTIC = '-LAYER-METHOD-SELECTION-DIALOG-LISTBOX-PROBABILISTIC-' KEY_LAYER_METHOD_SELECTION_TEXT = '-LAYER-METHOD-SELECTION-TEXT-' KEY_LAYER_METHOD_SELECTION_BUTTON = '-LAYER-METHOD-SELECTION-BUTTON-' diff --git a/tests/NerlnetNifTest.sh b/tests/NerlnetNifTest.sh index 994f9d4f..62963ee1 100755 --- a/tests/NerlnetNifTest.sh +++ b/tests/NerlnetNifTest.sh @@ -11,7 +11,6 @@ LOG_FILE="nerlnet_test-$NOW.log" TEST_LOG_PATH="/usr/local/lib/nerlnet-lib/log" TEST_LOG_FILE_PATH="$TEST_LOG_PATH/$LOG_FILE" ERL_BRIDGE_SOURCE_PATH="$NERLNET_PATH/src_erl/NerlnetApp/src/Bridge" - 
NERLNET_BUILD_DIR="$NERLNET_PATH/build" NERLNET_TEST_DIR="$NERLNET_BUILD_DIR/test" @@ -35,6 +34,9 @@ cp $ERL_BRIDGE_SOURCE_PATH/nerlNIF.erl $NERLNET_TEST_DIR/nerlNIF.erl cp $ERL_BRIDGE_SOURCE_PATH/nerl.erl $NERLNET_TEST_DIR/nerl.erl cp $ERL_BRIDGE_SOURCE_PATH/nerlTensor.hrl $NERLNET_TEST_DIR/nerlTensor.hrl cp $ERL_BRIDGE_SOURCE_PATH/nerlTensor.erl $NERLNET_TEST_DIR/nerlTensor.erl +cp $ERL_BRIDGE_SOURCE_PATH/neural_networks_testing_models.hrl $NERLNET_TEST_DIR/neural_networks_testing_models.hrl +cp $ERL_BRIDGE_SOURCE_PATH/layers_types_ag.hrl $NERLNET_TEST_DIR/layers_types_ag.hrl +cp $ERL_BRIDGE_SOURCE_PATH/models_types_ag.hrl $NERLNET_TEST_DIR/models_types_ag.hrl print "Starting compilation: " # only for raspberry