#include <iostream>
#include <ctime>
#include <cstdlib> // srand, rand, RAND_MAX, exit
#include <cmath>
#include <forward_list>
#include "myclasses.h"

using namespace std;

Neuron::Neuron(int prev_layer_size)
{
    for(int i(1) ; i<=prev_layer_size ; i++)
    {
        //weights.push_front(Tools::get_random(0.0, 1.0));
        weights.push_front(1.0);
    }
    bias = 0.1;
    weighted_sum = 0.0;
    activated_output = 0.0;
    derror = 0.0;
}

void Neuron::set_activated_output(float value)
{
    activated_output = value;
}

float Neuron::get_weighted_sum()
{
    return weighted_sum;
}

float Neuron::get_activated_output()
{
    return activated_output;
}

void Neuron::set_derror(float value)
{
    derror = value;
}

float Neuron::get_derror()
{
    return derror;
}

void Neuron::set_nth_weight(int n, float value)
{
    int i=1;
    forward_list<float>::iterator current_weight(weights.begin());
    while(i<n)
    {
        ++current_weight;
        i++;
    }
    *current_weight = value;
}

float Neuron::get_nth_weight(int n)
{
    int i=1;
    forward_list<float>::iterator current_weight(weights.begin());
    while(i<n)
    {
        ++current_weight;
        i++;
    }
    return *current_weight;
}

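// Forward pass for a single neuron: starting from the bias, accumulate the
// weighted sum z = b + sum_i( w_i * a_i ), where a_i is the activated output
// of the i-th neuron of the previous layer (prev_layer_it is advanced once
// per weight), then apply the chosen activation: a = f(z).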
void Neuron::activate(forward_list<Neuron>::iterator &prev_layer_it, Activ activ_function)
{
    weighted_sum = bias;
    for(forward_list<float>::iterator it(weights.begin()) ; it!=weights.end() ; ++it)
    {
        weighted_sum += (*it) * (prev_layer_it->activated_output);
        ++prev_layer_it;
    }
    activated_output = Tools::activation_function(activ_function, weighted_sum);
}

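// Builds a network of n_layers layers with n_neurons neurons each.
// Input-layer neurons carry no weights (Neuron(0)); every other neuron holds
// one weight per neuron of the previous layer, which here always has
// n_neurons neurons.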
Network::Network(int n_layers, int n_neurons)
{
    for(int i(1) ; i<=n_layers ; i++)
    {
        forward_list<Neuron> current_layer;
        for(int j(1) ; j<=n_neurons ; j++)
        {
            if(i==1)
            {
                current_layer.push_front( Neuron(0) );
            }else
            {
                current_layer.push_front( Neuron(n_neurons) );
            }
        }
        layers.push_back(current_layer);
    }
    h_activ = RELU;
    //o_activ = SIGMOID;
    o_activ = LINEAR;
}

Network::Network(const std::vector<int> &n_neurons, Activ h_activ, Activ o_activ)
{
    for(size_t i(0) ; i<n_neurons.size() ; i++)
    {
        forward_list<Neuron> current_layer;
        for(int j(1) ; j<=n_neurons[i] ; j++)
        {
            if(i==0)
            {
                current_layer.push_front( Neuron(0) );
            }else
            {
                current_layer.push_front( Neuron(n_neurons[i-1]) );
            }
        }
        layers.push_back(current_layer);
    }
    this->h_activ = h_activ; //the parameters shadow the members, so qualify with this->
    this->o_activ = o_activ;
}

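// One forward pass over the whole network: the first layer simply copies the
// input vector into its neurons' activated outputs, hidden layers are
// activated with h_activ, and the last layer with o_activ. The per-neuron
// error terms for the given target are then computed via set_errors().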
bool Network::forward(const std::vector<float> &input, const std::vector<float> &target)
{
    int layer_counter = 0;
    for(list<forward_list<Neuron>>::iterator current_layer(layers.begin()) ; current_layer!=layers.end() ; ++current_layer)
    {//inside current layer
        layer_counter++;
        if(layer_counter==1)
        {
            int i=0;
            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
            {//inside current neuron
                current_neuron->set_activated_output( input.at(i) );
                i++;
            }
        }else if(layer_counter==(int)layers.size())
        {
            list<forward_list<Neuron>>::iterator temp_prev_layer = current_layer; //temp_prev_layer set at current layer
            temp_prev_layer--; //temp_prev_layer now set at previous layer
            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
            {//inside current neuron
                forward_list<Neuron>::iterator prev_layer_it(temp_prev_layer->begin());
                current_neuron->activate(prev_layer_it, o_activ);
            }
        }else
        {
            list<forward_list<Neuron>>::iterator temp_prev_layer = current_layer; //temp_prev_layer set at current layer
            temp_prev_layer--; //temp_prev_layer now set at previous layer
            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
            {//inside current neuron
                forward_list<Neuron>::iterator prev_layer_it(temp_prev_layer->begin());
                current_neuron->activate(prev_layer_it, h_activ);
            }
        }
    }
    set_errors(target);
    return true;
}

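// Backpropagation of the error terms, walking the layers in reverse:
//   output layer : derror = (a - target) * f'(z)             with f = o_activ
//   hidden layers: derror = ( sum_k delta_k * w_k ) * f'(z)   with f = h_activ,
// where the sum runs over the neurons k of the next layer and w_k is the
// weight connecting the current neuron to neuron k. The input layer carries
// no error term.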
bool Network::set_errors(const std::vector<float> &target)
{
    int layer_counter = (int)layers.size()+1;
    for(list<forward_list<Neuron>>::reverse_iterator current_layer(layers.rbegin()) ; current_layer!=layers.rend() ; ++current_layer)
    {//inside current layer
        layer_counter--;
        if(layer_counter==(int)layers.size())
        {
            int i=0;
            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
            {//inside current neuron
                current_neuron->set_derror( (current_neuron->get_activated_output()-target.at(i))*Tools::activation_function_derivative(o_activ,current_neuron->get_weighted_sum()) );
                i++;
            }
        }else if(layer_counter>1) //all hidden layers
        {
            list<forward_list<Neuron>>::reverse_iterator temp_next_layer = current_layer; //temp_next_layer set at current layer
            temp_next_layer--; //temp_next_layer now set at next layer
            int neuron_counter=0;
            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
            {//inside current neuron
                neuron_counter++;
                current_neuron->set_derror(0.0);
                for(forward_list<Neuron>::iterator next_layer_current_neuron(temp_next_layer->begin()) ; next_layer_current_neuron!=temp_next_layer->end() ; ++next_layer_current_neuron)
                {
                    current_neuron->set_derror( current_neuron->get_derror()+next_layer_current_neuron->get_derror()*next_layer_current_neuron->get_nth_weight(neuron_counter) );
                }
                current_neuron->set_derror( current_neuron->get_derror()*Tools::activation_function_derivative(h_activ,current_neuron->get_weighted_sum()) );
            }
        }
    }
    return true;
}

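// Gradient-descent update of the weights, using the error terms computed by
// set_errors(): for a neuron with error term derror, the weight coming from
// the i-th neuron of the previous layer is adjusted as
//   w_i <- w_i - learning_rate * derror * a_i_prev
// where a_i_prev is that previous neuron's activated output. The input layer
// has no weights and is skipped. (The bias is left unchanged here, as Neuron
// exposes no bias setter in this file.)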
bool Network::backward(float learning_rate)
{
    int layer_counter = (int)layers.size()+1;
    for(list<forward_list<Neuron>>::reverse_iterator current_layer(layers.rbegin()) ; current_layer!=layers.rend() ; ++current_layer)
    {//inside current layer
        layer_counter--;
        if(layer_counter>1) //all layers except input layer
        {
            list<forward_list<Neuron>>::reverse_iterator temp_prev_layer = current_layer; //temp_prev_layer set at current layer
            temp_prev_layer++; //temp_prev_layer now set at previous layer
            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
            {//inside current neuron
                int weight_counter=0;
                for(forward_list<Neuron>::iterator prev_layer_current_neuron(temp_prev_layer->begin()) ; prev_layer_current_neuron!=temp_prev_layer->end() ; ++prev_layer_current_neuron)
                {
                    weight_counter++;
                    //w_i <- w_i - learning_rate * derror * a_i_prev
                    float gradient = current_neuron->get_derror() * prev_layer_current_neuron->get_activated_output();
                    current_neuron->set_nth_weight( weight_counter, current_neuron->get_nth_weight(weight_counter) - learning_rate*gradient );
                }
            }
        }
    }
    return true;
}

float Network::predict(const std::vector<float> &input)
{
    //TODO: not implemented yet, always returns 0.0
    return 0.0;
}

void Network::print()
{
    cout << endl << "#>>==========================================<<#" << endl;
    cout << "# NEURAL NETWORK #" << endl;
    cout << "#>>==========================================<<#" << endl;
    cout << ">> Number of layers : " << layers.size() << endl;
    cout << "------------------------------------------------" << endl;
    int layer_counter = 0;
    int prev_layer_size_temp = 0, params_counter = 0;
    for(list<forward_list<Neuron>>::iterator it1(layers.begin()) ; it1!=layers.end() ; ++it1)
    {
        layer_counter++;
        int current_layer_size = 0;
        for(forward_list<Neuron>::iterator it2(it1->begin()) ; it2!=it1->end() ; ++it2)
        {
            current_layer_size++;
        }
        if(layer_counter==1)
        {
            prev_layer_size_temp = current_layer_size;
        }
        else
        {
            params_counter += (prev_layer_size_temp+1)*current_layer_size;
            prev_layer_size_temp = current_layer_size;
        }
        if(layer_counter==1)
        {
            cout << ">> Input layer" << endl;
            cout << "size : " << current_layer_size << endl;
            cout << "neurons' activations : ";
            for(forward_list<Neuron>::iterator it2(it1->begin()) ; it2!=it1->end() ; ++it2){cout << it2->get_activated_output() << " ";}
            cout << endl;
        }else if(layer_counter==(int)layers.size())
        {
            cout << (">> Output layer\n");
            cout << "size : " << current_layer_size << endl;
            cout << ("neurons' activations : ");
            //for(forward_list<Neuron>::iterator it2(it1->begin()) ; it2!=it1->end() ; ++it2){cout << it2->get_activated_output() << " ";}
            for(forward_list<Neuron>::iterator it2(it1->begin()) ; it2!=it1->end() ; ++it2){cout << it2->get_activated_output() << " " << it2->get_derror() << endl; for(int i=1;i<=3;i++){cout << it2->get_nth_weight(i) << " ";}cout<<endl;}//to be deleted
            cout << endl;
        }else
        {
            cout << ">> Hidden layer " << layer_counter-1 << endl;
            cout << "size : " << current_layer_size << endl;
            for(forward_list<Neuron>::iterator it2(it1->begin()) ; it2!=it1->end() ; ++it2){cout << it2->get_activated_output() << " " << it2->get_derror() << endl;}//to be deleted
        }
        cout << "------------------------------------------------" << endl;
    }
    cout << "Number of parameters : ";
    cout << params_counter << endl;
    cout << "#>>==========================================<<#" << endl << endl;
}

void Tools::activate_randomness()
{
    srand(time(NULL));
}

float Tools::get_random(float mini, float maxi)
{
    return mini + ((float)rand()/(float)RAND_MAX) * (maxi-mini);
}

float Tools::activation_function(Activ activ, float value)
{
    Tools t;
    switch(activ)
    {
        case RELU:
            return t.relu(value);

        case SIGMOID:
            return t.sigmoid(value);

        case TANH:
            return tanh(value);

        case LINEAR:
            return value;

        default:
            exit(-1);
    }
}

float Tools::activation_function_derivative(Activ activ, float value)
{
    Tools t;
    switch(activ)
    {
        case RELU:
            return t.relu_derivative(value);

        case SIGMOID:
            return t.sigmoid_derivative(value);

        case TANH:
            return t.tanh_derivative(value);

        case LINEAR:
            return 1.0;

        default:
            exit(-1);
    }
}

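// The derivative helpers below are taken with respect to the pre-activation
// value z (the weighted sum), as used by set_errors():
//   relu'(z)    = 1 if z > 0, else 0
//   sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z))
//   tanh'(z)    = 1 - tanh(z)^2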
float Tools::relu(float value)
{
    return (value > 0.0) ? value : 0.0;
}

float Tools::sigmoid(float value)
{
    return 1.0 / (1.0 + exp(-value));
}

float Tools::relu_derivative(float value)
{
    return (value > 0.0) ? 1.0 : 0.0;
}

float Tools::sigmoid_derivative(float value)
{
    return sigmoid(value) * (1.0 - sigmoid(value));
}

float Tools::tanh_derivative(float value)
{
    return 1.0 - (tanh(value) * tanh(value));
}

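/*
Example usage (a minimal sketch; assumes the declarations in myclasses.h match
the definitions above and would normally live in a separate main.cpp):

    Tools::activate_randomness();
    Network net(std::vector<int>{2, 3, 1}, RELU, LINEAR); // 2 inputs, 3 hidden neurons, 1 output
    net.forward({0.5f, -0.2f}, {1.0f});                   // forward pass + error terms
    net.backward(0.01f);                                  // gradient-descent weight update
    net.print();
*/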