Updating neuron and network class functions
parent 639694d05c · commit f7193b3e33
3 changed files with 229 additions and 54 deletions
main.cpp (35 changes)
@@ -5,6 +5,7 @@
 #include <vector>
+#include <iterator>
 
 using namespace std;
 
@@ -14,20 +15,28 @@ int main(int argc, char *argv[])
 
     cout << "Bonjour et bienvenu" << endl;
 
     /*Neuron n0(3,SIGMOID);
 
     Neuron n1(3,RELU);n1.set_output(1.0);
     Neuron n2(3,RELU);n2.set_output(2.0);
     Neuron n3(3,RELU);n3.set_output(-3.0);
     forward_list<Neuron> fl;
     fl.push_front(n1);fl.push_front(n2);fl.push_front(n3);
     forward_list<Neuron>::iterator it(fl.begin());
 
     n0.activate(it);
     cout << "is = " << n0.get_output() << endl;*/
 
-    Network network(4, 5);
+    Network network(2, 5);
+    network.forward({1.0,1.0,1.0,1.0,1.0}, {1.0,1.0,1.0,1.0,1.0});
     network.print();
 
     /*Neuron n(3), n1(1), n2(1), n3(1);
     forward_list<Neuron> fl;
     fl.push_front(n1);
     fl.push_front(n2);
     fl.push_front(n3);
     forward_list<Neuron>::iterator it(fl.begin());
     n.activate(it, LINEAR);
     cout << "weighted sum = " << n.get_weighted_sum() << endl;*/
 
     /*list<float> l;
     l.push_back(1.0);
     l.push_back(2.0);
     l.push_back(3.0);
     for(list<float>::reverse_iterator it(l.rbegin()) ; it!=l.rend() ; ++it)
     {
         cout << *it << endl;
     }*/
 
     return 0;
 }
myclasses.cpp (216 changes)
@@ -10,51 +10,51 @@ Neuron::Neuron(int prev_layer_size)
 {
     for(int i(1) ; i<=prev_layer_size ; i++)
     {
-        weights.push_front(Tools::get_random(0.0, 1.0));
+        //weights.push_front(Tools::get_random(0.0, 1.0));
+        weights.push_front(1.0);
     }
     bias = 0.1;
     output = 0.0;
     weighted_sum = 0.0;
+    activated_output = 0.0;
+    derror = 0.0;
 }
 
-void Neuron::set_output(float value)
+void Neuron::set_activated_output(float value)
 {
-    output = value;
+    activated_output = value;
 }
 
 float Neuron::get_weighted_sum()
 {
     return weighted_sum;
 }
 
+float Neuron::get_activated_output()
+{
+    return activated_output;
+}
+
+void Neuron::set_derror(float value)
+{
+    derror = value;
+}
+
+float Neuron::get_derror()
+{
+    return derror;
+}
+
 void Neuron::activate(forward_list<Neuron>::iterator &prev_layer_it, Activ activ_function)
 {
-    output = bias;
+    weighted_sum = bias;
     for(forward_list<float>::iterator it(weights.begin()) ; it!=weights.end() ; ++it)
     {
-        output += (*it) * ((*prev_layer_it).output);
+        weighted_sum += (*it) * (prev_layer_it->activated_output);
         prev_layer_it++;
     }
-
-    switch(activ_function)
-    {
-        case RELU:
-            output = (output > 0.0) ? output : 0.0;
-            break;
-
-        case SIGMOID:
-            output = 1.0 / (1.0 + exp(-output));
-            break;
-
-        case TANH:
-            output = tanh(output);
-            break;
-
-        default:
-            //LINEAR (output=direct weighted sum) as base behavior
-            break;
-    }
+    activated_output = Tools::activation_function(activ_function, weighted_sum);
 }
 
 float Neuron::get_output()//to be deleted later
 {
     return output;
 }
 
 Network::Network(int n_layers, int n_neurons)
 {
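The rewritten activate() splits the neuron update into two stored quantities: the pre-activation weighted_sum = bias + Σ(w_i · prev_activation_i), and activated_output = f(weighted_sum), with f delegated to Tools::activation_function. A minimal standalone sketch of that arithmetic, reusing the constructor's fixed weights of 1.0 and bias of 0.1 (illustration only, not code from the commit):

// Standalone sketch of the computation Neuron::activate now performs;
// it mirrors the class logic but does not depend on the classes.
#include <iostream>
#include <vector>

int main()
{
    std::vector<float> weights{1.0f, 1.0f, 1.0f};            // as set by Neuron::Neuron(3)
    std::vector<float> prev_activations{1.0f, 2.0f, -3.0f};  // previous layer's activated outputs
    float bias = 0.1f;

    // Pre-activation: weighted_sum = bias + sum(w_i * a_i_prev)
    float weighted_sum = bias;
    for(std::size_t i = 0; i < weights.size(); ++i)
        weighted_sum += weights[i] * prev_activations[i];

    // Activation: the RELU case of Tools::activation_function
    float activated_output = (weighted_sum > 0.0f) ? weighted_sum : 0.0f;

    // 0.1 + (1 + 2 - 3) = 0.1, and RELU(0.1) = 0.1
    std::cout << "weighted_sum = " << weighted_sum
              << ", activated_output = " << activated_output << std::endl;
    return 0;
}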
@@ -77,7 +77,8 @@ Network::Network(int n_layers, int n_neurons)
         layers.push_back(current_layer);
     }
     h_activ = RELU;
-    o_activ = SIGMOID;
+    //o_activ = SIGMOID;
+    o_activ = LINEAR;
 }
@@ -104,6 +105,84 @@ Network::Network(const std::vector<int> &n_neurons, Activ h_activ, Activ o_activ)
     o_activ = o_activ;
 }
 
+bool Network::forward(const std::vector<float> &input, const std::vector<float> &target)
+{
+    int layer_counter = 0;
+    for(list<forward_list<Neuron>>::iterator current_layer(layers.begin()) ; current_layer!=layers.end() ; ++current_layer)
+    {//inside current layer
+        layer_counter++;
+        if(layer_counter==1)
+        {
+            int i=0;
+            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
+            {//inside current neuron
+                current_neuron->set_activated_output( input.at(i) );
+                i++;
+            }
+        }else if(layer_counter==layers.size())
+        {
+            list<forward_list<Neuron>>::iterator temp = current_layer;
+            temp--; //previous layer
+            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
+            {//inside current neuron
+                forward_list<Neuron>::iterator prev_layer_it(temp->begin());
+                current_neuron->activate(prev_layer_it, o_activ);
+            }
+        }else
+        {
+            list<forward_list<Neuron>>::iterator temp_prev_layer = current_layer; //temp_prev_layer set at current layer
+            temp_prev_layer--; //temp_prev_layer set now at previous layer
+            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
+            {//inside current neuron
+                forward_list<Neuron>::iterator prev_layer_it(temp_prev_layer->begin());
+                current_neuron->activate(prev_layer_it, h_activ);
+            }
+        }
+    }
+    return true;
+}
+
+bool Network::set_errors(const std::vector<float> &target)
+{
+    int layer_counter = layers.size()+1;
+    for(list<forward_list<Neuron>>::reverse_iterator current_layer(layers.rbegin()) ; current_layer!=layers.rend() ; ++current_layer)
+    {//inside current layer
+        layer_counter--;
+        if(layer_counter==layers.size())
+        {
+            int i=0;
+            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
+            {//inside current neuron
+                current_neuron->set_derror( (current_neuron->get_activated_output()-target.at(i))*Tools::activation_function_derivative(o_activ,current_neuron->get_weighted_sum()) );
+                i++;
+            }
+        }else if(layer_counter>1) //all hidden layers
+        {
+            list<forward_list<Neuron>>::reverse_iterator temp_next_layer = current_layer; //temp_next_layer set at current layer
+            temp_next_layer--; //temp_next_layer set now at next layer
+            for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
+            {//inside current neuron
+                current_neuron->set_derror(0.0);
+                for(forward_list<Neuron>::iterator next_layer_current_neuron(temp_next_layer->begin()) ; next_layer_current_neuron!=temp_next_layer->end() ; ++next_layer_current_neuron)
+                {
+                    //
+                }
+            }
+        }
+    }
+    return true;
+}
+
+bool Network::backward(float learning_rate)
+{
+    return true;
+}
+
+float Network::predict(const std::vector<float> &input)
+{
+    return 0.0;
+}
+
 void Network::print()
 {
     cout << endl << "#>>==========================================<<#" << endl;
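set_errors() fills the output layer's derror with (activation − target) · f′(weighted_sum); the inner loop over the next layer is still stubbed out. For orientation, a self-contained sketch of the standard backpropagation recurrence such a loop would typically compute, δ_j = f′(z_j) · Σ_k (w_kj · δ_k). Every name and value below is hypothetical, since Neuron does not currently expose its weights; this is not code from the commit:

// Hypothetical sketch of the hidden-layer delta recurrence, on plain vectors.
// next_weights[k][j] is the assumed weight from hidden neuron j into
// next-layer neuron k; none of these containers exist in the repository.
#include <iostream>
#include <vector>

float relu_derivative(float z) { return (z > 0.0f) ? 1.0f : 0.0f; }

int main()
{
    std::vector<std::vector<float>> next_weights{{0.5f, -1.0f}, {2.0f, 0.25f}}; // 2 next-layer, 2 hidden neurons
    std::vector<float> next_derror{0.2f, -0.1f}; // delta_k, already set for the next layer
    std::vector<float> hidden_z{0.3f, -0.4f};    // weighted sums of the hidden layer

    for(std::size_t j = 0; j < hidden_z.size(); ++j)
    {
        float sum = 0.0f;
        for(std::size_t k = 0; k < next_derror.size(); ++k)
            sum += next_weights[k][j] * next_derror[k]; // w_kj * delta_k
        float delta_j = relu_derivative(hidden_z[j]) * sum; // f'(z_j) * sum
        std::cout << "delta_" << j << " = " << delta_j << std::endl; // -0.1, then 0
    }
    return 0;
}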
@@ -134,15 +213,15 @@ void Network::print()
     {
         cout << ">> Input layer" << endl;
         cout << "size : " << current_layer_size << endl;
-        cout << "neurons' outputs : ";
-        for(forward_list<Neuron>::iterator it2(it1->begin()) ; it2!=it1->end() ; ++it2){it2->get_output();}
+        cout << "neurons' activations : ";
+        for(forward_list<Neuron>::iterator it2(it1->begin()) ; it2!=it1->end() ; ++it2){cout << it2->get_activated_output() << " ";}
         cout << endl;
     }else if(layer_counter==layers.size())
     {
         cout << (">> Output layer\n");
         cout << "size : " << current_layer_size << endl;
-        cout << ("neurons' outputs : ");
-        for(forward_list<Neuron>::iterator it2(it1->begin()) ; it2!=it1->end() ; ++it2){it2->get_output();}
+        cout << ("neurons' activations : ");
+        for(forward_list<Neuron>::iterator it2(it1->begin()) ; it2!=it1->end() ; ++it2){cout << it2->get_activated_output() << " ";}
         cout << endl;
     }else
     {
@@ -164,4 +243,71 @@ void Tools::activate_randomness()
 float Tools::get_random(float mini, float maxi)
 {
     return mini + ((float)rand()/(float)RAND_MAX) * (maxi-mini);
 }
+
+float Tools::activation_function(Activ activ, float value)
+{
+    Tools t;
+    switch(activ)
+    {
+        case RELU:
+            return t.relu(value);
+
+        case SIGMOID:
+            return t.sigmoid(value);
+
+        case TANH:
+            return tanh(value);
+
+        case LINEAR:
+            return value;
+
+        default:
+            exit(-1);
+    }
+}
+
+float Tools::activation_function_derivative(Activ activ, float value)
+{
+    Tools t;
+    switch(activ)
+    {
+        case RELU:
+            return t.relu_derivative(value);
+
+        case SIGMOID:
+            return t.sigmoid_derivative(value);
+
+        case TANH:
+            return t.tanh_derivative(value);
+
+        case LINEAR:
+            return 1.0;
+
+        default:
+            exit(-1);
+    }
+}
+
+float Tools::relu(float value)
+{
+    return (value > 0.0) ? value : 0.0;
+}
+
+float Tools::sigmoid(float value)
+{
+    return 1.0 / (1.0 + exp(-value));
+}
+
+float Tools::relu_derivative(float value)
+{
+    return (value > 0.0) ? 1.0 : 0.0;
+}
+
+float Tools::sigmoid_derivative(float value)
+{
+    return sigmoid(value) * (1.0 - sigmoid(value));
+}
+
+float Tools::tanh_derivative(float value)
+{
+    return 1.0 - (tanh(value) * tanh(value));
+}
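Each activation now ships with its analytic derivative: relu′ is a step function, sigmoid′(v) = σ(v)(1 − σ(v)), tanh′(v) = 1 − tanh²(v), linear′ = 1. A quick standalone sanity check against a central finite difference, with the two functions copied out so the snippet compiles on its own (not part of the commit):

// Numeric check of the derivative helpers; known values: sigmoid'(0) = 0.25, tanh'(0) = 1.
#include <cmath>
#include <iostream>

static float sigmoid(float v) { return 1.0f / (1.0f + std::exp(-v)); }
static float sigmoid_derivative(float v) { return sigmoid(v) * (1.0f - sigmoid(v)); }
static float tanh_derivative(float v) { return 1.0f - std::tanh(v) * std::tanh(v); }

int main()
{
    const float x = 0.0f, h = 1e-3f;
    // Central finite differences: (f(x+h) - f(x-h)) / (2h)
    float fd_sigmoid = (sigmoid(x + h) - sigmoid(x - h)) / (2.0f * h);
    float fd_tanh = (std::tanh(x + h) - std::tanh(x - h)) / (2.0f * h);
    std::cout << "sigmoid': analytic " << sigmoid_derivative(x)
              << " vs numeric " << fd_sigmoid << std::endl; // both ~0.25
    std::cout << "tanh':    analytic " << tanh_derivative(x)
              << " vs numeric " << fd_tanh << std::endl;    // both ~1.0
    return 0;
}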
myclasses.h (32 changes)
@@ -15,13 +15,18 @@ class Neuron
 {
 public:
     Neuron(int prev_layer_size); //prev_layer_size = number of weights
-    void set_output(float value);
     float get_output();//to be deleted
+    //void set_weighted_sum(float weighted_sum);
     float get_weighted_sum();
+    void set_activated_output(float value);
+    float get_activated_output();
+    void set_derror(float value);
+    float get_derror();
     void activate(std::forward_list<Neuron>::iterator &prev_layer_it, Activ activ_function=LINEAR);
 private:
     std::forward_list<float> weights;
     float bias;
     float output;
     float weighted_sum;
+    float activated_output;
+    float derror;
 };
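The accessor pairs added here distinguish the stored pre-activation (weighted_sum) from the post-activation value (activated_output); set_activated_output is what Network::forward uses to inject raw inputs into the first layer. A minimal round-trip check, assuming the header as declared (hypothetical, not part of the commit):

// Hypothetical check; assumes myclasses.h builds as declared above.
#include "myclasses.h"
#include <cassert>

int main()
{
    Neuron n(3);                    // 3 weights, all 1.0, bias 0.1 per the constructor
    n.set_activated_output(2.5f);   // as done for input-layer neurons
    assert(n.get_activated_output() == 2.5f); // assignment round-trips exactly
    assert(n.get_derror() == 0.0f);           // initialized by the constructor
    return 0;
}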
@@ -31,15 +36,21 @@ class Network
 public:
     Network(int n_layers, int n_neurons);
     Network(const std::vector<int> &n_neurons, Activ h_activ=RELU, Activ o_activ=SIGMOID);
 
+    float predict(const std::vector<float> &input);
     void print();
 
+    //to be deleted
+    bool forward(const std::vector<float> &input, const std::vector<float> &target);
+    bool backward();
+    bool set_errors(const std::vector<float> &target);
 private:
     std::list<std::forward_list<Neuron>> layers;
     Activ h_activ;
     Activ o_activ;
 
     bool _set_errors();
+    //bool forward(const std::vector<float> &input, const std::vector<float> &target);
+    //bool set_errors(const std::vector<float> &target);
     bool backward(float learning_rate);
 };
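The public forward/backward/set_errors trio is marked "to be deleted", so it is presumably exposed only for testing while training is wired up. A hypothetical sketch of how a caller might drive a training step once the weight update exists; the learning rate, epoch count, and include path are assumptions, not values from this repository:

// Hypothetical driver; assumes the Network declared in myclasses.h.
#include "myclasses.h"
#include <vector>

int main()
{
    Network network(2, 5); // 2 layers of 5 neurons, as in main.cpp
    std::vector<float> input{1.0f, 1.0f, 1.0f, 1.0f, 1.0f};
    std::vector<float> target{1.0f, 1.0f, 1.0f, 1.0f, 1.0f};

    for(int epoch = 0; epoch < 100; ++epoch) // epoch count is a placeholder
    {
        network.forward(input, target); // propagate activations front to back
        network.set_errors(target);     // fill derror back to front
        // network.backward(0.1f);      // weight update: private and still a stub
    }
    network.print();
    return 0;
}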
@@ -48,8 +59,17 @@ class Tools
 public:
     static void activate_randomness();
     static float get_random(float mini, float maxi);
 
+    //Activation functions and their derivatives
+    static float activation_function(Activ activ, float value);
+    static float activation_function_derivative(Activ activ, float value);
     //float activation_function(Activ activ, float value);
     //float activation_function_derivative(Activ activ, float value);
 private:
+    float relu(float value);
+    float sigmoid(float value);
+    float relu_derivative(float value);
+    float sigmoid_derivative(float value);
+    float tanh_derivative(float value);
 };
 
 #endif