Updating predict function to output either the raw output layer maximum or the class index
parent 77be210265
commit cf48d29892

3 changed files with 97 additions and 50 deletions
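The central change (visible in the myclasses.h hunk at the bottom of this page) is a second parameter on Network::predict that selects between the two outputs, defaulting to the old behaviour:

    // as_raw = true  -> value of the largest activated output of the output layer, as-is
    // as_raw = false -> zero-based index of that output neuron, i.e. the predicted class
    float predict(const std::vector<float> &input, bool as_raw=true);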
main.cpp (29 changes)

@@ -16,27 +16,18 @@ int main(int argc, char *argv[])
 	cout << "Bonjour et bienvenu" << endl;
 
-	Network network(3, 3);
-	network.forward({1.0,1.0,1.0}, {1.0,2.0,3.0});
+	Network network(15, 3);
 	network.print();
+	cout << endl << endl;
 
-	/*Neuron n(3), n1(1), n2(1), n3(1);
-	forward_list<Neuron> fl;
-	fl.push_front(n1);
-	fl.push_front(n2);
-	fl.push_front(n3);
-	forward_list<Neuron>::iterator it(fl.begin());
-	n.activate(it, LINEAR);
-	cout << "weighted sum = " << n.get_weighted_sum() << endl;*/
-
-	/*list<float> l;
-	l.push_back(1.0);
-	l.push_back(2.0);
-	l.push_back(3.0);
-	for(list<float>::reverse_iterator it(l.rbegin()) ; it!=l.rend() ; ++it)
+	for(int episode=1;episode<=100000;episode++)
 	{
-		cout << *it << endl;
-	}*/
+		network.forward({1.0,1.0,1.0}, {1.0,2.0,3.0});
+		network.backward(0.001);
+	}
+	//network.print();
+	cout << endl << endl;
+	network.print();
+	cout << "verdict : " << network.predict({1.0,1.0,1.0},false) << endl;
 
 	return 0;
 }
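With main.cpp rewritten this way, the demo builds a 15-layer network of 3 neurons per layer, trains it on a single example for 100000 episodes, and then queries it. A sketch of the two query modes (the comments state expected behaviour, not verified output):

    float raw   = network.predict({1.0,1.0,1.0});        // default as_raw=true: value of the strongest output neuron
    float label = network.predict({1.0,1.0,1.0}, false); // as_raw=false: index of that neuron, printed as "verdict"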
myclasses.cpp (104 changes)

@@ -2,6 +2,7 @@
 #include <ctime>
 #include <cmath>
 #include <forward_list>
+#include <algorithm>
 #include "myclasses.h"
 
 using namespace std;
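The new include is needed for std::max_element, which predict now calls on the output layer. Note that std::distance, also used there, formally lives in <iterator>; it is presumably reached transitively through the other headers, so a strictly portable variant of this hunk would be:

    #include <algorithm>  // std::max_element
    #include <iterator>   // std::distance (not added by the commit; usually pulled in transitively)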
@@ -19,29 +20,14 @@ Neuron::Neuron(int prev_layer_size)
 	derror = 0.0;
 }
 
-void Neuron::set_activated_output(float value)
+void Neuron::set_bias(float value)
 {
-	activated_output = value;
+	bias = value;
 }
 
-float Neuron::get_weighted_sum()
+float Neuron::get_bias()
 {
-	return weighted_sum;
-}
-
-float Neuron::get_activated_output()
-{
-	return activated_output;
-}
-
-void Neuron::set_derror(float value)
-{
-	derror = value;
-}
-
-float Neuron::get_derror()
-{
-	return derror;
+	return bias;
 }
 
 void Neuron::set_nth_weight(int n, float value)

@@ -68,6 +54,31 @@ float Neuron::get_nth_weight(int n)
 	return *current_weight;
 }
 
+float Neuron::get_weighted_sum()
+{
+	return weighted_sum;
+}
+
+void Neuron::set_activated_output(float value)
+{
+	activated_output = value;
+}
+
+float Neuron::get_activated_output()
+{
+	return activated_output;
+}
+
+void Neuron::set_derror(float value)
+{
+	derror = value;
+}
+
+float Neuron::get_derror()
+{
+	return derror;
+}
+
 void Neuron::activate(forward_list<Neuron>::iterator &prev_layer_it, Activ activ_function)
 {
 	weighted_sum = bias;
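The backward hunk below changes what the innermost loop does: instead of accumulating derror from the neighbouring layer, it now applies the gradient-descent step directly, and each neuron's bias is adjusted after its weights. Condensed into a self-contained sketch with hypothetical names (the real code goes through get/set_nth_weight and get/set_bias):

    #include <cstddef>
    #include <vector>

    // Per-neuron SGD step as performed in Network::backward() below:
    //   w_n  <- w_n  - learning_rate * derror * prev_activation_n
    //   bias <- bias - learning_rate * derror
    void sgd_step(std::vector<float> &weights, float &bias, float derror,
                  const std::vector<float> &prev_activation, float learning_rate)
    {
        for (std::size_t n = 0; n < weights.size(); ++n)
            weights[n] -= learning_rate * derror * prev_activation[n];
        bias -= learning_rate * derror;
    }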
@@ -211,24 +222,67 @@ bool Network::backward(float learning_rate)
 		{
 			list<forward_list<Neuron>>::reverse_iterator temp_prev_layer = current_layer; //temp_prev_layer set at current layer
 			temp_prev_layer++; //temp_prev_layer set now at previous layer
-			int neuron_counter=0;
 			for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
 			{//inside current neuron
-				neuron_counter++;
+				int neuron_counter=0;
 
 				for(forward_list<Neuron>::iterator prev_layer_current_neuron(temp_prev_layer->begin()) ; prev_layer_current_neuron!=temp_prev_layer->end() ; ++prev_layer_current_neuron)
 				{
-					//current_neuron->set_nth_weight()
-					current_neuron->set_derror( current_neuron->get_derror()+prev_layer_current_neuron->get_derror()*prev_layer_current_neuron->get_nth_weight(neuron_counter) );
+					neuron_counter++;
+					current_neuron->set_nth_weight( neuron_counter, current_neuron->get_nth_weight(neuron_counter)-learning_rate*current_neuron->get_derror()*prev_layer_current_neuron->get_activated_output() );
 				}
+				current_neuron->set_bias( current_neuron->get_bias()-learning_rate*current_neuron->get_derror() );
 			}
 		}
 	}
 	return true;
 }
 
-float Network::predict(const std::vector<float> &input)
+bool neuron_cmp(Neuron a, Neuron b){return a.get_activated_output()<b.get_activated_output();}
+
+float Network::predict(const std::vector<float> &input, bool as_raw)
 {
-	return 0.0;
+	int layer_counter = 0;
+	for(list<forward_list<Neuron>>::iterator current_layer(layers.begin()) ; current_layer!=layers.end() ; ++current_layer)
+	{//inside current layer
+		layer_counter++;
+		if(layer_counter==1)
+		{
+			int i=0;
+			for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
+			{//inside current neuron
+				current_neuron->set_activated_output( input.at(i) );
+				i++;
+			}
+		}else if(layer_counter==layers.size())
+		{
+			list<forward_list<Neuron>>::iterator temp = current_layer;
+			temp--; //previous layer
+			for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
+			{//inside current neuron
+				forward_list<Neuron>::iterator prev_layer_it(temp->begin());
+				current_neuron->activate(prev_layer_it, o_activ);
+			}
+		}else
+		{
+			list<forward_list<Neuron>>::iterator temp_prev_layer = current_layer; //temp_prev_layer set at current layer
+			temp_prev_layer--; //temp_prev_layer set now at previous layer
+			for(forward_list<Neuron>::iterator current_neuron(current_layer->begin()) ; current_neuron!=current_layer->end() ; ++current_neuron)
+			{//inside current neuron
+				forward_list<Neuron>::iterator prev_layer_it(temp_prev_layer->begin());
+				current_neuron->activate(prev_layer_it, h_activ);
+			}
+		}
+	}
+	list<forward_list<Neuron>>::iterator output_layer = layers.end(); output_layer--;
+	if(as_raw)
+	{
+		return max_element(output_layer->begin(), output_layer->end(), neuron_cmp)->get_activated_output();
+	}else
+	{
+		return distance( output_layer->begin(), max_element(output_layer->begin(),output_layer->end(),neuron_cmp) );
+	}
 }
 
 void Network::print()
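The new predict first runs a full forward pass (layer 1 copies the input, hidden layers activate with h_activ, the last layer with o_activ), then reads the output layer with the standard max/argmax idiom: max_element under neuron_cmp finds the strongest neuron, and distance from begin() turns its position into a class index. A minimal standalone illustration of that idiom on plain floats (values are made up):

    #include <algorithm>
    #include <forward_list>
    #include <iostream>
    #include <iterator>

    int main()
    {
        std::forward_list<float> outputs{0.2f, 0.9f, 0.4f};           // stand-in for the output-layer activations
        auto best = std::max_element(outputs.begin(), outputs.end());
        std::cout << "raw max: " << *best                             // the as_raw=true path -> 0.9
                  << ", class: " << std::distance(outputs.begin(), best) // the as_raw=false path -> 1
                  << std::endl;
        return 0;
    }

One design nit in the diff itself: neuron_cmp takes its Neuron arguments by value, so every comparison copies two neurons; taking const Neuron& would do the same job without the copies.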
myclasses.h (14 changes)

@@ -15,15 +15,15 @@ class Neuron
 {
 public:
 	Neuron(int prev_layer_size); //prev_layer_size = number of weights
-	//void set_weighted_sum(float weighted_sum);
+	void set_bias(float value);
+	float get_bias();
+	void set_nth_weight(int n, float value);
+	float get_nth_weight(int n);
 	float get_weighted_sum();
 	void set_activated_output(float value);
 	float get_activated_output();
 	void set_derror(float value);
 	float get_derror();
-	void set_nth_weight(int n, float value);
-	float get_nth_weight(int n);
-	//std::forward_list<float> &get_weights();
 	void activate(std::forward_list<Neuron>::iterator &prev_layer_it, Activ activ_function=LINEAR);
private:
 	std::forward_list<float> weights;

@@ -40,12 +40,14 @@ public:
 	Network(int n_layers, int n_neurons);
 	Network(const std::vector<int> &n_neurons, Activ h_activ=RELU, Activ o_activ=SIGMOID);
 
-	float predict(const std::vector<float> &input);
+	float predict(const std::vector<float> &input, bool as_raw=true);
 	void print();
 
 	//to be deleted
 	bool forward(const std::vector<float> &input, const std::vector<float> &target);
 	bool set_errors(const std::vector<float> &target);
+	bool backward(float learning_rate);
private:
 	std::list<std::forward_list<Neuron>> layers;
 	Activ h_activ;

@@ -53,7 +55,7 @@ private:
 
 	//bool forward(const std::vector<float> &input, const std::vector<float> &target);
 	//bool set_errors(const std::vector<float> &target);
-	bool backward(float learning_rate);
+	//bool backward(float learning_rate);
 };