
Added training precision and zero-one loss to the plots with varying tolerance

Titouan Labourdette, 2 years ago
commit d286bedb4d

.ipynb_checkpoints/TP1_prog2.py-checkpoint.ipynb  (+41 −19)

@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 1,
    "id": "530f620c",
    "metadata": {},
    "outputs": [],
@@ -22,7 +22,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 2,
    "id": "68b6a517",
    "metadata": {},
    "outputs": [],
@@ -864,7 +864,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 26,
    "id": "98107e41",
    "metadata": {},
    "outputs": [
@@ -872,37 +872,59 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "K-NN confusion matrix:\n",
-      " [[51  0  0  0  0  1  0  0  0  0]\n",
-      " [ 0 56  0  0  0  0  0  0  0  0]\n",
-      " [ 3  1 45  1  0  0  1  1  0  0]\n",
-      " [ 0  1  1 35  0  1  0  1  1  1]\n",
-      " [ 0  3  0  0 48  0  0  0  0  2]\n",
-      " [ 0  1  0  1  0 38  0  0  0  0]\n",
-      " [ 0  0  0  0  0  2 44  0  0  0]\n",
-      " [ 0  2  0  0  3  0  0 47  0  0]\n",
-      " [ 2  0  0  0  0  3  1  0 42  2]\n",
-      " [ 0  0  0  0  4  1  0  1  2 50]]\n"
+      "Metrics for K-NN:\n",
+      "Parameters: (n_neighbors=3, p=2, n_jobs=1)\n",
+      "Sample size: 10000\n",
+      "Train/test split: 90%\n",
+      "Training time (seconds): 0.01596\n",
+      "Prediction time (seconds): 0.30718\n",
+      "Per-class precision: [0.942, 0.891, 0.962, 0.959, 0.988, 0.944, 0.961, 0.97, 0.989, 0.918]\n",
+      "Accuracy: 0.95\n",
+      "Error rate: 0.05\n",
+      "Confusion matrix:\n",
+      " [[ 98   0   1   0   0   0   1   0   1   0]\n",
+      " [  0 114   0   0   0   0   0   0   0   0]\n",
+      " [  2   2 102   0   0   0   0   0   0   0]\n",
+      " [  0   1   1  93   0   3   0   1   0   0]\n",
+      " [  1   5   0   0  82   0   0   0   0   5]\n",
+      " [  0   1   1   1   0  84   3   0   0   1]\n",
+      " [  0   0   0   0   0   0  99   0   0   0]\n",
+      " [  0   3   0   0   0   0   0  97   0   2]\n",
+      " [  2   1   1   3   1   2   0   0  92   0]\n",
+      " [  1   1   0   0   0   0   0   2   0  89]]\n"
      ]
     }
    ],
    "source": [
-    "### Create vector of 5000 random indexes\n",
-    "rand_indexes = np.random.randint(70000, size=5000)\n",
+    "### Create a vector of 10000 random indexes\n",
+    "rand_indexes = np.random.randint(70000, size=10000)\n",
     "### Load data with the previous vector\n",
     "data = mnist.data[rand_indexes]\n",
-    "# print(\"Dataset : \", data)\n",
     "target = mnist.target[rand_indexes]\n",
-    "\n",
     "# Split the dataset\n",
     "xtrain, xtest, ytrain, ytest = model_selection.train_test_split(data, target, train_size=0.9)\n",
     "\n",
-    "# Training on xtrain, ytrain\n",
     "clf = neighbors.KNeighborsClassifier(n_neighbors=3, p=2, n_jobs=1)\n",
+    "# Training on xtrain, ytrain\n",
+    "t1 = time.time()\n",
     "clf.fit(xtrain, ytrain)\n",
+    "t2 = time.time()\n",
     "# Predicting on xtest\n",
     "pred = clf.predict(xtest)\n",
-    "print(\"K-NN confusion matrix:\\n\", metrics.confusion_matrix(ytest, pred))"
+    "t3 = time.time()\n",
+    "# Compute the various metrics\n",
+    "precisions = [round(i, 3) for i in metrics.precision_score(ytest, pred, average=None)]\n",
+    "\n",
+    "print(\"Metrics for K-NN:\")\n",
+    "print(\"Parameters: (n_neighbors=3, p=2, n_jobs=1)\")\n",
+    "print(\"Sample size:\", 10000)\n",
+    "print(\"Train/test split:\", \"90%\")\n",
+    "print(\"Training time (seconds):\", round(t2 - t1, 5))\n",
+    "print(\"Prediction time (seconds):\", round(t3 - t2, 5))\n",
+    "print(\"Per-class precision:\", precisions)\n",
+    "print(\"Accuracy:\", clf.score(xtest, ytest))\n",
+    "print(\"Error rate:\", round(metrics.zero_one_loss(ytest, pred), 5))\n",
+    "print(\"Confusion matrix:\\n\", metrics.confusion_matrix(ytest, pred))"
    ]
   },
   {
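The hunk above replaces the single confusion-matrix print with a full timing-and-metrics report. A minimal, self-contained sketch of that pattern, assuming synthetic stand-in data (the notebook loads mnist.data/mnist.target in an earlier cell, not shown here) and adding a zero_division=0 guard the original does not use:

import time
import numpy as np
from sklearn import metrics, model_selection, neighbors

# Synthetic stand-ins for the MNIST arrays (shapes are illustrative only).
rng = np.random.default_rng(0)
X = rng.random((1000, 64))
y = rng.integers(0, 10, size=1000)
xtrain, xtest, ytrain, ytest = model_selection.train_test_split(X, y, train_size=0.9)

clf = neighbors.KNeighborsClassifier(n_neighbors=3, p=2, n_jobs=1)
t1 = time.time()
clf.fit(xtrain, ytrain)    # K-NN "training" essentially stores the data
t2 = time.time()
pred = clf.predict(xtest)  # the distance computations happen here
t3 = time.time()

print("Training time (s):", round(t2 - t1, 5))
print("Prediction time (s):", round(t3 - t2, 5))
print("Per-class precision:",
      [round(p, 3) for p in metrics.precision_score(ytest, pred, average=None, zero_division=0)])
print("Accuracy:", clf.score(xtest, ytest))
print("Zero-one loss:", round(metrics.zero_one_loss(ytest, pred), 5))
print("Confusion matrix:\n", metrics.confusion_matrix(ytest, pred))

Timing fit and predict separately matters for a lazy learner like K-NN: nearly all the cost lands at prediction time, as the 0.01596 s / 0.30718 s figures above illustrate.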

.ipynb_checkpoints/TP2_prog1.py-checkpoint.ipynb  (+42 −40)

@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "id": "3eb7a65b",
    "metadata": {},
    "outputs": [],
@@ -22,7 +22,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 2,
    "id": "a8812842",
    "metadata": {},
    "outputs": [],
@@ -1233,7 +1233,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 9,
    "id": "abb0fcf1",
    "metadata": {},
    "outputs": [
@@ -1241,23 +1241,33 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "A-NN confusion matrix:\n",
-      " [[59  0  0  0  0  0  0  0  0  0]\n",
-      " [ 0 60  0  0  0  0  0  1  0  0]\n",
-      " [ 0  0 42  0  0  1  2  1  2  0]\n",
-      " [ 0  0  1 44  0  1  0  0  0  0]\n",
-      " [ 0  0  0  0 46  0  1  0  0  4]\n",
-      " [ 0  0  0  0  0 31  0  0  1  0]\n",
-      " [ 0  0  0  0  0  0 48  0  0  0]\n",
-      " [ 1  0  0  1  0  0  0 49  0  0]\n",
-      " [ 0  1  1  5  0  1  0  0 48  0]\n",
-      " [ 2  0  0  1  1  2  0  0  2 40]]\n"
+      "Metrics for A-NN:\n",
+      "Parameters: (random_state=1, max_iter=300, hidden_layer_sizes=((85,)*15),\n",
+      "solver=adam, activation=relu, alpha=0.0000001)\n",
+      "Sample size: 10000\n",
+      "Train/test split: 90%\n",
+      "Training time (seconds): 27.9214\n",
+      "Prediction time (seconds): 0.01396\n",
+      "Per-class precision: [0.972, 0.974, 0.926, 0.97, 0.919, 0.939, 0.971, 0.967, 0.951, 0.927]\n",
+      "Accuracy: 0.952\n",
+      "Error rate: 0.048\n",
+      "Confusion matrix:\n",
+      " [[103   0   1   0   0   0   1   0   0   0]\n",
+      " [  0 114   0   0   1   0   0   0   0   0]\n",
+      " [  0   0 100   0   2   1   1   1   1   0]\n",
+      " [  0   1   3  97   0   1   0   0   1   1]\n",
+      " [  0   0   0   0  79   0   0   0   0   4]\n",
+      " [  2   0   0   3   0  93   1   0   1   0]\n",
+      " [  0   0   0   0   0   3  99   0   1   0]\n",
+      " [  1   1   1   0   1   0   0  89   0   1]\n",
+      " [  0   1   3   0   0   1   0   0  77   2]\n",
+      " [  0   0   0   0   3   0   0   2   0 101]]\n"
      ]
     }
    ],
    "source": [
-    "### Create vector of 5000 random indexes\n",
-    "rand_indexes = np.random.randint(70000, size=5000)\n",
+    "### Create a vector of 10000 random indexes\n",
+    "rand_indexes = np.random.randint(70000, size=10000)\n",
     "### Load data with the previous vector\n",
     "data = mnist.data[rand_indexes]\n",
     "# print(\"Dataset : \", data)\n",
@@ -1266,10 +1276,6 @@
     "# Split the dataset\n",
     "xtrain, xtest, ytrain, ytest = model_selection.train_test_split(data, target, train_size=0.9)\n",
     "\n",
-    "best_training_time = 0\n",
-    "best_precision_score = 0\n",
-    "best_zero_one_loss = 0\n",
-    "\n",
     "r = 1\n",
     "max_i = 300\n",
     "nb_hl = 15\n",
@@ -1281,31 +1287,27 @@
     "\n",
     "# Train the classifier\n",
     "clf = neural_network.MLPClassifier(random_state=r, max_iter=max_i, hidden_layer_sizes=hl, solver=sol, activation=act, alpha=a, verbose=False)\n",
-    "t1 = round(time.time(), 5)\n",
+    "t1 = time.time()\n",
     "clf.fit(xtrain, ytrain)\n",
-    "t2 = round(time.time(), 5)\n",
+    "t2 = time.time()\n",
     "# Predict on the test set\n",
     "pred = clf.predict(xtest)\n",
-    "# Prediction probabilities on xtest\n",
-    "pred_proba = clf.predict_proba(xtest)\n",
-    "# Save the training time, the precision and\n",
-    "# the per-class error rates\n",
-    "best_training_time = t2 - t1\n",
-    "best_precision_score = clf.score(xtest, ytest)\n",
-    "best_zero_one_loss = metrics.zero_one_loss(ytest, pred)\n",
+    "t3 = time.time()\n",
+    "\n",
+    "# Compute the various metrics\n",
+    "precisions = [round(i, 3) for i in metrics.precision_score(ytest, pred, average=None)]\n",
     "\n",
-    "# print(\"Parameters:\\n\")\n",
-    "# print(\"random_state = \", r)\n",
-    "# print(\"max_iter = \", max_i)\n",
-    "# print(\"nb_hidden_layer = \", nb_hl)\n",
-    "# print(\"hidden_layer_size = \", hl_size)\n",
-    "# print(\"solver = \", sol)\n",
-    "# print(\"activation = \", act)\n",
-    "# print(\"alpha = \", a)\n",
-    "# print(\"Training time: \", best_training_time)\n",
-    "# print(\"Score: \", best_precision_score)\n",
-    "# print(\"Zero-one loss: \", best_zero_one_loss)\n",
-    "print(\"A-NN confusion matrix:\\n\", metrics.confusion_matrix(ytest, pred))"
+    "print(\"Metrics for A-NN:\")\n",
+    "print(\"Parameters: (random_state=1, max_iter=300, hidden_layer_sizes=((85,)*15),\")\n",
+    "print(\"solver=adam, activation=relu, alpha=0.0000001)\")\n",
+    "print(\"Sample size:\", 10000)\n",
+    "print(\"Train/test split:\", \"90%\")\n",
+    "print(\"Training time (seconds):\", round(t2 - t1, 5))\n",
+    "print(\"Prediction time (seconds):\", round(t3 - t2, 5))\n",
+    "print(\"Per-class precision:\", precisions)\n",
+    "print(\"Accuracy:\", clf.score(xtest, ytest))\n",
+    "print(\"Error rate:\", round(metrics.zero_one_loss(ytest, pred), 5))\n",
+    "print(\"Confusion matrix:\\n\", metrics.confusion_matrix(ytest, pred))"
    ]
   },
   {
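The hidden_layer_sizes=((85,)*15) argument above relies on Python tuple repetition: fifteen hidden layers of 85 neurons each. A hedged, runnable sketch of that configuration on synthetic stand-in data (small sample and max_iter so it finishes quickly; expect a ConvergenceWarning):

import numpy as np
from sklearn import model_selection, neural_network

rng = np.random.default_rng(1)
X = rng.random((500, 64))
y = rng.integers(0, 10, size=500)
xtrain, xtest, ytrain, ytest = model_selection.train_test_split(X, y, train_size=0.9)

hl = (85,) * 15  # == (85, 85, ..., 85): fifteen hidden layers of 85 neurons
clf = neural_network.MLPClassifier(random_state=1, max_iter=50,
                                   hidden_layer_sizes=hl, solver="adam",
                                   activation="relu", alpha=1e-7)
clf.fit(xtrain, ytrain)
print("Test accuracy:", clf.score(xtest, ytest))

Note the timing asymmetry is reversed relative to K-NN: here fit dominates (27.9214 s) while predict is a few forward passes (0.01396 s).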

.ipynb_checkpoints/TP3_prog1.py-checkpoint.ipynb  (+138 −75)
File diff suppressed because it is too large.


TP1_prog2.py.ipynb  (+41 −19)

@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 1,
    "id": "530f620c",
    "metadata": {},
    "outputs": [],
@@ -22,7 +22,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 2,
    "id": "68b6a517",
    "metadata": {},
    "outputs": [],
@@ -864,7 +864,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 26,
    "id": "98107e41",
    "metadata": {},
    "outputs": [
@@ -872,37 +872,59 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "K-NN confusion matrix:\n",
-      " [[51  0  0  0  0  1  0  0  0  0]\n",
-      " [ 0 56  0  0  0  0  0  0  0  0]\n",
-      " [ 3  1 45  1  0  0  1  1  0  0]\n",
-      " [ 0  1  1 35  0  1  0  1  1  1]\n",
-      " [ 0  3  0  0 48  0  0  0  0  2]\n",
-      " [ 0  1  0  1  0 38  0  0  0  0]\n",
-      " [ 0  0  0  0  0  2 44  0  0  0]\n",
-      " [ 0  2  0  0  3  0  0 47  0  0]\n",
-      " [ 2  0  0  0  0  3  1  0 42  2]\n",
-      " [ 0  0  0  0  4  1  0  1  2 50]]\n"
+      "Metrics for K-NN:\n",
+      "Parameters: (n_neighbors=3, p=2, n_jobs=1)\n",
+      "Sample size: 10000\n",
+      "Train/test split: 90%\n",
+      "Training time (seconds): 0.01596\n",
+      "Prediction time (seconds): 0.30718\n",
+      "Per-class precision: [0.942, 0.891, 0.962, 0.959, 0.988, 0.944, 0.961, 0.97, 0.989, 0.918]\n",
+      "Accuracy: 0.95\n",
+      "Error rate: 0.05\n",
+      "Confusion matrix:\n",
+      " [[ 98   0   1   0   0   0   1   0   1   0]\n",
+      " [  0 114   0   0   0   0   0   0   0   0]\n",
+      " [  2   2 102   0   0   0   0   0   0   0]\n",
+      " [  0   1   1  93   0   3   0   1   0   0]\n",
+      " [  1   5   0   0  82   0   0   0   0   5]\n",
+      " [  0   1   1   1   0  84   3   0   0   1]\n",
+      " [  0   0   0   0   0   0  99   0   0   0]\n",
+      " [  0   3   0   0   0   0   0  97   0   2]\n",
+      " [  2   1   1   3   1   2   0   0  92   0]\n",
+      " [  1   1   0   0   0   0   0   2   0  89]]\n"
      ]
     }
    ],
    "source": [
-    "### Create vector of 5000 random indexes\n",
-    "rand_indexes = np.random.randint(70000, size=5000)\n",
+    "### Create a vector of 10000 random indexes\n",
+    "rand_indexes = np.random.randint(70000, size=10000)\n",
     "### Load data with the previous vector\n",
     "data = mnist.data[rand_indexes]\n",
-    "# print(\"Dataset : \", data)\n",
     "target = mnist.target[rand_indexes]\n",
-    "\n",
     "# Split the dataset\n",
     "xtrain, xtest, ytrain, ytest = model_selection.train_test_split(data, target, train_size=0.9)\n",
     "\n",
-    "# Training on xtrain, ytrain\n",
     "clf = neighbors.KNeighborsClassifier(n_neighbors=3, p=2, n_jobs=1)\n",
+    "# Training on xtrain, ytrain\n",
+    "t1 = time.time()\n",
     "clf.fit(xtrain, ytrain)\n",
+    "t2 = time.time()\n",
     "# Predicting on xtest\n",
     "pred = clf.predict(xtest)\n",
-    "print(\"K-NN confusion matrix:\\n\", metrics.confusion_matrix(ytest, pred))"
+    "t3 = time.time()\n",
+    "# Compute the various metrics\n",
+    "precisions = [round(i, 3) for i in metrics.precision_score(ytest, pred, average=None)]\n",
+    "\n",
+    "print(\"Metrics for K-NN:\")\n",
+    "print(\"Parameters: (n_neighbors=3, p=2, n_jobs=1)\")\n",
+    "print(\"Sample size:\", 10000)\n",
+    "print(\"Train/test split:\", \"90%\")\n",
+    "print(\"Training time (seconds):\", round(t2 - t1, 5))\n",
+    "print(\"Prediction time (seconds):\", round(t3 - t2, 5))\n",
+    "print(\"Per-class precision:\", precisions)\n",
+    "print(\"Accuracy:\", clf.score(xtest, ytest))\n",
+    "print(\"Error rate:\", round(metrics.zero_one_loss(ytest, pred), 5))\n",
+    "print(\"Confusion matrix:\\n\", metrics.confusion_matrix(ytest, pred))"
    ]
   },
   {
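An aside on the per-class precision list in the output above: with scikit-learn's confusion-matrix convention (rows are true labels, columns are predictions), each class's precision is its diagonal entry divided by its column sum. A small sketch with a hypothetical 3-class matrix:

import numpy as np

# Hypothetical 3-class confusion matrix (rows = true, columns = predicted).
cm = np.array([[98,   0,   2],
               [ 1, 114,   0],
               [ 3,   2, 102]])
precision_per_class = cm.diagonal() / cm.sum(axis=0)
print([round(p, 3) for p in precision_per_class])  # [0.961, 0.983, 0.981]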

TP2_prog1.py.ipynb  (+42 −40)

@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "id": "3eb7a65b",
    "metadata": {},
    "outputs": [],
@@ -22,7 +22,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 2,
    "id": "a8812842",
    "metadata": {},
    "outputs": [],
@@ -1233,7 +1233,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 9,
    "id": "abb0fcf1",
    "metadata": {},
    "outputs": [
@@ -1241,23 +1241,33 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "A-NN confusion matrix:\n",
-      " [[59  0  0  0  0  0  0  0  0  0]\n",
-      " [ 0 60  0  0  0  0  0  1  0  0]\n",
-      " [ 0  0 42  0  0  1  2  1  2  0]\n",
-      " [ 0  0  1 44  0  1  0  0  0  0]\n",
-      " [ 0  0  0  0 46  0  1  0  0  4]\n",
-      " [ 0  0  0  0  0 31  0  0  1  0]\n",
-      " [ 0  0  0  0  0  0 48  0  0  0]\n",
-      " [ 1  0  0  1  0  0  0 49  0  0]\n",
-      " [ 0  1  1  5  0  1  0  0 48  0]\n",
-      " [ 2  0  0  1  1  2  0  0  2 40]]\n"
+      "Metrics for A-NN:\n",
+      "Parameters: (random_state=1, max_iter=300, hidden_layer_sizes=((85,)*15),\n",
+      "solver=adam, activation=relu, alpha=0.0000001)\n",
+      "Sample size: 10000\n",
+      "Train/test split: 90%\n",
+      "Training time (seconds): 27.9214\n",
+      "Prediction time (seconds): 0.01396\n",
+      "Per-class precision: [0.972, 0.974, 0.926, 0.97, 0.919, 0.939, 0.971, 0.967, 0.951, 0.927]\n",
+      "Accuracy: 0.952\n",
+      "Error rate: 0.048\n",
+      "Confusion matrix:\n",
+      " [[103   0   1   0   0   0   1   0   0   0]\n",
+      " [  0 114   0   0   1   0   0   0   0   0]\n",
+      " [  0   0 100   0   2   1   1   1   1   0]\n",
+      " [  0   1   3  97   0   1   0   0   1   1]\n",
+      " [  0   0   0   0  79   0   0   0   0   4]\n",
+      " [  2   0   0   3   0  93   1   0   1   0]\n",
+      " [  0   0   0   0   0   3  99   0   1   0]\n",
+      " [  1   1   1   0   1   0   0  89   0   1]\n",
+      " [  0   1   3   0   0   1   0   0  77   2]\n",
+      " [  0   0   0   0   3   0   0   2   0 101]]\n"
      ]
     }
    ],
    "source": [
-    "### Create vector of 5000 random indexes\n",
-    "rand_indexes = np.random.randint(70000, size=5000)\n",
+    "### Create a vector of 10000 random indexes\n",
+    "rand_indexes = np.random.randint(70000, size=10000)\n",
     "### Load data with the previous vector\n",
     "data = mnist.data[rand_indexes]\n",
     "# print(\"Dataset : \", data)\n",
@@ -1266,10 +1276,6 @@
     "# Split the dataset\n",
     "xtrain, xtest, ytrain, ytest = model_selection.train_test_split(data, target, train_size=0.9)\n",
     "\n",
-    "best_training_time = 0\n",
-    "best_precision_score = 0\n",
-    "best_zero_one_loss = 0\n",
-    "\n",
     "r = 1\n",
     "max_i = 300\n",
     "nb_hl = 15\n",
@@ -1281,31 +1287,27 @@
     "\n",
     "# Train the classifier\n",
     "clf = neural_network.MLPClassifier(random_state=r, max_iter=max_i, hidden_layer_sizes=hl, solver=sol, activation=act, alpha=a, verbose=False)\n",
-    "t1 = round(time.time(), 5)\n",
+    "t1 = time.time()\n",
     "clf.fit(xtrain, ytrain)\n",
-    "t2 = round(time.time(), 5)\n",
+    "t2 = time.time()\n",
     "# Predict on the test set\n",
     "pred = clf.predict(xtest)\n",
-    "# Prediction probabilities on xtest\n",
-    "pred_proba = clf.predict_proba(xtest)\n",
-    "# Save the training time, the precision and\n",
-    "# the per-class error rates\n",
-    "best_training_time = t2 - t1\n",
-    "best_precision_score = clf.score(xtest, ytest)\n",
-    "best_zero_one_loss = metrics.zero_one_loss(ytest, pred)\n",
+    "t3 = time.time()\n",
+    "\n",
+    "# Compute the various metrics\n",
+    "precisions = [round(i, 3) for i in metrics.precision_score(ytest, pred, average=None)]\n",
    "\n",
-    "# print(\"Parameters:\\n\")\n",
-    "# print(\"random_state = \", r)\n",
-    "# print(\"max_iter = \", max_i)\n",
-    "# print(\"nb_hidden_layer = \", nb_hl)\n",
-    "# print(\"hidden_layer_size = \", hl_size)\n",
-    "# print(\"solver = \", sol)\n",
-    "# print(\"activation = \", act)\n",
-    "# print(\"alpha = \", a)\n",
-    "# print(\"Training time: \", best_training_time)\n",
-    "# print(\"Score: \", best_precision_score)\n",
-    "# print(\"Zero-one loss: \", best_zero_one_loss)\n",
-    "print(\"A-NN confusion matrix:\\n\", metrics.confusion_matrix(ytest, pred))"
+    "print(\"Metrics for A-NN:\")\n",
+    "print(\"Parameters: (random_state=1, max_iter=300, hidden_layer_sizes=((85,)*15),\")\n",
+    "print(\"solver=adam, activation=relu, alpha=0.0000001)\")\n",
+    "print(\"Sample size:\", 10000)\n",
+    "print(\"Train/test split:\", \"90%\")\n",
+    "print(\"Training time (seconds):\", round(t2 - t1, 5))\n",
+    "print(\"Prediction time (seconds):\", round(t3 - t2, 5))\n",
+    "print(\"Per-class precision:\", precisions)\n",
+    "print(\"Accuracy:\", clf.score(xtest, ytest))\n",
+    "print(\"Error rate:\", round(metrics.zero_one_loss(ytest, pred), 5))\n",
+    "print(\"Confusion matrix:\\n\", metrics.confusion_matrix(ytest, pred))"
    ]
   },
   {
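The hunk above also drops the round() calls that previously wrapped time.time(), a sound fix: rounding raw epoch timestamps before subtracting can distort very short intervals. For interval timing, time.perf_counter() is arguably better still, being monotonic and higher-resolution. A hedged sketch, with the summation standing in for clf.fit(...):

import time

t1 = time.perf_counter()
total = sum(i * i for i in range(10**6))  # stand-in workload
t2 = time.perf_counter()
print("Elapsed (s):", round(t2 - t1, 5))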

TP3_prog1.py.ipynb  (+109 −58)
File diff suppressed because it is too large.

