{
"cells": [
{
"cell_type": "code",
"execution_count": 31,
"id": "3eb7a65b",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.datasets import fetch_openml\n",
"import sklearn\n",
"import matplotlib.pyplot as plt\n",
"from sklearn import model_selection\n",
"from sklearn import neural_network\n",
"from sklearn import metrics\n",
"import numpy as np\n",
"import time\n",
"import matplotlib as plt"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a8812842",
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD4CAYAAAAq5pAIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAOUElEQVR4nO3dX4xUdZrG8ecFwT8MKiyt2zJEZtGYIRqBlLAJG0Qni38SBS5mAzGIxogXIDMJxEW5gAsvjO7MZBQzplEDbEYmhJEIiRkHCcYQE0OhTAuLLGpapkeEIkTH0QsU373ow6bFrl81VafqlP1+P0mnquup0+dNhYdTXae6fubuAjD0DSt6AACtQdmBICg7EARlB4Kg7EAQF7RyZ+PGjfOJEye2cpdAKD09PTp58qQNlDVUdjO7XdJvJQ2X9Ly7P5G6/8SJE1UulxvZJYCEUqlUNav7abyZDZf0rKQ7JE2WtNDMJtf78wA0VyO/s0+X9IG7f+TupyX9QdLcfMYCkLdGyj5e0l/7fd+b3fYdZrbEzMpmVq5UKg3sDkAjGin7QC8CfO+9t+7e5e4ldy91dHQ0sDsAjWik7L2SJvT7/seSPmlsHADN0kjZ90q61sx+YmYjJS2QtD2fsQDkre5Tb+7+jZktk/Sa+k69vejuB3ObDECuGjrP7u6vSno1p1kANBFvlwWCoOxAEJQdCIKyA0FQdiAIyg4EQdmBICg7EARlB4Kg7EAQlB0IgrIDQVB2IAjKDgRB2YEgKDsQBGUHgqDsQBCUHQiCsgNBUHYgCMoOBEHZgSAoOxAEZQeCoOxAEJQdCIKyA0FQdiCIhlZxRfs7c+ZMMv/888+buv9169ZVzb766qvktocPH07mzz77bDJfuXJl1Wzz5s3JbS+66KJkvmrVqmS+Zs2aZF6EhspuZj2SvpB0RtI37l7KYygA+cvjyH6Lu5/M4ecAaCJ+ZweCaLTsLunPZrbPzJYMdAczW2JmZTMrVyqVBncHoF6Nln2mu0+TdIekpWY269w7uHuXu5fcvdTR0dHg7gDUq6Gyu/sn2eUJSdskTc9jKAD5q7vsZjbKzEafvS5pjqQDeQ0GIF+NvBp/paRtZnb257zk7n/KZaoh5ujRo8n89OnTyfytt95K5nv27KmaffbZZ8ltt27dmsyLNGHChGT+8MMPJ/Nt27ZVzUaPHp3c9sYbb0zmN998czJvR3WX3d0/kpR+RAC0DU69AUFQdiAIyg4EQdmBICg7EAR/4pqDd999N5nfeuutybzZf2baroYPH57MH3/88WQ+atSoZH7PPfdUza666qrktmPGjEnm1113XTJvRxzZgSAoOxAEZQeCoOxAEJQdCIKyA0FQdiAIzrPn4Oqrr07m48aNS+btfJ59xowZybzW+ejdu3dXzUaOHJncdtGiRckc54cjOxAEZQeCoOxAEJQdCIKyA0FQdiAIyg4EwXn2HIwdOzaZP/XUU8l8x44dyXzq1KnJfPny5ck8ZcqUKcn89ddfT+a1/qb8wIHqSwk8/fTTyW2RL47sQBCUHQiCsgNBUHYgCMoOBEHZgSAoOxAE59lbYN68ecm81ufK11peuLu7u2r2/PPPJ7dduXJlMq91Hr2W66+/vmrW1dXV0M/G+al5ZDezF83shJkd6HfbWDPbaWZHssv0JxgAKNxgnsZvkHT7ObetkrTL3a+VtCv7HkAbq1l2d39T0qlzbp4raWN2faOkefmOBSBv9b5Ad6W7H5Ok7PKKanc0syVmVjazcqVSqXN3ABrV9Ffj3b3L3UvuXuro6Gj27gBUUW/Zj5tZpyRllyfyGwlAM9Rb9u2SFmfXF0t6JZ9xADRLzfPsZrZZ0mxJ48ysV9IaSU9I2mJmD0g6KunnzRxyqLv00ksb2v6yyy6re9ta5+EXLFiQzIcN431ZPxQ1y+7uC6tEP8t5FgBNxH/LQBCUHQiCsgNBUHYgCMoOBMGfuA4Ba9eurZrt27cvue0bb7yRzGt9lPScOXOSOdoHR3YgCMoOBEHZgSAoOxAEZQeCoOxAEJQdCILz7ENA6uOe169fn9x22rRpyfzBBx9M5rfccksyL5VKVbOlS5cmtzWzZI7zw5EdCIKyA0FQdiAIyg4EQdmBICg7EARlB4LgPPsQN2nSpGS+YcOGZH7//fcn802bNtWdf/nll8lt77333mTe2dmZzPFdHNmBICg7EARlB4Kg7EAQlB0IgrIDQVB2IAjOswc3f/78ZH7NNdck8xUrViTz1OfOP/roo8ltP/7442S+evXqZD5+/PhkHk3NI7uZvWhmJ8zsQL/b1prZ38xsf/Z1Z3PHBNCowTyN3yDp9gFu/427T8m+Xs13LAB5q1l2d39T0qkWzAKgiRp5gW6ZmXVnT/PHVLuTmS0xs7KZlSuVSgO7A9CIesv+O0mTJE2RdEzSr6rd0d273L3k7qWOjo46dwegUXWV3d2Pu/sZd/9W0npJ0/MdC0De6iq7mfX/28L5kg5Uuy+A9lDzPLuZbZY0W9I4M+uVtEbSbDObIskl9Uh6qHkjokg33HBDMt+yZUsy37FjR9XsvvvuS2773HPPJfMjR44k8507dybzaGqW3d0XDnDzC02YBUAT8XZZIAjKDgRB2YEgKDsQBGUHgjB3b9nOSqWSl8vllu0P7e3CCy9M5l9//XUyHzFiRDJ/7bXXqmazZ89ObvtDVSqVVC6XB1zrmiM7EARlB4Kg7EAQlB0IgrIDQVB2IAjKDgTBR0kjqbu7O5lv3bo1me/du7dqVus8ei2TJ09O5rNmzWro5w81HNmBICg7EARlB4Kg7EAQlB0IgrIDQVB2IAjOsw9xhw8fTubPPPNMMn/55ZeT+aeffnreMw3WBRek/3l2dnYm82HDOJb1x6MBBEHZgSAoOxAEZQeCoOxAEJQdCIKyA0Fwnv0HoNa57Jdeeqlqtm7duuS2PT099YyUi5tuuimZr169OpnffffdeY4z5NU8spvZBDPbbWaHzOygmf0iu32sme00syPZ5ZjmjwugXoN5Gv+NpBXu/lNJ/yppqZlNlrRK0i53v1bSrux7AG2qZtnd/Zi7v5Nd/0LSIUnjJc2VtDG720ZJ85o0I4AcnNcLdGY2UdJUSW9LutLdj0l9/yFIuqLKNkvMrGxm5Uql0uC4AOo16LKb2Y8k/VHSL93974Pdzt273L3k7qWOjo56ZgSQg0GV3cxGqK/ov3f3s38GddzMOrO8U9KJ5owIIA81T72ZmUl6QdIhd/91v2i7pMWSnsguX2nKhEPA8ePHk/nBgweT+bJly5L5+++/f94z5WXGjBnJ/JFHHqmazZ07N7ktf6Kar8GcZ58paZGk98xsf3bbY+or+RYze0DSUUk/b8qEAHJRs+zuvkfSgIu7S/pZvuMAaBaeJwFBUHYgCMoOBEHZgSAoOxAEf+I6SKdOnaqaPfTQQ8lt9+/fn8w//PDDekbKxcyZM5P5ihUrkvltt92WzC+++OLzngnNwZEdCIKyA0FQdiAIyg4EQdmBICg7EARlB4IIc5797bffTuZPPvlkMt+7d2/Vr
Le3t66Z8nLJJZdUzZYvX57cttbHNY8aNaqumdB+OLIDQVB2IAjKDgRB2YEgKDsQBGUHgqDsQBBhzrNv27atobwRkydPTuZ33XVXMh8+fHgyX7lyZdXs8ssvT26LODiyA0FQdiAIyg4EQdmBICg7EARlB4Kg7EAQ5u7pO5hNkLRJ0j9L+lZSl7v/1szWSnpQUiW762Pu/mrqZ5VKJS+Xyw0PDWBgpVJJ5XJ5wFWXB/Ommm8krXD3d8xstKR9ZrYzy37j7v+V16AAmmcw67Mfk3Qsu/6FmR2SNL7ZgwHI13n9zm5mEyVNlXT2M56WmVm3mb1oZmOqbLPEzMpmVq5UKgPdBUALDLrsZvYjSX+U9Et3/7uk30maJGmK+o78vxpoO3fvcveSu5c6OjoanxhAXQZVdjMbob6i/97dX5Ykdz/u7mfc/VtJ6yVNb96YABpVs+xmZpJekHTI3X/d7/bOfnebL+lA/uMByMtgXo2fKWmRpPfMbH9222OSFprZFEkuqUdSet1iAIUazKvxeyQNdN4ueU4dQHvhHXRAEJQdCIKyA0FQdiAIyg4EQdmBICg7EARlB4Kg7EAQlB0IgrIDQVB2IAjKDgRB2YEgan6UdK47M6tI+rjfTeMknWzZAOenXWdr17kkZqtXnrNd7e4Dfv5bS8v+vZ2bld29VNgACe06W7vOJTFbvVo1G0/jgSAoOxBE0WXvKnj/Ke06W7vOJTFbvVoyW6G/swNonaKP7ABahLIDQRRSdjO73cwOm9kHZraqiBmqMbMeM3vPzPabWaHrS2dr6J0wswP9bhtrZjvN7Eh2OeAaewXNttbM/pY9dvvN7M6CZptgZrvN7JCZHTSzX2S3F/rYJeZqyePW8t/ZzWy4pP+V9O+SeiXtlbTQ3f+npYNUYWY9kkruXvgbMMxslqR/SNrk7tdntz0p6ZS7P5H9RznG3f+zTWZbK+kfRS/jna1W1Nl/mXFJ8yTdpwIfu8Rc/6EWPG5FHNmnS/rA3T9y99OS/iBpbgFztD13f1PSqXNunitpY3Z9o/r+sbRcldnagrsfc/d3sutfSDq7zHihj11irpYoouzjJf213/e9aq/13l3Sn81sn5ktKXqYAVzp7sekvn88kq4oeJ5z1VzGu5XOWWa8bR67epY/b1QRZR9oKal2Ov83092nSbpD0tLs6SoGZ1DLeLfKAMuMt4V6lz9vVBFl75U0od/3P5b0SQFzDMjdP8kuT0japvZbivr42RV0s8sTBc/z/9ppGe+BlhlXGzx2RS5/XkTZ90q61sx+YmYjJS2QtL2AOb7HzEZlL5zIzEZJmqP2W4p6u6TF2fXFkl4pcJbvaJdlvKstM66CH7vClz9395Z/SbpTfa/IfyhpdREzVJnrXyT9Jfs6WPRskjar72nd1+p7RvSApH+StEvSkexybBvN9t+S3pPUrb5idRY027+p71fDbkn7s687i37sEnO15HHj7bJAELyDDgiCsgNBUHYgCMoOBEHZgSAoOxAEZQeC+D+ypTV9clByEAAAAABJRU5ErkJggg==\n",
"text/plain": [
"<Figure size 432x288 with 1 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Classe : 5\n"
]
}
],
"source": [
"mnist = fetch_openml('mnist_784',as_frame=False)\n",
"images = mnist.data.reshape((-1, 28, 28))\n",
"plt.imshow(images[0],cmap=plt.cm.gray_r,interpolation=\"nearest\")\n",
"plt.show()\n",
"print(\"Classe : \", mnist.target[0])"
]
},
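{
"cell_type": "markdown",
"id": "f0a1b2c3",
"metadata": {},
"source": [
"MLPs are sensitive to feature scaling, and the raw MNIST pixels range from 0 to 255. A minimal preprocessing sketch; the cells below keep the raw values, as in the recorded runs:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0a1b2c4",
"metadata": {},
"outputs": [],
"source": [
"# Optional: scale pixel values from [0, 255] down to [0, 1].\n",
"# Not applied in the following cells, so their recorded outputs stay comparable.\n",
"X_scaled = mnist.data / 255.0\n",
"print(X_scaled.min(), X_scaled.max())"
]
},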
{
"cell_type": "code",
"execution_count": 5,
"id": "3b1a54ef",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Score échantillon de test : 0.9471428571428572\n",
"Classe image 4 : 4\n",
"Classe prédite image 4 : 4\n",
"Précision pour chaque classe : [0.9722488 0.97494781 0.95077973 0.94289898 0.95481336 0.93585101\n",
" 0.96747967 0.96834012 0.88457808 0.91254205]\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/insa/anaconda/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py:614: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (100) reached and the optimization hasn't converged yet.\n",
" warnings.warn(\n"
]
}
],
"source": [
"# xtrain data set d'entraînement et ytrain étiquettes de xtrain\n",
"# xtest dataset de prédiction et ytest étiquettes de xtest\n",
"xtrain, xtest, ytrain, ytest = model_selection.train_test_split(mnist.data, mnist.target,train_size=0.7)\n",
"\n",
"#Entraîne le classifier\n",
"clf = neural_network.MLPClassifier(random_state=1, max_iter=100, hidden_layer_sizes=(50))\n",
"clf.fit(xtrain, ytrain)\n",
"\n",
"#Prédiction sur le jeu de tests\n",
"pred = clf.predict(xtest)\n",
"#print(\"Prédiction : \", pred)\n",
"# Probabilités des prédictions sur xtest\n",
"pred_proba = clf.predict_proba(xtest)\n",
"#print(\"Probabilités : \", pred_proba)\n",
"# On calcule le score obtenu sur xtest avec les étiquettes ytest\n",
"score = clf.score(xtest, ytest)\n",
"print(\"Score échantillon de test : \", score)\n",
"\n",
"#Infos image 4\n",
"print(\"Classe image 4 : \", ytest[3])\n",
"print(\"Classe prédite image 4 : \", pred[3])\n",
"\n",
"#Calcul de la précision avec metrics.precision_score\n",
"print(\"Précision pour chaque classe : \", metrics.precision_score(ytest, pred,average=None))"
]
},
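{
"cell_type": "markdown",
"id": "b7e1d2a0",
"metadata": {},
"source": [
"Besides `metrics.precision_score`, scikit-learn's `metrics.classification_report` gives precision, recall and F1 for every class in one call. A quick sketch reusing `ytest` and `pred` from the cell above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b7e1d2a1",
"metadata": {},
"outputs": [],
"source": [
"# Per-class precision/recall/F1 summary for the same predictions\n",
"print(metrics.classification_report(ytest, pred))"
]
},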
{
"cell_type": "code",
"execution_count": 6,
"id": "6068ca09",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Précision pour chaque classe :\n",
" [[2032 1 2 0 1 6 6 0 34 6]\n",
" [ 0 2335 14 5 6 2 5 8 13 2]\n",
" [ 13 7 1951 23 17 4 11 25 67 6]\n",
" [ 2 4 26 1932 0 44 2 13 45 23]\n",
" [ 3 6 9 1 1944 2 8 6 6 48]\n",
" [ 7 8 5 32 3 1809 22 3 29 12]\n",
" [ 16 4 10 0 11 18 2023 0 18 0]\n",
" [ 7 4 18 15 14 6 0 2141 5 56]\n",
" [ 2 21 16 17 8 27 13 1 1824 29]\n",
" [ 8 5 1 24 32 15 1 14 21 1899]]\n"
]
}
],
"source": [
"print(\"Précision pour chaque classe :\\n\", metrics.confusion_matrix(ytest, pred))"
]
},
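{
"cell_type": "markdown",
"id": "c9d0e1f2",
"metadata": {},
"source": [
"Sanity check: with scikit-learn's convention (rows = true labels, columns = predicted labels), the per-class precision can be recovered from the confusion matrix as the diagonal divided by the column sums. A short sketch:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c9d0e1f3",
"metadata": {},
"outputs": [],
"source": [
"# precision_j = cm[j, j] / cm[:, j].sum()\n",
"cm = metrics.confusion_matrix(ytest, pred)\n",
"print(np.diag(cm) / cm.sum(axis=0))"
]
},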
{
"cell_type": "code",
"execution_count": 25,
"id": "5a4a5485",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Liste des scores : [0.937952380952381, 0.9632857142857143, 0.9618571428571429, 0.8938571428571429, 0.5494761904761905, 0.11290476190476191, 0.11290476190476191, 0.11290476190476191, 0.11290476190476191, 0.11290476190476191]\n"
]
}
],
"source": [
"import warnings\n",
"warnings.filterwarnings(\"ignore\")\n",
"\n",
"# xtrain data set d'entraînement et ytrain étiquettes de xtrain\n",
"# xtest dataset de prédiction et ytest étiquettes de xtest\n",
"xtrain, xtest, ytrain, ytest = model_selection.train_test_split(mnist.data, mnist.target,train_size=0.7)\n",
"\n",
"list_scores = []\n",
" \n",
"for i in range(1, 101, 10):\n",
" #Entraîne le classifier\n",
" clf = neural_network.MLPClassifier(random_state=1, max_iter=20, hidden_layer_sizes=((50,) * i))\n",
" clf.fit(xtrain, ytrain)\n",
"\n",
" #Prédiction sur le jeu de tests\n",
" pred = clf.predict(xtest)\n",
" # Probabilités des prédictions sur xtest\n",
" pred_proba = clf.predict_proba(xtest)\n",
" # On calcule le score obtenu sur xtest avec les étiquettes ytest\n",
" list_scores += [clf.score(xtest, ytest)]\n",
" \n",
"print(\"Liste des scores : \", list_scores)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "16283951",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0.214, 0.114, 0.114, 0.215, 0.377, 0.634, 0.617, 0.531, 0.492, 0.765, 0.706, 0.66, 0.738, 0.838, 0.782, 0.879, 0.846, 0.897, 0.911, 0.87, 0.905, 0.919, 0.879, 0.91, 0.913, 0.922, 0.922, 0.922, 0.918, 0.916, 0.917, 0.919, 0.924, 0.928, 0.929, 0.935, 0.927, 0.923, 0.929, 0.929, 0.931, 0.936, 0.934, 0.927, 0.936, 0.929, 0.935, 0.931, 0.937, 0.93, 0.938, 0.935, 0.938, 0.937, 0.939, 0.938, 0.941, 0.939, 0.941, 0.943, 0.945, 0.941, 0.944, 0.943, 0.941, 0.941, 0.941, 0.945, 0.942, 0.947, 0.945, 0.944, 0.948, 0.939, 0.947, 0.943, 0.94, 0.946, 0.95, 0.945, 0.951, 0.949, 0.947, 0.947, 0.944, 0.95, 0.951, 0.946, 0.951, 0.949, 0.952, 0.948, 0.947, 0.949, 0.95, 0.949, 0.947, 0.946, 0.95, 0.954]\n",
"0.954\n",
"100\n",
"0.954\n"
]
}
],
"source": [
"list_rounded_scores = [round(i,3) for i in list_scores]\n",
"print(list_rounded_scores)\n",
"\n",
"n = 1\n",
"max_score = 0\n",
"max_index = 1\n",
"for i in list_rounded_scores:\n",
" if i > max_score:\n",
" max_score = i\n",
" max_index = n\n",
" n += 1\n",
" else:\n",
" n += 1\n",
"print(max_score)\n",
"print(max_index)\n",
"print(list_rounded_scores[max_index-1])"
]
},
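{
"cell_type": "markdown",
"id": "d4e5f6a7",
"metadata": {},
"source": [
"The same lookup fits in one line with NumPy: `np.argmax` returns the 0-based position of the first maximum, which matches the loop's tie-breaking. A sketch assuming `list_rounded_scores` from the cell above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d4e5f6a8",
"metadata": {},
"outputs": [],
"source": [
"# 0-based index of the first maximum, printed 1-based to match the loop above\n",
"best = int(np.argmax(list_rounded_scores))\n",
"print(list_rounded_scores[best], best + 1)"
]
},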
{
"cell_type": "code",
"execution_count": 32,
"id": "5726fcb1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Liste des scores : \n",
" [0.9682380952380952, 0.9711904761904762, 0.9734761904761905, 0.9657619047619047, 0.9624285714285714]\n",
"Temps d'entraînement : \n",
" [0.967, 0.9704761904761905, 0.9586190476190476, 0.9702380952380952, 0.9645238095238096]\n",
"Temps de prédiction : \n",
" [0.967, 0.9704761904761905, 0.9586190476190476, 0.9702380952380952, 0.9645238095238096]\n"
]
}
],
"source": [
"xtrain, xtest, ytrain, ytest = model_selection.train_test_split(mnist.data, mnist.target,train_size=0.7)\n",
"\n",
"rand_nb_couches_cachees = np.random.randint(low=1, high=10, size=5)\n",
"rand_taille_couches = np.random.randint(low=10, high=300, size=5)\n",
"\n",
"#Liste des tuples utilisés comme arguments pour hidden_layer_sizes\n",
"list_args = []\n",
"for i in range(5):\n",
" list_args += [((rand_taille_couches[i],) * rand_nb_couches_cachees[i])]\n",
"\n",
"list_scores = []\n",
"list_training_times = []\n",
"list_predicting_times = []\n",
"\n",
"for i in range(5):\n",
" #Entraîne le classifier\n",
" t1 = round(time.time(),3)\n",
" clf = neural_network.MLPClassifier(random_state=1, max_iter=20, hidden_layer_sizes=list_args[i])\n",
" clf.fit(xtrain, ytrain)\n",
" t2 = round(time.time(),3)\n",
" #Prédiction sur le jeu de tests\n",
" pred = clf.predict(xtest)\n",
" t3 = round(time.time(),3)\n",
" # Probabilités des prédictions sur xtest\n",
" pred_proba = clf.predict_proba(xtest)\n",
" # On calcule le score obtenu sur xtest avec les étiquettes ytest\n",
" list_scores += [clf.score(xtest, ytest)]\n",
" list_training_times += [t2-t1]\n",
" list_predicting_times += [t3-t2]\n",
" \n",
"print(\"Liste des scores : \\n\", list_scores)\n",
"print(\"Temps d'entraînement : \\n\", list_training_times)\n",
"print(\"Temps de prédiction : \\n\", list_predicting_times)"
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "741f82ca",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Temps d'entraînement : \n",
" [28.21499991416931, 54.074000120162964, 75.52600002288818, 26.98900008201599, 17.579999923706055]\n",
"Temps de prédiction : \n",
" [0.09300017356872559, 0.20399999618530273, 0.2669999599456787, 0.08899998664855957, 0.0710000991821289]\n",
"[(297, 297), (222, 222, 222, 222, 222, 222, 222), (244, 244, 244, 244, 244, 244, 244, 244, 244), (265, 265), (150, 150, 150)]\n"
]
}
],
"source": [
"print(\"Temps d'entraînement : \\n\", list_training_times)\n",
"print(\"Temps de prédiction : \\n\", list_predicting_times)\n",
"print(list_args)"
]
},
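{
"cell_type": "markdown",
"id": "e5f6a7b8",
"metadata": {},
"source": [
"Instead of the manual random draw above, `RandomizedSearchCV` can sample candidate architectures and cross-validate them in one object. A minimal sketch assuming the `list_args` tuples from the previous cells; it refits one model per candidate and per fold, so it is slow on the full training set — hence the subsample:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e5f6a7b9",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import RandomizedSearchCV\n",
"\n",
"# 3 sampled architectures x 2 folds = 6 fits; subsample xtrain to keep it fast\n",
"search = RandomizedSearchCV(\n",
"    neural_network.MLPClassifier(random_state=1, max_iter=20),\n",
"    param_distributions={\"hidden_layer_sizes\": list_args},\n",
"    n_iter=3, cv=2, random_state=1)\n",
"search.fit(xtrain[:10000], ytrain[:10000])\n",
"print(search.best_params_, search.best_score_)"
]
},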
{
"cell_type": "code",
"execution_count": 36,
"id": "c32eeb4e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Iteration 1, loss = 3.54940485\n",
"Iteration 2, loss = 0.88067693\n",
"Iteration 3, loss = 0.64504156\n",
"Iteration 4, loss = 0.52889025\n",
"Iteration 5, loss = 0.44844534\n",
"Iteration 6, loss = 0.38176710\n",
"Iteration 7, loss = 0.33897928\n",
"Iteration 8, loss = 0.30837530\n",
"Iteration 9, loss = 0.27728672\n",
"Iteration 10, loss = 0.25089914\n",
"Iteration 11, loss = 0.23463636\n",
"Iteration 12, loss = 0.21708870\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-36-4146e43b1fa0>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0mt1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mround\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0mclf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mneural_network\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mMLPClassifier\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrandom_state\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_iter\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m75\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhidden_layer_sizes\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m50\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mverbose\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msolver\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 21\u001b[0;31m \u001b[0mclf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mxtrain\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mytrain\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 22\u001b[0m \u001b[0mt2\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mround\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;31m#Prédiction sur le jeu de tests\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, X, y)\u001b[0m\n\u001b[1;32m 671\u001b[0m \u001b[0mself\u001b[0m \u001b[0;34m:\u001b[0m \u001b[0mreturns\u001b[0m \u001b[0ma\u001b[0m \u001b[0mtrained\u001b[0m \u001b[0mMLP\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 672\u001b[0m \"\"\"\n\u001b[0;32m--> 673\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_fit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mincremental\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 674\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 675\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py\u001b[0m in \u001b[0;36m_fit\u001b[0;34m(self, X, y, incremental)\u001b[0m\n\u001b[1;32m 402\u001b[0m \u001b[0;31m# Run the LBFGS solver\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 403\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msolver\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'lbfgs'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 404\u001b[0;31m self._fit_lbfgs(X, y, activations, deltas, coef_grads,\n\u001b[0m\u001b[1;32m 405\u001b[0m intercept_grads, layer_units)\n\u001b[1;32m 406\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py\u001b[0m in \u001b[0;36m_fit_lbfgs\u001b[0;34m(self, X, y, activations, deltas, coef_grads, intercept_grads, layer_units)\u001b[0m\n\u001b[1;32m 488\u001b[0m \u001b[0miprint\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 489\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 490\u001b[0;31m opt_res = scipy.optimize.minimize(\n\u001b[0m\u001b[1;32m 491\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_loss_grad_lbfgs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpacked_coef_inter\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 492\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"L-BFGS-B\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mjac\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/scipy/optimize/_minimize.py\u001b[0m in \u001b[0;36mminimize\u001b[0;34m(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)\u001b[0m\n\u001b[1;32m 617\u001b[0m **options)\n\u001b[1;32m 618\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mmeth\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'l-bfgs-b'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 619\u001b[0;31m return _minimize_lbfgsb(fun, x0, args, jac, bounds,\n\u001b[0m\u001b[1;32m 620\u001b[0m callback=callback, **options)\n\u001b[1;32m 621\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mmeth\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'tnc'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/scipy/optimize/lbfgsb.py\u001b[0m in \u001b[0;36m_minimize_lbfgsb\u001b[0;34m(fun, x0, args, jac, bounds, disp, maxcor, ftol, gtol, eps, maxfun, maxiter, iprint, callback, maxls, finite_diff_rel_step, **unknown_options)\u001b[0m\n\u001b[1;32m 358\u001b[0m \u001b[0;31m# until the completion of the current minimization iteration.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 359\u001b[0m \u001b[0;31m# Overwrite f and g:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 360\u001b[0;31m \u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc_and_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 361\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mtask_str\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstartswith\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mb'NEW_X'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 362\u001b[0m \u001b[0;31m# new iteration\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py\u001b[0m in \u001b[0;36mfun_and_grad\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray_equal\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 259\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_update_x_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 260\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_update_fun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 261\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_update_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 262\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py\u001b[0m in \u001b[0;36m_update_fun\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 224\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_update_fun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 225\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mf_updated\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 226\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_update_fun_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 227\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mf_updated\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 228\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py\u001b[0m in \u001b[0;36mupdate_fun\u001b[0;34m()\u001b[0m\n\u001b[1;32m 131\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 132\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mupdate_fun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 133\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfun_wrapped\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 134\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 135\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_update_fun_impl\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mupdate_fun\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py\u001b[0m in \u001b[0;36mfun_wrapped\u001b[0;34m(x)\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mfun_wrapped\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 129\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnfev\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 130\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 131\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 132\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mupdate_fun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/scipy/optimize/optimize.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, x, *args)\u001b[0m\n\u001b[1;32m 72\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[0;34m\"\"\" returns the the function value \"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 74\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_if_needed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 75\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_value\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 76\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/scipy/optimize/optimize.py\u001b[0m in \u001b[0;36m_compute_if_needed\u001b[0;34m(self, x, *args)\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_value\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjac\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 67\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masarray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcopy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 68\u001b[0;31m \u001b[0mfg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 69\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjac\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfg\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 70\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_value\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfg\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py\u001b[0m in \u001b[0;36m_loss_grad_lbfgs\u001b[0;34m(self, packed_coef_inter, X, y, activations, deltas, coef_grads, intercept_grads)\u001b[0m\n\u001b[1;32m 206\u001b[0m \"\"\"\n\u001b[1;32m 207\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_unpack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpacked_coef_inter\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 208\u001b[0;31m loss, coef_grads, intercept_grads = self._backprop(\n\u001b[0m\u001b[1;32m 209\u001b[0m X, y, activations, deltas, coef_grads, intercept_grads)\n\u001b[1;32m 210\u001b[0m \u001b[0mgrad\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_pack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcoef_grads\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mintercept_grads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py\u001b[0m in \u001b[0;36m_backprop\u001b[0;34m(self, X, y, activations, deltas, coef_grads, intercept_grads)\u001b[0m\n\u001b[1;32m 275\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 276\u001b[0m \u001b[0;31m# Compute gradient for the last layer\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 277\u001b[0;31m self._compute_loss_grad(\n\u001b[0m\u001b[1;32m 278\u001b[0m last, n_samples, activations, deltas, coef_grads, intercept_grads)\n\u001b[1;32m 279\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/sklearn/neural_network/_multilayer_perceptron.py\u001b[0m in \u001b[0;36m_compute_loss_grad\u001b[0;34m(self, layer, n_samples, activations, deltas, coef_grads, intercept_grads)\u001b[0m\n\u001b[1;32m 156\u001b[0m \u001b[0mThis\u001b[0m \u001b[0mfunction\u001b[0m \u001b[0mdoes\u001b[0m \u001b[0mbackpropagation\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mspecified\u001b[0m \u001b[0mone\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 157\u001b[0m \"\"\"\n\u001b[0;32m--> 158\u001b[0;31m coef_grads[layer] = safe_sparse_dot(activations[layer].T,\n\u001b[0m\u001b[1;32m 159\u001b[0m deltas[layer])\n\u001b[1;32m 160\u001b[0m \u001b[0mcoef_grads\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0malpha\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcoefs_\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/sklearn/utils/validation.py\u001b[0m in \u001b[0;36minner_f\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0mextra_args\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mall_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mextra_args\u001b[0m \u001b[0;34m<=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 64\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;31m# extra_args > 0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/usr/local/insa/anaconda/lib/python3.8/site-packages/sklearn/utils/extmath.py\u001b[0m in \u001b[0;36msafe_sparse_dot\u001b[0;34m(a, b, dense_output)\u001b[0m\n\u001b[1;32m 150\u001b[0m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mb\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 151\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 152\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0ma\u001b[0m \u001b[0;34m@\u001b[0m \u001b[0mb\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 153\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 154\u001b[0m if (sparse.issparse(a) and sparse.issparse(b)\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"xtrain, xtest, ytrain, ytest = model_selection.train_test_split(mnist.data, mnist.target,train_size=0.7)\n",
"\n",
"rand_nb_couches_cachees = np.random.randint(low=1, high=10, size=5)\n",
"rand_taille_couches = np.random.randint(low=10, high=300, size=5)\n",
"\n",
"#Liste des tuples utilisés comme arguments pour hidden_layer_sizes\n",
"list_args = []\n",
"for i in range(5):\n",
" list_args += [((rand_taille_couches[i],) * rand_nb_couches_cachees[i])]\n",
"\n",
"list_scores = []\n",
"list_training_times = []\n",
"list_predicting_times = []\n",
"\n",
"solvers = [\"adam\",\"lbfgs\",\"sgd\"]\n",
"\n",
"for i in solvers:\n",
" #Entraîne le classifier\n",
" t1 = round(time.time(),3)\n",
" clf = neural_network.MLPClassifier(random_state=1, max_iter=75, hidden_layer_sizes=(50), verbose=True, solver=i)\n",
" clf.fit(xtrain, ytrain)\n",
" t2 = round(time.time(),3)\n",
" #Prédiction sur le jeu de tests\n",
" pred = clf.predict(xtest)\n",
" t3 = round(time.time(),3)\n",
" # Probabilités des prédictions sur xtest\n",
" pred_proba = clf.predict_proba(xtest)\n",
" # On calcule le score obtenu sur xtest avec les étiquettes ytest\n",
" list_scores += [clf.score(xtest, ytest)]\n",
" list_training_times += [t2-t1]\n",
" list_predicting_times += [t3-t2]\n",
" \n",
"print(\"Liste des scores : \\n\", list_scores)\n",
"print(\"Temps d'entraînement : \\n\", list_training_times)\n",
"print(\"Temps de prédiction : \\n\", list_predicting_times)"
]
},
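{
"cell_type": "markdown",
"id": "f6a7b8c9",
"metadata": {},
"source": [
"Rather than reading the `verbose=True` logs, the fitted model exposes a `loss_curve_` attribute (populated by the `sgd` and `adam` solvers, not by `lbfgs`) that can be plotted directly. A sketch for a classifier trained as above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f6a7b8d0",
"metadata": {},
"outputs": [],
"source": [
"# Training loss per iteration for an adam-trained MLP\n",
"clf = neural_network.MLPClassifier(random_state=1, max_iter=75, hidden_layer_sizes=(50,), solver=\"adam\")\n",
"clf.fit(xtrain, ytrain)\n",
"plt.plot(clf.loss_curve_)\n",
"plt.xlabel(\"iteration\")\n",
"plt.ylabel(\"loss\")\n",
"plt.show()"
]
},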
{
"cell_type": "code",
"execution_count": 38,
"id": "b5c53e81",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Iteration 1, loss = 4.88688537\n",
"Iteration 2, loss = 3.04840280\n",
"Iteration 3, loss = 2.56350828\n",
"Iteration 4, loss = 2.36443117\n",
"Iteration 5, loss = 2.14888342\n",
"Iteration 6, loss = 1.90164455\n",
"Iteration 7, loss = 1.73369800\n",
"Iteration 8, loss = 1.59113644\n",
"Iteration 9, loss = 1.39238775\n",
"Iteration 10, loss = 1.20865984\n",
"Iteration 11, loss = 1.09884529\n",
"Iteration 12, loss = 0.92044718\n",
"Iteration 13, loss = 0.70704742\n",
"Iteration 14, loss = 0.62447704\n",
"Iteration 15, loss = 0.55514874\n",
"Iteration 16, loss = 0.47053810\n",
"Iteration 17, loss = 0.42364989\n",
"Iteration 18, loss = 0.36724990\n",
"Iteration 19, loss = 0.35226683\n",
"Iteration 20, loss = 0.32861833\n",
"Iteration 21, loss = 0.31863052\n",
"Iteration 22, loss = 0.31220412\n",
"Iteration 23, loss = 0.31712321\n",
"Iteration 24, loss = 0.31474418\n",
"Iteration 25, loss = 0.31866966\n",
"Iteration 26, loss = 0.30748726\n",
"Iteration 27, loss = 0.31383517\n",
"Iteration 28, loss = 0.30871978\n",
"Iteration 29, loss = 0.31905501\n",
"Iteration 30, loss = 0.31754223\n",
"Iteration 31, loss = 0.31449971\n",
"Iteration 32, loss = 0.31780184\n",
"Iteration 33, loss = 0.31414077\n",
"Iteration 34, loss = 0.31600866\n",
"Iteration 35, loss = 0.32978061\n",
"Iteration 36, loss = 0.32266249\n",
"Iteration 37, loss = 0.31869693\n",
"Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping.\n",
"Iteration 1, loss = 0.95937919\n",
"Iteration 2, loss = 0.44791973\n",
"Iteration 3, loss = 0.36777372\n",
"Iteration 4, loss = 0.32198104\n",
"Iteration 5, loss = 0.30272435\n",
"Iteration 6, loss = 0.28929073\n",
"Iteration 7, loss = 0.27490557\n",
"Iteration 8, loss = 0.26236297\n",
"Iteration 9, loss = 0.25580665\n",
"Iteration 10, loss = 0.24705695\n",
"Iteration 11, loss = 0.24723904\n",
"Iteration 12, loss = 0.24175405\n",
"Iteration 13, loss = 0.23945997\n",
"Iteration 14, loss = 0.23486309\n",
"Iteration 15, loss = 0.23143015\n",
"Iteration 16, loss = 0.22925494\n",
"Iteration 17, loss = 0.23010859\n",
"Iteration 18, loss = 0.22055626\n",
"Iteration 19, loss = 0.22143318\n",
"Iteration 20, loss = 0.20988591\n",
"Iteration 21, loss = 0.20912183\n",
"Iteration 22, loss = 0.20550806\n",
"Iteration 23, loss = 0.20975678\n",
"Iteration 24, loss = 0.20039015\n",
"Iteration 25, loss = 0.21067734\n",
"Iteration 26, loss = 0.19988693\n",
"Iteration 27, loss = 0.20082604\n",
"Iteration 28, loss = 0.19838493\n",
"Iteration 29, loss = 0.19746292\n",
"Iteration 30, loss = 0.19976580\n",
"Iteration 31, loss = 0.19489894\n",
"Iteration 32, loss = 0.19328144\n",
"Iteration 33, loss = 0.19908609\n",
"Iteration 34, loss = 0.18777037\n",
"Iteration 35, loss = 0.18720331\n",
"Iteration 36, loss = 0.18463362\n",
"Iteration 37, loss = 0.18439839\n",
"Iteration 38, loss = 0.19085391\n",
"Iteration 39, loss = 0.18395803\n",
"Iteration 40, loss = 0.19237815\n",
"Iteration 41, loss = 0.18249873\n",
"Iteration 42, loss = 0.17578331\n",
"Iteration 43, loss = 0.16531940\n",
"Iteration 44, loss = 0.18105539\n",
"Iteration 45, loss = 0.16539823\n",
"Iteration 46, loss = 0.16324249\n",
"Iteration 47, loss = 0.16739006\n",
"Iteration 48, loss = 0.15994799\n",
"Iteration 49, loss = 0.16062727\n",
"Iteration 50, loss = 0.16493342\n",
"Iteration 51, loss = 0.17038885\n",
"Iteration 52, loss = 0.15841169\n",
"Iteration 53, loss = 0.16505302\n",
"Iteration 54, loss = 0.17295392\n",
"Iteration 55, loss = 0.17176379\n",
"Iteration 56, loss = 0.16416708\n",
"Iteration 57, loss = 0.16709714\n",
"Iteration 58, loss = 0.15723652\n",
"Iteration 59, loss = 0.15564438\n",
"Iteration 60, loss = 0.15413186\n",
"Iteration 61, loss = 0.15508527\n",
"Iteration 62, loss = 0.15679250\n",
"Iteration 63, loss = 0.15253406\n",
"Iteration 64, loss = 0.15240647\n",
"Iteration 65, loss = 0.15434625\n",
"Iteration 66, loss = 0.14834313\n",
"Iteration 67, loss = 0.14499677\n",
"Iteration 68, loss = 0.14217665\n",
"Iteration 69, loss = 0.15716250\n",
"Iteration 70, loss = 0.15471665\n",
"Iteration 71, loss = 0.15794043\n",
"Iteration 72, loss = 0.15101312\n",
"Iteration 73, loss = 0.14967007\n",
"Iteration 74, loss = 0.14485682\n",
"Iteration 75, loss = 0.14693767\n",
"Iteration 1, loss = 0.89619348\n",
"Iteration 2, loss = 0.48077835\n",
"Iteration 3, loss = 0.41715072\n",
"Iteration 4, loss = 0.39061043\n",
"Iteration 5, loss = 0.34700812\n",
"Iteration 6, loss = 0.34711338\n",
"Iteration 7, loss = 0.33480878\n",
"Iteration 8, loss = 0.33945602\n",
"Iteration 9, loss = 0.33681982\n",
"Iteration 10, loss = 0.31248847\n",
"Iteration 11, loss = 0.29573950\n",
"Iteration 12, loss = 0.30557642\n",
"Iteration 13, loss = 0.29902732\n",
"Iteration 14, loss = 0.31221515\n",
"Iteration 15, loss = 0.30838314\n",
"Iteration 16, loss = 0.30678478\n",
"Iteration 17, loss = 0.29162416\n",
"Iteration 18, loss = 0.27816296\n",
"Iteration 19, loss = 0.27522940\n",
"Iteration 20, loss = 0.27675764\n",
"Iteration 21, loss = 0.27734335\n",
"Iteration 22, loss = 0.26886521\n",
"Iteration 23, loss = 0.26305899\n",
"Iteration 24, loss = 0.26890329\n",
"Iteration 25, loss = 0.27180262\n",
"Iteration 26, loss = 0.27215833\n",
"Iteration 27, loss = 0.26248364\n",
"Iteration 28, loss = 0.25917752\n",
"Iteration 29, loss = 0.23928182\n",
"Iteration 30, loss = 0.24394563\n",
"Iteration 31, loss = 0.24032895\n",
"Iteration 32, loss = 0.23784356\n",
"Iteration 33, loss = 0.24294224\n",
"Iteration 34, loss = 0.23827887\n",
"Iteration 35, loss = 0.23418048\n",
"Iteration 36, loss = 0.23906801\n",
"Iteration 37, loss = 0.24267875\n",
"Iteration 38, loss = 0.24964552\n",
"Iteration 39, loss = 0.24229798\n",
"Iteration 40, loss = 0.22651254\n",
"Iteration 41, loss = 0.22552689\n",
"Iteration 42, loss = 0.23707989\n",
"Iteration 43, loss = 0.23354218\n",
"Iteration 44, loss = 0.22674619\n",
"Iteration 45, loss = 0.23525498\n",
"Iteration 46, loss = 0.22263357\n",
"Iteration 47, loss = 0.22527906\n",
"Iteration 48, loss = 0.23024557\n",
"Iteration 49, loss = 0.22602991\n",
"Iteration 50, loss = 0.22659891\n",
"Iteration 51, loss = 0.21722136\n",
"Iteration 52, loss = 0.21258638\n",
"Iteration 53, loss = 0.21044253\n",
"Iteration 54, loss = 0.22010160\n",
"Iteration 55, loss = 0.21613578\n",
"Iteration 56, loss = 0.20968866\n",
"Iteration 57, loss = 0.20827426\n",
"Iteration 58, loss = 0.21124936\n",
"Iteration 59, loss = 0.21137546\n",
"Iteration 60, loss = 0.21501026\n",
"Iteration 61, loss = 0.21448958\n",
"Iteration 62, loss = 0.21653318\n",
"Iteration 63, loss = 0.20826436\n",
"Iteration 64, loss = 0.20087997\n",
"Iteration 65, loss = 0.19877037\n",
"Iteration 66, loss = 0.20793665\n",
"Iteration 67, loss = 0.21077591\n",
"Iteration 68, loss = 0.21393003\n",
"Iteration 69, loss = 0.20582821\n",
"Iteration 70, loss = 0.20498929\n",
"Iteration 71, loss = 0.20603869\n",
"Iteration 72, loss = 0.20962657\n",
"Iteration 73, loss = 0.20107920\n",
"Iteration 74, loss = 0.19554756\n",
"Iteration 75, loss = 0.20031178\n",
"Iteration 1, loss = 3.47809654\n",
"Iteration 2, loss = 0.91279497\n",
"Iteration 3, loss = 0.68167643\n",
"Iteration 4, loss = 0.54546524\n",
"Iteration 5, loss = 0.45371888\n",
"Iteration 6, loss = 0.39503527\n",
"Iteration 7, loss = 0.35098651\n",
"Iteration 8, loss = 0.31431299\n",
"Iteration 9, loss = 0.28375866\n",
"Iteration 10, loss = 0.26651705\n",
"Iteration 11, loss = 0.24939767\n",
"Iteration 12, loss = 0.23261528\n",
"Iteration 13, loss = 0.21939760\n",
"Iteration 14, loss = 0.20823775\n",
"Iteration 15, loss = 0.19761409\n",
"Iteration 16, loss = 0.19033408\n",
"Iteration 17, loss = 0.18098604\n",
"Iteration 18, loss = 0.17479000\n",
"Iteration 19, loss = 0.16182493\n",
"Iteration 20, loss = 0.15307401\n",
"Iteration 21, loss = 0.15187916\n",
"Iteration 22, loss = 0.14526083\n",
"Iteration 23, loss = 0.14572837\n",
"Iteration 24, loss = 0.14223120\n",
"Iteration 25, loss = 0.13437422\n",
"Iteration 26, loss = 0.13072946\n",
"Iteration 27, loss = 0.12065205\n",
"Iteration 28, loss = 0.12407342\n",
"Iteration 29, loss = 0.12036209\n",
"Iteration 30, loss = 0.11155118\n",
"Iteration 31, loss = 0.11124663\n",
"Iteration 32, loss = 0.10934849\n",
"Iteration 33, loss = 0.10723191\n",
"Iteration 34, loss = 0.10550853\n",
"Iteration 35, loss = 0.10685618\n",
"Iteration 36, loss = 0.10030136\n",
"Iteration 37, loss = 0.09769944\n",
"Iteration 38, loss = 0.09309497\n",
"Iteration 39, loss = 0.09187284\n",
"Iteration 40, loss = 0.09463036\n",
"Iteration 41, loss = 0.09250370\n",
"Iteration 42, loss = 0.09190528\n",
"Iteration 43, loss = 0.08604383\n",
"Iteration 44, loss = 0.08550408\n",
"Iteration 45, loss = 0.08431603\n",
"Iteration 46, loss = 0.08227780\n",
"Iteration 47, loss = 0.08228648\n",
"Iteration 48, loss = 0.07966158\n",
"Iteration 49, loss = 0.08330478\n",
"Iteration 50, loss = 0.07716251\n",
"Iteration 51, loss = 0.07390948\n",
"Iteration 52, loss = 0.07168111\n",
"Iteration 53, loss = 0.07232984\n",
"Iteration 54, loss = 0.06831980\n",
"Iteration 55, loss = 0.07117863\n",
"Iteration 56, loss = 0.07202741\n",
"Iteration 57, loss = 0.07072120\n",
"Iteration 58, loss = 0.06615292\n",
"Iteration 59, loss = 0.06148134\n",
"Iteration 60, loss = 0.06262963\n",
"Iteration 61, loss = 0.07239923\n",
"Iteration 62, loss = 0.07305985\n",
"Iteration 63, loss = 0.06219170\n",
"Iteration 64, loss = 0.06405476\n",
"Iteration 65, loss = 0.05723135\n",
"Iteration 66, loss = 0.06224711\n",
"Iteration 67, loss = 0.05995193\n",
"Iteration 68, loss = 0.06218459\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Iteration 69, loss = 0.05925131\n",
"Iteration 70, loss = 0.05897166\n",
"Iteration 71, loss = 0.06323861\n",
"Iteration 72, loss = 0.05303486\n",
"Iteration 73, loss = 0.05154365\n",
"Iteration 74, loss = 0.05105144\n",
"Iteration 75, loss = 0.05220767\n",
"Liste des scores : \n",
" [0.8988571428571429, 0.9483333333333334, 0.9317142857142857, 0.949047619047619]\n",
"Temps d'entraînement : \n",
" [7.937000036239624, 22.261000156402588, 21.019999980926514, 23.22000002861023]\n",
"Temps de prédiction : \n",
" [0.019999980926513672, 0.037999868392944336, 0.026000022888183594, 0.03099989891052246]\n"
]
}
],
"source": [
"xtrain, xtest, ytrain, ytest = model_selection.train_test_split(mnist.data, mnist.target,train_size=0.7)\n",
"\n",
"rand_nb_couches_cachees = np.random.randint(low=1, high=10, size=5)\n",
"rand_taille_couches = np.random.randint(low=10, high=300, size=5)\n",
"\n",
"#Liste des tuples utilisés comme arguments pour hidden_layer_sizes\n",
"list_args = []\n",
"for i in range(5):\n",
" list_args += [((rand_taille_couches[i],) * rand_nb_couches_cachees[i])]\n",
"\n",
"list_scores = []\n",
"list_training_times = []\n",
"list_predicting_times = []\n",
"\n",
"activations = [\"identity\", \"logistic\", \"tanh\", \"relu\"]\n",
"\n",
"for i in activations:\n",
" #Entraîne le classifier\n",
" t1 = round(time.time(),3)\n",
" clf = neural_network.MLPClassifier(random_state=1, max_iter=75, hidden_layer_sizes=(50), verbose=True, activation=i)\n",
" clf.fit(xtrain, ytrain)\n",
" t2 = round(time.time(),3)\n",
" #Prédiction sur le jeu de tests\n",
" pred = clf.predict(xtest)\n",
" t3 = round(time.time(),3)\n",
" # Probabilités des prédictions sur xtest\n",
" pred_proba = clf.predict_proba(xtest)\n",
" # On calcule le score obtenu sur xtest avec les étiquettes ytest\n",
" list_scores += [clf.score(xtest, ytest)]\n",
" list_training_times += [t2-t1]\n",
" list_predicting_times += [t3-t2]\n",
" \n",
"print(\"Liste des scores : \\n\", list_scores)\n",
"print(\"Temps d'entraînement : \\n\", list_training_times)\n",
"print(\"Temps de prédiction : \\n\", list_predicting_times)"
]
},
{
"cell_type": "code",
"execution_count": 42,
"id": "df66b8c2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Iteration 1, loss = 3.49485971\n",
"Iteration 2, loss = 0.84186877\n",
"Iteration 3, loss = 0.59897763\n",
"Iteration 4, loss = 0.48222816\n",
"Iteration 5, loss = 0.40910038\n",
"Iteration 6, loss = 0.35698485\n",
"Iteration 7, loss = 0.31890082\n",
"Iteration 8, loss = 0.28702521\n",
"Iteration 9, loss = 0.26178729\n",
"Iteration 10, loss = 0.24501095\n",
"Iteration 11, loss = 0.22980867\n",
"Iteration 12, loss = 0.21697680\n",
"Iteration 13, loss = 0.20317091\n",
"Iteration 14, loss = 0.19518376\n",
"Iteration 15, loss = 0.19095851\n",
"Iteration 16, loss = 0.18390372\n",
"Iteration 17, loss = 0.17403983\n",
"Iteration 18, loss = 0.17021642\n",
"Iteration 19, loss = 0.16806605\n",
"Iteration 20, loss = 0.16107239\n",
"Iteration 21, loss = 0.15432827\n",
"Iteration 22, loss = 0.15130544\n",
"Iteration 23, loss = 0.15175568\n",
"Iteration 24, loss = 0.13813033\n",
"Iteration 25, loss = 0.13209256\n",
"Iteration 26, loss = 0.13215236\n",
"Iteration 27, loss = 0.13028761\n",
"Iteration 28, loss = 0.12767990\n",
"Iteration 29, loss = 0.12595089\n",
"Iteration 30, loss = 0.12546783\n",
"Iteration 31, loss = 0.11411925\n",
"Iteration 32, loss = 0.11613798\n",
"Iteration 33, loss = 0.10906183\n",
"Iteration 34, loss = 0.11111725\n",
"Iteration 35, loss = 0.10383517\n",
"Iteration 36, loss = 0.10106130\n",
"Iteration 37, loss = 0.10252372\n",
"Iteration 38, loss = 0.09340730\n",
"Iteration 39, loss = 0.09884437\n",
"Iteration 40, loss = 0.09069168\n",
"Iteration 41, loss = 0.09000048\n",
"Iteration 42, loss = 0.09328161\n",
"Iteration 43, loss = 0.09011111\n",
"Iteration 44, loss = 0.09106793\n",
"Iteration 45, loss = 0.08884656\n",
"Iteration 46, loss = 0.08643341\n",
"Iteration 47, loss = 0.08025898\n",
"Iteration 48, loss = 0.08278608\n",
"Iteration 49, loss = 0.07933054\n",
"Iteration 50, loss = 0.07743841\n",
"Iteration 51, loss = 0.07708842\n",
"Iteration 52, loss = 0.07644628\n",
"Iteration 53, loss = 0.07182392\n",
"Iteration 54, loss = 0.07076787\n",
"Iteration 55, loss = 0.07327512\n",
"Iteration 56, loss = 0.07177331\n",
"Iteration 57, loss = 0.07444694\n",
"Iteration 58, loss = 0.07211621\n",
"Iteration 59, loss = 0.06254946\n",
"Iteration 60, loss = 0.06214756\n",
"Iteration 61, loss = 0.06637697\n",
"Iteration 62, loss = 0.07098458\n",
"Iteration 63, loss = 0.06547165\n",
"Iteration 64, loss = 0.06557786\n",
"Iteration 65, loss = 0.06298976\n",
"Iteration 66, loss = 0.06114154\n",
"Iteration 67, loss = 0.06321407\n",
"Iteration 68, loss = 0.06306800\n",
"Iteration 69, loss = 0.06274164\n",
"Iteration 70, loss = 0.05876132\n",
"Iteration 71, loss = 0.05988191\n",
"Iteration 72, loss = 0.06795550\n",
"Iteration 73, loss = 0.05913711\n",
"Iteration 74, loss = 0.05645632\n",
"Iteration 75, loss = 0.05576284\n",
"Iteration 1, loss = 3.49948441\n",
"Iteration 2, loss = 0.84673524\n",
"Iteration 3, loss = 0.60024616\n",
"Iteration 4, loss = 0.48694438\n",
"Iteration 5, loss = 0.41041706\n",
"Iteration 6, loss = 0.35813110\n",
"Iteration 7, loss = 0.31603210\n",
"Iteration 8, loss = 0.28661892\n",
"Iteration 9, loss = 0.26614982\n",
"Iteration 10, loss = 0.24810718\n",
"Iteration 11, loss = 0.23230498\n",
"Iteration 12, loss = 0.22139779\n",
"Iteration 13, loss = 0.21204690\n",
"Iteration 14, loss = 0.20259392\n",
"Iteration 15, loss = 0.19255089\n",
"Iteration 16, loss = 0.19163753\n",
"Iteration 17, loss = 0.17475712\n",
"Iteration 18, loss = 0.16651520\n",
"Iteration 19, loss = 0.16337473\n",
"Iteration 20, loss = 0.15942281\n",
"Iteration 21, loss = 0.15221730\n",
"Iteration 22, loss = 0.14397948\n",
"Iteration 23, loss = 0.14921710\n",
"Iteration 24, loss = 0.14491095\n",
"Iteration 25, loss = 0.13580116\n",
"Iteration 26, loss = 0.13222987\n",
"Iteration 27, loss = 0.13057577\n",
"Iteration 28, loss = 0.12404713\n",
"Iteration 29, loss = 0.12884492\n",
"Iteration 30, loss = 0.11925530\n",
"Iteration 31, loss = 0.11977905\n",
"Iteration 32, loss = 0.11759678\n",
"Iteration 33, loss = 0.11766443\n",
"Iteration 34, loss = 0.10734001\n",
"Iteration 35, loss = 0.10673595\n",
"Iteration 36, loss = 0.10629010\n",
"Iteration 37, loss = 0.10239491\n",
"Iteration 38, loss = 0.10066712\n",
"Iteration 39, loss = 0.09637851\n",
"Iteration 40, loss = 0.09481360\n",
"Iteration 41, loss = 0.09561918\n",
"Iteration 42, loss = 0.09463273\n",
"Iteration 43, loss = 0.09342184\n",
"Iteration 44, loss = 0.09111995\n",
"Iteration 45, loss = 0.08930570\n",
"Iteration 46, loss = 0.08627143\n",
"Iteration 47, loss = 0.08449281\n",
"Iteration 48, loss = 0.08643639\n",
"Iteration 49, loss = 0.08224510\n",
"Iteration 50, loss = 0.08548878\n",
"Iteration 51, loss = 0.08100746\n",
"Iteration 52, loss = 0.08002942\n",
"Iteration 53, loss = 0.08030931\n",
"Iteration 54, loss = 0.08458616\n",
"Iteration 55, loss = 0.07849234\n",
"Iteration 56, loss = 0.07799018\n",
"Iteration 57, loss = 0.07664604\n",
"Iteration 58, loss = 0.07866778\n",
"Iteration 59, loss = 0.07482195\n",
"Iteration 60, loss = 0.07181201\n",
"Iteration 61, loss = 0.06865937\n",
"Iteration 62, loss = 0.07470610\n",
"Iteration 63, loss = 0.07552818\n",
"Iteration 64, loss = 0.07192860\n",
"Iteration 65, loss = 0.07369563\n",
"Iteration 66, loss = 0.06718254\n",
"Iteration 67, loss = 0.06897553\n",
"Iteration 68, loss = 0.06984935\n",
"Iteration 69, loss = 0.06894634\n",
"Iteration 70, loss = 0.06822296\n",
"Iteration 71, loss = 0.06576369\n",
"Iteration 72, loss = 0.06929245\n",
"Iteration 73, loss = 0.06254714\n",
"Iteration 74, loss = 0.07105893\n",
"Iteration 75, loss = 0.06957396\n",
"Iteration 1, loss = 3.51893328\n",
"Iteration 2, loss = 0.86066371\n",
"Iteration 3, loss = 0.61818964\n",
"Iteration 4, loss = 0.50414029\n",
"Iteration 5, loss = 0.42877213\n",
"Iteration 6, loss = 0.38264522\n",
"Iteration 7, loss = 0.33748996\n",
"Iteration 8, loss = 0.30974393\n",
"Iteration 9, loss = 0.28379590\n",
"Iteration 10, loss = 0.26332183\n",
"Iteration 11, loss = 0.25322333\n",
"Iteration 12, loss = 0.23968978\n",
"Iteration 13, loss = 0.22497451\n",
"Iteration 14, loss = 0.21612499\n",
"Iteration 15, loss = 0.20647634\n",
"Iteration 16, loss = 0.20326088\n",
"Iteration 17, loss = 0.18922059\n",
"Iteration 18, loss = 0.18675377\n",
"Iteration 19, loss = 0.18706991\n",
"Iteration 20, loss = 0.17194898\n",
"Iteration 21, loss = 0.16905640\n",
"Iteration 22, loss = 0.16804559\n",
"Iteration 23, loss = 0.16351214\n",
"Iteration 24, loss = 0.15747106\n",
"Iteration 25, loss = 0.15868723\n",
"Iteration 26, loss = 0.14956660\n",
"Iteration 27, loss = 0.14730607\n",
"Iteration 28, loss = 0.14108464\n",
"Iteration 29, loss = 0.14041014\n",
"Iteration 30, loss = 0.13313241\n",
"Iteration 31, loss = 0.12934051\n",
"Iteration 32, loss = 0.13037847\n",
"Iteration 33, loss = 0.12998036\n",
"Iteration 34, loss = 0.12571778\n",
"Iteration 35, loss = 0.12287227\n",
"Iteration 36, loss = 0.12213630\n",
"Iteration 37, loss = 0.11456102\n",
"Iteration 38, loss = 0.11486859\n",
"Iteration 39, loss = 0.10884439\n",
"Iteration 40, loss = 0.10584151\n",
"Iteration 41, loss = 0.10515959\n",
"Iteration 42, loss = 0.10862851\n",
"Iteration 43, loss = 0.11433447\n",
"Iteration 44, loss = 0.10531112\n",
"Iteration 45, loss = 0.10618465\n",
"Iteration 46, loss = 0.10285161\n",
"Iteration 47, loss = 0.10438262\n",
"Iteration 48, loss = 0.09517767\n",
"Iteration 49, loss = 0.09412366\n",
"Iteration 50, loss = 0.09811683\n",
"Iteration 51, loss = 0.09538758\n",
"Iteration 52, loss = 0.09533109\n",
"Iteration 53, loss = 0.09837649\n",
"Iteration 54, loss = 0.09463257\n",
"Iteration 55, loss = 0.09329445\n",
"Iteration 56, loss = 0.09343719\n",
"Iteration 57, loss = 0.09133918\n",
"Iteration 58, loss = 0.09439535\n",
"Iteration 59, loss = 0.08702370\n",
"Iteration 60, loss = 0.08669759\n",
"Iteration 61, loss = 0.08604687\n",
"Iteration 62, loss = 0.08828700\n",
"Iteration 63, loss = 0.08853932\n",
"Iteration 64, loss = 0.08846686\n",
"Iteration 65, loss = 0.08845045\n",
"Iteration 66, loss = 0.08993938\n",
"Iteration 67, loss = 0.08387432\n",
"Iteration 68, loss = 0.08530990\n",
"Iteration 69, loss = 0.08472773\n",
"Iteration 70, loss = 0.09795855\n",
"Iteration 71, loss = 0.08682862\n",
"Iteration 72, loss = 0.08262468\n",
"Iteration 73, loss = 0.08216740\n",
"Iteration 74, loss = 0.08359116\n",
"Iteration 75, loss = 0.08916283\n",
"Iteration 1, loss = 17.92679923\n",
"Iteration 2, loss = 6.98192289\n",
"Iteration 3, loss = 3.47057913\n",
"Iteration 4, loss = 1.81425411\n",
"Iteration 5, loss = 1.02505844\n",
"Iteration 6, loss = 0.64350326\n",
"Iteration 7, loss = 0.45773514\n",
"Iteration 8, loss = 0.37767504\n",
"Iteration 9, loss = 0.33712571\n",
"Iteration 10, loss = 0.31824457\n",
"Iteration 11, loss = 0.31521014\n",
"Iteration 12, loss = 0.31642528\n",
"Iteration 13, loss = 0.32390803\n",
"Iteration 14, loss = 0.31732272\n",
"Iteration 15, loss = 0.33332001\n",
"Iteration 16, loss = 0.32749975\n",
"Iteration 17, loss = 0.33869750\n",
"Iteration 18, loss = 0.33192320\n",
"Iteration 19, loss = 0.34575143\n",
"Iteration 20, loss = 0.34105948\n",
"Iteration 21, loss = 0.34023840\n",
"Iteration 22, loss = 0.34716041\n",
"Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping.\n",
"Liste des scores : \n",
" [0.9471428571428572, 0.9462857142857143, 0.9495238095238095, 0.9497142857142857]\n",
"Temps d'entraînement : \n",
" [16.488999843597412, 21.437000036239624, 18.611000061035156, 5.442000150680542]\n",
"Temps de prédiction : \n",
" [0.019999980926513672, 0.020999908447265625, 0.021999835968017578, 0.023000001907348633]\n"
]
}
],
"source": [
"xtrain, xtest, ytrain, ytest = model_selection.train_test_split(mnist.data, mnist.target,train_size=0.7)\n",
"\n",
"rand_nb_couches_cachees = np.random.randint(low=1, high=10, size=5)\n",
"rand_taille_couches = np.random.randint(low=10, high=300, size=5)\n",
"\n",
"#Liste des tuples utilisés comme arguments pour hidden_layer_sizes\n",
"list_args = []\n",
"for i in range(5):\n",
" list_args += [((rand_taille_couches[i],) * rand_nb_couches_cachees[i])]\n",
"\n",
"list_scores = []\n",
"list_training_times = []\n",
"list_predicting_times = []\n",
"\n",
"alphas = [0.0000001, 0.0001, 0.1,100]\n",
"\n",
"for i in alphas:\n",
" #Entraîne le classifier\n",
" t1 = round(time.time(),3)\n",
" clf = neural_network.MLPClassifier(random_state=1, max_iter=75, hidden_layer_sizes=(50), verbose=True, alpha=i)\n",
" clf.fit(xtrain, ytrain)\n",
" t2 = round(time.time(),3)\n",
" #Prédiction sur le jeu de tests\n",
" pred = clf.predict(xtest)\n",
" t3 = round(time.time(),3)\n",
" # Probabilités des prédictions sur xtest\n",
" pred_proba = clf.predict_proba(xtest)\n",
" # On calcule le score obtenu sur xtest avec les étiquettes ytest\n",
" list_scores += [clf.score(xtest, ytest)]\n",
" list_training_times += [t2-t1]\n",
" list_predicting_times += [t3-t2]\n",
" \n",
"print(\"Liste des scores : \\n\", list_scores)\n",
"print(\"Temps d'entraînement : \\n\", list_training_times)\n",
"print(\"Temps de prédiction : \\n\", list_predicting_times)"
]
},
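{
"cell_type": "markdown",
"id": "a7b8c9d0",
"metadata": {},
"source": [
"Since the `alpha` values span nine orders of magnitude, a log-scale plot shows their effect more clearly than the printed list. A sketch reusing `alphas` and `list_scores` from the cell above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a7b8c9d1",
"metadata": {},
"outputs": [],
"source": [
"# Accuracy as a function of the L2 penalty, on a logarithmic axis\n",
"plt.semilogx(alphas, list_scores, marker=\"o\")\n",
"plt.xlabel(\"alpha\")\n",
"plt.ylabel(\"score\")\n",
"plt.show()"
]
},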
{
"cell_type": "code",
"execution_count": 43,
"id": "abb0fcf1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Iteration 1, loss = 0.57337126\n",
"Iteration 2, loss = 0.20280950\n",
"Iteration 3, loss = 0.14216666\n",
"Iteration 4, loss = 0.11681260\n",
"Iteration 5, loss = 0.10720796\n",
"Iteration 6, loss = 0.09074891\n",
"Iteration 7, loss = 0.08103471\n",
"Iteration 8, loss = 0.06976683\n",
"Iteration 9, loss = 0.06197761\n",
"Iteration 10, loss = 0.05580839\n",
"Iteration 11, loss = 0.05015979\n",
"Iteration 12, loss = 0.05246334\n",
"Iteration 13, loss = 0.04689268\n",
"Iteration 14, loss = 0.04248118\n",
"Iteration 15, loss = 0.04141969\n",
"Iteration 16, loss = 0.04317236\n",
"Iteration 17, loss = 0.03933356\n",
"Iteration 18, loss = 0.03442768\n",
"Iteration 19, loss = 0.03741615\n",
"Iteration 20, loss = 0.04165660\n",
"Iteration 21, loss = 0.03127276\n",
"Iteration 22, loss = 0.03138382\n",
"Iteration 23, loss = 0.03052995\n",
"Iteration 24, loss = 0.02485801\n",
"Iteration 25, loss = 0.03525228\n",
"Iteration 26, loss = 0.02694870\n",
"Iteration 27, loss = 0.02967749\n",
"Iteration 28, loss = 0.04004878\n",
"Iteration 29, loss = 0.03521499\n",
"Iteration 30, loss = 0.02014281\n",
"Iteration 31, loss = 0.02504718\n",
"Iteration 32, loss = 0.02709138\n",
"Iteration 33, loss = 0.02784363\n",
"Iteration 34, loss = 0.02324318\n",
"Iteration 35, loss = 0.02209150\n",
"Iteration 36, loss = 0.01978654\n",
"Iteration 37, loss = 0.02716624\n",
"Iteration 38, loss = 0.02699659\n",
"Iteration 39, loss = 0.02791633\n",
"Iteration 40, loss = 0.02506787\n",
"Iteration 41, loss = 0.02350482\n",
"Iteration 42, loss = 0.01585995\n",
"Iteration 43, loss = 0.02331411\n",
"Iteration 44, loss = 0.01674166\n",
"Iteration 45, loss = 0.01545800\n",
"Iteration 46, loss = 0.02067161\n",
"Iteration 47, loss = 0.02141465\n",
"Iteration 48, loss = 0.02602266\n",
"Iteration 49, loss = 0.01699863\n",
"Iteration 50, loss = 0.01920616\n",
"Iteration 51, loss = 0.01579502\n",
"Iteration 52, loss = 0.02283729\n",
"Iteration 53, loss = 0.01212422\n",
"Iteration 54, loss = 0.01690685\n",
"Iteration 55, loss = 0.02406952\n",
"Iteration 56, loss = 0.02123840\n",
"Iteration 57, loss = 0.01557985\n",
"Iteration 58, loss = 0.01085758\n",
"Iteration 59, loss = 0.01676534\n",
"Iteration 60, loss = 0.01854105\n",
"Iteration 61, loss = 0.01782373\n",
"Iteration 62, loss = 0.01248756\n",
"Iteration 63, loss = 0.01683467\n",
"Iteration 64, loss = 0.02100531\n",
"Iteration 65, loss = 0.01945957\n",
"Iteration 66, loss = 0.01885778\n",
"Iteration 67, loss = 0.01431520\n",
"Iteration 68, loss = 0.01208094\n",
"Iteration 69, loss = 0.01495458\n",
"Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping.\n",
"Liste des scores : \n",
" [0.9715714285714285]\n",
"Temps d'entraînement : \n",
" [99.99899983406067]\n",
"Temps de prédiction : \n",
" [0.13199996948242188]\n"
]
}
],
"source": [
"# xtrain data set d'entraînement et ytrain étiquettes de xtrain\n",
"# xtest dataset de prédiction et ytest étiquettes de xtest\n",
"xtrain, xtest, ytrain, ytest = model_selection.train_test_split(mnist.data, mnist.target,train_size=0.7)\n",
"\n",
"list_scores = []\n",
"list_training_times = []\n",
"list_predicting_times = []\n",
"\n",
"#Entraîne le classifier\n",
"t1 = round(time.time(),3)\n",
"clf = neural_network.MLPClassifier(random_state=1, max_iter=300, hidden_layer_sizes=((85,)*15), solver = \"adam\", activation = \"relu\", verbose=True)\n",
"clf.fit(xtrain, ytrain)\n",
"t2 = round(time.time(),3)\n",
"#Prédiction sur le jeu de tests\n",
"pred = clf.predict(xtest)\n",
"t3 = round(time.time(),3)\n",
"# Probabilités des prédictions sur xtest\n",
"pred_proba = clf.predict_proba(xtest)\n",
"# On calcule le score obtenu sur xtest avec les étiquettes ytest\n",
"list_scores += [clf.score(xtest, ytest)]\n",
"list_training_times += [t2-t1]\n",
"list_predicting_times += [t3-t2]\n",
" \n",
"print(\"Liste des scores : \\n\", list_scores)\n",
"print(\"Temps d'entraînement : \\n\", list_training_times)\n",
"print(\"Temps de prédiction : \\n\", list_predicting_times)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}