Data architecture changes
Oiseau.ipynb (139 changed lines)
@@ -46,7 +46,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 1,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -81,9 +81,31 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 5,
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"[name: \"/device:CPU:0\"\n",
+"device_type: \"CPU\"\n",
+"memory_limit: 268435456\n",
+"locality {\n",
+"}\n",
+"incarnation: 6212331521511793857\n",
+"]\n"
+]
+},
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"2021-12-22 20:34:12.391220: I tensorflow/core/platform/cpu_feature_guard.cc:145] This TensorFlow binary is optimized with Intel(R) MKL-DNN to use the following CPU instructions in performance critical operations: SSE4.1 SSE4.2\n",
+"To enable them in non-MKL-DNN operations, rebuild TensorFlow with the appropriate compiler flags.\n"
+]
+}
+],
 "source": [
 "from tensorflow.python.client import device_lib\n",
 "print(device_lib.list_local_devices())"
@@ -91,9 +113,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 6,
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"CPU\n"
+]
+}
+],
 "source": [
 "MODE = \"GPU\" if \"GPU\" in [k.device_type for k in device_lib.list_local_devices()] else \"CPU\"\n",
 "print(MODE)"
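Note on the two cells above: the refreshed outputs record a CPU-only run (the stderr line is just TensorFlow's MKL-DNN build notice), and MODE is derived from device_lib.list_local_devices(). As a minimal sketch only, assuming a TensorFlow 2.x install, the same check can be done through the public tf.config API instead of tensorflow.python.client:

```python
# Sketch: equivalent GPU/CPU check via tf.config (assumes TensorFlow 2.x).
import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")  # [] on a CPU-only machine, as in the output above
MODE = "GPU" if gpus else "CPU"
print(MODE)  # prints "CPU" here, matching the recorded output
```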
@@ -132,25 +162,24 @@
 "\n",
 "```\n",
 "data_dir\n",
-"└───sample/\n",
-"│ └───train/\n",
-"│ │ └───absence/\n",
-"│ │ │ │ absence_t_0001.jpg\n",
-"│ │ │ │ absence_t_0002.jpg\n",
-"│ │ │ │ ...\n",
-"│ │ └───presence/\n",
-"│ │ │ │ presence_t_0001.jpg\n",
-"│ │ │ │ presence_t_0002.jpg\n",
-"│ │ │ │ ...\n",
-"│ └───validation/\n",
-"│ │ └───absence/\n",
-"│ │ │ │ absence_v_0001.jpg\n",
-"│ │ │ │ absence_v_0002.jpg\n",
-"│ │ │ │ ...\n",
-"│ │ └───presence/\n",
-"│ │ │ │ presence_v_0001.jpg\n",
-"│ │ │ │ presence_v_0002.jpg\n",
-"│ │ │ │ ...\n",
+"└───train/\n",
+"│ └───absence/\n",
+"│ │ │ absence_t_0001.jpg\n",
+"│ │ │ absence_t_0002.jpg\n",
+"│ │ │ ...\n",
+"│ └───presence/\n",
+"│ │ │ presence_t_0001.jpg\n",
+"│ │ │ presence_t_0002.jpg\n",
+"│ │ │ ...\n",
+"└───validation/\n",
+"│ └───absence/\n",
+"│ │ │ absence_v_0001.jpg\n",
+"│ │ │ absence_v_0002.jpg\n",
+"│ │ │ ...\n",
+"│ └───presence/\n",
+"│ │ │ presence_v_0001.jpg\n",
+"│ │ │ presence_v_0002.jpg\n",
+"│ │ │ ...\n",
 "```"
 ]
 },
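Note: this markdown cell is the heart of the change. The intermediate sample/ level disappears, so the class folders absence/ and presence/ now sit directly under data_dir/train and data_dir/validation, and Keras' flow_from_directory infers the two classes from those folder names, which is what the generator cells further down rely on. A small sketch of how the new layout is read, assuming the notebook's kpi alias is Keras' image preprocessing module (as the kpi.ImageDataGenerator calls below suggest) and using placeholder image and batch sizes:

```python
# Sketch of reading the new layout; image size and batch size are placeholders,
# not values taken from the notebook.
from tensorflow.keras.preprocessing import image as kpi

datagen = kpi.ImageDataGenerator(rescale=1. / 255)
train_generator = datagen.flow_from_directory(
    "data/train/",            # contains the absence/ and presence/ subfolders
    target_size=(150, 150),   # placeholder for (img_width, img_height)
    batch_size=16,            # placeholder for batch_size
    class_mode="binary")      # two class folders -> 0/1 labels

print(train_generator.class_indices)  # {'absence': 0, 'presence': 1} (alphabetical order)
```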
@@ -158,23 +187,19 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Parameter"
+"### Reading the data"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": 7,
 "metadata": {},
 "outputs": [],
 "source": [
 "data_dir = 'data/' # data path\n",
 "\n",
 "# Sample directory path\n",
-"# That way we can use this algorithm with an other sample if we have mire pictures in the future.\n",
-"\n",
-"N_train = 160\n",
-"N_val = 40\n",
-"data_dir_sub = data_dir+'sample_%d_Ntrain_%d_Nval' %(N_train, N_val)"
+"# That way we can use this algorithm with an other sample if we have mire pictures in the future.\n"
 ]
 },
 {
@@ -188,23 +213,11 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"ename": "NameError",
-"evalue": "name 'kpi' is not defined",
-"output_type": "error",
-"traceback": [
-"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
-"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
-"\u001b[0;32m/var/folders/83/bqq_97cs083fpgkn46bd5k5r0000gn/T/ipykernel_61636/3158407634.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mimg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkpi\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_img\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata_dir_sub\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;34m'/train/presence/presence_t_0001.jpg'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mimg\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
-"\u001b[0;31mNameError\u001b[0m: name 'kpi' is not defined"
-]
-}
-],
+"outputs": [],
 "source": [
-"img = kpi.load_img(data_dir_sub+'/train/presence/presence_t_0001.jpg') \n",
+"img = kpi.load_img(data_dir+'/train/presence/presence_t_0001.jpg') \n",
 "img"
 ]
 },
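Note: the output stripped from this cell was a NameError raised because kpi had not been defined when the cell was run. Judging from the kpi.load_img, kpi.img_to_array and kpi.ImageDataGenerator calls elsewhere in the diff, kpi is presumably Keras' image preprocessing module; a hedged reconstruction of the import this cell depends on:

```python
# Assumed import behind the notebook's kpi alias (not visible in this diff).
from tensorflow.keras.preprocessing import image as kpi

data_dir = 'data/'  # as defined in the parameter cell above
img = kpi.load_img(data_dir + 'train/presence/presence_t_0001.jpg')
img  # a PIL image, rendered inline as the cell's last expression
```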
@@ -281,15 +294,15 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"img = kpi.load_img(data_dir_sub+\"/train/presence/presence_t_0001.jpg\")\n",
+"img = kpi.load_img(data_dir+\"/train/presence/presence_t_0001.jpg\")\n",
 "x = kpi.img_to_array(img) \n",
 "x_ = np.expand_dims(x, axis=0)\n",
 "\n",
-"if not(os.path.isdir(data_dir_sub+\"/preprocessing_example\")):\n",
-" os.mkdir(data_dir_sub+\"/preprocessing_example\")\n",
+"if not(os.path.isdir(data_dir+\"/preprocessing_example\")):\n",
+" os.mkdir(data_dir+\"/preprocessing_example\")\n",
 "\n",
 " i = 0\n",
-" for batch in datagen.flow(x_, batch_size=1,save_to_dir=data_dir_sub+\"/preprocessing_example\", save_prefix='bird', save_format='jpg'):\n",
+" for batch in datagen.flow(x_, batch_size=1,save_to_dir=data_dir+\"/preprocessing_example\", save_prefix='bird', save_format='jpg'):\n",
 " i += 1\n",
 " if i > 7:\n",
 " break "
@@ -330,8 +343,8 @@
 ],
 "source": [
 "X_list=[]\n",
-"for f in os.listdir(data_dir_sub+\"/preprocessing_example\"):\n",
-" X_list.append(kpi.img_to_array(kpi.load_img(data_dir_sub+\"/preprocessing_example/\"+f)))\n",
+"for f in os.listdir(data_dir+\"/preprocessing_example\"):\n",
+" X_list.append(kpi.img_to_array(kpi.load_img(data_dir+\"/preprocessing_example/\"+f)))\n",
 "\n",
 "\n",
 "fig=plt.figure(figsize=(16,8))\n",
@@ -411,8 +424,8 @@
 }
 ],
 "source": [
-"x_0 = kpi.img_to_array(kpi.load_img(data_dir_sub+\"/train/presence/presence_t_0001.jpg\"))\n",
-"x_1 = kpi.img_to_array(kpi.load_img(data_dir_sub+\"/train/presence/presence_t_0002.jpg\"))\n",
+"x_0 = kpi.img_to_array(kpi.load_img(data_dir+\"/train/presence/presence_t_0001.jpg\"))\n",
+"x_1 = kpi.img_to_array(kpi.load_img(data_dir+\"/train/presence/presence_t_0002.jpg\"))\n",
 "x_0.shape, x_1.shape"
 ]
 },
@@ -461,14 +474,14 @@
 "# subfolers of 'data/train', and indefinitely generate\n",
 "# batches of augmented image data\n",
 "train_generator = train_datagen.flow_from_directory(\n",
-" data_dir_sub+\"/train/\", # this is the target directory\n",
+" data_dir+\"/train/\", # this is the target directory\n",
 " target_size=(img_width, img_height), \n",
 " batch_size=batch_size,\n",
 " class_mode='binary') # since we use binary_crossentropy loss, we need binary labels\n",
 "\n",
 "# this is a similar generator, for validation data\n",
 "validation_generator = valid_datagen.flow_from_directory(\n",
-" data_dir_sub+\"/validation/\",\n",
+" data_dir+\"/validation/\",\n",
 " target_size=(img_width, img_height),\n",
 " batch_size=batch_size,\n",
 " class_mode='binary')"
@@ -840,7 +853,7 @@
 "datagen = kpi.ImageDataGenerator(rescale=1. / 255)\n",
 "\n",
 "generator = datagen.flow_from_directory(\n",
-" data_dir_sub+\"/train\",\n",
+" data_dir+\"/train\",\n",
 " target_size=(img_width, img_height),\n",
 " batch_size=batch_size,\n",
 " class_mode=None, # this means our generator will only yield batches of data, no labels\n",
@@ -849,7 +862,7 @@
 "\n",
 "\n",
 "generator = datagen.flow_from_directory(\n",
-" data_dir_sub+\"/validation\",\n",
+" data_dir+\"/validation\",\n",
 " target_size=(img_width, img_height),\n",
 " batch_size=batch_size,\n",
 " class_mode=None,\n",
@@ -1044,7 +1057,7 @@
 }
 ],
 "source": [
-"model_VGG_fcm.save_weights(data_dir_sub+'/weights_model_VGG_fully_connected_model_%d_epochs_%d_batch_size.h5' %(epochs, batch_size))"
+"model_VGG_fcm.save_weights(data_dir+'/weights_model_VGG_fully_connected_model_%d_epochs_%d_batch_size.h5' %(epochs, batch_size))"
 ]
 },
 {
@@ -1113,7 +1126,7 @@
 "top_model.add(kl.Dropout(0.5))\n",
 "top_model.add(kl.Dense(1, activation='sigmoid'))\n",
 "\n",
-"top_model.load_weights(data_dir_sub+'/weights_model_VGG_fully_connected_model_%d_epochs_%d_batch_size.h5' %(epochs, batch_size))\n"
+"top_model.load_weights(data_dir+'/weights_model_VGG_fully_connected_model_%d_epochs_%d_batch_size.h5' %(epochs, batch_size))\n"
 ]
 },
 {
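Note: the two hunks above save and reload the same weights file, now directly under data_dir. Since the filename embeds epochs and batch_size, the loading cell only finds the file when those two values are identical to the ones used at save time. A hypothetical helper (not in the notebook) that makes the shared path explicit:

```python
# Hypothetical helper factoring out the weights path the two cells now share;
# epochs and batch_size must match between the save and the load.
def vgg_top_weights_path(data_dir, epochs, batch_size):
    return (data_dir +
            '/weights_model_VGG_fully_connected_model_%d_epochs_%d_batch_size.h5'
            % (epochs, batch_size))

# model_VGG_fcm.save_weights(vgg_top_weights_path('data/', epochs, batch_size))
# top_model.load_weights(vgg_top_weights_path('data/', epochs, batch_size))
```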
@@ -1247,13 +1260,13 @@
 "test_datagen = kpi.ImageDataGenerator(rescale=1. / 255)\n",
 "\n",
 "train_generator = train_datagen.flow_from_directory(\n",
-" data_dir_sub+\"/train/\",\n",
+" data_dir+\"/train/\",\n",
 " target_size=(img_height, img_width),\n",
 " batch_size=batch_size,\n",
 " class_mode='binary')\n",
 "\n",
 "validation_generator = test_datagen.flow_from_directory(\n",
-" data_dir_sub+\"/validation/\",\n",
+" data_dir+\"/validation/\",\n",
 " target_size=(img_height, img_width),\n",
 " batch_size=batch_size,\n",
 " class_mode='binary')\n"
@@ -1390,7 +1403,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.7"
+"version": "3.7.11"
 },
 "toc": {
 "nav_menu": {},
BIN  data/.DS_Store (vendored)
BIN  8 image files changed (before/after sizes unchanged: 603, 654, 586, 620, 605, 626, 619, 672 KiB; filenames not shown in this view)