diff --git a/solutions/.DS_Store b/solutions/.DS_Store
deleted file mode 100644
index 5008ddf..0000000
Binary files a/solutions/.DS_Store and /dev/null differ
diff --git a/solutions/classifier_pretrained_model.py b/solutions/classifier_pretrained_model.py
deleted file mode 100644
index 3c59e2a..0000000
--- a/solutions/classifier_pretrained_model.py
+++ /dev/null
@@ -1,21 +0,0 @@
-model_VGG_fcm = km.Sequential()
-model_VGG_fcm.add(kl.Flatten(input_shape=features_train.shape[1:]))
-model_VGG_fcm.add(kl.Dense(64, activation='relu'))
-model_VGG_fcm.add(kl.Dropout(0.5))
-model_VGG_fcm.add(kl.Dense(1, activation='sigmoid'))
-
-model_VGG_fcm.compile(optimizer='rmsprop',
-                      loss='binary_crossentropy',
-                      metrics=['accuracy'])
-
-model_VGG_fcm.summary()
-
-
-train_labels = np.array([0] * int((N_train/2)) + [1] * int((N_train/2)))
-validation_labels = np.array([0] * int((N_val/2)) + [1] * int((N_val/2)))
-
-model_VGG_fcm.fit(features_train, train_labels,
-                  epochs=epochs,
-                  batch_size=batch_size,
-                  validation_data=(features_validation, validation_labels))
-t_learning_VGG_fcm = te-ts
\ No newline at end of file
diff --git a/solutions/max_pooling.py b/solutions/max_pooling.py
deleted file mode 100644
index 69ab67e..0000000
--- a/solutions/max_pooling.py
+++ /dev/null
@@ -1,13 +0,0 @@
-conv_mp = km.Sequential([ kl.MaxPool2D(pool_size=(2,2))])
-
-img_in = np.expand_dims(x, 0)
-img_out = conv_mp.predict(img_in)
-
-fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(10, 5))
-ax0.imshow(img_in[0,:,:,0].astype(np.uint8),
-           cmap="binary");
-ax0.grid(False)
-
-ax1.imshow(img_out[0,:,:,0].astype(np.uint8),
-           cmap="binary");
-ax1.grid(False)
\ No newline at end of file
diff --git a/solutions/mnist_conv_architecture.py b/solutions/mnist_conv_architecture.py
deleted file mode 100644
index 2db4807..0000000
--- a/solutions/mnist_conv_architecture.py
+++ /dev/null
@@ -1,23 +0,0 @@
-model = km.Sequential()
-model.add(kl.Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28,28, 1), data_format="channels_last"))
-model.add(kl.Conv2D(64, (3, 3), activation='relu'))
-model.add(kl.MaxPooling2D(pool_size=(2, 2)))
-model.add(kl.Dropout(0.25))
-model.add(kl.Flatten())
-model.add(kl.Dense(128, activation='relu'))
-model.add(kl.Dropout(0.5))
-model.add(kl.Dense(N_classes, activation='softmax'))
-# Model summary
-model.summary()
-# Training
-model.compile(loss="sparse_categorical_crossentropy",
-              optimizer=ko.Adadelta(),
-              metrics=['accuracy'])
-ts=time.time()
-model.fit(X_train_conv, Y_train,
-          batch_size=batch_size,
-          epochs=epochs,
-          verbose=1,
-          validation_data=(X_test_conv, Y_test))
-te=time.time()
-t_train_conv = te-ts
\ No newline at end of file
diff --git a/solutions/test_kaggle.py b/solutions/test_kaggle.py
deleted file mode 100644
index 6457bb6..0000000
--- a/solutions/test_kaggle.py
+++ /dev/null
@@ -1,29 +0,0 @@
-data_dir_test = data_dir+'test/'
-N_test = len(os.listdir(data_dir_test+"/test"))
-
-test_datagen = kpi.ImageDataGenerator(rescale=1. / 255)
-
-test_generator = test_datagen.flow_from_directory(
-    data_dir_test,
-    #data_dir_sub+"/train/",
-    target_size=(img_height, img_width),
-    batch_size=batch_size,
-    class_mode=None,
-    shuffle=False)
-
-test_prediction = model_VGG_LastConv_fcm.predict_generator(test_generator, N_test // batch_size)
-
-images_test = [data_dir_test+"/test/"+k for k in os.listdir(data_dir_test+"/test")][:9]
-x_test = [kpi.img_to_array(kpi.load_img(image_test))/255 for image_test in images_test]  # images as NumPy arrays scaled to [0, 1]
-
-fig = plt.figure(figsize=(10,10))
-for k in range(9):
-    ax = fig.add_subplot(3,3,k+1)
-    ax.imshow(x_test[k], interpolation='nearest')
-    pred = test_prediction[k]
-    if pred > 0.5:
-        title = "Probability for dog : %.1f" %(pred*100)
-    else:
-        title = "Probability for cat : %.1f" %((1-pred)*100)
-    ax.set_title(title)
-plt.show()
\ No newline at end of file
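
The deleted solutions all rely on short module aliases (km, kl, ko, kpi) and on variables such as features_train, N_classes, batch_size and epochs that are defined in the accompanying course notebooks, not in these files. Presumably the aliases correspond to imports along the following lines (an assumption, since the import cell is not part of this diff):

    import time
    import numpy as np
    import matplotlib.pyplot as plt
    import keras.models as km                # km.Sequential in every solution
    import keras.layers as kl                # kl.Dense, kl.Conv2D, kl.MaxPooling2D, ...
    import keras.optimizers as ko            # ko.Adadelta in mnist_conv_architecture.py
    import keras.preprocessing.image as kpi  # kpi.ImageDataGenerator, kpi.load_img, kpi.img_to_array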
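
Note that classifier_pretrained_model.py ends with t_learning_VGG_fcm = te-ts although ts and te are never assigned in that file; mnist_conv_architecture.py shows the intended pattern of timing the fit call. A minimal sketch of the presumably missing lines, reusing the script's own variables:

    import time

    ts = time.time()                      # start of training
    model_VGG_fcm.fit(features_train, train_labels,
                      epochs=epochs,
                      batch_size=batch_size,
                      validation_data=(features_validation, validation_labels))
    te = time.time()                      # end of training
    t_learning_VGG_fcm = te - ts          # training time in seconds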
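
In test_kaggle.py, predict_generator(test_generator, N_test // batch_size) skips the last partial batch whenever N_test is not a multiple of batch_size, and the separate os.listdir call is not guaranteed to enumerate files in the same order as the generator. A hedged alternative, assuming the same variables as the deleted script (model_VGG_LastConv_fcm, test_generator, data_dir_test, N_test, batch_size):

    import os
    import numpy as np

    # Round the step count up so every test image is predicted exactly once.
    steps = int(np.ceil(N_test / batch_size))
    test_prediction = model_VGG_LastConv_fcm.predict_generator(test_generator, steps)

    # With shuffle=False the predictions follow the generator's own ordering,
    # so pair them with test_generator.filenames rather than os.listdir().
    images_test = [os.path.join(data_dir_test, f) for f in test_generator.filenames][:9]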