This commit is contained in:
LMAGallois 2022-01-16 21:35:22 +01:00
parent fbdb199d43
commit 899535bfc9

223
1res_a&n.py Normal file
View file

@ -0,0 +1,223 @@
# -*- coding: utf-8 -*-
"""1Res_A&N.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1e4S-Beaphf_3gh_6blpWw5eXYT2hH_2A
# Dataset creation section
"""
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torchvision
import cv2
from sklearn.model_selection import train_test_split
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
plt.ion()   # interactive mode
# Mount Google Drive so the CelebA labels/images are reachable (Colab only).
from google.colab import drive
drive.mount('/content/drive')
# Load the first 4999 rows of the CelebA attribute table and keep only the
# image file name and the 'Smiling' attribute.
data = pd.read_csv('/content/drive/MyDrive/insa 5/Datasets/celebA/labels.csv',nrows=4999)
data = data[["image_id","Smiling"]]
# CelebA encodes "attribute absent" as -1; remap to 0 so labels are {0, 1}.
data= data.replace(-1,0)
pd.set_option("display.max_rows", 20, "display.max_columns", 20)
# Random 75% / 25% train/test split of the label table.
train_set,test_set=train_test_split(data,test_size=0.25)
print(train_set)
class ImageDataset(Dataset):
    """CelebA image dataset serving (image, label) pairs.

    Args:
        csv: DataFrame with an 'image_id' column (file name) plus one or
            more label columns.
        img_folder: directory containing the image files.
        transform: torchvision transform applied to each decoded RGB image.
    """

    def __init__(self, csv, img_folder, transform):
        self.csv = csv
        self.transform = transform
        self.img_folder = img_folder
        self.image_names = self.csv[:]['image_id']
        # Every column except 'image_id' is treated as a label column.
        self.labels = np.array(self.csv.drop(['image_id'], axis=1))

    def __len__(self):
        # Number of samples in the dataset.
        return len(self.image_names)

    def __getitem__(self, index):
        # os.path.join is robust whether or not img_folder ends with '/'.
        path = os.path.join(self.img_folder, self.image_names.iloc[index])
        image = cv2.imread(path)
        if image is None:
            # cv2.imread silently returns None on a missing/unreadable file;
            # fail loudly here instead of crashing cryptically in cvtColor.
            raise FileNotFoundError(f"Could not read image: {path}")
        # OpenCV decodes to BGR; convert to RGB for torchvision transforms.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = self.transform(image)
        targets = self.labels[index]
        return image, targets
# Identical pre-processing for train and test: decode to PIL, resize to the
# 200x200 input size the network expects, convert to a [0, 1] float tensor.
train_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((200, 200)),
    transforms.ToTensor()])
# ,transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
test_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((200, 200)),
    transforms.ToTensor()])

train_dataset = ImageDataset(train_set, '/content/drive/MyDrive/insa 5/Datasets/celebA/images/', train_transform)
test_dataset = ImageDataset(test_set, '/content/drive/MyDrive/insa 5/Datasets/celebA/images/', test_transform)

BATCH_SIZE = 16
train_dataloader = DataLoader(
    train_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True
)
test_dataloader = DataLoader(
    test_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True
)

# Sanity check: pull one batch and flatten each image to a vector.
# Renamed from `input` to avoid shadowing the builtin.
sample = next(iter(train_dataloader))
inputs, label = sample
inputs = inputs.view(BATCH_SIZE, -1)
print(inputs.size())
def imshow(inp, title=None):
    """Display a CHW image tensor with matplotlib.

    Args:
        inp: tensor of shape (C, H, W) with values roughly in [0, 1].
        title: optional plot title.
    """
    # Tensor layout is (C, H, W); matplotlib wants (H, W, C).
    inp = inp.numpy().transpose((1, 2, 0))
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    # Bug fix: `title` was previously accepted but silently ignored.
    if title is not None:
        plt.title(title)
# Get a batch of data (note: this draws from the *test* loader)
images, labels = next(iter(test_dataloader))
# Make a grid from the batch and display it
output = torchvision.utils.make_grid(images)
imshow(output)
"""# Training section"""
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small CNN for binary (smiling / not smiling) classification.

    Expects 3x200x200 RGB inputs and returns 2 raw class scores (logits).
    """

    def __init__(self):
        super().__init__()
        # Two conv stages, each followed by 2x2 max pooling:
        # 3x200x200 -> 6x98x98 -> 16x47x47, i.e. 16*47*47 = 35344 features.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(35344, 16)
        self.fc2 = nn.Linear(16, 84)
        self.fc3 = nn.Linear(84, 2)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = self.pool(out)
        out = F.relu(self.conv2(out))
        out = self.pool(out)
        # Flatten everything except the batch dimension.
        out = torch.flatten(out, 1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
net = Net()

import torch.optim as optim

# CrossEntropyLoss expects raw logits and integer class indices.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(10):  # loop over the dataset multiple times
    print(epoch)
    running_loss = 0.0
    num_batches = 0
    for i, data in enumerate(train_dataloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        # labels has shape (batch, 1); CrossEntropyLoss needs shape (batch,).
        loss = criterion(outputs, labels.flatten())
        loss.backward()
        optimizer.step()

        # accumulate statistics
        running_loss += loss.item()
        num_batches += 1
        # Bug fix: the original printed every 2000 mini-batches, but with
        # ~3750 training images at batch size 16 an epoch has only ~235
        # batches, so the loss was never reported. Print every 100 batches.
        if i % 100 == 99:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / num_batches))
    # Report the mean loss over the whole epoch.
    if num_batches:
        print('epoch %d mean loss: %.3f' % (epoch + 1, running_loss / num_batches))

print('Finished Training')

# Persist the trained weights to Drive (unused `PATH` variable removed).
net_path = '/content/drive/MyDrive/insa 5/Models/test_net.pth'
torch.save(net.state_dict(), net_path)
"""# Testing section"""
dataiter = iter(train_dataloader)
images, labels = dataiter.next()
classes = ("Not Smiling", "Smiling")
# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(16)))
net = Net()
net_path='/content/drive/MyDrive/insa 5/Colab Notebooks/savings/test_net.pth'
net.load_state_dict(torch.load(net_path))
print(net.conv1.weight)
outputs = net(images)
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
for j in range(16)))
correct = 0
total = 0
# since we're not training, we don't need to calculate gradients
with torch.no_grad():
    for data in test_dataloader:
        images, labels = data
        # calculate outputs by running images through the network
        outputs = net(images)
        # the class with the highest energy is what we choose as prediction
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        # labels is (batch, 1); flatten to match predicted's shape (batch,)
        correct += (predicted == labels.flatten()).sum().item()

print(correct)
print(total)
# Bug fix: the message hard-coded "750 test images" while the actual test
# split is 25% of 4999 rows (~1250); report the real count instead.
print('Accuracy of the network on the %d test images: %d %%' % (
    total, 100 * correct / total))