#!/usr/bin/python3

import time

import torch
import torch.nn
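
# Report whether a CUDA device is available; this script nevertheless runs on the CPU.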
print(torch.cuda.is_available())


class AutoEnc(torch.nn.Module):
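    """Fully connected autoencoder that squeezes the input through a
    1-dimensional bottleneck and reconstructs it."""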

    def __init__(self, input_size):
        super().__init__()
        # encoder: input_size -> 50 -> 25 -> 10 -> 1
        self.en_l1 = torch.nn.Linear(input_size, 50)
        self.en_l2 = torch.nn.Linear(50, 25)
        self.en_l3 = torch.nn.Linear(25, 10)
        self.en_l4 = torch.nn.Linear(10, 1)
        # encoder-decoder bridge: 1 -> 1
        self.en_de_l5 = torch.nn.Linear(1, 1)
        # decoder: 1 -> 10 -> 25 -> 50 -> input_size
        self.de_l1 = torch.nn.Linear(1, 10)
        self.de_l2 = torch.nn.Linear(10, 25)
        self.de_l3 = torch.nn.Linear(25, 50)
        self.de_l4 = torch.nn.Linear(50, input_size)

    def forward(self, x):
        h = x
        h = torch.nn.functional.relu(self.en_l1(h))
        h = torch.nn.functional.relu(self.en_l2(h))
        h = torch.nn.functional.relu(self.en_l3(h))
        h = torch.nn.functional.relu(self.en_l4(h))  # 1-d bottleneck
        h = torch.nn.functional.relu(self.en_de_l5(h))
        h = torch.nn.functional.relu(self.de_l1(h))
        h = torch.nn.functional.relu(self.de_l2(h))
        h = torch.nn.functional.relu(self.de_l3(h))
        h = self.de_l4(h)  # no activation on the reconstruction layer
        return h
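

# Run on the CPU; change to 'cuda' to use a GPU when the check above prints True.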
device = 'cpu'
model = AutoEnc(3)
model.to(device)
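
# Mean-squared reconstruction error, optimized with Adam.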
criterion = torch.nn.MSELoss()  # alternative: MSELoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
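
# Toy dataset: four 3-dimensional samples the autoencoder learns to reproduce.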
data = torch.tensor([[100.0, 20.0, 30.0],
                     [4.0, 5.0, 6.0],
                     [70.0, 80.0, 9.0],
                     [10.0, 11.0, 12.0]]).to(device)
print(data.type())

start = time.time()
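
# Full-batch training: 20000 steps; log the loss every 100 steps.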
for t in range(20000):
    y_pred = model(data)
    loss = criterion(y_pred, data)
    if t % 100 == 99:
        print(t, loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

stop = time.time()
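
# Reconstruct the training data with the trained model; no gradients are needed here.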
with torch.no_grad():
    y_pred = model(data)
print("prediction: ", y_pred)
print("training takes: ", stop - start, " seconds")