from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import numpy as np
import torch
import torch.nn as nn

# Load the iris dataset: 150 samples x 4 features, 3 balanced classes.
x, y = load_iris(return_X_y=True)

# Hold out 30% for evaluation; fixed random_state makes runs reproducible.
train_x, test_x, train_y, test_y = train_test_split(
    x, y, test_size=0.3, random_state=42
)
print(train_y.shape)

# CrossEntropyLoss expects integer class indices, so keep the labels as-is
# instead of one-hot encoding them and immediately argmax-ing them back.
# torch.autograd.Variable is deprecated: plain tensors work directly.
X = torch.Tensor(train_x)
Y = torch.LongTensor(train_y)
X_test = torch.Tensor(test_x)
Y_test = torch.LongTensor(test_y)
print(Y.shape)

# Two-layer classifier: 4 features -> 6 hidden units -> 3 class logits.
# NOTE: no Softmax at the end — nn.CrossEntropyLoss applies log_softmax
# internally, so feeding it softmax-ed probabilities (as before) squashes
# gradients and slows learning. The model now emits raw logits; argmax over
# logits gives the same predicted class as argmax over softmax outputs.
# A ReLU is added between the linear layers; without a nonlinearity two
# stacked Linear layers collapse to a single 4->3 linear map.
l1 = nn.Linear(4, 6, bias=True)
l2 = nn.Linear(6, 3, bias=True)
model = nn.Sequential(l1, nn.ReLU(), l2)

# Expects raw logits and integer class-index targets.
criterion = nn.CrossEntropyLoss()

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Full-batch training: 10,001 forward/backward passes over the whole
# training set. `h` intentionally survives the loop — the report section
# below reuses the final forward pass.
for i in range(10001):
    optimizer.zero_grad()
    h = model(X)                 # (N, 3) class scores
    cost = criterion(h, Y)
    cost.backward()
    optimizer.step()

    if i % 100 == 0:
        # Compute accuracy only when it is actually reported; the original
        # recomputed it on every one of the 10,001 iterations.
        predict = torch.max(h, 1)[1]
        accuracy = (predict == Y).float().mean()
        # .item() replaces the legacy .data.numpy() access.
        print('i=:{}\tLoss:{:.3f}\tAcc{:.2%}'.format(i, cost.item(), accuracy))

# Per-sample report on the final training forward pass: [match?] plus the
# predicted and true class for each training example.
pred = torch.max(h, 1)[1].float()

for guess, truth in zip(pred, Y):
    matched = bool(guess.item() == truth.item())
    print('[{}] predict:{} True Y:{}'.format(matched, guess.item(), truth.item()))