from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import torch
from torch.autograd import Variable
from torch import nn
import matplotlib.pyplot as plt


# Load the iris dataset: 150 samples, 4 float features, 3 integer class labels.
data = load_iris()
x = data.data
y = data.target

# Hold out 30% of the data for evaluation; shuffle before splitting.
X_train, X_test, y_train, y_test = train_test_split(x, y, shuffle=True, test_size=0.3)

# NOTE: torch.autograd.Variable is deprecated since PyTorch 0.4 — plain
# tensors track gradients, so no wrapper is needed.
# Features are float32 (what nn.Linear expects); targets are int64 class
# indices (what CrossEntropyLoss expects), so no .long() cast is needed later.
train_x = torch.tensor(X_train, dtype=torch.float32)
test_x = torch.tensor(X_test, dtype=torch.float32)
train_y = torch.tensor(y_train, dtype=torch.long)
test_y = torch.tensor(y_test, dtype=torch.long)


# Three-layer MLP for iris: 4 input features -> 10 -> 5 -> 3 class logits,
# with ReLU activations and light dropout after each hidden layer.
model = nn.Sequential(
    nn.Linear(4, 10, bias=True),
    nn.ReLU(),
    nn.Dropout(0.1),
    nn.Linear(10, 5, bias=True),
    nn.ReLU(),
    nn.Dropout(0.1),
    nn.Linear(5, 3, bias=True),
)

# Multi-class loss over raw logits, optimized with Adam.
cross = nn.CrossEntropyLoss()
opt = torch.optim.Adam(model.parameters(), lr=0.001)

# Full-batch training: 5001 steps over the whole training set, logging
# loss and training accuracy every 100 steps.
cost_his = []  # loss history for the learning-curve plot below
model.train()  # be explicit: dropout is active during training
for step in range(5001):

    opt.zero_grad()
    h = model(train_x)
    # .long() is a no-op if targets are already int64, but keeps this
    # block correct against float targets too.
    loss = cross(h, train_y.long())
    loss.backward()
    opt.step()

    if step % 100 == 0:
        # Metrics are only reported every 100th step, so compute them
        # here rather than on every iteration.
        pred = torch.argmax(h, 1)
        acc = (pred == train_y).float().mean().item()
        # .item() replaces the deprecated loss.data.numpy() idiom and
        # yields a plain Python float for printing/plotting.
        cost_his.append(loss.item())
        print(step, '{:.4f}'.format(loss.item()), '{:.2f}'.format(acc))



# Learning curve. The very first recorded loss (random init) is usually
# much larger than the rest, so skip it to keep the plot readable.
losses_after_warmup = cost_his[1:]
plt.plot(losses_after_warmup)
plt.show()


# Evaluate on the held-out test set.
# BUG FIX: the original never called model.eval(), so the Dropout layers
# stayed active at test time and randomly zeroed activations, biasing the
# reported accuracy. eval() disables dropout; no_grad() skips gradient
# tracking since we are only doing inference.
model.eval()
with torch.no_grad():
    pred = torch.argmax(model(test_x), 1)
acc = (pred == test_y).float().mean().item()
print(acc)

