import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.nn import init
from torch import Tensor
from torch.autograd import Variable
import math
import random
import numpy as np
from torch import optim
from sklearn.preprocessing import label_binarize
from sklearn.metrics import classification_report
import pandas as pd
from sklearn.preprocessing import StandardScaler


class MyNet(nn.Module):
    """Single-layer LSTM classifier.

    input_size: dimension fed to the LSTM at each time step
        (actual input is shaped (seq_len, batch_size, input_size)).
    hidden_size: dimension of the LSTM hidden state.
    output_size: number of output classes.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(MyNet, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        self.lstm = nn.LSTM(self.input_size, self.hidden_size)
        self.linear = nn.Linear(self.hidden_size, self.output_size)

    def init_state(self, batch_size, hidden_size):
        """Return random initial (h, c) states, each shaped (1, batch, hidden).

        torch.autograd.Variable has been a no-op wrapper since PyTorch 0.4,
        so plain tensors are returned directly (identical behavior).
        """
        h_init = torch.rand(1, batch_size, hidden_size)
        c_init = torch.rand(1, batch_size, hidden_size)
        return h_init, c_init

    def forward(self, x, h, c):
        """Run the LSTM and project the final hidden state to class scores.

        Returns a tensor shaped (1, batch_size, output_size).
        """
        output, (new_h, new_c) = self.lstm(x, (h, c))
        result = self.linear(new_h)
        return result

    def prediction(self, x, h, c):
        """Return the predicted class index for each batch element.

        BUG FIX: the original called .squeeze(0) twice; when batch_size == 1
        the second squeeze also collapsed the batch dimension, and
        torch.max(result, 1) then raised. A single squeeze removes exactly
        the leading num_layers dimension and is a no-op for batch_size > 1.
        """
        output, (new_h, new_c) = self.lstm(x, (h, c))
        result = self.linear(new_h)
        result = result.squeeze(0)  # (1, batch, classes) -> (batch, classes)
        print('Result Shape', result.shape)
        label1 = torch.max(result, 1)[1]
        print('Label1 Shape', label1.shape)
        return label1


def reset_weights(model):
    """Overwrite every parameter of *model* with the constant 0.5."""
    for param in model.parameters():
        init.constant_(param, 0.5)


# Build the network (2 input features, 20 hidden units, 3 classes)
# and start from a known constant initialization.
net = MyNet(input_size=2, hidden_size=20, output_size=3)
reset_weights(net)

epoch = 100  # number of training passes over the fixed batch

# ---- Load the dating data set (tab separated, no header row) ----
raw_data = pd.read_csv('./datingTestSet2.txt', delimiter='\t', header=None)
print("Dataset Shape: " + str(raw_data.shape))

# ---- Preprocess ----
raw_data = raw_data.drop_duplicates()
raw_data = raw_data.fillna(raw_data.mean())
# Normalize the feature columns with z-score scaling.
ss = StandardScaler()
# Column meaning: 0 flight miles, 1 ice cream consumed, 2 game time.
scale_features = [0, 1, 2]
raw_data[scale_features] = ss.fit_transform(raw_data[scale_features])
raw_data.hist(grid=False, figsize=(12, 12))

# ---- Split features and targets ----
raw_data = np.array(raw_data)
data = raw_data[:, :2]    # only the first two features feed the net (input_size=2)
targets = raw_data[:, 3]  # column 3 holds the class label (1..3)

# Training input: first 500 rows as a single time step -> (1, 500, 2).
# (Variable is a deprecated no-op wrapper since PyTorch 0.4 — dropped.)
input = torch.tensor(np.expand_dims(data[:500, :], axis=0), dtype=torch.float32)
print('Input Shape', input.shape)

# Labels. BUG FIX: np.int was removed in NumPy 1.24 — use the builtin int.
label = np.array(targets[:500], dtype=int)
# Original labels are 1..3; shift to 0..2 for CrossEntropyLoss
# (vectorized instead of the original element-wise Python loop).
label = label - 1
# BUG FIX: `classes` is keyword-only in current scikit-learn.
label_one_hot = label_binarize(label, classes=np.arange(3))

target = torch.LongTensor(label)
print('Target Shape', target.shape)

# Initial hidden/cell state for the batch of 500 samples, hidden size 20.
h_init, c_init = net.init_state(500, 20)

criterion = nn.CrossEntropyLoss(reduction='sum')
optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)

for i in range(epoch):
    # BUG FIX: gradients must be cleared every iteration; without
    # zero_grad() they accumulate across epochs and every update after
    # the first applies the sum of all previous gradients.
    optimizer.zero_grad()

    output = net(input, h_init, c_init)
    output = output.squeeze(0)  # (1, batch, classes) -> (batch, classes)
    _, pred = torch.max(output, 1)

    loss = criterion(output, target)
    print('epoch', i + 1, ':', loss)

    loss.backward()
    optimizer.step()

net.eval()

# Evaluation input: remaining rows as a single time step -> (1, N, 2).
input = torch.tensor(np.expand_dims(data[500:, :], axis=0), dtype=torch.float32)
# BUG FIX: np.int was removed in NumPy 1.24 — use the builtin int.
label = np.array(targets[500:], dtype=int)
label = label - 1  # shift labels 1..3 -> 0..2, same as the training set
print(label)
# BUG FIX: `classes` is keyword-only in current scikit-learn.
label_one_hot = label_binarize(label, classes=np.arange(3))
y_test = np.array(label, dtype=int)
print("Test Shape", y_test.shape)

# Inference only — no gradient tracking needed.
with torch.no_grad():
    y_pred = net.prediction(input, h_init, c_init)
print("Pred Shape", y_pred.shape)
print(type(y_pred))
print(y_pred.shape)

# Convert the prediction tensor to numpy for scikit-learn.
# digits: decimal places in the report; support: occurrences of each label.
ans = classification_report(y_test, y_pred.numpy(), digits=5)
print(ans)
