import json
import os

import numpy as np
import torch
import torch.utils.data as Data
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
from torch import nn, optim
from torch.autograd import Variable

from LR import LR


def train(model_saving_path, loader):
    """Run mini-batch training until validation loss drops below a threshold.

    Training stops (and the model is persisted) the first time the
    validation MSE falls under ``loss_threshold``; otherwise it runs for
    ``num_epochs`` epochs.

    Args:
        model_saving_path: base path for the saved model; the validation
            loss is inserted before the file extension.
        loader: DataLoader yielding ``(features, targets)`` mini-batches.

    NOTE(review): relies on module-level globals defined in ``__main__``:
    ``fm``, ``criterion``, ``optimizer``, ``device``, ``X_val_sparse``,
    ``y_val``.
    """
    num_epochs = 100000
    # Fixed early-stop threshold on validation MSE (the original named this
    # `minimum`, but it is never updated — it is a threshold, not a running min).
    loss_threshold = 1.0

    for epoch in range(num_epochs):
        for step, (batch_x, batch_y) in enumerate(loader):
            # `Variable` has been a no-op since torch 0.4; move batches to
            # the compute device directly.
            b_x = batch_x.to(device)
            b_y = batch_y.to(device)

            # Forward pass.
            out = fm(b_x)
            loss = criterion(out, b_y)

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Every 5th epoch, validate after every step.
            if epoch % 5 == 0:
                val_loss = val(X_val_sparse, y_val)
                print('Epoch[{}/{}]'.format(epoch, num_epochs)+str(step)+'loss: {:.6f}'.format(loss.item()) + 'validation loss: {:.6f}'.format(val_loss))

                if val_loss < loss_threshold:
                    print('successssssssssssssssssssssssssssssss')
                    # BUG FIX: the original ignored `model_saving_path` and
                    # dumped to a hard-coded 'model/model_fm_v1_...' path.
                    root, ext = os.path.splitext(model_saving_path)
                    joblib.dump(fm, root + '_' + str(val_loss) + (ext or '.m'))
                    return


def val(X_val, y_val):
    """Return the validation loss of the global model ``fm`` as a float.

    Args:
        X_val: validation features (dense tensor on the model's device).
        y_val: validation targets.
    """
    predictions = fm(X_val)
    return criterion(predictions, y_val).item()


def test(X_test, y_test):
    """Evaluate the global model ``fm`` on the held-out set and print the loss."""
    test_loss = criterion(fm(X_test), y_test)
    print(test_loss.item())


def load_data(file_path, size=-1):
    """Load JSON-lines wine records and return vectorized train/val/test splits.

    Args:
        file_path: path to a JSON-lines file; each record must contain a
            ``'points'`` field, which is popped out and used as the target.
        size: maximum number of records to read, or -1 for all of them.

    Returns:
        ``(X_train, X_val, X_test, y_train, y_val, y_test)`` — the ``X_*``
        are sparse matrices from :class:`DictVectorizer`, the ``y_*`` are
        numpy arrays of the ``'points'`` targets.
    """
    data_list = []
    rate_list = []
    count = 0
    with open(file_path) as f:
        for line in f:
            record = json.loads(line)
            # 'points' is the regression target, so remove it from the
            # feature dict before vectorization.
            rate_list.append(record.pop('points'))
            data_list.append(record)

            count += 1
            # BUG FIX: the original incremented first and tested `i > size`,
            # which read `size + 1` records instead of `size`.
            if size != -1 and count >= size:
                break

    y = np.array(rate_list)

    # Fit the vectorizer vocabulary once on all records (the transformed
    # matrix returned by fit_transform was discarded in the original, so a
    # plain fit is equivalent).
    # NOTE(review): fitting on all records leaks the test split's feature
    # vocabulary into training; acceptable for one-hot dicts, but confirm.
    v = DictVectorizer()
    v.fit(data_list)

    # 90/10 train/test split, then 90/10 train/val split of the remainder.
    X_train, X_test, y_train, y_test = train_test_split(
        data_list, y, test_size=0.1, random_state=0)
    X_train, X_val, y_train, y_val = train_test_split(
        X_train, y_train, test_size=0.1, random_state=1)

    return (v.transform(X_train), v.transform(X_val), v.transform(X_test),
            y_train, y_val, y_test)


if __name__ == '__main__':
    file_name = '../wine_data_v3.json'

    # Pick the compute device once; train() moves mini-batches onto it.
    if torch.cuda.is_available():
        print('using cuda')
        device = torch.device('cuda')
    else:
        print('using cpu')
        device = torch.device('cpu')

    def _to_dense_tensor(matrix):
        """Convert a scipy sparse matrix to a dense float32 torch tensor.

        Replaces three copy-pasted COO-conversion blocks from the original.
        """
        coo = matrix.tocoo()
        indices = torch.LongTensor(np.vstack((coo.row, coo.col)))
        values = torch.FloatTensor(coo.data)
        # torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the
        # supported constructor with identical semantics here.
        return torch.sparse_coo_tensor(indices, values, torch.Size(coo.shape)).to_dense()

    # Load the train/val/test splits (features are scipy sparse matrices).
    X_train, X_val, X_test, y_train, y_val, y_test = load_data(file_name)

    X_train_sparse = _to_dense_tensor(X_train)
    X_test_sparse = _to_dense_tensor(X_test)
    X_val_sparse = _to_dense_tensor(X_val)

    # Feature dimensionality — read .shape directly instead of densifying
    # the whole training matrix with toarray() just to inspect its shape.
    data_shape = X_train.shape

    # Model, loss and optimizer (module globals read by train/val/test).
    fm = LR(n=data_shape[1], k=2).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(fm.parameters(), lr=1e-3)

    # BUG FIX: np.float was removed in NumPy 1.20+ (AttributeError on
    # modern NumPy); the builtin float is the documented replacement.
    # Training tensors stay on the CPU — train() moves each batch to device.
    y_train = torch.from_numpy(np.array(y_train, dtype=float)).float()

    # Validation set lives on the device for the whole run.
    X_val_sparse = X_val_sparse.to(device)
    y_val = torch.from_numpy(np.array(y_val, dtype=float)).float().to(device)

    # Mini-batch loader.
    batch_size = 128
    print(X_train_sparse.shape, y_train.shape)
    torch_dataset = Data.TensorDataset(X_train_sparse, y_train)
    loader = Data.DataLoader(dataset=torch_dataset, batch_size=batch_size, shuffle=True)

    # Train (early-stops and saves the model on a good validation loss).
    train('model/model_fm.m', loader)

    # Evaluate on the held-out test set (kept on CPU, as in the original).
    y_test = torch.from_numpy(np.array(y_test, dtype=float)).float()
    test(X_test_sparse, y_test)