import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F  # activation functions live here
import torch.utils.data as TData
import paras
import LSTMtranpoint
import numpy as np
import torchRnnShow
from madin import *
from LSTMModule import *
from autocode_torch import processInput, span


LR = 0.01  # learning rate

# torchfilename = 'data/Rnn-5-10.pkl'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # prefer GPU when available


def trainpoint(datalists):
    """Build (window, label) training pairs from a list of candle records.

    First annotates transition points on *datalists* via
    LSTMtranpoint.findTranPoint, then slides a TIME_STEP-long window of
    feature vectors over the series, pairing each window with the
    "tran_mark" label of the record at the window's end.

    Returns:
        (srclists, reslists): parallel lists of feature windows and labels.
    """
    srclists, reslists = [], []
    # NOTE(review): findTranPoint appears to tag each record with a
    # "tran_mark" field in place — confirm against its definition.
    LSTMtranpoint.findTranPoint(datalists, 2, 400)
    for idx in range(TIME_STEP + span, len(datalists)):
        window = []
        for step in range(TIME_STEP):
            end = idx - step + 1
            # Each step looks back one more record; getone condenses a
            # span-sized slice into one feature vector.
            window.append(getone(datalists[end - span:end]))
        srclists.append(window)
        reslists.append(datalists[idx]["tran_mark"])
    return srclists, reslists


def updatemodel(dbname="FX", symbol='FXUSDJPY', qt_type=5):
    """Train (or continue training) the LSTM classifier for one symbol.

    Loads a previously saved model from data/<symbol>_<qt_type>_lstm.pkl if
    present, otherwise starts from a fresh RNN. Trains with Adam +
    cross-entropy on the first 3/4 of the samples, reports accuracy on the
    last 1/4, and stops after 50 consecutive epochs without a new best
    loss. The trained model is saved back to the same pickle file (on CPU,
    in eval mode).

    Args:
        dbname:  database name passed to finddb.
        symbol:  instrument/table name; also part of the model filename.
        qt_type: k-line aggregation factor; also part of the filename.
    """
    torchfilename = 'data/{}_{}_lstm.pkl'.format(symbol, qt_type)
    try:
        rnn = torch.load(torchfilename).to(device)
        print('load success')
    # Best-effort resume: any load failure (missing file, incompatible
    # pickle, ...) falls back to a fresh model. Never use a bare except —
    # it would also swallow KeyboardInterrupt/SystemExit.
    except Exception:
        rnn = RNN().to(device)
        print('load fail')
    rnn.train(True)

    numbers = paras.number
    datalists = finddb(dbname=dbname, tablename=symbol, limit=numbers)
    # Aggregate raw records into qt_type-sized k-lines, keep the tail.
    datalists = joinKline(datalists, qt_type)[-int(numbers / qt_type):]

    srclists, reslists = trainpoint(datalists)
    xall = torch.from_numpy(np.array(srclists, dtype=np.float32))
    yall = torch.from_numpy(np.array(reslists, dtype=np.int64))
    # First 3/4 for training, last 1/4 held out for accuracy reporting.
    split = int(len(yall) * 3 / 4)
    x = xall[:split]
    y = yall[:split]
    x_test = xall[-int(len(yall) / 4):]
    y_test = np.array(yall[-int(len(yall) / 4):])
    x = x.view(-1, TIME_STEP, INPUT_SIZE)
    print('train shape:', x.shape, y.shape)

    optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all parameters
    loss_func = nn.CrossEntropyLoss()  # expects integer class labels, not one-hot

    epoch = 0  # counts consecutive epochs WITHOUT a new best loss
    oldloss = 1e10
    # The whole training set is one batch; move it to the device once
    # instead of re-wrapping it every iteration (torch.autograd.Variable
    # is deprecated — tensors carry autograd state themselves).
    b_x = x.to(device)  # (batch, time_step, input_size)
    b_y = y.to(device)
    while True:
        output = rnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        aveloss = loss.item()
        if oldloss > aveloss:
            oldloss = aveloss
            epoch = 0
        else:
            epoch += 1
        if epoch > 50:  # early stop: 50 epochs with no improvement
            break

        # Report held-out accuracy; no_grad avoids building an autograd
        # graph (and holding its memory) for the evaluation forward pass.
        with torch.no_grad():
            test_output = rnn(x_test.to(device))
        pred_y = torch.max(test_output, 1)[1].cpu().numpy()
        print(sum(pred_y), sum(y_test))
        accuracy = float((pred_y == y_test).astype(int).sum()) / float(y_test.size)
        print('Epoch={}, Loss={}, accuracy={}'.format(epoch, aveloss, accuracy))

    rnn.eval()
    rnn.cpu()
    torch.save(rnn, torchfilename)


if __name__ == "__main__":
    # Retrain the model for the configured symbol, then visualize it.
    dbname, symbol, qt_type = paras.dbname, paras.symbol, paras.qt_type
    updatemodel(dbname, symbol, qt_type)
    torchRnnShow.showRnn(dbname, symbol, qt_type)
