import pandas as pd
import numpy as np
from config import Constant
from rnn_model import LSTMModel, lstm_init_weights
from sklearn.preprocessing import MinMaxScaler
from dataset import MyDataset
from torch.utils.data import DataLoader
import torch
from tqdm import tqdm
from utils import PlotUtils
import os
from matplotlib import pyplot as plt

# All tensors/models are placed on the GPU; assumes CUDA is available.
device = 'cuda'
# Use the SimHei font so Chinese (CJK) labels render correctly in matplotlib.
plt.rcParams['font.sans-serif'] = ['SimHei']


def generate_samples(raw_data: np.ndarray, is_predict=False, seen=None, blind=None):
    """Slice a time series into sliding-window (input, label) sample pairs.

    Each sample is ``seen`` consecutive steps; its label is the same-length
    window shifted forward by ``blind`` steps.

    Args:
        raw_data: array of shape (length, n_features).
        is_predict: when True, additionally append the final ``seen``-step
            window (which has no label) so a model can forecast past the
            end of the series.
        seen: input window length; defaults to Constant.INPUT_WINDOW.
        blind: label shift / forecast horizon; defaults to Constant.OUTPUT_WINDOW.

    Returns:
        (data, labels) as float ndarrays. With ``is_predict`` the data array
        holds one more window than the label array.
    """
    if seen is None:
        seen = Constant.INPUT_WINDOW
    if blind is None:
        blind = Constant.OUTPUT_WINDOW
    res_data = []
    res_label = []
    length = raw_data.shape[0]
    # +1 so the last fully-labelled window (i == length - seen - blind) is
    # included; the previous range() silently dropped it (off-by-one).
    for i in range(length - seen - blind + 1):
        res_data.append(raw_data[i:i + seen])
        res_label.append(raw_data[i + blind:i + seen + blind])
    if is_predict:
        res_data.append(raw_data[length - seen:length])
    return np.array(res_data, dtype=float), np.array(res_label, dtype=float)


def scalar(data):
    """Normalize *data* into [-1, 1] and return both result and scaler.

    Returns:
        tuple: (scaled array, fitted MinMaxScaler — keep it to call
        ``inverse_transform`` on predictions later).
    """
    transformer = MinMaxScaler(feature_range=(-1, 1))
    scaled = transformer.fit_transform(data)
    return scaled, transformer


def generate_dataset():
    """Load the humidity column, scale it, window it, and split 80/20.

    Returns:
        ((train_data, train_label), (test_data, test_label), fitted scaler).
    """
    column = Constant.DEPTH
    frame = pd.read_csv('data/merge_data.csv', usecols=[column])
    series = frame[column].values.reshape(-1, 1)
    scaled, min_max = scalar(series)

    samples, targets = generate_samples(scaled)
    # Chronological split: first 80% for training, remainder for validation.
    split = int(samples.shape[0] * 0.8)
    train_part = (samples[:split], targets[:split])
    test_part = (samples[split:], targets[split:])

    return train_part, test_part, min_max


def train(model, data_loader, criterion, optimizer, epoch):
    """Run one training epoch over *data_loader*.

    Args:
        model: network being optimized (already moved to ``device`` by the caller).
        data_loader: yields (data, label) batches.
        criterion: loss function.
        optimizer: optimizer stepping ``model``'s parameters.
        epoch: epoch index, used only for log output.
    """
    model.train()
    batch_len = len(data_loader)

    process_bar = tqdm(total=batch_len)
    try:
        for idx, (data, label) in enumerate(data_loader):
            process_bar.update(1)
            data, label = data.to(device).float(), label.to(device).float()
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, label)
            loss.backward()
            # Clip gradients to curb LSTM exploding-gradient instability.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.7)
            optimizer.step()

            if idx % 10 == 0:
                tqdm.write(f'[epoch:{epoch:>3d}] loss:{loss.item():.7f} .')
    finally:
        # Close the bar so repeated epochs don't leave stale tqdm instances
        # around (the original never closed it).
        process_bar.close()


def test(model, data_loader, criterion):
    """Evaluate *model* on *data_loader*; collect predictions vs. ground truth.

    Args:
        model: trained network (on ``device``).
        data_loader: yields (data, label) batches.
        criterion: loss function.

    Returns:
        (truth, predictions, mean loss) — truth/predictions are flat CPU
        tensors built from the timestep at index -Constant.OUTPUT_WINDOW of
        every batch output/label.
    """
    model.eval()
    total_loss = 0.
    pred_chunks = []
    truth_chunks = []
    with torch.no_grad():
        for data, label in data_loader:
            data, label = data.to(device).float(), label.to(device).float()
            output = model(data)
            total_loss += criterion(output, label).item()
            # NOTE(review): this selects a single timestep at -OUTPUT_WINDOW;
            # if the whole forecast horizon was intended, the slice should be
            # [:, -Constant.OUTPUT_WINDOW:] — confirm.
            pred_chunks.append(output[:, -Constant.OUTPUT_WINDOW].view(-1).cpu())
            truth_chunks.append(label[:, -Constant.OUTPUT_WINDOW].view(-1).cpu())

        loss = total_loss / len(data_loader)
        tqdm.write(f'valid loss : {loss:.5f}')
        tqdm.write("-" * 100)
    # Concatenate once at the end instead of re-allocating the growing tensor
    # every batch (the original in-loop torch.cat was accidentally quadratic).
    predictions = torch.cat(pred_chunks, 0) if pred_chunks else torch.Tensor(0)
    truth = torch.cat(truth_chunks, 0) if truth_chunks else torch.Tensor(0)
    return truth, predictions, loss


def main():
    """Train the LSTM, validate each epoch, checkpoint the best model,
    then roll the forecast forward via goahead()."""
    model = LSTMModel(n_input=1).to(device).apply(lstm_init_weights)
    train_ds, test_ds, min_max = generate_dataset()

    train_dataset = MyDataset(train_ds, has_tgt=False)
    train_dataset_loader = DataLoader(train_dataset, batch_size=10, shuffle=False)
    test_dataset = MyDataset(test_ds, has_tgt=False)
    test_dataset_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=0.001)
    # Decay the learning rate by 5% every epoch.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)

    # Ensure the checkpoint directory exists before torch.save is called.
    os.makedirs('backup', exist_ok=True)
    best = None

    for epoch in range(100):
        # train
        train(model, train_dataset_loader, criterion, optimizer, epoch)
        # validation
        truth, predictions, valid_loss = test(model, test_dataset_loader, criterion)

        # Checkpoint whenever validation loss matches or beats the best so far
        # (the first epoch always saves).
        if best is None or valid_loss <= best:
            best = valid_loss
            # Embed the depth in the filename so goahead() can find it — it
            # loads f'backup/lstm_best_{depth}.pt'; the old fixed name
            # 'lstm_best.pt' was saved but never read back.
            model_save_path = os.path.join('backup', f'lstm_best_{Constant.DEPTH}.pt')
            torch.save(model.state_dict(), model_save_path)
        # Map scaled values back to physical units before plotting.
        truth = min_max.inverse_transform(truth.reshape((-1, 1)))
        predictions = min_max.inverse_transform(predictions.reshape((-1, 1)))
        PlotUtils.plot(truth, predictions, epoch)
        # scheduler
        scheduler.step()
    goahead(model)


def goahead(model=None, depth=Constant.DEPTH):
    """Autoregressively forecast future humidity from the last observed window.

    Args:
        model: trained LSTM; when None, the checkpoint
            ``backup/lstm_best_{depth}.pt`` is loaded.
        depth: soil-depth column name used to select the CSV column.

    Returns:
        (forecast dates, forecast values, history dates, history values).
    """
    if model is None:
        model = LSTMModel(n_input=1).to(device).apply(lstm_init_weights)
        model.load_state_dict(torch.load(f'backup/lstm_best_{depth}.pt'))
        model.eval()
    humidity_depth = depth
    df = pd.read_csv('data/merge_data.csv', usecols=[humidity_depth])
    data_raw = df[humidity_depth].values
    data_raw = data_raw.reshape(-1, 1)
    # Rescale with the same min-max transform style used in training.
    data_raw, min_max = scalar(data_raw)
    data, _ = generate_samples(data_raw)

    # Seed the rolling window with the last generated sample, shaped
    # (1, window, 1) for the model's batch-first input.
    window = data[-1]
    window = window.reshape((1,) + window.shape)
    window = torch.from_numpy(window)
    window = window.to(device).float()
    cache = data[-1].flatten().tolist()
    for i in range(21):  # predict 21 months in total
        output = model(window)
        # Shift the window left one step and append the newest prediction.
        for j in range(12):  # window length is 12
            if j < 11:
                window[0, j, 0] = window[0, j + 1, 0]
            else:
                window[0, j, 0] = output[0, -1, 0]
                cache.append(float(output[0, -1, 0].cpu().detach().numpy()))
    # print(cache)
    # Undo the normalization before saving/plotting.
    cache = np.array(cache)
    cache = cache.reshape((-1, 1))
    cache = min_max.inverse_transform(cache)
    cache = cache.flatten()
    # Drop the 12 seed values so only the forecast is written out.
    prediction = pd.DataFrame(cache[12:])
    prediction.to_csv(f'backup/{Constant.DEPTH}.csv')
    # NOTE(review): axis anchors assume monthly data running 2012-01..2023-12 — confirm.
    cache_x = pd.date_range(end='20231201', periods=cache.shape[0], freq='MS')
    fig = plt.figure(figsize=(15.36, 7.49))
    print('cache len=', len(cache))
    plt.plot(cache_x, cache, color='red', label='prediction')
    data_raw = min_max.inverse_transform(data_raw)
    data_x = pd.date_range(start='20120101', periods=data_raw.shape[0], freq='MS')
    print('data_raw len=', len(data_raw))
    print(data_raw)
    plt.plot(data_x, data_raw.flatten(), color='blue', label='humidity')
    plt.legend()
    plt.grid()
    # fig.savefig(f'graph/{Constant.DEPTH}cm.png')
    plt.show()
    return cache_x, cache, data_x, data_raw.flatten()


if __name__ == "__main__":
    # main()
    goahead()
    # dd = ['10', '40', '100', '200']
    # fig, ax = plt.subplots(4, 1, sharex=True)
    # ax = ax.flatten()
    # for i, d in enumerate(dd):
    #     x1, y1, x2, y2 = goahead(depth=d)
    #     ax[i].plot(x1, y1, color='red', label='预测值')
    #     ax[i].plot(x2, y2, color='blue', label=f'{d}cm土壤湿度')
    #     ax[i].legend()
    #     ax[i].grid()
    #     ax[i].set_ylabel('湿度(kg/m2)')
    #     if i == 3:
    #         ax[i].set_xlabel('时间')
    # plt.show()
