# -*- coding: UTF-8 -*-
"""

"""
import json
import sys

import pandas as pd
import numpy as np
import os
import torch
import time
import logging
from logging.handlers import RotatingFileHandler

from sklearn.model_selection import train_test_split
from LSTM import train

from LSTM import Net

class Config:
    """All hyper-parameters and paths for training.

    NOTE(review): the directory creation at the bottom of the class body runs
    at class-definition time (i.e. on import) — confirm this side effect is
    intended.
    """
    # --- Data parameters ---
    feature_columns = list(range(1, 14))     # columns used as features, 0-based in the raw data; an explicit list such as [2,4,6,8] also works
    label_columns = [9,13]                  # columns to predict, 0-based in the raw data (e.g. two columns at once)
    label_in_feature_index = (lambda x,y: [x.index(i) for i in y])(feature_columns, label_columns)  # positions of the labels inside feature_columns (features need not start at column 0)

    predict_day = 72             # how many future steps to predict

    # --- Network parameters ---
    input_size = len(feature_columns)
    output_size = len(label_columns)

    hidden_size = 128           # LSTM hidden size (also its output size)
    lstm_layers = 4             # number of stacked LSTM layers
    dropout_rate = 0.2          # dropout probability
    time_step = 168              # how many past steps feed one prediction (the LSTM time-step count); training data must be longer than this
    time_Interval_step = 4       # stride between training windows: 4-hour spacing (windows 1-20, 5-24, ...)

    # --- Training parameters ---
    do_train = True
    do_log_print_to_screen = True
    add_train = False           # load existing model weights and train incrementally
    shuffle_train_data = True   # shuffle the training data
    use_cuda = False            # train on GPU

    # train_data_rate = 0.97      # fraction of data used for training; test data is 1 - train_data_rate
    valid_data_rate = 0.1      # fraction of the training data held out for validation (used during training for model/parameter selection)

    batch_size = 256
    learning_rate = 0.001
    epoch = 60                  # passes over the whole training set (ignoring early stopping)
    patience = 24                # stop after this many epochs without validation improvement
    random_seed = 42            # random seed for reproducibility

    do_continue_train = False    # feed each run's final_state as the next init_state; RNN-type models only, pytorch only
    continue_flag = ""           # in practice this worked poorly, possibly because it can only train with batch_size = 1
    if do_continue_train:
        shuffle_train_data = False
        # batch_size = 1
        # continue_flag = "continue_"

    # --- Run mode ---
    debug_mode = False  # in debug mode only part of the data is used
    debug_num = 10000  # number of rows used when debugging

    model_name = "model_pytorch.pth"

    # --- Path parameters ---
    model_save_path = "./data/"
    log_save_path = "./log/"
    # do_log_print_to_screen = True
    do_log_save_to_file = True # write config and training progress to the log file

    if not os.path.exists(model_save_path):
        os.makedirs(model_save_path)    # makedirs creates directories recursively
    if do_train and do_log_save_to_file:
        cur_time = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
        log_save_path = log_save_path + cur_time +"/"
        os.makedirs(log_save_path)

class Data:
    """Loads raw measurements, normalizes them, and builds training windows.

    Parameters
    ----------
    config : Config
        Hyper-parameter container (time_step, predict_day, label columns, ...).
    DB : database cursor
        Open cursor exposing ``execute`` / ``fetchall``; used unless
        ``config.debug_mode`` is set.
    """
    def __init__(self, config, DB):
        self.config = config
        self.DB = DB
        self.data = self.read_data()

        self.data_num = self.data.shape[0]

        # Per-column statistics for z-score normalization (de-dimensioning).
        self.mean = np.mean(self.data, axis=0)
        self.std = np.std(self.data, axis=0)
        self.norm_data = (self.data - self.mean)/self.std

        # Rows dropped from the head of the test set (shorter than one time_step).
        self.start_num_in_test = 0

    def read_data(self):
        """Read the raw data: a CSV sample in debug mode, otherwise the most
        recent ~5 years of measurements from the database (365*5*24 hours,
        9 stations per hour)."""
        if self.config.debug_mode:
            input_data = pd.read_csv(self.config.train_data_path, nrows=self.config.debug_num,
                                    usecols=self.config.feature_columns)
        else:
            self.DB.execute("SELECT WateId,Level,Flow FROM `md_wate_meas` ORDER BY `MeasDT` DESC LIMIT 394200;")
            DB_data = self.DB.fetchall()
            # Rows 9 apart must belong to the same station; otherwise the
            # per-hour grouping below would be misaligned.
            # BUG FIX: guard the length first — the original indexed
            # DB_data[-10] and crashed with IndexError on a short result.
            if len(DB_data) < 10 or DB_data[-1][0] != DB_data[-10][0]:
                raise Exception("时间周期错误！")

            # Group every 9 consecutive measurements (one per station) into a
            # single record: all Level values followed by all Flow values.
            input_data = []
            row = []
            flow = []
            for index, record in enumerate(DB_data):
                if index % 9 == 0 and index != 0:
                    row.extend(flow)
                    input_data.append(row)
                    row = []
                    flow = []
                row.append(record[1])
                flow.append(record[2])
            # Flush the final (possibly partial) record.
            row.extend(flow)
            input_data.append(row)

            input_data = np.array(input_data, dtype=np.float32)
            # Drop columns that are always empty in the source table.
            input_data = np.delete(input_data, [10,12,13,15,16], axis=1)
            input_data = CleanData(input_data)

        return input_data

    def get_train_and_valid_data(self):
        """Slice normalized data into (time_step -> predict_day) windows and
        split them into training and validation sets.

        Returns train_x, valid_x, train_y, valid_y as float16 ndarrays.
        """
        feature_data = self.norm_data
        step = self.config.time_Interval_step
        span = self.config.time_step + self.config.predict_day
        # Drop leading rows so the remaining length aligns with the stride.
        shift = (feature_data.shape[0] - span) % step
        feature_data = feature_data[shift:]

        if self.config.do_continue_train:
            # BUG FIX: the original fell through this branch with train_x /
            # train_y undefined, raising NameError below. Fail loudly instead.
            raise NotImplementedError("do_continue_train is not supported by this data pipeline")

        # e.g. (168, 13) = 7 days of features -> (72, n_labels) = next 3 days.
        num_windows = (feature_data.shape[0] - span) // step + 1
        train_x = [feature_data[i*step:i*step + self.config.time_step]
                   for i in range(num_windows)]
        train_y = [feature_data[i*step + self.config.time_step:i*step + span,
                   self.config.label_in_feature_index]
                   for i in range(num_windows)]

        train_x, train_y = np.array(train_x, dtype=np.float16), np.array(train_y, dtype=np.float16)

        # Split (and optionally shuffle) into training and validation sets.
        train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=self.config.valid_data_rate,
                                                              random_state=self.config.random_seed,
                                                              shuffle=self.config.shuffle_train_data)
        return train_x, valid_x, train_y, valid_y

def CleanData(data):
    """Replace zero readings with the nearest previous in-range value, then
    reorder the columns.

    For each column, values equal to 0 are treated as missing and replaced by
    the closest earlier value lying within mean ± 3*std (lower bound clamped
    to 0.1 because readings are non-negative physical quantities).

    BUG FIX vs the original: the replacement loop had an always-true
    ``while(row != 0 or row != data.shape[0])`` that never advanced, and an
    always-true ``or`` bounds check, so it blindly copied the immediately
    preceding value (wrapping to data[-1] on row 0) regardless of validity.
    This version walks backwards until it finds a genuinely in-range value.

    Finally the level block (first 9 columns) and the flow block (remaining
    columns) are each reversed to match the expected station order.

    data: 2-D float ndarray; the cleaning step mutates it in place.
    Returns the column-reordered array (a new ndarray).
    """
    for i in range(data.shape[1]):
        col = data[:, i]
        mean = col.mean()
        std = col.std()
        threshold = 3 * std              # outliers lie beyond 3 standard deviations
        upper_bound = mean + threshold
        lower_bound = mean - threshold

        if lower_bound < 0:
            lower_bound = 0.1            # readings are never negative

        for row in range(data.shape[0]):
            if data[row, i] == 0:
                # Walk backwards to the nearest valid (in-range) value.
                prev = row - 1
                while prev >= 0:
                    if lower_bound <= data[prev, i] <= upper_bound:
                        data[row, i] = data[prev, i]
                        break
                    prev -= 1

    # Reverse the level columns and the flow columns as separate blocks.
    data = np.concatenate((np.flip(data[:, :9], axis=1), np.flip(data[:, 9:], axis=1)), axis=1)
    return data

def load_logger(config):
    """Configure and return the root logger per ``config``.

    Attaches an optional stdout handler and an optional rotating file handler
    (``<log_save_path>out.log``), and dumps every public config attribute to
    the log for traceability.

    BUG FIX vs the original: each call used to stack fresh handlers on the
    root logger, so calling this twice duplicated every log line. Existing
    handlers are now removed first. The fragile ``str(dict)`` splitting hack
    for the config dump is replaced by straightforward per-key formatting.
    """
    logger = logging.getLogger()
    logger.setLevel(level=logging.DEBUG)

    # Drop handlers left over from a previous call to avoid duplicate output.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)

    if config.do_log_print_to_screen:
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(level=logging.INFO)
        stream_handler.setFormatter(logging.Formatter(
            datefmt='%Y/%m/%d %H:%M:%S', fmt='[ %(asctime)s ] %(message)s'))
        logger.addHandler(stream_handler)

    if config.do_log_save_to_file:
        file_handler = RotatingFileHandler(config.log_save_path + "out.log",
                                           maxBytes=1024000, backupCount=5)
        file_handler.setLevel(level=logging.INFO)
        file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        logger.addHandler(file_handler)

        # Record every public config attribute, one per line.
        config_save_str = "\nConfig:\n" + "\n".join(
            f"'{key}': {getattr(config, key)!r}"
            for key in dir(config) if not key.startswith("_"))
        logger.info(config_save_str)
    return logger

# Convert the trained .pth weights into an ONNX inference file.
def pthToONNX(con):
    """Load the trained LSTM weights and export the model to ONNX.

    con: Config instance — supplies the weight path (model_save_path +
    model_name), the window length (time_step) and the feature count
    (input_size).
    """
    model = Net(con)
    # BUG FIX: map_location allows weights saved on a GPU machine to load on
    # a CPU-only host; the path now follows the config instead of being
    # hard-coded (defaults are identical: ./data/model_pytorch.pth).
    state = torch.load(con.model_save_path + con.model_name, map_location='cpu')
    model.load_state_dict(state)
    # BUG FIX: switch to inference mode so dropout is disabled in the export.
    model.eval()

    # Dummy input: one batch of time_step rows with input_size features.
    # (The original hard-coded 13, which breaks if feature_columns changes.)
    input_shape = (1, con.time_step, con.input_size)
    input_names = ["input"]
    output_names = ["output"]
    x = torch.randn(input_shape)
    torch.onnx.export(model, x, "./data/lstm_model.onnx", input_names=input_names, output_names=output_names)

def StartTrain(DB):
    """Entry point: build the dataset from DB, train the LSTM, persist the
    normalization statistics, and export the model to ONNX.

    DB: open database cursor handed through to Data for loading measurements.
    Any failure is caught at this boundary and logged with a traceback.
    """
    config = Config()
    logger = load_logger(config)
    try:
        np.random.seed(config.random_seed)  # fix the seed for reproducibility
        data_gainer = Data(config, DB)

        if config.do_train:
            train_X, valid_X, train_Y, valid_Y = data_gainer.get_train_and_valid_data()
            train(config, logger, [train_X, train_Y, valid_X, valid_Y])

        # Persist mean/std as JSON so inference can undo the z-score scaling.
        with open("./data/config.json", "w") as f:
            json.dump({"mean": data_gainer.mean.tolist(),
                       "std": data_gainer.std.tolist()}, f)

        print("训练完成！config地址为：./data/config.json ")

        # Export an ONNX copy of the trained model for inference.
        pthToONNX(config)

    except Exception:
        logger.error("Train Error", exc_info=True)




# if __name__=="__main__":
#     con = Config()
#     main(con)
