# encoding:utf-8
"""
@project:DLFactor_Inference
@author:liuzeyu
@time:2022-08-04
@version:v1.12
@修复了已知的问题，支持pt模型离线推断
"""
import functools
import os
import time
import pandas as pd
import numpy as np
import sys
import configparser
from DL_Models import *
import torch.utils.data
from torch.utils.data import DataLoader


def timer(func):
    """Decorator that measures and prints the wall-clock runtime of *func*."""

    @functools.wraps(func)
    def _timed(*args, **kwargs):
        # perf_counter gives a monotonic, high-resolution clock suitable for timing.
        started = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed_time = time.perf_counter() - started
        print(f"{str(func.__name__)} 方法运行用时: {elapsed_time:0.4f} seconds")
        return result

    return _timed


class Test_data:
    """Builds the inference input (features + labels) from the CSV paths in the config."""

    def __init__(self, parser: configparser.ConfigParser):
        self.parser = parser
        # CSV locations and the inclusive trade-date window to run inference over.
        self.data_path = parser.get('path', 'data_path')
        self.label_path = parser.get('path', 'label_path')
        self.start_date = parser.getint('para', 'start_dt')
        self.end_date = parser.getint('para', 'end_dt')

    @staticmethod
    @timer
    def combine_data(dir_path: str, export: bool = False):
        """Merge every .csv file directly under *dir_path* into a single DataFrame.

        Fixes vs. the original: each non-first file was read from disk twice;
        every file is now read exactly once. The merged frame is also returned
        in both branches (previously ``export=True`` returned ``None``).

        :param dir_path: directory containing the csv files
        :param export: when True, also write the result to <dir_path>/combine.csv
        :return: the merged DataFrame (empty DataFrame if no files were read)
        """
        frames = []
        for entry in os.listdir(dir_path):
            full_path = dir_path + "/" + entry
            # NOTE(review): like the original, stop at the first non-file entry;
            # if subdirectories can appear mid-listing, `continue` may be intended.
            if not os.path.isfile(full_path):
                break
            frames.append(pd.read_csv(full_path))
        result = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
        if export:
            result.to_csv(dir_path + '/combine.csv')
            print("合并结果已输出到相对路径根目录，请查看combine.csv")
        return result

    def get_test_data(self):
        """Build the inference data set restricted to [start_dt, end_dt].

        :return: tuple of (first two label columns for the selected rows,
                 DataLoader of (feature matrix, label) pairs, batch_size=100,
                 unshuffled so order matches the index frame)
        """
        data = pd.read_csv(self.data_path)
        label = pd.read_csv(self.label_path)
        trade_dt = label.trade_dt
        trdata = []
        index_id = []
        for i in range(label.shape[0]):
            if int(trade_dt[i]) >= self.start_date:
                index_id.append(i)
                # One sample = a window of feature rows (columns 0-1 are identifiers,
                # dropped) paired with the last label column.
                # NOTE(review): stride is 30 rows but the slice i*30:i*30+29 takes
                # only 29 rows — confirm the off-by-one is intentional.
                trdata.append((data[i * 30:i * 30 + 29].values[:, 2:].astype(np.float32), label.iloc[i][-1]))
                # NOTE(review): the row that first exceeds end_dt is appended
                # before the loop breaks, so the window end is effectively inclusive+1.
                if int(trade_dt[i]) > self.end_date:
                    break
        return label.iloc[index_id, 0:2], DataLoader(dataset=trdata, batch_size=100, shuffle=False)


class Inference:
    """Runs factor inference with a trained model and saves the result as a feather file.

    Two model formats are supported:
      * ``.pt``  -- a state dict; the network class is rebuilt from the config
                    plus the class name encoded in the model file name.
      * ``.pth`` -- a fully pickled model object; no reconstruction needed.
    """

    def __init__(self, test_index: pd.DataFrame, test_data: torch.utils.data.DataLoader, parser: configparser.ConfigParser):
        # test_index: first two label columns (stock code, trade date) aligned
        # row-for-row with the samples produced by test_data.
        self.test_index = test_index
        self.test_data = test_data
        self.parser = parser
        self.device = torch.device(parser.get('dev', 'device'))
        self.pth_path = parser.get('path', 'pth_path')
        self.pt_path = parser.get('path', 'pt_path')

    @staticmethod
    def str_to_classname(name):
        """Resolve *name* (case-insensitively) to a callable defined in this module.

        :param name: string naming the class to look up
        :return: the class object itself (caller instantiates it)
        :raises ValueError: if no callable with that name exists in the module
        """
        module = sys.modules[__name__]
        candidates = [attr for attr in dir(module) if callable(getattr(module, attr))]
        lowered = [attr.lower() for attr in candidates]
        if name.lower() not in lowered:
            raise ValueError(name + "类不存在")
        return getattr(module, candidates[lowered.index(name.lower())])

    @staticmethod
    def _output_name(model_path: str) -> str:
        """Derive the factor file name from the model file name.

        Bug fix: the original used ``path.split("\\\\")`` which only handled
        Windows-style separators; ``os.path.basename``/``splitext`` work with
        the native separator on any platform.
        """
        return os.path.splitext(os.path.basename(model_path))[0] + '.f'

    def _map_location(self):
        """map_location for torch.load: 'cpu' when configured for CPU, else None (torch default)."""
        return 'cpu' if self.parser.get('dev', 'device') == 'cpu' else None

    def _infer_and_save(self, model, output_name: str) -> None:
        """Shared inference loop: run *model* over the test loader and save the factors.

        NOTE: mutates ``self.test_index`` in place (adds the 'alpha' column and
        renames the columns to stockcode/date/alpha — assumes exactly two
        incoming columns).
        """
        model.eval()
        alpha = []
        with torch.no_grad():
            for x, _y in self.test_data:
                pre = model(x.to(self.device))
                alpha.append(pre.cpu().numpy())
        alpha = np.concatenate(alpha, axis=0)
        self.test_index['alpha'] = alpha
        self.test_index.columns = ['stockcode', 'date', 'alpha']
        # feather requires a default RangeIndex, hence the reset.
        result = self.test_index.reset_index(drop=True)
        result["date"] = result["date"].astype(int)
        result.to_feather(output_name)
        print("推断因子过程已结束，结果请查看" + output_name)

    def inference_factor_pt(self) -> None:
        """Infer factors from a ``.pt`` state-dict model.

        The network class is named by the model file (convention:
        ``CNN-P2-43&44-M3-1903.pt`` -> class ``CNN``) and its hyper-parameters
        come from the ``[model]`` section of the config.

        :return: None; on success the factor file is written to disk
        """
        in_p = self.parser.getint('model', 'in')
        hidden_size = self.parser.getint('model', 'hidden')
        out = self.parser.getint('model', 'out')
        # File-name convention: CNN-P2-43&44-M3-1903.pt -> class name "CNN"
        model_name = os.path.split(self.pt_path)[-1].split("-")[0]
        output_name = self._output_name(self.pt_path)
        print("生成的因子将被命名为：" + output_name)
        algo_class = Inference.str_to_classname(model_name)
        model = algo_class(in_p=in_p, in_b=0, hidden=hidden_size, out=out, rnn=model_name).to(self.device)
        model.load_state_dict(torch.load(self.pt_path, map_location=self._map_location()))
        self._infer_and_save(model, output_name)

    def inference_factor_pth(self) -> None:
        """Infer factors from a ``.pth`` model (whole model pickled; no rebuild needed).

        :return: None; on success the factor file is written to disk
        """
        output_name = self._output_name(self.pth_path)
        print("生成的因子将被命名为：" + output_name)
        model = torch.load(self.pth_path, map_location=self._map_location())
        self._infer_and_save(model, output_name)


class Factor:
    """Helpers for viewing and merging factor files (feather format, .f suffix)."""

    def __init__(self, path):
        # path: location of a single factor (.f / feather) file
        self.path = path

    def view(self, print_factor: bool = True) -> pd.DataFrame:
        """Load the factor file, optionally print it, and return it.

        :param print_factor: whether to print the loaded frame
        :return: DataFrame holding the factor data
        """
        factor = pd.read_feather(self.path)
        if print_factor:
            print(factor)
        return factor

    @staticmethod
    @timer
    def combine_factors(dir_path: str, export=True) -> pd.DataFrame:
        """Vertically concatenate every factor file (.f) under *dir_path*.

        Bug fixes vs. the original:
          * the first file's rows were silently dropped (``result`` was never
            assigned on the first iteration) and each later iteration replaced
            ``result`` with the *current* file concatenated with itself,
            discarding everything accumulated so far — files are now read once
            each and stacked in listing order;
          * the exported file is now named ``combine.f``, as the log message
            always claimed (was a hard-coded ``LSTM_P2_43&44_M3_1903.f``);
          * the merged frame is returned even when ``export=True``.

        :param dir_path: directory containing the factor files
        :param export: when True, also write the result to <dir_path>/combine.f
        :return: the merged DataFrame (empty DataFrame if no files were read)
        """
        frames = []
        for entry in os.listdir(dir_path):
            full_path = dir_path + "/" + entry
            # Preserve the original early-stop at the first non-file entry.
            if not os.path.isfile(full_path):
                break
            frames.append(pd.read_feather(full_path))
        result = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
        if export:
            result.to_feather(dir_path + '/combine.f')
            print("合并结果已输出到相对路径根目录，请查看combine.f")
        return result


class Model:
    """Utility for inspecting a saved model's structure and weights."""

    def __init__(self, parser: configparser.ConfigParser):
        self.parser = parser
        self.device = torch.device(parser.get('dev', 'device'))
        self.pth_path = parser.get('path', 'pth_path')
        self.pt_path = parser.get('path', 'pt_path')

    def view(self, model_type: str, print_model: bool = True) -> None:
        """Load the configured model and optionally print its structure and weights.

        :param model_type: model format; only 'pt' and 'pth' are supported
        :param print_model: whether to print the loaded model
        :return: None
        """
        # 'cpu' forces a CPU load; None keeps torch's default device mapping.
        map_location = 'cpu' if self.parser.get('dev', 'device') == 'cpu' else None
        if model_type == 'pth':
            # .pth stores the whole pickled model object.
            model = torch.load(self.pth_path, map_location=map_location)
        elif model_type == 'pt':
            # .pt stores a state dict; rebuild the network from config + file name.
            in_p = self.parser.getint('model', 'in')
            hidden_size = self.parser.getint('model', 'hidden')
            out = self.parser.getint('model', 'out')
            # File-name convention: CNN-P2-43&44-M3-1903.pt -> class name "CNN"
            model_name = os.path.split(self.pt_path)[-1].split("-")[0]
            # Use the same case-insensitive lookup as Inference (the original
            # used globals()[...], which was case-sensitive and raised a bare
            # KeyError on unknown names).
            algo_class = Inference.str_to_classname(model_name)
            model = algo_class(in_p=in_p, in_b=0, hidden=hidden_size, out=out, rnn=model_name).to(self.device)
            model.load_state_dict(torch.load(self.pt_path, map_location=map_location))
        else:
            print("暂不支持该格式的模型，请使用pt/pth模型")
            return
        if print_model:
            print(model)


# class Automatization:
#     def __init__(self, parser: configparser):
#         self.parser = parser
#         self.modelset_location = parser.get('auto', 'modelset_location')
#         self.dataset_location = parser.get('auto', 'dataset_location')
#         self.labelset_location = parser.get('auto', 'labelset_location')
#
#     def ergodic_dataset(self):
#         dataset = os.listdir(self.dataset_location)
#         labelset = os.listdir(self.labelset_location)
#         for d in dataset:
#             dataset_path = self.dataset_location + "/" + d
#             if os.path.isfile(dataset_path):
#                 for l in labelset:
#                     labelset_path = self.labelset_location + "/" + l
#                     if os.path.isfile(labelset_path):
#                         TestYear = '20' + l.split("_")[0]
#                         start_dt = TestYear + '0101'
#                         end_dt = TestYear + '1231'
#                         self.parser.set('para', 'start_dt', start_dt)
#                         self.parser.set('para', 'end_dt', end_dt)
#                         self.parser.set('path', 'data_path', dataset_path)
#                         self.parser.set('path', 'label_path', labelset_path)
#                         with open('Inference_config_auto.ini', 'w') as conf:
#                             self.parser.write(conf)
#                         print('config已更新')


if __name__ == '__main__':
    ## Load the configuration file
    config = configparser.ConfigParser()
    config.read("Inference_config.ini", encoding='utf-8')

    ## Build the test set (Test_data.get_test_data)
    test_index, testloader = Test_data(parser=config).get_test_data()

    ## Infer factors (Inference.inference_factor_pt / inference_factor_pth)
    ## Inference from a .pt model file: suitable when the model has many
    ## parameters or a complex structure (state dict + rebuilt network).
    # Inference(test_index=test_index, test_data=testloader, parser=config).inference_factor_pt()
    ## Inference from a .pth model file: the whole model object is pickled,
    ## so the network structure does not need to be rebuilt.
    Inference(test_index=test_index, test_data=testloader, parser=config).inference_factor_pth()

    ## Inspect the parameters stored in the model file
    # M_pt = Model(parser=config).view(model_type='pt')
    M_pth = Model(parser=config).view(model_type='pth')

    ## View a generated factor file (Factor.view)
    # path = r"F:\WebProject\feater\待处理数据\factors\factors\TRANS-B2-BE&BT-M3-1912.f"
    # F = Factor(path=path).view()

    ## Vertically concatenate several factor files (static Factor.combine_factors)
    # path = r'F:\PyProject\DL_Factors\DLFactor_Inference\combine_factor'
    # result = Factor.combine_factors(dir_path=path)

    # config = configparser.ConfigParser()
    # config.read("Inference_config_auto.ini", encoding='utf-8')
    # Automatization(parser=config).ergodic_dataset()

    pass
