import time

import pandas as pd
import numpy as np

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import OneHotEncoder

"""
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('max_colwidth', 100)
"""
from warnings import filterwarnings

filterwarnings('ignore')


def loadUserData(fPath: str):
    """Load a score table from a CSV, tab-separated .txt, or Excel file.

    Every cell is read as a string, stripped, any '*' marker removed
    (presumably a resit/retake flag — confirm with the data owner), and
    converted to int; non-string (missing) cells become NaN. The first
    column becomes the index.

    :param fPath: path ending in .csv, .txt (tab-separated), or anything
                  else (treated as Excel)
    :return: parsed DataFrame, or None when the path is too short to be valid
    """
    if len(fPath) <= 4:  # shorter than "x.csv" cannot be a real file path
        print("未成功载入用户文件,请检查路径")
        return None

    # Dispatch on the extension; all readers load every cell as a string.
    if fPath.endswith('.csv'):
        raw = pd.read_csv(fPath, dtype=str)
    elif fPath.endswith('.txt'):
        raw = pd.read_csv(fPath, dtype=str, delimiter='\t')
    else:
        raw = pd.read_excel(fPath, dtype=str)

    def to_int(x):
        # Strings are cleaned and parsed; anything else (NaN) stays missing.
        return int(x.strip().replace('*', '')) if isinstance(x, str) else np.nan

    try:
        dfx = raw.map(to_int)        # pandas >= 2.1
    except AttributeError:
        dfx = raw.applymap(to_int)   # older pandas: DataFrame.map not available
    dfx.set_index(dfx.columns[0], inplace=True)
    return dfx


def createDir(filePath):
    """Create the directory *filePath* (including missing parents) if absent.

    Prints a notice only when a directory is actually created. Creation
    failures are swallowed, matching the original best-effort behavior.

    :param filePath: directory path to create
    """
    import os
    if not os.path.exists(filePath):
        try:
            # makedirs also builds missing parent directories; the original
            # os.mkdir failed on nested paths and the error was swallowed.
            os.makedirs(filePath, exist_ok=True)
            print('新建文件夹：%s' % filePath)
        except OSError:
            # Best effort only: invalid path / permission errors are ignored.
            pass

def loadFileData(path_x: str, path_y: str, itemColumn: int, reFlag: str = '*'):
    """Load the feature scores and one target-subject score column.

    :param path_x: term-1/2 score file (features)
    :param path_y: term-3 score file (labels)
    :param itemColumn: zero-based column index of the target subject in term 3
    :param reFlag: resit/retake marker (currently unused here)
    :return: (feature DataFrame, label Series)
    """
    features = loadUserData(path_x)
    labels = loadUserData(path_y)
    column = itemColumn
    # Out-of-range subject index falls back to the first column.
    if not 0 <= column <= labels.shape[1] - 1:
        print('请检查目标科目的列编号...')
        print('自动转为第一列预测')
        column = 0
    return features, labels.iloc[:, column]


def fillMissingData(df: pd.DataFrame):
    """Fill NaNs in every column with the column's truncated integer mean.

    :param df: numeric DataFrame, possibly containing NaN
    :return: the same DataFrame, with NaNs replaced
    """
    for col in df.columns:
        # Assign back instead of fillna(inplace=True) on a column selection:
        # that is chained assignment, which emits FutureWarning and silently
        # does nothing under pandas copy-on-write (pandas 3.0 default).
        df[col] = df[col].fillna(int(df[col].mean()))
    return df


def encodeDF(df: pd.DataFrame):
    """One-hot encode a DataFrame of letter grades.

    Each cell is mapped to its character code via ord() (assumes every cell
    is a single-character string such as 'A'/'F' — TODO confirm with maskDF
    output), then one-hot encoded.

    :param df: DataFrame of single-character grade labels
    :return: encoded feature matrix (sparse by default for OneHotEncoder)
    """
    # BUG FIX: categories must be 'auto' or a list of per-column category
    # arrays; the original categories=14 made fit_transform raise ValueError.
    return OneHotEncoder(categories='auto').fit_transform(
        df.applymap(lambda x: ord(x)))


def maskLevel(dfx: pd.DataFrame, n: int = 2):
    """Discretize a numeric score DataFrame into letter levels.

    :param dfx: numeric score DataFrame
    :param n: 2 -> P/F (pass mark 60), 3 -> A/B/F, 5 -> A-D/F
    :return: DataFrame of level letters (object dtype)
    :raises ValueError: when n is not 2, 3 or 5
    """
    if n == 2:
        failing = dfx < 60
        return dfx.mask(failing, 'F').where(failing, 'P')
    cutoffs = {
        3: [(0, 'F'), (60, 'B'), (80, 'A')],
        5: [(0, 'F'), (60, 'D'), (70, 'C'), (80, 'B'), (90, 'A')],
    }
    if n not in cutoffs:
        raise ValueError('输入的划分值未定义')
    graded = dfx
    for bound, letter in cutoffs[n]:
        # Keep already-assigned letters where score < bound, else upgrade.
        graded = graded.where(dfx < bound, letter)
    return graded


def maskLabel(label: pd.Series, n: int = 2):
    """Discretize a numeric score Series into letter grades.

    :param label: numeric score Series
    :param n: 2 -> P/F (pass mark 60), 3 -> A/B/F, 5 -> A-D/F
    :return: Series of grade letters (object dtype)
    :raises ValueError: when n is not 2, 3 or 5
    """
    if n == 2:
        failing = label < 60
        return label.mask(failing, 'F').where(failing, 'P')
    cutoffs = {
        3: [(0, 'F'), (60, 'B'), (80, 'A')],
        5: [(0, 'F'), (60, 'D'), (70, 'C'), (80, 'B'), (90, 'A')],
    }
    if n not in cutoffs:
        raise ValueError('输入的划分值未定义')
    graded = label
    for bound, letter in cutoffs[n]:
        # Keep already-assigned letters where score < bound, else upgrade.
        graded = graded.where(label < bound, letter)
    return graded


def maskDF(dfx: pd.DataFrame, label: pd.Series, n: int = 2):
    """Discretize features and label with the same n-level grading scheme."""
    graded_features = maskLevel(dfx, n)
    graded_label = maskLabel(label, n)
    return graded_features, graded_label


def saveModel(gcModel: GridSearchCV, path: str, item: int, level: int, x_test: pd.DataFrame, y_test: pd.DataFrame,
              userFlag: bool = False):
    """Persist gcModel under './user/' or './public/', keeping the better model.

    The file name encodes the subject and level: '<path>-<item>-<level>'.
    If a saved model already exists, its test-set accuracy is compared with
    the new one and only the higher-scoring model is kept on disk.

    :param gcModel: fitted GridSearchCV estimator to save
    :param path: base path such as './model'
    :param item: subject column number (1-based) used in the file name
    :param level: grade-level scheme (2/3/5) used in the file name
    :param x_test: held-out features used for scoring
    :param y_test: held-out labels used for scoring
    :param userFlag: True -> save under './user/', False -> './public/'
    """
    import os
    import pickle

    def _dump(model):
        # Serialize the model to the final path.
        with open(path, 'wb') as f:
            pickle.dump(model, f)

    subdir = './user' if userFlag else './public'
    createDir(subdir)
    path = path.replace('./', subdir + '/') + '-' + str(item) + '-' + str(level)

    score_new = gcModel.score(x_test, y_test)
    if not os.path.exists(path):
        _dump(gcModel)
        print('首次保存此类模型，准确为：', score_new.__format__('.2f'))
        return

    gc_old = loadModel(path, item, level, userFlag, True)
    try:
        score_old = gc_old.score(x_test, y_test)
    except Exception:
        # Old model is incompatible with the current data: replace it.
        print('模型与预测源不匹配，保留新模型')
        os.remove(path)
        _dump(gcModel)
        return
    if score_old < score_new:
        print('此次获取到更高准确率的模型，准确为：', score_new.__format__('.2f'))
        _dump(gcModel)
    else:
        # BUG FIX: the original's finally-block unconditionally overwrote the
        # better old model with the new one; now the old model is truly kept.
        print('保留原模型，准确为：', score_old.__format__('.2f'))


def loadModel(path: str, item: int, level: int, userFlag: bool = False, checkFlag: bool = False):
    """Unpickle a saved model.

    :param path: base path ('./model'), or the final file path when checkFlag
    :param item: subject column number encoded in the file name
    :param level: grade-level scheme encoded in the file name
    :param userFlag: choose the './user/' vs './public/' subtree
    :param checkFlag: set True when called from saveModel with a ready path
    :return: the unpickled estimator
    """
    import pickle
    if not checkFlag:
        prefix = './user/' if userFlag else './public/'
        path = path.replace('./', prefix) + '-' + str(item) + '-' + str(level)
    # NOTE(review): pickle.load executes arbitrary code; only load trusted files.
    with open(path, 'rb') as f:
        return pickle.load(f)


# def stop_thread(thread: threading):
#     tid = ctypes.c_long(thread.ident)
#     exctype = SystemExit
#     if not inspect.isclass(exctype):
#         exctype = type(exctype)
#     res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
#     if res == 0:
#         raise ValueError("invalid thread id")
#     elif res != 1:
#         ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
#         raise SystemError("PyThreadState_SetAsyncExc failed")


def getParam(paramFlag: bool):
    """Return the random-forest hyper-parameter grid for GridSearchCV.

    :param paramFlag: True -> the full search grid, False -> a reduced grid
    :return: dict of RandomForestClassifier hyper-parameter candidates
    """
    if paramFlag:
        return {
            "max_features": ["auto", "sqrt", "log2"],
            "n_estimators": [120, 200, 300],
            "max_depth": [5, 8, 15, 25],
        }
    return {
        "max_features": ["auto"],
        "n_estimators": [120],
        "max_depth": [5, 8, 15],
    }


def DataPreDeal(dfx: pd.DataFrame, dfy: pd.DataFrame, n: int):
    """Fill missing feature values, discretize the label, and split 75/25.

    :param dfx: feature DataFrame (may contain NaN)
    :param dfy: numeric label Series
    :param n: number of grade levels (2/3/5)
    :return: x_train, x_test, y_train, y_test
    """
    features = fillMissingData(dfx)
    graded_labels = maskLabel(dfy, n)
    return train_test_split(features, graded_labels, test_size=0.25)


def predict(path_x: str, path_y: str, item: int, level: int, userFlag: bool = True, paramFlag: bool = False):
    """Train, tune, and persist a random-forest grade predictor for one subject.

    :param path_x: term-1/2 score file (features)
    :param path_y: term-3 score file (labels)
    :param item: 1-based column number of the target subject in the term-3 file
    :param level: grade-level scheme (2/3/5)
    :param userFlag: save the model under './user/' instead of './public/'
    :param paramFlag: True -> search the full hyper-parameter grid
    """
    # Load features and the target column (item is 1-based, iloc is 0-based).
    dfx, dfy = loadFileData(path_x, path_y, item - 1)

    # NOTE: an earlier (disabled) step dropped feature columns whose score
    # range was below 20; feature filtering is currently not applied.

    # Fill missing values, discretize the label, and split train/test.
    x_train, x_test, y_train, y_test = DataPreDeal(dfx, dfy, level)

    # Random forest tuned via grid search with 4-fold cross-validation.
    forest = RandomForestClassifier(n_jobs=-1)
    searcher = GridSearchCV(forest, param_grid=getParam(paramFlag), cv=4)
    searcher.fit(x_train, y_train)

    # Persist, keeping whichever of the old/new models scores better.
    saveModel(searcher, './model', item, level, x_test, y_test, userFlag)


if __name__ == "__main__":
    dfy = loadUserData('./grade3.xlsx')
    # # print(dfy.columns)
    # # print(len(dfy.columns))
    item = range(1, len(dfy.columns) + 1)
    # item = range(1, 3)
    level = [2, 3, 5]
    s = time.time()
    for i in item:
        for l in level:
            predict('C:\\Users\\Bink\\Desktop\\test.xlsx', 'C:\\Users\\Bink\\Desktop\\grade3.xlsx', i, l, True)
    print("单线程：生成模型耗时：%2f s" % (time.time() - s))
