import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn import utils
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier

# Matplotlib rc overrides: SimHei provides CJK glyphs so Chinese plot labels
# render; 'axes.unicode_minus' is disabled because SimHei lacks the unicode
# minus glyph, so ASCII '-' is used instead.
rc = {'font.sans-serif': 'SimHei',
      'axes.unicode_minus': False}
# Global seaborn theme applied to every figure this module produces.
sns.set_theme(style="white", context="talk", font_scale=0.75, rc=rc)


def split_data(data: pd.DataFrame, train_ratio=0.8, test_ratio=0.1, shuffle=False, k_fold: int = None):
    """
    Split a dataset for training and evaluation.

    Default mode: sequential (optionally shuffled) split into train/test/dev;
    the dev set receives whatever remains after the train and test slices.
    K-fold mode: when ``k_fold`` is given, return K (train, test) pairs in
    which each contiguous fold in turn serves as the test partition.

    :param data: full dataset
    :param train_ratio: fraction of rows used for training
    :param test_ratio: fraction used for testing (train_ratio + test_ratio
        should not exceed 1.0); dev ratio = 1 - (train_ratio + test_ratio)
    :param shuffle: shuffle row order before splitting
    :param k_fold: number of cross-validation folds, or None for the plain
        train/test/dev split
    :return: (train_data, test_data, dev_data) DataFrames, or
        (train_data_list, test_data_list) of length K in k-fold mode
    """
    length = len(data)
    if shuffle:
        data = utils.shuffle(data)
    if k_fold is None:  # PEP 8: compare to None with `is`, not `==`
        train_end = int(length * train_ratio)
        test_end = int(length * (train_ratio + test_ratio))
        train_data = data.iloc[:train_end, :]
        test_data = data.iloc[train_end:test_end, :]
        dev_data = data.iloc[test_end:, :]
        return train_data, test_data, dev_data
    train_data_list = []
    test_data_list = []
    for k in range(k_fold):
        fold_start = int(length / k_fold * k)
        fold_end = int(length / k_fold * (k + 1))
        # Fold k is the test set; everything outside it is training data.
        test_data_list.append(data.iloc[fold_start:fold_end, :])
        train_data_list.append(pd.concat([data.iloc[:fold_start, :],
                                          data.iloc[fold_end:, :]]))
    return train_data_list, test_data_list


def get_pre_processed_data_set(filename):
    """
    Load the satisfaction CSV and apply basic preprocessing.

    Rows missing "Arrival Delay in Minutes" are dropped (the dataset is
    large, so deletion is acceptable), and each categorical column is
    integer-encoded with codes assigned in order of first appearance.

    :param filename: path of the CSV file; its first column is the index
    :return: preprocessed DataFrame with categorical columns label-encoded
    """
    data = pd.read_csv(filename, index_col=0)
    data = data.sort_index()
    # Plenty of rows available: simply drop those with a missing delay value.
    data = data[data["Arrival Delay in Minutes"].notnull()]
    # Encode each categorical column as consecutive integers in order of
    # first appearance — same mapping the previous copy-pasted dicts built.
    categorical_columns = ["satisfaction_v2", "Gender", "Customer Type",
                           "Type of Travel", "Class"]
    for col in categorical_columns:
        mapping = {value: code for code, value in enumerate(data[col].unique())}
        data[col] = data[col].map(mapping)
    return data


def feature_selection(data):
    """
    Restrict the dataset to the feature columns used for modelling.

    :param data: preprocessed dataset
    :return: DataFrame containing only the selected feature columns
    """
    selected = [
        'Age',
        'Flight Distance',
        'Inflight entertainment',
        'Ease of Online booking',
        'On-board service',
        'Leg room service',
        'Arrival Delay in Minutes',
    ]
    return data.loc[:, selected]


def x_y_split(data, y_name):
    """
    Separate a dataset into feature matrix and target vector.

    :param data: dataset containing the target column
    :param y_name: name of the target column
    :return: (features DataFrame without the target, target Series)
    """
    features = data.drop(labels=y_name, axis=1)
    target = data[y_name]
    return features, target


# P-R curve
def make_P_R(y_score, test_y, title="P-R", filename="P-R.png", dpi=100):
    """
    Draw a precision-recall curve and save it to *filename*.

    Samples are ranked by predicted score (descending); at cut-off i the
    top-i samples count as positive predictions.

    :param y_score: predicted positive-class scores (pd.Series)
    :param test_y: true binary labels (0/1) aligned with y_score
    :param title: plot title
    :param filename: output image path
    :param dpi: saved-image resolution
    """
    score = pd.concat([y_score, test_y], axis=1)
    score.columns = ['acc', 'real']
    score = score.sort_values('acc', ascending=False)
    n = len(score)
    positives = score['real'].sum()  # TP + FN is constant across cut-offs
    P_R = []
    # Start at cut-off 1: at cut-off 0 precision is 0/0 (the old loop emitted
    # a divide-by-zero/NaN point). Also include the final cut-off n, which
    # the old range(n) loop skipped.
    for i in range(1, n + 1):
        TP = score.iloc[:i, 1].sum()
        P = TP / i  # TP / (TP + FP), since TP + FP == i
        R = TP / positives if positives else 0.0
        P_R.append([P, R])
    P_R_df = pd.DataFrame(P_R, columns=['P', 'R'])
    plt.figure()  # fresh figure so repeated calls don't overlay curves
    # Convention: recall on the x-axis, precision on the y-axis.
    plt.plot(P_R_df['R'], P_R_df['P'])
    plt.title(title)
    plt.savefig(filename, dpi=dpi, bbox_inches='tight')


# ROC curve
def make_ROC(y_score, test_y, title="ROC", filename="ROC.png", dpi=100):
    """
    Draw a ROC curve and save it to *filename*.

    Samples are ranked by predicted score descending — the standard
    threshold sweep; the previous ascending sort swept thresholds backwards
    and disagreed with make_P_R.

    :param y_score: predicted positive-class scores (pd.Series)
    :param test_y: true binary labels (0/1) aligned with y_score
    :param title: plot title
    :param filename: output image path
    :param dpi: saved-image resolution
    """
    score = pd.concat([y_score, test_y], axis=1)
    score.columns = ['acc', 'real']
    score = score.sort_values('acc', ascending=False)
    n = len(score)
    positives = score['real'].sum()   # TP + FN, constant across cut-offs
    negatives = n - positives         # TN + FP, constant across cut-offs
    ROC = []
    for i in range(n + 1):
        TP = score.iloc[:i, 1].sum()
        FP = i - TP
        # Guard against a single-class label vector (0/0 was NaN before).
        TPR = TP / positives if positives else 0.0
        FPR = FP / negatives if negatives else 0.0
        ROC.append([FPR, TPR])
    ROC_df = pd.DataFrame(ROC, columns=['FPR', 'TPR'])
    plt.figure()  # fresh figure so repeated calls don't overlay curves
    # Convention: FPR on the x-axis, TPR on the y-axis.
    plt.plot(ROC_df['FPR'], ROC_df['TPR'])
    plt.title(title)
    plt.savefig(filename, dpi=dpi, bbox_inches='tight')

# Draw the P-R curve and ROC curve side by side
def make_P_R_ROC(y_score, test_y, P_R_title="P_R", ROC_title="ROC", filename="P_R&ROC.png", dpi=100):
    """
    Draw the ROC curve (left) and P-R curve (right) and save the figure.

    Samples are ranked by predicted score descending, the standard threshold
    sweep; the previous ascending sort made the P-R half inconsistent with
    make_P_R and swept the ROC thresholds backwards.

    :param y_score: predicted positive-class scores (pd.Series)
    :param test_y: true binary labels (0/1) aligned with y_score
    :param P_R_title: title of the P-R subplot
    :param ROC_title: title of the ROC subplot
    :param filename: output image path
    :param dpi: saved-image resolution
    """
    score = pd.concat([y_score, test_y], axis=1)
    score.columns = ['acc', 'real']
    score = score.sort_values('acc', ascending=False)
    n = len(score)
    positives = score['real'].sum()   # TP + FN, constant across cut-offs
    negatives = n - positives         # TN + FP, constant across cut-offs
    ROC = []
    P_R = []
    # Start at cut-off 1: at cut-off 0 precision is 0/0 (the old loop
    # emitted a divide-by-zero/NaN point).
    for i in range(1, n + 1):
        TP = score.iloc[:i, 1].sum()
        FP = i - TP
        TPR = TP / positives if positives else 0.0
        FPR = FP / negatives if negatives else 0.0
        ROC.append([FPR, TPR])
        P = TP / i          # TP / (TP + FP), since TP + FP == i
        R = TPR             # recall == TPR
        P_R.append([P, R])
    ROC_df = pd.DataFrame(ROC, columns=['FPR', 'TPR'])
    P_R_df = pd.DataFrame(P_R, columns=['P', 'R'])
    # Size the figure up front instead of stretching the axes past the
    # canvas with subplots_adjust(right=2).
    fig, ax = plt.subplots(1, 2, figsize=(12, 5))
    ax[0].plot(ROC_df['FPR'], ROC_df['TPR'])
    ax[0].set_title(ROC_title)
    ax[1].plot(P_R_df['R'], P_R_df['P'])
    ax[1].set_title(P_R_title)
    plt.savefig(filename, dpi=dpi, bbox_inches='tight')

# Logistic regression
def logistic(train_x, train_y, test_x, test_y, dev_x=None, dev_y=None):
    """
    Fit a logistic-regression classifier and evaluate it.

    Without a dev set: return (positive-class scores for the test set,
    test accuracy). With a dev set: return (test accuracy, dev accuracy).
    """
    model = LogisticRegression()
    model.fit(train_x, train_y)
    test_accuracy = accuracy_score(model.predict(test_x), test_y)
    if dev_x is None and dev_y is None:
        # Positive-class probability, re-indexed to match the test labels.
        y_score = pd.DataFrame(model.predict_proba(test_x)).iloc[:, 1]
        y_score.index = test_y.index
        return y_score, test_accuracy
    dev_accuracy = accuracy_score(model.predict(dev_x), dev_y)
    return test_accuracy, dev_accuracy

# Random forest
def random_forest(train_x, train_y, test_x, test_y, dev_x=None, dev_y=None, n_estimators=200):
    """
    Fit a random-forest classifier and evaluate it.

    Without a dev set: return (positive-class scores for the test set,
    test accuracy). With a dev set: return (test accuracy, dev accuracy).

    :param n_estimators: number of trees in the forest
    """
    model = RandomForestClassifier(n_estimators=n_estimators)
    model.fit(train_x, train_y)
    test_accuracy = accuracy_score(model.predict(test_x), test_y)
    if dev_x is None and dev_y is None:
        # Positive-class probability, re-indexed to match the test labels.
        y_score = pd.DataFrame(model.predict_proba(test_x)).iloc[:, 1]
        y_score.index = test_y.index
        return y_score, test_accuracy
    dev_accuracy = accuracy_score(model.predict(dev_x), dev_y)
    return test_accuracy, dev_accuracy

# K-nearest neighbours
def k_neighbors(train_x, train_y, test_x, test_y, dev_x=None, dev_y=None, n_neighbors=3):
    """
    Fit a k-nearest-neighbours classifier and evaluate it.

    Without a dev set: return (positive-class scores for the test set,
    test accuracy). With a dev set: return (test accuracy, dev accuracy).

    :param n_neighbors: number of neighbours used for voting
    """
    model = KNeighborsClassifier(n_neighbors=n_neighbors)
    model.fit(train_x, train_y)
    test_accuracy = accuracy_score(model.predict(test_x), test_y)
    if dev_x is None and dev_y is None:
        # Positive-class probability, re-indexed to match the test labels.
        y_score = pd.DataFrame(model.predict_proba(test_x)).iloc[:, 1]
        y_score.index = test_y.index
        return y_score, test_accuracy
    dev_accuracy = accuracy_score(model.predict(dev_x), dev_y)
    return test_accuracy, dev_accuracy

# XGBoost
def xgboost(train_x, train_y, test_x, test_y, dev_x=None, dev_y=None):
    """
    Fit an XGBoost classifier and evaluate it.

    Without a dev set: return (positive-class scores for the test set,
    test accuracy). With a dev set: return (test accuracy, dev accuracy).
    """
    model = XGBClassifier()
    model.fit(train_x, train_y)
    test_accuracy = accuracy_score(model.predict(test_x), test_y)
    if dev_x is None and dev_y is None:
        # Positive-class probability, re-indexed to match the test labels.
        y_score = pd.DataFrame(model.predict_proba(test_x)).iloc[:, 1]
        y_score.index = test_y.index
        return y_score, test_accuracy
    dev_accuracy = accuracy_score(model.predict(dev_x), dev_y)
    return test_accuracy, dev_accuracy

# Decision tree
def decision_tree(train_x, train_y, test_x, test_y, dev_x=None, dev_y=None):
    """
    Fit a decision-tree classifier and evaluate it.

    Without a dev set: return (positive-class scores for the test set,
    test accuracy). With a dev set: return (test accuracy, dev accuracy).
    """
    model = DecisionTreeClassifier()
    model.fit(train_x, train_y)
    test_accuracy = accuracy_score(model.predict(test_x), test_y)
    if dev_x is None and dev_y is None:
        # Positive-class probability, re-indexed to match the test labels.
        y_score = pd.DataFrame(model.predict_proba(test_x)).iloc[:, 1]
        y_score.index = test_y.index
        return y_score, test_accuracy
    dev_accuracy = accuracy_score(model.predict(dev_x), dev_y)
    return test_accuracy, dev_accuracy
