# -*- coding: utf-8 -*-

"""
@author: Laowang
@contact: QQ:1125564921
@Created on: 2022/6/27
@Remark: 
"""
import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn import metrics
from typing import List
from sklearn.preprocessing import StandardScaler

from framework_strategy.gopy.utils.ptable import pretty_table


def recall_score(predict_y, real_y, label=None, is_print=False):
    """Compute macro, weighted and micro recall; return the micro value.

    Parameters
    ----------
    predict_y : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    real_y : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    label : iterable, optional
        Label set forwarded to ``metrics.recall_score`` as ``labels``.
        Defaults to an empty list, matching the historical behavior
        (sklearn then derives the labels from the data).
    is_print : bool
        When True, render the three averaged scores as a pretty table.

    Returns
    -------
    float
        The micro-averaged recall. Only the last computed score is
        returned, preserved for backward compatibility with callers.
    """
    # `label=None` avoids the shared-mutable-default pitfall; a fresh
    # list is built on every call, exactly as `list([])` did before.
    label = [] if label is None else list(label)

    # One call per averaging strategy instead of three copy-pasted blocks.
    averages = ("macro", "weighted", "micro")
    row = [
        metrics.recall_score(real_y, predict_y, labels=label, average=avg)
        for avg in averages
    ]

    if is_print:
        pretty_table(list(averages), row, title="Recall Score")

    # Historical contract: the micro score (last computed) is the result.
    return row[-1]


def predict_result(predict_y, real_y, *, total_num=0, test_num=0, is_print=False):
    """
    Calculate the accuracy of the prediction and optionally print a table.

    Parameters
    ----------
    predict_y : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    real_y : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    total_num : int
        Total number of training sample entries (shown when > 0).
    test_num : int
        Number of test sample entries (shown when > 0).
    is_print : bool
        When True, render the statistics via ``pretty_table``.

    Returns
    -------
    None
        This helper only tallies and (optionally) prints; it has no
        return value.
    """
    title = "Predictive accuracy"
    n = len(predict_y)
    # Guard: the original divided by len(predict_y) unconditionally,
    # raising ZeroDivisionError for empty predictions.
    if n == 0:
        return

    # Count matches with zip instead of a C-style index loop.
    true = sum(1 for p, r in zip(predict_y, real_y) if p == r)
    false = n - true

    ptable_columns = [
        "Accurate item of prediction",
        "Prediction error item",
        "Predictive accuracy",
        "Prediction error rate"
    ]

    ptable_value = [
        true,
        false,
        round(true / n, 2),
        round(false / n, 2),
    ]

    # Optional leading columns, inserted front-first so total_num ends
    # up leftmost when both are supplied (same order as before).
    if test_num > 0:
        ptable_columns.insert(0, "Test data entry")
        ptable_value.insert(0, test_num)

    if total_num > 0:
        ptable_columns.insert(0, "General training items")
        ptable_value.insert(0, total_num)

    if is_print:
        pretty_table(ptable_columns, ptable_value, title=title)


# Feature/target split (optionally SMOTE-rebalanced)
def sample_stand_handle(data: pd.DataFrame, column, isHandle=False):
    """Split *data* into a feature matrix and a target column.

    When *isHandle* is True, the samples are first rebalanced with SMOTE
    (random_state=7) so minority classes are synthetically oversampled;
    a histogram of the rebalanced label column is drawn (but not shown).

    Parameters
    ----------
    data : pd.DataFrame
        Full sample table containing the target column.
    column : str
        Name of the target column.
    isHandle : bool
        Apply SMOTE oversampling before the split when True.

    Returns
    -------
    tuple
        ``(x, y)`` — feature DataFrame and target Series.
    """
    if isHandle:
        # Address class imbalance by synthesising minority-class samples.
        smote = SMOTE(sampling_strategy='auto', random_state=7)
        balanced_features, balanced_labels = smote.fit_resample(
            data.drop([column], axis=1), data[column])
        table = pd.concat([balanced_features, balanced_labels], axis=1)
        # Histogram of the rebalanced labels; rendering is left to the
        # caller (plt.show() was intentionally commented out upstream).
        table[column].plot(x=[0, 1, -1], kind="hist")
        # plt.show()
    else:
        table = data

    x = table.drop(column, axis=1)
    y = table.loc[:, column]
    return x, y


# Sample standardization
def sample_stand_handle_mid(data: pd.DataFrame, column, isHandle=False):
    """Split *data* via ``sample_stand_handle`` and z-score the features.

    Parameters mirror ``sample_stand_handle``. Returns the standardised
    feature matrix (ndarray from ``StandardScaler.fit_transform``) and
    the untouched target series.
    """
    features, target = sample_stand_handle(data, column, isHandle)
    scaler = StandardScaler()
    return scaler.fit_transform(features), target
