import pandas as pd
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['axes.unicode_minus'] = False
import seaborn as sns
import logging
import io
import base64
import os


def load_and_split_data(file_path, label, n_train, n_test, L, small_class_flag):
    """Load samples from a .npy file, attach a constant label, and split
    them into train/test subsets.

    Parameters
    ----------
    file_path : path to a .npy array; the first n_train + n_test rows and
        first L columns are used as features.
    label : the single class label assigned to every loaded sample.
    n_train, n_test : requested train / test sample counts.
    L : number of feature columns to keep.
    small_class_flag : if non-zero, the training split is truncated to
        small_class_flag percent of n_train (simulates a minority class).

    Returns
    -------
    (x_train, x_test, y_train, y_test) numpy arrays.
    """
    raw = np.load(file_path)
    total = n_train + n_test
    features = raw[:total, :L]
    targets = np.array([label] * len(features))

    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=n_test / total,
        random_state=None, shuffle=True)

    # Optionally keep only a fraction of the training data to make this
    # class artificially small.
    if small_class_flag != 0:
        keep = int(n_train * small_class_flag * 0.01)
        x_train = x_train[:keep]
        y_train = y_train[:keep]

    return x_train, x_test, y_train, y_test

# 绘制混淆矩阵热力图
# Plot a confusion-matrix heatmap and return it as a Base64 PNG.
def plot_confusion_matrix(cm, title, labels, fontsize=20, annot_fontsize=14):
    """Render the confusion matrix *cm* as a seaborn heatmap and return the
    image as a Base64-encoded transparent PNG string.

    Parameters
    ----------
    cm : 2-D array annotated with one decimal place (callers pass row
        percentages here).
    title : chart title.
    labels : class names used as tick labels on both axes.
    fontsize : font size for the title and axis labels.
    annot_fontsize : font size for cell annotations and tick labels.
    """
    fig = plt.figure(figsize=(10, 7), facecolor='none')
    sns.heatmap(cm, annot=True, fmt='.1f', cmap='coolwarm', cbar=False, annot_kws={"size": annot_fontsize},
                xticklabels=labels, yticklabels=labels)
    plt.xlabel('Predicted', fontsize=fontsize, color='white')
    plt.ylabel('True', fontsize=fontsize, color='white')
    plt.title(title, fontsize=fontsize, color='white')
    plt.xticks(fontsize=annot_fontsize, color='white')
    plt.yticks(fontsize=annot_fontsize, color='white')
    # Render the figure into an in-memory byte buffer.
    buf = io.BytesIO()
    plt.savefig(buf, format='png', transparent=True)
    buf.seek(0)

    # Encode the PNG bytes as Base64 for embedding (e.g. in HTML).
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
    # BUG FIX: the original never closed the figure, leaking one Matplotlib
    # figure per call when used outside main()'s final plt.close('all').
    plt.close(fig)
    return image_base64

def binary_strings_to_integers(binary_strings):
    """Convert a binary string into a list of byte values (ints 0-255).

    The string is left-padded with '0' up to the next multiple of 8 bits,
    then split into 8-bit chunks, each parsed as an unsigned integer.

    BUG FIX: the original appended exactly ONE '0' at the END of the string
    when its length was not a multiple of 8, which neither matched the
    stated intent (pad with leading zeros) nor guaranteed that every chunk
    is a full byte.
    """
    if len(binary_strings) % 8 != 0:
        # Left-pad to the next multiple of 8 so every chunk is a full byte.
        padded_len = ((len(binary_strings) + 7) // 8) * 8
        binary_strings = binary_strings.zfill(padded_len)
    # Split into 8-character groups and parse each as a base-2 integer.
    chunks = [binary_strings[i:i + 8] for i in range(0, len(binary_strings), 8)]
    return [int(chunk, 2) for chunk in chunks]


def binary_df_to_classes(df, L):
    """Convert each row's binary string into a fixed-length byte vector and
    group the vectors by their class label.

    Parameters
    ----------
    df : DataFrame with 'data' (binary string) and 'label' columns.
    L : target vector length; vectors are truncated or zero-padded to L.

    Returns
    -------
    (class_data, all_data_array) where class_data maps each label to a
    numpy array of its vectors and all_data_array is an object-dtype numpy
    array of every vector in row order.
    """
    per_class = {}
    all_rows = []

    for _, row in df.iterrows():
        # Binary string -> list of byte values.
        vector = binary_strings_to_integers(row['data'])

        # Force the vector to exactly L entries: truncate or zero-pad.
        if len(vector) >= L:
            vector = vector[:L]
        else:
            vector = vector + [0] * (L - len(vector))

        per_class.setdefault(row['label'], []).append(vector)
        all_rows.append(vector)

    # Materialise each class's list of vectors as a numpy array.
    class_data = {lbl: np.array(rows) for lbl, rows in per_class.items()}
    all_data_array = np.array(all_rows, dtype=object)

    return class_data, all_data_array

def binary_df_to_classes_2(df, L):
    """Convert each row's binary string into a fixed-length byte vector.

    Same as binary_df_to_classes but without grouping by label — used for
    unlabeled test data.

    Parameters
    ----------
    df : DataFrame with a 'data' column of binary strings.
    L : target vector length; vectors are truncated or zero-padded to L.

    Returns
    -------
    Object-dtype numpy array of the vectors in row order.
    """
    vectors = []

    for _, row in df.iterrows():
        # Binary string -> list of byte values, forced to exactly L entries.
        vec = binary_strings_to_integers(row['data'])
        if len(vec) >= L:
            vec = vec[:L]
        else:
            vec = vec + [0] * (L - len(vec))
        vectors.append(vec)

    return np.array(vectors, dtype=object)


# 重新排列以确保小类和大类交替出现
# Interleave large and small classes so they alternate in the output order.
def mix_small_large(sizes, labels):
    """Reorder (sizes, labels) so above-average ("large") entries alternate
    with at-or-below-average ("small") ones.

    The relative order within each group is preserved; leftover entries of
    the longer group are appended at the end.
    """
    threshold = np.mean(sizes)
    big = [i for i, s in enumerate(sizes) if s > threshold]
    little = [i for i, s in enumerate(sizes) if s <= threshold]

    mixed_sizes = []
    mixed_labels = []

    # One large entry followed by one small entry per round.
    for k in range(max(len(big), len(little))):
        if k < len(big):
            mixed_sizes.append(sizes[big[k]])
            mixed_labels.append(labels[big[k]])
        if k < len(little):
            mixed_sizes.append(sizes[little[k]])
            mixed_labels.append(labels[little[k]])

    return mixed_sizes, mixed_labels


def _distribution_pie_base64(values, labels, title):
    """Render a donut chart of *values* (one wedge per label) and return it
    as a Base64-encoded transparent PNG string."""
    colors = sns.color_palette('pastel')  # one pastel colour per class
    plt.figure(figsize=(8, 10), facecolor='none')
    wedges, texts, autotexts = plt.pie(values, labels=labels, autopct='%1.4f%%', startangle=140,
                                       wedgeprops=dict(width=0.4),  # ring width -> donut shape
                                       textprops=dict(color="white", fontsize=16),
                                       labeldistance=1.1,  # push labels outward
                                       pctdistance=0.85,   # place percentages inside the ring
                                       colors=colors)
    # Enlarge and recolour the percentage annotations.
    for autotext in autotexts:
        autotext.set_fontsize(16)
        autotext.set_color('white')
    plt.tight_layout()
    plt.title(title, fontsize=20, color='white')
    # Render to an in-memory buffer and encode as Base64.
    buf = io.BytesIO()
    plt.savefig(buf, format='png', transparent=True)
    buf.seek(0)
    return base64.b64encode(buf.getvalue()).decode('utf-8')


def _log_report_lines(report):
    """Write a multi-line classification report to the log, one line each."""
    for line in report.split('\n'):
        logging.info(line)


def main(parameters):
    """Compare RandomForest classification with and without SMOTE
    oversampling on externally supplied byte-vector data.

    Parameters
    ----------
    parameters : dict with keys
        'L_waibu'          -- int > 0, length of each feature vector.
        'waibu_train_path' -- CSV with 'data' (binary string) and 'label'.
        'waibu_test_path'  -- CSV with a 'data' column (binary strings).

    Returns
    -------
    dict with Base64 PNGs 'pic_1'..'pic_4' (class distributions before/after
    SMOTE, confusion matrices before/after), 'output' (DataFrame of test-set
    predictions from both models) and 'jieguo_classes' (per small class,
    [before, after] precision/recall/f1).

    Raises
    ------
    ValueError if 'L_waibu' is not positive.
    """
    # Basic logging configuration: append to app.log.
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        filename='app.log',
                        filemode='a')

    waibu_train_path = parameters['waibu_train_path']
    waibu_test_path = parameters['waibu_test_path']

    # Feature vector length.
    L = parameters['L_waibu']
    if L <= 0:
        # BUG FIX: the original did `raise '数据加载有误!'` — raising a plain
        # string is a TypeError in Python 3 (exceptions must derive from
        # BaseException).
        raise ValueError('数据加载有误!')

    train_data = pd.read_csv(waibu_train_path)
    test_data = pd.read_csv(waibu_test_path)
    _, all_train_data = binary_df_to_classes(train_data, L)
    all_test_data = binary_df_to_classes_2(test_data, L)

    # Scale byte values into [0, 1].
    X_train = all_train_data / 255
    X_test = all_test_data / 255

    # Stratified 80/20 train/validation split.
    X_train, X_val, Y_train, Y_val = train_test_split(X_train, train_data['label'], test_size=0.2, random_state=42,
                                                      stratify=train_data['label'])

    # Encode string labels as integers; keep the class names for plots.
    encoder = LabelEncoder()
    Y_train = encoder.fit_transform(Y_train)
    Y_val = encoder.transform(Y_val)
    labels = encoder.classes_.tolist()

    unique_elements, train_counts = np.unique(Y_train, return_counts=True)
    sizes = train_counts

    # For each class, store its share (%) of the largest class if it is a
    # "small" class (<= 1% of the largest); otherwise the sentinel 100.
    small_classes = []
    for i in range(len(sizes)):
        if (sizes[i] / max(sizes)) <= 0.01:
            small_classes.append(sizes[i] / max(sizes) * 100)
        else:
            small_classes.append(100)

    # Donut chart of the original class distribution; large and small
    # classes are interleaved so tiny wedges stay visible.
    sorted_indices = np.argsort(sizes)[::-1]  # descending by class size
    sizes_sorted = np.array(sizes)[sorted_indices]
    labels_sorted = np.array(labels)[sorted_indices]
    sizes_mixed, labels_mixed = mix_small_large(sizes_sorted, labels_sorted)
    image_base64_1 = _distribution_pie_base64(sizes_mixed, labels_mixed, '原始数据分布')

    # Standardise features; the scaler is fitted on the training split only.
    scaler = StandardScaler()
    X_train_scaler = scaler.fit_transform(X_train)
    # BUG FIX: the original called scaler.fit_transform(X_val), re-fitting
    # the scaler on the validation data (leakage) and leaving X_test scaled
    # with validation statistics instead of training statistics.
    X_val_scaler = scaler.transform(X_val)
    X_test_scaler = scaler.transform(X_test)

    # --- Baseline: train on the imbalanced data (no SMOTE) ---
    model_no_smote = RandomForestClassifier(n_estimators=10, random_state=16)
    model_no_smote.fit(X_train_scaler, Y_train.ravel())

    y_pred_no_smote = model_no_smote.predict(X_val_scaler)

    # Confusion matrix and its row-normalised percentage form.
    cm_no_smote = confusion_matrix(Y_val.ravel(), y_pred_no_smote)
    cm_no_smote_percent = cm_no_smote.astype('float') / cm_no_smote.sum(axis=1)[:, np.newaxis] * 100
    print("不均衡验证集直接分类结果：")
    logging.info("不均衡验证集直接分类结果：")
    jieguo_no = classification_report(Y_val.ravel(), y_pred_no_smote, output_dict=True)
    report = classification_report(Y_val.ravel(), y_pred_no_smote)
    print(report)
    _log_report_lines(report)

    # --- Oversample minority classes with SMOTE ---
    smote = SMOTE(random_state=42, k_neighbors=5)
    X_train_resampled, y_train_resampled = smote.fit_resample(X_train_scaler, Y_train.ravel())

    # Donut chart of the class distribution after oversampling.
    unique_elements, counts = np.unique(y_train_resampled, return_counts=True)
    image_base64_2 = _distribution_pie_base64(counts, labels, '增强后数据分布')

    # --- Train on the oversampled data ---
    model = RandomForestClassifier(n_estimators=10, random_state=16)
    model.fit(X_train_resampled, y_train_resampled.ravel())

    y_pred = model.predict(X_val_scaler)

    cm_with_smote = confusion_matrix(Y_val.ravel(), y_pred)
    # BUG FIX: the original divided cm_no_smote (copy-paste error) by the
    # row sums of cm_with_smote, so the "with SMOTE" heatmap showed the
    # wrong matrix.
    cm_with_smote_percent = cm_with_smote.astype('float') / cm_with_smote.sum(axis=1)[:, np.newaxis] * 100
    print("\n增强后验证集分类结果：")
    logging.info("增强后验证集分类结果：")
    jieguo = classification_report(Y_val.ravel(), y_pred, output_dict=True)
    report = classification_report(Y_val.ravel(), y_pred)
    print(report)
    _log_report_lines(report)

    # Report before/after metrics for every small (<=1%) class.
    jieguo_classes = {}
    for i in range(len(labels)):
        if small_classes[i] != 100:
            logging.info(f'{labels[i]}: ')
            logging.info(f'precision: {jieguo_no[str(i)]["precision"]} --> {jieguo[str(i)]["precision"]}')
            logging.info(f'recall: {jieguo_no[str(i)]["recall"]} --> {jieguo[str(i)]["recall"]}')
            logging.info(f'f1-score: {jieguo_no[str(i)]["f1-score"]} --> {jieguo[str(i)]["f1-score"]}')
            print(f'{labels[i]}: ')
            print(f'precision: {jieguo_no[str(i)]["precision"]} --> {jieguo[str(i)]["precision"]}')
            print(f'recall: {jieguo_no[str(i)]["recall"]} --> {jieguo[str(i)]["recall"]}')
            print(f'f1-score: {jieguo_no[str(i)]["f1-score"]} --> {jieguo[str(i)]["f1-score"]}')
            jieguo_classes[labels[i]] = {
                'precision': [jieguo_no[str(i)]["precision"], jieguo[str(i)]["precision"]],
                'recall': [jieguo_no[str(i)]["recall"], jieguo[str(i)]["recall"]],
                'f1-score': [jieguo_no[str(i)]["f1-score"], jieguo[str(i)]["f1-score"]]
            }
    logging.info(f'accuracy: {jieguo_no["accuracy"]} --> {jieguo["accuracy"]}')
    print(f'accuracy: {jieguo_no["accuracy"]} --> {jieguo["accuracy"]}')

    # Confusion-matrix heatmaps: baseline vs. SMOTE.
    image_base64_3 = plot_confusion_matrix(cm_no_smote_percent, "未增强分类结果混淆矩阵", labels)
    image_base64_4 = plot_confusion_matrix(cm_with_smote_percent, "增强后分类结果混淆矩阵", labels)

    plt.close('all')

    # Predict the external test set with both models and map the encoded
    # labels back to their original names.
    y_test_pred_no = model_no_smote.predict(X_test_scaler)
    Y_pred_label_no = encoder.inverse_transform(y_test_pred_no)

    y_test_pred_smote = model.predict(X_test_scaler)
    Y_pred_label = encoder.inverse_transform(y_test_pred_smote)

    output = pd.concat(
        [pd.DataFrame(test_data['data'], columns=['data']),
         pd.DataFrame(Y_pred_label_no, columns=['unenhanced pred']),
         pd.DataFrame(Y_pred_label, columns=['enhanced pred'])], axis=1)

    result_dict = {
        'pic_1': image_base64_1,    # original class distribution
        'pic_2': image_base64_2,    # class distribution after SMOTE
        'pic_3': image_base64_3,    # confusion matrix without enhancement
        'pic_4': image_base64_4,    # confusion matrix with enhancement
        'output': output,
        'jieguo_classes': jieguo_classes    # per enhanced class: [before, after] metrics
    }
    return result_dict

if __name__ == '__main__':
    # Example invocation with external-data parameters.
    parameters1 = {
        # External-data selection
        'L_waibu': 10,   # int, from an input field, must be > 0
        'waibu_train_path': r'D:\temp\xinda\LSTM-GAN\dataset\外部数据_分类001.csv',    # str, training data
        'waibu_test_path': r'D:\temp\xinda\LSTM-GAN\dataset\外部数据_测试002.csv',     # str, test data; if either path is empty, internal data is used instead

    }
    result_dict_1 = main(parameters1)
