import os
import io
import glob
import base64
import argparse
import joblib
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from itertools import groupby
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from xgboost import XGBClassifier
from utils_xgb4 import set_and_load_parameters_pattern_0, set_and_load_parameters_pattern_1, set_and_load_parameters_pattern_2,\
    set_and_load_parameters_pattern_3, set_and_load_parameters_pattern_4
from keras.callbacks import Callback
import logging
# Configure logging: DEBUG level, timestamped format, appended to app.log.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    filename='app.log',
                    filemode='a')

# Configure Matplotlib fonts so Chinese plot labels render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly


def sliding_window(data, length, stride=1):
    """Yield windows of *length* items over *data*, stepping by *stride*.

    A sequence shorter than *length* is yielded once, unchanged.  When the
    remaining tail cannot fill a full window, the tail is yielded as a
    final (shorter) window and iteration stops.
    """
    total = len(data)
    if total < length:
        yield data
        return

    for start in range(0, total, stride):
        end = start + length
        if end > total:
            # Tail shorter than a full window: emit the remainder and stop.
            yield data[start:]
            return
        yield data[start:end]


def load_single_class(parameters):
    """Load one trajectory .npy file and cut it into labelled windows.

    Rows are grouped by the trajectory-id column (index 21), each group is
    split with a sliding window, and a window is kept only when it is
    full-length and contains at least 20 distinct altitude values.

    Returns a dict with 'seqs' (float32 windows), 'labels' and 'num_seq'.
    """
    params = set_and_load_parameters_pattern_1(parameters)
    path = params['trajdata_path']
    raw = np.load(path, allow_pickle=True)
    logging.info(f'正在读取文件名为\'{path}\'的文件...')
    table = np.array([tuple(record) for record in raw]).reshape(raw.shape[0], 22)
    # Column order after selection: label, time, lat, lon, geoaltitude, traj-id.
    selected = table[:, [16, 0, 2, 3, 13, 21]]

    seqs = []
    labels = []
    win_len = params['win_length']
    for _, rows in groupby(selected, lambda row: row[-1]):
        # Drop the trajectory-id column; it was only needed for grouping.
        traj = np.array([row[:-1] for row in rows])
        for window in sliding_window(traj, win_len, params['win_stride']):
            if len(window) < win_len:
                continue
            # Require >= 20 distinct altitude values so the window carries
            # real vertical motion rather than a flat segment.
            if len(np.unique(window[:, -1])) < 20:
                continue
            labels.append(window[0, 0])
            # Strip the label column and keep the numeric features as float32.
            seqs.append(np.delete(window, 0, axis=1).astype(np.float32))

    return {'seqs': np.array(seqs), 'labels': np.array(labels), 'num_seq': len(seqs)}


def load_trajectories(parameters):
    """Load every trajectory file under root_path and stack the windows.

    Each file is parsed by load_single_class.  Files that fail to load are
    logged and skipped (instead of being swallowed silently); files that
    load but yield zero windows are also skipped, because concatenating
    their empty (0,)-shaped array with (n, L, F) data would raise.

    Returns a dict with 'trajs_train' and 'labels_train'; both are None
    when no file produced any data (matching the original contract).
    """
    params_0 = set_and_load_parameters_pattern_0(parameters)
    traj_parts = []   # per-file window arrays, concatenated once at the end
    label_parts = []  # per-file label arrays

    for filename in os.listdir(params_0['root_path']):
        file_path = os.path.join(params_0['root_path'], filename)
        parameters_n = {
            "trajdata_path": file_path,
            "win_length": params_0['win_length'],
            "win_stride": params_0['win_stride']
        }
        try:
            result_n = load_single_class(parameters_n)
        except Exception as e:
            # Best-effort loading: record why the file was skipped rather
            # than silently discarding the error.
            logging.warning(f"跳过文件 {file_path}，错误信息: {e}")
            continue

        traj_n = result_n['seqs']
        label_n = result_n['labels']
        if len(label_n) == 0:
            # No usable windows in this file; nothing to accumulate.
            continue
        traj_parts.append(traj_n)
        label_parts.append(label_n)

    if not traj_parts:
        return {'trajs_train': None, 'labels_train': None}

    # Single concatenate instead of growing an array per file (O(n) vs O(n^2)).
    return {
        'trajs_train': np.concatenate(traj_parts, axis=0),
        'labels_train': np.concatenate(label_parts, axis=0),
    }


def plot_trajectory(group, show=False, save_path=None):
    """
    Plot a 3D trajectory of a group and return it as a base64 PNG string.

    Parameters:
    group (pandas.DataFrame): DataFrame with 'lat', 'lon' and 'geoaltitude'
        columns holding the trajectory points.
    show (bool): Whether to display the plot. Default is False.
    save_path (str): Path to also write the figure to. If None, the plot
        is not saved to disk. Default is None.

    Returns:
    str: The rendered figure encoded as a base64 PNG string.
    """
    # Extract values from the group
    latitudes = group.lat.values
    longitudes = group.lon.values
    geoAltitudes = group.geoaltitude.values

    # Create a figure with 3d projection
    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(111, projection='3d')

    # Plot the 3D scatter plot of the trajectory points
    ax.scatter3D(longitudes, latitudes, geoAltitudes, s=2)

    # Add labels for axes
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    ax.set_zlabel('GeoAltitude')

    # Show the plot if required
    if show:
        plt.show()

    # Save the plot if save_path is provided
    if save_path is not None:
        fig.savefig(save_path)

    # Render into an in-memory buffer from the figure handle itself, so the
    # result does not depend on matplotlib's "current figure" state (which
    # plt.show() may have changed).
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.close(fig)

    return image_base64


def load_single(parameters):
    """Load a single test trajectory file and render it.

    Reads the first win_length rows of the .npy file, keeps the
    time/lat/lon/geoaltitude columns, and returns the raw window, its
    label, and a base64 PNG plot of the trajectory.
    """
    params = set_and_load_parameters_pattern_3(parameters)
    raw = np.load(params['trajdata_path'], allow_pickle=True)
    table = np.array([tuple(record) for record in raw]).reshape(raw.shape[0], 22)
    # First win_length rows of the time/lat/lon/geoaltitude columns.
    window = table[:params['win_length'], [0, 2, 3, 13]].astype(np.float32)
    # NOTE(review): the label is taken from row 1 here, while
    # load_single_class reads row 0 — presumably equivalent for
    # single-class files; confirm against the data format.
    label = table[1, 16]
    traj_df = pd.DataFrame(window, columns=['time', 'lat', 'lon', 'geoaltitude'])
    traj_image = plot_trajectory(traj_df)

    return {'test_traj': window, 'test_label': label, 'traj_image': traj_image}


def plot_confusion_matrix(labels_array, predictions, class_names, show=False):
    """Render the confusion matrix as a heatmap, returned as a base64 PNG.

    Args:
        labels_array: ground-truth label array.
        predictions: predicted label array.
        class_names: names used for both axes of the matrix.
        show: display the figure interactively when True.
    """
    matrix = confusion_matrix(labels_array, predictions)
    # A labelled DataFrame gives the heatmap readable row/column names.
    matrix_df = pd.DataFrame(matrix, index=class_names, columns=class_names)

    plt.figure(figsize=(10, 7))
    sns.heatmap(matrix_df, annot=True, fmt='d', cmap='Blues', cbar=False)

    plt.title('混淆矩阵', fontsize=16)
    plt.xlabel('预测标签', fontsize=12)
    plt.ylabel('真实标签', fontsize=12)
    plt.tight_layout()

    if show:
        plt.show()

    # Serialize the current figure into an in-memory PNG and encode it.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    encoded = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.close()

    return encoded


def plot_classification_metrics(labels_array, predictions, class_names, show=False):
    """Render per-class precision/recall/F1 as a bar chart (base64 PNG).

    Args:
        labels_array: ground-truth label array.
        predictions: predicted label array.
        class_names: class name list, one entry per label code.
        show: display the figure interactively when True.
    """
    report = classification_report(
        labels_array,
        predictions,
        target_names=class_names,
        labels=list(range(len(class_names))),
        output_dict=True
    )

    # Keep only the per-class rows; drop the aggregate summary entries.
    aggregates = ('accuracy', 'macro avg', 'weighted avg')
    categories = [name for name in report if name not in aggregates]
    metrics_df = pd.DataFrame({
        '精度': [report[name]['precision'] for name in categories],
        '召回率': [report[name]['recall'] for name in categories],
        'F1分数': [report[name]['f1-score'] for name in categories],
    }, index=categories)

    # Grouped bar chart, one group per class.
    ax = metrics_df.plot(kind='bar', figsize=(12, 6))
    plt.title('分类指标', fontsize=16)
    plt.xticks(rotation=0, fontsize=14)
    plt.yticks(fontsize=14)
    plt.ylabel('分数', fontsize=12)
    plt.xlabel('类别', fontsize=12)
    plt.ylim(0, 1)
    plt.legend(title='指标', fontsize=10)
    plt.grid(axis='y')

    # Annotate each bar with its value, rounded to two decimals.
    for bar in ax.patches:
        ax.annotate(f'{bar.get_height():.2f}',
                    (bar.get_x() + bar.get_width() / 2., bar.get_height()),
                    ha='center', va='bottom', fontsize=10)

    plt.tight_layout()

    if show:
        plt.show()

    # Serialize the current figure into an in-memory PNG and encode it.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    encoded = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.close()

    return encoded


def train_and_evaluate_model_pattern(trajs_train, labels_train, parameters):
    """Train an XGBoost classifier on windowed trajectories and evaluate it.

    Splits the data 80/20 with stratified sampling, standardizes features
    with mean/var loaded from a CSV, fits the model, saves it to disk, and
    returns the held-out accuracy plus rendered evaluation images.

    Parameters:
        trajs_train: array of shape (n_samples, n_points, n_features)
            — presumably (n, 100, 4); confirm against load_trajectories.
        labels_train: array of n_samples raw label values.
        parameters: dict passed to set_and_load_parameters_pattern_2; must
            yield 'scaler_path' and 'model_save_path'.

    Returns:
        dict with 'train_acc' (held-out accuracy) and base64 PNG images
        'image_confusion_matrix_train' / 'image_class_metrics_train'.
    """
    # NOTE(review): reset_canceltrain assigns a function-local name only,
    # so this call does not actually clear the module-level cancel flag.
    reset_canceltrain()
    params = set_and_load_parameters_pattern_2(parameters)
    # Encode labels as integer category codes; keep names for the plots.
    labels = pd.Series(labels_train, dtype="category")
    class_names = labels.cat.categories
    labels_array = labels.cat.codes.values.astype(np.int8)  # compact int codes

    # Stratified 80/20 split (single split, fixed seed for reproducibility).
    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    for train_index, test_index in sss.split(trajs_train, labels_array):
        train_images, test_images = trajs_train[train_index], trajs_train[test_index]
        train_labels, test_labels = labels_array[train_index], labels_array[test_index]

    # Flatten (n, points, features) -> (n*points, features) for the scaler.
    train_shape = train_images.shape
    test_shape = test_images.shape
    train_images_flat = train_images.reshape(-1, train_shape[2])
    test_images_flat = test_images.reshape(-1, test_shape[2])

    # Rebuild a StandardScaler from persisted mean/var columns instead of
    # fitting, so training and inference share the same normalization.
    loaded_params = pd.read_csv(params['scaler_path'])
    loaded_mean = loaded_params['mean'].values
    loaded_var = loaded_params['var'].values

    scaler = StandardScaler()
    scaler.mean_ = loaded_mean
    scaler.var_ = loaded_var
    scaler.scale_ = np.sqrt(loaded_var)  # standard deviation from variance

    # Standardize the flattened features.
    train_features_normalized = scaler.transform(train_images_flat)
    test_features_normalized = scaler.transform(test_images_flat)

    # Reshape back to (n, points, features) ...
    train_features_normalized_reshaped = train_features_normalized.reshape(train_shape[0], train_shape[1], -1)
    test_features_normalized_reshaped = test_features_normalized.reshape(test_shape[0], test_shape[1], -1)

    # ... then collapse points*features into one flat vector per sample.
    train_features_final = train_features_normalized_reshaped.reshape(train_shape[0], -1)
    test_features_final = test_features_normalized_reshaped.reshape(test_shape[0], -1)

    # Train the XGBoost model.
    model = XGBClassifier(n_estimators=800, max_depth=15, random_state=42, use_label_encoder=False, eval_metric='mlogloss')
    model.fit(train_features_final, train_labels)

    # Persist the trained model.
    joblib.dump(model, params['model_save_path'])

    # Predict on the held-out split.
    y_pred = model.predict(test_features_final)

    # Held-out accuracy.
    test_acc = accuracy_score(test_labels, y_pred)

    # Render the confusion matrix and per-class metrics as base64 PNGs.
    image_confusion_matrix_train = plot_confusion_matrix(test_labels, y_pred, class_names)
    image_class_metrics_train = plot_classification_metrics(test_labels, y_pred, class_names)

    return {'train_acc': test_acc, 'image_confusion_matrix_train': image_confusion_matrix_train, 'image_class_metrics_train': image_class_metrics_train}


def evaluate_model_pattern(labels_test, trajs_test, parameters):
    """Evaluate a saved XGBoost model on test trajectories.

    Standardizes the test windows with persisted mean/var, loads the model
    from disk, and returns accuracy plus rendered evaluation images.

    Parameters:
        labels_test: sequence of ground-truth label values.
        trajs_test: sequence of (n_points, n_features) windows
            — presumably (100, 4) each; confirm against load_single.
        parameters: dict passed to set_and_load_parameters_pattern_4; must
            yield 'scaler_path' and 'load_model_path'.

    Returns:
        dict with 'eva_acc' and base64 PNG images
        'image_confusion_matrix_test' / 'image_class_metrics_test'.
    """
    params = set_and_load_parameters_pattern_4(parameters)
    # Encode labels as integer category codes; keep names for the plots.
    labels = pd.Series(labels_test, dtype="category")
    class_names = labels.cat.categories
    labels_array = labels.cat.codes.values.astype(np.int8)  # compact int codes

    # Stack windows into (n_samples, n_points, n_features).
    trajectories_test = np.array(trajs_test)
    trajectories_test_shape = trajectories_test.shape
    # Flatten to 2D so the scaler sees one row per trajectory point.
    trajectories_test_flat = trajectories_test.reshape(-1, trajectories_test_shape[2])

    # Rebuild the StandardScaler from persisted mean/var (no fitting here,
    # so inference matches the normalization used at training time).
    loaded_params = pd.read_csv(params['scaler_path'])
    loaded_mean = loaded_params['mean'].values
    loaded_var = loaded_params['var'].values
    scaler = StandardScaler()
    scaler.mean_ = loaded_mean
    scaler.var_ = loaded_var
    scaler.scale_ = np.sqrt(loaded_var)  # standard deviation from variance
    trajectories_test_normalized = scaler.transform(trajectories_test_flat)
    trajectories_test_normalized_reshaped = trajectories_test_normalized.reshape(
        trajectories_test_shape[0], trajectories_test_shape[1], -1)  # back to (n_samples, n_points, n_features)

    # Collapse points*features into one flat vector per sample.
    features_test_final = trajectories_test_normalized_reshaped.reshape(trajectories_test_shape[0], -1)

    # Load the persisted model.
    model = joblib.load(params['load_model_path'])
    # Predict on the flattened test features.
    predictions = model.predict(features_test_final)

    # Overall accuracy on the test set.
    test_acc = accuracy_score(labels_array, predictions)

    # Render the confusion matrix and per-class metrics as base64 PNGs.
    image_confusion_matrix_train = plot_confusion_matrix(labels_array, predictions, class_names)
    image_class_metrics_train = plot_classification_metrics(labels_array, predictions, class_names)

    return {
       "eva_acc": test_acc, 
        "image_confusion_matrix_test":image_confusion_matrix_train,
        "image_class_metrics_test":image_class_metrics_train,
    }

def CancelTrain():
    """Request cancellation of the running training loop.

    Sets the module-level __CANCEL_WAITHANDLE__ flag that CheckCancel
    polls at the end of each batch.
    """
    # Bug fix: the original assignment created a function-local variable
    # and never touched the module-level flag, so cancelling was a no-op.
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = True
def reset_canceltrain():
    """Clear the module-level cancel flag before starting a new training run."""
    # Bug fix: the original assignment only set a function-local name, so
    # the module-level flag was never actually reset.
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = False
# Custom Keras callback: lets an external caller stop training by setting
# the module-level __CANCEL_WAITHANDLE__ flag.
class CheckCancel(Callback):
    def on_batch_end(self, batch, logs=None):
        # Polled after every batch so a cancel request takes effect quickly.
        global __CANCEL_WAITHANDLE__
        if __CANCEL_WAITHANDLE__:
            self.model.stop_training = True
            print("Training stopped due to __CANCEL_WAITHANDLE__ being True")

def process_pipeline(parameters):
    """Load the training data, train the model, and return the result.

    :param parameters: dict of loading/training settings
        Required keys:
            - "root_path": dataset directory
            - "scaler_path": path of the persisted scaler parameters
            - "model_save_path": path to write the trained model to
        Optional keys:
            - "win_length": window length (default 100)
            - "win_stride": window stride (default 100)
    :return: result dict from train_and_evaluate_model_pattern
    """
    # Fail fast when a required setting is missing.
    for key in ("root_path", "scaler_path", "model_save_path"):
        if key not in parameters:
            raise ValueError(f"缺少必要的参数: {key}")

    # Load the training data.
    print("加载训练数据...")
    result_load = load_trajectories({
        "root_path": parameters["root_path"],
        "win_length": parameters.get("win_length", 100),
        "win_stride": parameters.get("win_stride", 100),
    })
    print("训练数据加载完成。")

    # Train and evaluate the model.
    print("模型训练中...")
    result_train = train_and_evaluate_model_pattern(
        result_load['trajs_train'],
        result_load['labels_train'],
        {
            "scaler_path": parameters["scaler_path"],
            "model_save_path": parameters["model_save_path"],
        },
    )
    print("模型训练完成。")

    return result_train

def load_and_evaluate(file_paths, win_len, model_params):
    """
    Load test data and evaluate the model.

    :param file_paths: List[str] trajectory file paths
    :param win_len: window length forwarded to load_single
    :param model_params: dict with 'scaler_path' and 'load_model_path'
    :return: tuple (test accuracy, confusion-matrix image, metrics image)
    """
    trajs_test = []   # accumulated trajectory windows
    labels_test = []  # accumulated labels

    for file_path in file_paths:
        parameters_n_test = {"trajdata_path": file_path, "win_length": win_len}
        try:
            # Load a single file's data.
            result_n = load_single(parameters_n_test)
        except Exception as e:
            # Best-effort: report and skip unreadable files.
            print(f"文件加载失败，跳过文件 {file_path}，错误信息: {e}")
            continue

        trajs_test.append(result_n['test_traj'])
        labels_test.append(result_n['test_label'])

    # Bug fix: evaluate_model_pattern returns a dict; the original code
    # tuple-unpacked it, which bound the three KEY STRINGS to the result
    # variables instead of the accuracy and images.
    result = evaluate_model_pattern(labels_test, trajs_test, model_params)

    return (result['eva_acc'],
            result['image_confusion_matrix_test'],
            result['image_class_metrics_test'])

# Module-level cancellation flag: polled by CheckCancel, toggled by
# CancelTrain / reset_canceltrain.
__CANCEL_WAITHANDLE__=False

if __name__ == "__main__":
    '''功能一，加载所有训练数据'''
    # Step 1: load all training trajectories from the training directory.
    parameters_0 = {"root_path": r'D:\temp\xinda\TrackClassifyRecogine\训练集',
                    "win_length": 100,
                    "win_stride": 100}
    result_load = load_trajectories(parameters_0)
    # trajs_train   ndarray:(n,100,4)
    # labels_train  ndarray:(n,)
    trajs_train = result_load['trajs_train']
    labels_train = result_load['labels_train']
    '''功能二，模型训练'''
    # Step 2: train the XGBoost model and collect evaluation artefacts.
    parameters_2 = {"scaler_path": r'scaler_params.csv',
                    "model_save_path": r'D:\temp\xinda\TrackClassifyRecogine\xgb_model.pkl'
                    }
    result_train = train_and_evaluate_model_pattern(trajs_train, labels_train, parameters_2)
    # train_acc:                        float      held-out accuracy
    # image_confusion_matrix_train:     base64 PNG confusion matrix
    # image_class_metrics_train:        base64 PNG per-class metrics chart
    '''功能三，加载单条测试数据'''
    # Step 3: load a single test trajectory.  Bug fix: load_single returns
    # a dict; the original tuple-unpacked it, binding the KEY STRINGS
    # instead of the values.
    parameters_3 = {"trajdata_path": r'D:\temp\xinda\TrackClassifyRecogine\训练集\H1T(17).npy',
                    "win_length": 100}
    result_single = load_single(parameters_3)
    single_trajectory = result_single['test_traj']
    single_label = result_single['test_label']
    image_64 = result_single['traj_image']
    '''加载所有测试数据'''
    # Load every file in the test directory.
    test_trajectory_directory = r'D:\temp\xinda\TrackClassifyRecogine\测试集'
    trajs_test = []   # inputs for the evaluation step
    labels_test = []  # inputs for the evaluation step
    for filename in os.listdir(test_trajectory_directory):
        file_path = os.path.join(test_trajectory_directory, filename)
        parameters_n_test = {"trajdata_path": file_path,
                             "win_length": 100}
        result_n = load_single(parameters_n_test)
        trajs_test.append(result_n['test_traj'])
        labels_test.append(result_n['test_label'])
    '''功能四，模型测试'''
    # Step 4: evaluate on the test set.  Same dict-unpacking fix as above:
    # evaluate_model_pattern returns a dict, not a 3-tuple.
    parameters_4 = {"scaler_path": r'scaler_params.csv',
                    "load_model_path": r'D:\pythonProject\分类识别\xgb_model.pkl'}
    result_eval = evaluate_model_pattern(labels_test, trajs_test, parameters_4)
    test_accuracy = result_eval['eva_acc']
    confusion_matrix_img = result_eval['image_confusion_matrix_test']
    classification_metrics_img = result_eval['image_class_metrics_test']
    # eva_acc:                          float      test accuracy
    # image_confusion_matrix_test:      base64 PNG confusion matrix
    # image_class_metrics_test:         base64 PNG per-class metrics chart
