import numpy as np
import matplotlib.pyplot as plt
from xgboost import XGBClassifier
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
import pandas as pd
import os
from scipy.interpolate import interp1d
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import io
import seaborn as sns
import cv2
import torch
from tempo import *
from tempo0 import *  # 含有setParameters等函数
import matplotlib.dates as mdates
import base64
import joblib
import logging
from xgb3_utils import read_mean_variance_from_csvfile, set_and_load_parameters_class_1, set_and_load_parameters_class_2,set_and_load_parameters_class_3
# Logging configuration: DEBUG level, timestamped format, appended to app.log.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    filename='app.log',
                    filemode='a')


# Configure Matplotlib fonts so Chinese text renders correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font for CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with SimHei


# Read a single CSV file and build trajectory data.
def read_single_trajectory_from_csv(file_path, n_points):
    """Read one trajectory from a CSV file and resample it to n_points rows.

    The CSV must contain columns 'x' (longitude), 'y' (latitude), 'height'
    and 'time', plus a 'Label' column whose first value labels the whole
    trajectory.

    Parameters:
    file_path (str): path to the CSV file.
    n_points (int): target number of points; longer trajectories are
        truncated, shorter ones are nearest-neighbour interpolated.

    Returns:
    tuple: (trajectory, label) where trajectory is an (n_points, 4) array
        with columns (lat, lon, height, time) and label is the class label.
    """
    df = pd.read_csv(file_path)
    lat_col = df['y'].values
    lon_col = df['x'].values
    alt_col = df['height'].values
    time_col = df['time'].values
    # .iloc[0] is positional: df['Label'][0] would look up index *key* 0 and
    # raise KeyError on a non-default index.
    label = df['Label'].iloc[0]
    trajectory = np.stack((lat_col, lon_col, alt_col, time_col), axis=1)

    # Normalize the point count to exactly n_points.
    if len(trajectory) > n_points:
        # Too many points: truncate.
        trajectory = trajectory[:n_points]
    elif len(trajectory) < n_points:
        # Too few points: nearest-neighbour interpolation along the path.
        t = np.linspace(0, 1, len(trajectory))
        t_new = np.linspace(0, 1, n_points)
        f = interp1d(t, trajectory, axis=0, kind='nearest')
        trajectory = f(t_new)

    return trajectory, label


def normalize_trajectory(trajectory):
    """Min-max scale every column of the trajectory into the [0, 1] range."""
    return MinMaxScaler().fit_transform(trajectory)


# Extract geometric shape features from the trajectory footprint.
def extract_shape_features(trajectory):
    """Compute convex-hull shape descriptors of a trajectory's 2-D footprint.

    Only the first two columns (lat/lon) are used; they are cast to int32
    because the OpenCV contour routines require integer points.

    Returns:
        dict with keys 'area', 'perimeter', 'aspect_ratio', 'eccentricity'.
    """
    # 2-D projection of the trajectory as integer coordinates.
    pts = trajectory[:, :2].astype(np.int32)

    # Convex hull of the point set drives all descriptors below.
    hull = cv2.convexHull(pts)
    hull_area = cv2.contourArea(hull)
    hull_perimeter = cv2.arcLength(hull, True)

    # Aspect ratio of the minimum-area bounding rectangle (>= 1.0);
    # falls back to 1.0 for degenerate (zero-width) rectangles.
    _, (w, h), _ = cv2.minAreaRect(hull)
    short_side, long_side = min(w, h), max(w, h)
    aspect_ratio = 1.0 if short_side == 0 else long_side / short_side

    # Eccentricity from the fitted ellipse; fitEllipse needs >= 5 points,
    # and degenerate axes fall back to 0.0 to avoid division by zero.
    eccentricity = 0.0
    if len(hull) >= 5:
        _, (major_axis, minor_axis), _ = cv2.fitEllipse(hull)
        if major_axis != 0 and minor_axis != 0:
            eccentricity = np.sqrt(1 - (major_axis / minor_axis) ** 2)

    return {
        'area': hull_area,
        'perimeter': hull_perimeter,
        'aspect_ratio': aspect_ratio,
        'eccentricity': eccentricity
    }
# NOTE(review): dead code — an earlier variant of combine_features that tiled the
# shape features onto every point, disabled by wrapping it in a module-level
# string literal. Kept byte-identical below; consider deleting it outright.
'''
# 特征工程
def combine_features(trajectory):
    normalized_trajectory = normalize_trajectory(trajectory)
    # 提取形状特征
    shape_features = extract_shape_features(trajectory)
    # 将形状特征转换为数组
    shape_features_array = np.array([
        shape_features['area'],
        shape_features['perimeter'],
        shape_features['aspect_ratio'],
        shape_features['eccentricity']
    ])
    # 为每个轨迹点添加形状特征
    # 注意这里 shape_features_array 是标量，复制到每个点
    shape_features_repeated = np.tile(shape_features_array, (trajectory.shape[0], 1))  # 重复形状特征
    # 将轨迹点与形状特征结合
    combined_trajectory = np.hstack((normalized_trajectory, shape_features_repeated))  # 这是包含8个特征的二维数组
    # 拉平
    combined_feature = combined_trajectory.flatten()  # 最后将整个轨迹拉平为一维

    return combined_feature
'''
# Feature engineering.
def combine_features(trajectory):
    """Build the model input vector for one trajectory.

    Concatenates the flattened min-max-normalized trajectory points with the
    four scalar shape descriptors (area, perimeter, aspect ratio,
    eccentricity), yielding a single 1-D feature vector.
    """
    shape = extract_shape_features(trajectory)
    shape_vector = np.array([
        shape['area'],
        shape['perimeter'],
        shape['aspect_ratio'],
        shape['eccentricity']
    ])
    flat_points = normalize_trajectory(trajectory).flatten()
    return np.concatenate((flat_points, shape_vector))

def plot_trajectory(group, show=False, save_path=None):
    """
    Plot a 3D trajectory of a group and return it as a base64-encoded PNG.

    Parameters:
    group (pandas.DataFrame): DataFrame with 'lat', 'lon' and 'geoaltitude' columns.
    show (bool): Whether to display the plot. Default is False.
    save_path (str): Path to save the plot. If None, the plot is not saved to disk.

    Returns:
    str: base64-encoded PNG of the rendered figure.
    """
    # Extract values from the group
    latitudes = group.lat.values
    longitudes = group.lon.values
    geoAltitudes = group.geoaltitude.values

    # Create a figure with 3d projection
    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(111, projection='3d')

    # Plot the 3D scatter plot
    ax.scatter3D(longitudes, latitudes, geoAltitudes, s=2)

    # Add labels for axes
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    ax.set_zlabel('GeoAltitude')

    # Show the plot if required
    if show:
        plt.show()

    # Save the plot if save_path is provided
    if save_path is not None:
        fig.savefig(save_path)

    # Encode this figure explicitly (fig.savefig / plt.close(fig)) instead of
    # relying on the implicit "current figure", which may have changed after
    # plt.show() or concurrent plotting.
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.close(fig)

    return image_base64


def load_trajectory_class(parameters):
    """Feature 1: load a single trajectory and build its feature vector.

    parameters must provide 'trajdata_path' and 'num_points'.
    Returns a dict with the combined feature vector and the trajectory label.
    """
    params = set_and_load_parameters_class_1(parameters)
    traj_data, label = read_single_trajectory_from_csv(
        params["trajdata_path"], params["num_points"])

    # Trim the tail so the length is a multiple of 100.
    # NOTE(review): a trajectory shorter than 100 points is trimmed to empty
    # here — confirm inputs always have at least 100 points.
    usable = len(traj_data) - (len(traj_data) % 100)
    traj_data = traj_data[:usable]

    features = combine_features(traj_data)
    return {'traj_features_normalized': features, 'label': label}



def load_and_visualize_trajectory_class(parameters):
    """Feature 1: load one trajectory, visualize it, and build its feature vector.

    parameters must provide 'trajdata_path' and 'num_points'.
    Returns the raw trajectory array, the combined feature vector, the label,
    and the base64 PNG of the 3-D plot.
    """
    params = set_and_load_parameters_class_1(parameters)
    trajdata_path = params["trajdata_path"]
    n_points = params["num_points"]
    # Load the trajectory data.
    traj_data, label = read_single_trajectory_from_csv(trajdata_path, n_points)
    # Trim the tail so the length is a multiple of 100.
    # NOTE(review): trajectories shorter than 100 points are trimmed to empty —
    # confirm inputs always have at least 100 points.
    traj_data = traj_data[:len(traj_data) - (len(traj_data) % 100)]
    # BUGFIX: read_single_trajectory_from_csv stacks columns as
    # (lat, lon, height, time); the previous column list started with 'lon',
    # which swapped latitude and longitude in the 3-D plot.
    columns = ['lat', 'lon', 'geoaltitude', 'time']
    # Visualize the raw data.
    traj_df = pd.DataFrame(traj_data, columns=columns)
    image_base64_original = plot_trajectory(traj_df)
    combined_features = combine_features(traj_data)

    # Publish the rendered image for the UI layer.
    setShowData({"image_base64": image_base64_original})

    return {'traj_data': traj_data, 'traj_features_normalized': combined_features, 'label': label, 'image_base64_original': image_base64_original}


def plot_confusion_matrix(labels_array, predictions, class_names, show=False):
    """Render the confusion matrix as a heatmap and return it as a base64 PNG.

    Args:
        labels_array: ground-truth label codes.
        predictions: predicted label codes.
        class_names: display names used for the matrix rows and columns.
        show: if True, also display the figure interactively.

    Returns:
        str: base64-encoded PNG of the heatmap.
    """
    matrix = confusion_matrix(labels_array, predictions)

    # A labeled DataFrame gives seaborn readable axis tick labels.
    matrix_df = pd.DataFrame(matrix, index=class_names, columns=class_names)

    plt.figure(figsize=(10, 7))
    sns.heatmap(matrix_df, annot=True, fmt='d', cmap='Blues', cbar=False)

    plt.title('混淆矩阵', fontsize=16)
    plt.xlabel('预测标签', fontsize=12)
    plt.ylabel('真实标签', fontsize=12)
    plt.tight_layout()

    if show:
        plt.show()

    # Serialize the current figure into a base64 PNG string.
    buffer = io.BytesIO()
    plt.savefig(buffer, format='png')
    buffer.seek(0)
    encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')
    plt.close()

    return encoded


def plot_classification_metrics(labels_array, predictions, class_names, show=False):
    """Bar chart of per-class precision, recall and F1 score, as a base64 PNG.

    Args:
        labels_array: ground-truth label codes.
        predictions: predicted label codes.
        class_names: display names for the classes.
        show: if True, also display the figure interactively.

    Returns:
        str: base64-encoded PNG of the bar chart.
    """
    report = classification_report(
        labels_array,
        predictions,
        target_names=class_names,
        labels=list(range(len(class_names))),
        output_dict=True
    )

    # Keep only the per-class entries; skip the aggregate summary rows.
    summary_rows = {'accuracy', 'macro avg', 'weighted avg'}
    per_class = {name: scores for name, scores in report.items()
                 if name not in summary_rows}

    # DataFrame layout makes the grouped bar plot a one-liner.
    metrics_df = pd.DataFrame({
        '精度': [s['precision'] for s in per_class.values()],
        '召回率': [s['recall'] for s in per_class.values()],
        'F1分数': [s['f1-score'] for s in per_class.values()]
    }, index=list(per_class))

    ax = metrics_df.plot(kind='bar', figsize=(12, 6))
    plt.title('分类指标', fontsize=16)

    # Axis tick styling.
    plt.xticks(rotation=0, fontsize=14)
    plt.yticks(fontsize=14)

    plt.ylabel('分数', fontsize=12)
    plt.xlabel('类别', fontsize=12)
    plt.ylim(0, 1)
    plt.legend(title='指标', fontsize=10)
    plt.grid(axis='y')

    # Write each bar's value (two decimals) above the bar.
    for bar in ax.patches:
        ax.annotate(f'{bar.get_height():.2f}',
                    (bar.get_x() + bar.get_width() / 2., bar.get_height()),
                    ha='center', va='bottom', fontsize=10)

    plt.tight_layout()

    if show:
        plt.show()

    # Serialize the current figure into a base64 PNG string.
    buffer = io.BytesIO()
    plt.savefig(buffer, format='png')
    buffer.seek(0)
    encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')
    plt.close()

    return encoded


def evaluate_model(trajs, labels_test, parameters):
    """Evaluate a saved model: accuracy, confusion matrix and metric charts.

    Args:
        trajs: trajectory feature vectors, convertible to a numpy array.
        labels_test: the corresponding labels (list or pandas Series).
        parameters: dict resolved by set_and_load_parameters_class_3;
            must yield 'pred_model_file', the path of the saved model.

    Returns:
        dict with keys 'eva_acc' (float accuracy),
        'image_confusion_matrix_test' and 'image_class_metrics_test'
        (both base64 PNG strings).
    """
    params = set_and_load_parameters_class_3(parameters)
    feature_matrix = np.array(trajs)

    # Encode string labels as categorical integer codes.
    label_series = pd.Series(labels_test, dtype="category")
    class_names = label_series.cat.categories
    encoded_labels = pd.DataFrame(label_series.cat.codes, dtype=np.int8).to_numpy().ravel()

    # Load the persisted model and run inference.
    model = joblib.load(params['pred_model_file'])
    predictions = model.predict(feature_matrix)

    accuracy = accuracy_score(encoded_labels, predictions)
    cm_image = plot_confusion_matrix(encoded_labels, predictions, class_names)
    metrics_image = plot_classification_metrics(encoded_labels, predictions, class_names)

    return {'eva_acc': accuracy,
            'image_confusion_matrix_test': cm_image,
            'image_class_metrics_test': metrics_image}


def train_and_evaluate_model(trajs_train, labels_train, parameters):
    """Train an XGBoost classifier, persist it, and report held-out metrics.

    Args:
        trajs_train: trajectory feature vectors, convertible to a numpy array.
        labels_train: the corresponding labels (list or pandas Series).
        parameters: dict resolved by set_and_load_parameters_class_2; must
            yield 'train_model_file', 'n_estimators', 'learning_rate' and
            'max_depth'.

    Returns:
        dict with keys 'train_acc' (held-out accuracy),
        'image_confusion_matrix_train' and 'image_class_metrics_train'
        (both base64 PNG strings).
    """
    params = set_and_load_parameters_class_2(parameters)
    feature_matrix = np.array(trajs_train)

    # Encode string labels as categorical integer codes.
    label_series = pd.Series(labels_train, dtype="category")
    class_names = label_series.cat.categories
    encoded_labels = pd.DataFrame(label_series.cat.codes, dtype=np.int8).to_numpy().ravel()

    # Single stratified 80/20 train/test split.
    splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
    train_idx, test_idx = next(splitter.split(feature_matrix, encoded_labels))
    train_features, test_features = feature_matrix[train_idx], feature_matrix[test_idx]
    train_labels, test_labels = encoded_labels[train_idx], encoded_labels[test_idx]

    # Build and fit the classifier with the configured hyperparameters.
    model = XGBClassifier(n_estimators=params['n_estimators'],
                          learning_rate=params['learning_rate'],
                          max_depth=params['max_depth'],
                          random_state=42,
                          use_label_encoder=False,
                          eval_metric='mlogloss')
    model.fit(train_features, train_labels)

    # Persist the trained model.
    joblib.dump(model, params["train_model_file"])

    # Evaluate on the held-out split.
    predictions = model.predict(test_features)
    accuracy = accuracy_score(test_labels, predictions)
    cm_image = plot_confusion_matrix(test_labels, predictions, class_names)
    metrics_image = plot_classification_metrics(test_labels, predictions, class_names)

    return {'train_acc': accuracy,
            'image_confusion_matrix_train': cm_image,
            'image_class_metrics_train': metrics_image}

def display_data(trajdata_path, num_points):
    """Data display: load and visualize a single trajectory.

    :param trajdata_path: str, path of the trajectory data file
    :param num_points: int, number of sample points
    :return: dict with the visualization result and model input data
    """
    return load_and_visualize_trajectory_class({
        "trajdata_path": trajdata_path,
        "num_points": num_points
    })


def train_model(paths, num_points, arg):
    """
    Training: load multiple trajectories and train the model.

    :param paths: list[str], absolute paths of the trajectory CSV files
    :param num_points: int, number of sample points per trajectory
    :param arg: dict, training configuration forwarded to
        train_and_evaluate_model; expected keys are 'train_model_file'
        (model save path), 'n_estimators', 'learning_rate' and 'max_depth'
    :return: dict, training result including accuracy and image data
    """
    trajs_train = []
    labels_train = []

    # Build the feature matrix and label list, one trajectory per file.
    for file_path in paths:
        parameters = {"trajdata_path": file_path, "num_points": num_points}
        logging.info(f'正在读取文件名为\'{file_path}\'的文件...')
        result = load_trajectory_class(parameters)
        trajs_train.append(result['traj_features_normalized'])
        labels_train.append(result['label'])
    traj_len=len(trajs_train)
    logging.info(f'加载成功，正在对{traj_len}条航迹进行训练')

    result_train = train_and_evaluate_model(trajs_train, labels_train, arg)
    logging.info(f'训练完毕')
    return result_train
def test_model(paths, num_points, pred_model_file):
    """Testing: load test trajectories and evaluate the saved model.

    :param paths: list[str], absolute paths of the trajectory CSV files
    :param num_points: int, number of sample points per trajectory
    :param pred_model_file: str, path of the saved prediction model
    :return: dict, test result including accuracy and image data
    """
    trajs, labels = [], []

    # Build the feature matrix and label list, one trajectory per file.
    for file_path in paths:
        logging.info(f'正在读取文件名为\'{file_path}\'的文件...')
        result = load_and_visualize_trajectory_class(
            {"trajdata_path": file_path, "num_points": num_points})
        trajs.append(result['traj_features_normalized'])
        labels.append(result['label'])

    traj_len = len(trajs)
    logging.info(f'加载成功，正在对{traj_len}条航迹进行测试')
    return evaluate_model(trajs, labels, {'pred_model_file': pred_model_file})

if __name__ == "__main__":
    '''功能一，加载数据'''
    # Feature 1: load and visualize a single trajectory.
    parameters_1 = {"trajdata_path": r'D:\temp\xinda\ModelRecognition\测试集\E3TF_1(13).csv',
                    "num_points": 200}
    load_and_visualize_result = load_and_visualize_trajectory_class(parameters_1)
    # 'traj_data':                                    ndarray:(200,4)              for visualization
    # 'traj_features_normalized'                      ndarray:(804,)               model training input
    # 'label': label,                                 str                          model training input
    # 'image_base64_original':                        image_base64                 3-D trajectory image
    """加载训练数据"""
    # Load the training data set from a directory of CSV files.
    train_trajectory_directory = r"D:\temp\xinda\ModelRecognition\训练集_备份"
    trajs_train = []      # script-level list: input of feature 2 (training)
    labels_train = []     # script-level list: input of feature 2 (training)
    for filename in os.listdir(train_trajectory_directory):
        if filename.endswith('.csv'):
            file_path = os.path.join(train_trajectory_directory, filename)
            parameters_n = {"trajdata_path": file_path,
                            "num_points": 200}
            # Read a single trajectory.
            result_n = load_trajectory_class(parameters_n)
            traj_n = result_n['traj_features_normalized']
            label_n = result_n['label']
            trajs_train.append(traj_n)
            labels_train.append(label_n)
    '''功能二，模型训练'''
    # Feature 2: model training.
    parameters_2 = {'train_model_file': r'D:\temp\xinda\ModelRecognition\xgb_model.pkl',
                    'n_estimators': 800,
                    'learning_rate': 0.3,
                    'max_depth': 6}
    result_train = train_and_evaluate_model(trajs_train, labels_train, parameters_2)
    # 'train_acc':                                                           float64                  accuracy
    # 'image_confusion_matrix_train':                                        image_base64             confusion-matrix image
    # 'image_class_metrics_train':                                           image_base64             bar chart
    """加载测试数据"""
    # Load the test data set from a directory of CSV files.
    test_trajectory_directory = r"D:\temp\xinda\ModelRecognition\测试集_备份"
    trajs_2D = []
    trajs = []
    labels = []
    for filename in os.listdir(test_trajectory_directory):
        if filename.endswith('.csv'):
            file_path = os.path.join(test_trajectory_directory, filename)
            parameters_n = {"trajdata_path": file_path,
                            "num_points": 200}
            # Read a single trajectory.
            result_n = load_and_visualize_trajectory_class(parameters_n)
            traj_n = result_n['traj_features_normalized']
            traj_2D_n = result_n['traj_data']
            label_n = result_n['label']
            trajs_2D.append(traj_2D_n)
            trajs.append(traj_n)
            labels.append(label_n)
    '''功能三，模型测试'''
    # Feature 3: model testing / evaluation.
    parameters_3 = {'pred_model_file': r'D:\temp\xinda\ModelRecognition\xgb_model.pkl'}
    result_eva = evaluate_model(trajs, labels, parameters_3)
    print()
    # 'eva_acc':                                                         float64                  accuracy
    # 'image_confusion_matrix_test':                                     image_base64             confusion-matrix image
    # 'image_class_metrics_test':                                        image_base64             bar chart
    
