import io
import os
import torch
import base64
import pandas as pd
import torch.nn as nn
from tempo import *
from tempo0 import *  # 含有setParameters等函数
from model.bert_liner_svm import BERT, masked_position
from dataproc_npy_sanwei import TraceSet, load_yaml
import matplotlib.dates as mdates
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
from utils import read_mean_variance_from_csvfile, set_and_load_parameters_imp_1, set_and_load_parameters_imp_2, set_and_load_parameters_imp_3


def load_and_visualize_trajectory_imp(parameters):
    """Feature 1: load a trajectory .npy file, trim it to a multiple of 100
    rows, plot it, and publish the images via setShowData.

    Args:
        parameters: raw parameter dict; resolved by set_and_load_parameters_imp_1.

    Returns:
        dict with the trimmed raw array ('traj_data') and the base64-encoded
        plots ('image_base64_original').
    """
    params = set_and_load_parameters_imp_1(parameters)

    # Load the raw trajectory and trim to an exact multiple of 100 points.
    traj_data = np.load(params["trajdata_path"], allow_pickle=True)
    usable = (len(traj_data) // 100) * 100
    traj_data = traj_data[:usable]

    columns = ['time', 'icao24', 'lat', 'lon', 'geoaltitude', 'segment_id']
    traj_df = pd.DataFrame(traj_data, columns=columns)
    image_base64_original = plot_n_trajectory(
        [traj_df[['time', 'lat', 'lon', 'geoaltitude']]], ['Original'], ['red'], show=True)

    # Publish the rendered images for display.
    setShowData({"image_base64": image_base64_original})

    return {'traj_data': traj_data, 'image_base64_original': image_base64_original}


def create_model(parameters) -> BERT:
    """Build a BERT trajectory-imputation model from a parameter dict."""
    config = {
        'loc_size': parameters['loc_size'],
        'dim_in': parameters['dim_in'],
        'dim_out': parameters['dim_out'],
        'n_heads': parameters['n_heads'],
        'n_layers': parameters['n_layers'],
        'dropout': parameters['dropout'],
        'max_len': parameters['traj_len'],
        'scale': parameters['scale'],
        'max_targets': parameters['max_pair_targets'],
        'position_embedding_size': parameters['position_embedding_size'],
    }
    return BERT(**config)


def torch_cov(input_vec):
    """Compute a per-sample covariance matrix for a batch, flattened per sample.

    Args:
        input_vec: tensor of shape (bn, seq, d).

    Returns:
        Tensor of shape (bn, d*d): each sample's d x d covariance matrix
        (unbiased, divided by seq - 1) flattened row-major.
    """
    centered = input_vec - input_vec.mean(dim=1, keepdim=True)
    seq_len = centered.shape[1]
    cov = centered.transpose(1, 2) @ centered / (seq_len - 1)
    return cov.flatten(start_dim=1)


def eveluation(mtm, target, mask_labels):
    """Evaluate predictions at masked positions: RMS, MDE and MAE.

    Args:
        mtm: model predictions, same shape as target.
        target: ground-truth values.
        mask_labels: labels identifying masked positions; converted to a
            position mask via masked_position.

    Returns:
        Tuple of detached tensors (rms, mde, mae): rms is a scalar over all
        valid positions; mde/mae are per-feature vectors.
    """
    posi = masked_position(mask_labels)
    pred = mtm * posi
    truth = target * posi
    diff = pred - truth
    valid_count = posi.sum(dim=(0, 1))

    # Root-mean-square error over every valid (masked) position.
    rms = ((diff ** 2 * posi).sum() / valid_count.sum()).sqrt()

    # Mean relative error per feature; zero targets are replaced by one to
    # avoid division by zero.
    safe_truth = torch.where(truth == 0, torch.ones_like(truth), truth)
    mde = ((diff / safe_truth).abs() * posi).sum(dim=(0, 1)) / valid_count

    # Mean absolute error per feature.
    mae = (diff.abs() * posi).sum(dim=(0, 1)) / valid_count

    return rms.detach(), mde.detach(), mae.detach()


def transform_trajectory_data(data, mtm_df, masked_data_label, len_data):
    """Split trajectory data into observed/imputed parts for plotting.

    Args:
        data: array-like of shape (n, 3) with lat/lon/geoaltitude rows.
        mtm_df: DataFrame of model outputs; its columns are renamed in place.
        masked_data_label: torch tensor of 0/1 labels (1 = observed).
        len_data: number of rows to keep in the returned frames.

    Returns:
        (data_df, shou_traj_df, mtm_df_data, combined_df, mtm_df_target)
    """
    cols = ['lat', 'lon', 'geoaltitude']

    # NOTE: renames the caller's mtm_df columns in place.
    mtm_df.columns = cols
    data_df = pd.DataFrame(data, columns=cols)

    labels = masked_data_label.numpy()

    # Rows the model actually observed (label == 1).
    shou_traj_df = data_df[pd.Series(labels == 1)]

    # Rows the model filled in (label == 0): predictions and their targets.
    filled = pd.Series(labels == 0)
    mtm_df_data = mtm_df[filled]
    mtm_df_target = data_df[filled]

    data_df = data_df.iloc[:len_data]

    # Start from the truncated originals, then overwrite with observed rows
    # and model-filled rows at their respective indices.
    combined_df = pd.DataFrame(data_df.copy(), columns=cols)
    combined_df.loc[shou_traj_df.index] = shou_traj_df
    combined_df.loc[mtm_df_data.index] = mtm_df_data
    combined_df = combined_df.iloc[:len_data]

    return data_df, shou_traj_df, mtm_df_data, combined_df, mtm_df_target


def plot_trajectory(group, show=True, save_path=None):
    """Render one trajectory as a 3D scatter plot (lon x lat x altitude).

    Parameters:
        group (pandas.DataFrame): trajectory data with 'lat', 'lon' and
            'geoaltitude' columns.
        show (bool): display the plot when True. Default True.
        save_path (str): if given, the figure is also saved there.

    Returns:
        matplotlib.figure.Figure: the created figure.
    """
    lons = group.lon.values
    lats = group.lat.values
    alts = group.geoaltitude.values

    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(projection='3d')
    ax.scatter3D(lons, lats, alts, s=2)

    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    ax.set_zlabel('GeoAltitude')

    if show:
        plt.show()
    if save_path is not None:
        fig.savefig(save_path)
    return fig


def plot_Interpolation_trajectory(group, new_masked_seq, masked_data, show=True, save_path=None):
    """Plot the interpolated sequence and the masked points on one 3D axis.

    Parameters:
        group (pandas.DataFrame): original trajectory data (currently unused
            by the plotting code; kept for interface compatibility).
        new_masked_seq (pandas.DataFrame): the interpolated sequence.
        masked_data (pandas.DataFrame): the masked data points.
        show (bool): display the plot when True. Default True.
        save_path (str): if given, the figure is also saved there.

    Returns:
        matplotlib.figure.Figure: the created figure.
    """
    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(projection='3d')

    # Interpolated sequence in the default color.
    ax.scatter3D(new_masked_seq.lon.values,
                 new_masked_seq.lat.values,
                 new_masked_seq.geoaltitude.values, s=2)

    # Masked points drawn semi-transparent in red on top.
    ax.scatter3D(masked_data.lon.values,
                 masked_data.lat.values,
                 masked_data.geoaltitude.values,
                 alpha=0.5, s=2, color='red')

    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    ax.set_zlabel('GeoAltitude')

    if show:
        plt.show()
    if save_path is not None:
        fig.savefig(save_path)
    return fig


def plot_n_trajectory(groups, labels, colors, show=True, save_path=None):
    """Plot trajectory groups as three figures and return them base64-encoded.

    Figure 0 is a 3D trajectory (lon/lat/alt), figure 1 a 2D lon/lat scatter,
    figure 2 altitude over time.

    Args:
        groups: DataFrames with 'time', 'lat', 'lon', 'geoaltitude' columns.
        labels: one legend label per group.
        colors: one matplotlib color per group.
        show: display each figure with plt.show() when True.
        save_path: optional path prefix; figures are saved as <save_path>_<i>.png.

    Returns:
        list[str]: three base64-encoded PNG images, one per figure.
    """
    image_base64_list = []

    for i in range(3):
        fig = plt.figure(figsize=(8, 6))
        if i == 0:
            ax = fig.add_subplot(111, projection='3d')
        else:
            ax = fig.add_subplot(111)

        for group, label, color in zip(groups, labels, colors):
            # Convert unix-second timestamps to matplotlib date numbers for
            # the time axis of the altitude plot.
            times_num = mdates.date2num(pd.to_datetime(group['time'], unit='s'))

            latitudes = group.lat.values
            longitudes = group.lon.values
            geoAltitudes = group.geoaltitude.values

            if i == 0:  # 3D trajectory
                ax.scatter3D(longitudes, latitudes, geoAltitudes, s=2, color=color, label=label)
                ax.set_title('3D trajectory')
                ax.set_xlabel('Longitude')
                ax.set_ylabel('Latitude')
                ax.set_zlabel('GeoAltitude')
            elif i == 1:  # 2D scatter plot
                ax.scatter(longitudes, latitudes, s=2, color=color, label=label)
                ax.set_title('2D scatter plot')
                ax.set_xlabel('Longitude')
                ax.set_ylabel('Latitude')
            else:  # altitude over time
                ax.scatter(times_num, geoAltitudes, s=2, color=color, label=label)
                ax.set_title('Altitude plot')
                ax.set_xlabel('Time')
                ax.set_ylabel('GeoAltitude')
                ax.set_xticks(ax.get_xticks()[::2])  # keep every other tick
                ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))

        ax.legend()

        if show:
            plt.tight_layout()
            plt.show()

        if save_path is not None:
            fig.savefig(f"{save_path}_{i}.png")

        # Encode this figure as a base64 PNG. Use `fig` explicitly rather than
        # the implicit "current figure" (plt.savefig/plt.close) so the right
        # figure is captured and closed even if other figures exist.
        buf = io.BytesIO()
        fig.savefig(buf, format='png')
        buf.seek(0)
        image_base64_list.append(base64.b64encode(buf.getvalue()).decode('utf-8'))
        plt.close(fig)

    return image_base64_list


def merge_same_flight(merged_data):
    """Concatenate trajectories that share a flight code.

    The flight code is read from column 1 of each trajectory's first row;
    trajectories with the same code are concatenated in ascending order of
    their first timestamp (column 0 of the first row).

    Args:
        merged_data: list of 2D numpy arrays, one per trajectory.

    Returns:
        list of 2D numpy arrays, one per distinct flight code, preserving
        the order of first appearance.
    """
    consumed = [False] * len(merged_data)
    result = []

    for i, trajectory in enumerate(merged_data):
        if consumed[i]:
            continue

        flight_code = trajectory[0, 1]
        group = [(i, trajectory)]  # (index, trajectory) pairs for this flight

        # Collect every later, not-yet-consumed trajectory with the same code.
        for j in range(i + 1, len(merged_data)):
            if consumed[j]:
                continue
            candidate = merged_data[j]
            if flight_code == candidate[0, 1]:
                group.append((j, candidate))
                consumed[j] = True

        if len(group) > 1:
            # Order segments by their starting timestamp before stitching.
            group.sort(key=lambda item: item[1][0, 0])
            combined = np.concatenate([t for _, t in group], axis=0)
        else:
            combined = trajectory

        result.append(combined)
        consumed[i] = True

    return result


def merge_seq_data(data_seq, char_seq, mask_label=None):
    """Recombine position sequences with their identity columns.

    Args:
        data_seq: per-trajectory positions, indexable as data_seq[i] ->
            [seq_len, 3] (lat, lon, geoaltitude); tensor or numpy array.
        char_seq: per-trajectory identity columns, char_seq[i] ->
            [seq_len, 3] holding (time, icao24, segment_id).
        mask_label: optional mask; rows where mask == 1 are kept. Expected
            indexable as mask_label[i][0] -> [seq_len] — TODO confirm shape
            against TraceSet output.

    Returns:
        list of [kept_rows, 6] object arrays laid out as
        [time, icao24, lat, lon, geoaltitude, segment_id].
    """
    # Normalize to numpy. torch.is_tensor(None) is False, so no separate
    # None check is needed (the original's `and mask_label is not None`
    # was redundant).
    if torch.is_tensor(data_seq):
        data_seq = data_seq.numpy()
    if torch.is_tensor(mask_label):
        mask_label = mask_label.numpy()

    merged_data = []

    for i in range(len(char_seq)):
        positions = data_seq[i]   # [seq_len, 3] lat/lon/alt
        identity = char_seq[i]    # [seq_len, 3] time/icao24/segment_id

        if mask_label is not None:
            # Keep only the rows flagged with 1 in this trajectory's mask.
            keep = np.where(mask_label[i][0] == 1)[0]
            positions = positions[keep]
            identity = identity[keep]

        # Assemble the 6-column layout (shared by both branches above):
        # columns 0, 1, 5 are identity; columns 2-4 are the positions.
        full_seq = np.zeros((len(positions), 6), dtype=object)
        full_seq[:, 0] = identity[:, 0]   # time
        full_seq[:, 1] = identity[:, 1]   # icao24
        full_seq[:, 5] = identity[:, 2]   # segment_id
        full_seq[:, 2:5] = positions      # lat, lon, geoaltitude

        merged_data.append(full_seq)

    return merged_data


def mask_and_visualize_trajectories(tra_data, parameters):
    """Feature 2: mask trajectory spans and render each merged flight.

    Args:
        tra_data: list of raw trajectory arrays.
        parameters: raw parameter dict; resolved by set_and_load_parameters_imp_2.

    Returns:
        dict with the model inputs/targets from TraceSet, the merged
        sequences for plotting, and base64-encoded preview images.
    """
    params = set_and_load_parameters_imp_2(parameters)
    dataset = TraceSet(tra_data, params)
    (masked_sentence, target, pairs, length_indicator, binary_mask,
     pairs_mask, masked_label, pair_targets, char_seq) = dataset.get_all_data()

    # Rebuild full rows for the kept positions, then merge per flight.
    merged = merge_seq_data(target, char_seq, masked_label)
    final_merged_sequences = merge_same_flight(merged)

    cols = ['time', 'icao24', 'lat', 'lon', 'geoaltitude', 'segment_id']
    show_dfs = []
    image_base64_mask_trajs = []
    for sequence in final_merged_sequences:
        df = pd.DataFrame(sequence, columns=cols)
        images = plot_n_trajectory([df[['time', 'lat', 'lon', 'geoaltitude']]],
                                   ['Original'], ['red'], show=False)
        image_base64_mask_trajs.append(images)
        show_dfs.append(df)

    return {'masked_sentence': masked_sentence, 'target': target, 'pairs': pairs,
            'length_indicator': length_indicator, 'binary_mask': binary_mask,
            'pairs_mask': pairs_mask, 'masked_label': masked_label,
            'final_merged_sequences': final_merged_sequences,
            'pair_targets': pair_targets, 'char_seq': char_seq,
            'image_base64_mask_trajs': image_base64_mask_trajs}


def inverse_normalize_data(normalized_data, norm_file_path):
    """Undo z-score normalization on the position columns of a trajectory.

    Only columns 2:5 (lat, lon, geoaltitude) are denormalized, using the
    mean/variance rows [0, 1, 4] from the stats file — the same rows the
    forward normalization used.

    Args:
        normalized_data: 2D array-like, rows of 6-column trajectory data.
        norm_file_path: CSV file holding the stored mean and variance.

    Returns:
        numpy.ndarray: a copy with columns 2:5 mapped back to original units.
    """
    mean, variance = read_mean_variance_from_csvfile(norm_file_path)

    if not isinstance(normalized_data, np.ndarray):
        normalized_data = np.array(normalized_data)

    # Work on a copy so the caller's array is untouched.
    result = normalized_data.copy()

    stat_rows = [0, 1, 4]  # stats rows matching lat/lon/geoaltitude
    std = variance[stat_rows] ** 0.5

    # Inverse of z-scoring: x = z * sqrt(variance) + mean
    result[:, 2:5] = normalized_data[:, 2:5] * std + mean[stat_rows]

    return result


def save_denormalized_data(denormalized_data_df, save_path='./'):
    """Save one denormalized trajectory as <flight_id>_<segment>.npy.

    Args:
        denormalized_data_df: DataFrame whose first row carries the flight id
            in column 1 and the segment number in the last column.
        save_path: directory to write into; created if missing.
    """
    flight_id = denormalized_data_df.iloc[0, 1]       # flight code (icao24)
    sequence_num = denormalized_data_df.iloc[0, -1]   # segment number

    # exist_ok avoids the check-then-create race of os.path.exists+makedirs.
    os.makedirs(save_path, exist_ok=True)

    filename = os.path.join(save_path, f"{flight_id}_{int(sequence_num)}.npy")

    # Persist the raw values (object array, so pickling is involved).
    np.save(filename, denormalized_data_df.values)


def imp_and_visualize_trajectories(parameters, mask_and_vis_result):
    """Feature 3: impute masked trajectory points with a pretrained BERT model,
    plot imputed vs. observed tracks, and save denormalized results to disk.

    Args:
        parameters: raw parameter dict; resolved via set_and_load_parameters_imp_3.
        mask_and_vis_result: output dict of mask_and_visualize_trajectories.

    Returns:
        dict with 'image_base64_impu_trajs' (list of base64 image triples)
        and the evaluation metrics 'RMS', 'MDE', 'MAE'.
    """
    params = set_and_load_parameters_imp_3(parameters)
    '''获取数据'''
    # Unpack model inputs / targets produced by the masking step.
    targets = mask_and_vis_result['target']
    masked_sentence = mask_and_vis_result['masked_sentence']
    pairs = mask_and_vis_result['pairs']
    length_indicator = mask_and_vis_result['length_indicator']
    binary_mask = mask_and_vis_result['binary_mask']
    masked_label = mask_and_vis_result['masked_label']
    pairs_mask = mask_and_vis_result['pairs_mask']
    char_seq = mask_and_vis_result['char_seq']
    final_merged_sequences = mask_and_vis_result['final_merged_sequences']
    '''加载模型'''
    # Load pretrained weights and run inference.
    # NOTE(review): no torch.no_grad() here; gradients are only dropped via
    # .detach() on the outputs below.
    model = create_model(params)
    state = torch.load(params['predict_model'])
    model.load_state_dict(state['module'])
    model.eval()
    mtms, pairs_predictions = model(masked_sentence, pairs, length_indicator, binary_mask, masked_label, pairs_mask)
    rms, mde, mae = eveluation(mtms, targets, masked_label)
    posi = masked_position(masked_label)
    # Predictions at masked positions, ground truth everywhere else.
    # Assumes posi is a boolean tensor (~ is bitwise NOT) — TODO confirm.
    combined_tensors = mtms.detach() * posi + targets * ~posi
    merged_combineds = merge_seq_data(combined_tensors, char_seq)
    final_merged_combineds = merge_same_flight(merged_combineds)
    # Invert the 0/1 labels so merge_seq_data keeps only the imputed rows.
    reversed_masked_label = 1 - masked_label
    merged_mtms = merge_seq_data(mtms.detach(), char_seq, reversed_masked_label)
    final_merged_mtms = merge_same_flight(merged_mtms)
    image_base64_impu_trajs = []
    for final_merged_mtm, final_merged_sequence, mask_label, final_merged_combined in zip(final_merged_mtms, final_merged_sequences, masked_label, final_merged_combineds):
        # Plot imputed points ('imp', red) against the observed track ('shou', blue).
        imp_df = pd.DataFrame(final_merged_mtm, columns=['time', 'icao24', 'lat', 'lon', 'geoaltitude', 'segment_id'])
        imp_df_plot = imp_df[['time', 'lat', 'lon', 'geoaltitude']]
        show_df = pd.DataFrame(final_merged_sequence, columns=['time', 'icao24', 'lat', 'lon', 'geoaltitude', 'segment_id'])
        shou_df_plot = show_df[['time', 'lat', 'lon', 'geoaltitude']]
        image_base64_impu_traj = plot_n_trajectory([imp_df_plot, shou_df_plot], ['imp', 'shou'], ['red', 'blue'], show=True)
        image_base64_impu_trajs.append(image_base64_impu_traj)
        # Map the combined trajectory back to real-world units and persist it.
        denormalized_data = inverse_normalize_data(final_merged_combined, params['norm_file_path'])
        denormalized_data_df = pd.DataFrame(denormalized_data, columns=['time', 'icao24', 'lat', 'lon', 'geoaltitude', 'segment_id'])
        save_denormalized_data(denormalized_data_df, save_path=params['imp_save_path'])

    return {'image_base64_impu_trajs': image_base64_impu_trajs, 'RMS': rms, 'MDE': mde, 'MAE': mae}


if __name__ == "__main__":
    # Feature 1: load and visualize a single trajectory.
    parameters_1 = {"trajdata_path": r'D:\pythonProject\粒子滤波\补盲\拆分-npy\3c70c9_18116.npy'}
    load_and_visualize_result = load_and_visualize_trajectory_imp(parameters_1)
    # Result keys:
    #   'traj_data'              2D numpy.ndarray                 un-normalized data, for map display
    #   'image_base64_original'  list[str1(base64), str2, str3]   images of all trajectories

    # Load every trajectory file in the folder.
    folder_path = r'D:\pythonProject\粒子滤波\补盲\拆分2-npy'
    lines = [folder_path + '\\' + name for name in os.listdir(folder_path)]
    tra_data_all = []
    for file_path in lines:
        result = load_and_visualize_trajectory_imp({"trajdata_path": file_path})
        tra_data_all.append(result['traj_data'])

    # Feature 2: sparse (masked) trajectory generation.
    parameters2 = {"max_pair_targets": 15,
                   'span_lower': 5,
                   'span_upper': 10,
                   'mask_factor': 0.3,
                   'geometric_p': 0.5,
                   'norm_file_path': r'D:/pythonProject/粒子滤波/Bert/result.csv'}
    mask_and_vis_result = mask_and_visualize_trajectories(tra_data_all, parameters2)
    # Except image_base64_mask_trajs (display only), every entry feeds the model:
    #   'masked_sentence'         3D torch.Tensor                      masked trajectories
    #   'target'                  3D torch.Tensor                      target trajectories
    #   'pairs'                   3D torch.Tensor                      masked spans
    #   'length_indicator'        1D torch.Tensor (e.g. Size([36]))    masked span lengths
    #   'binary_mask'             3D torch.Tensor                      model input
    #   'pairs_mask'              2D torch.Tensor                      model input
    #   'masked_label'            3D torch.Tensor                      model input
    #   'final_merged_sequences'  list[2D numpy.ndarray]               plotting for feature 3
    #   'pair_targets'            4D torch.Tensor                      masked targets
    #   'char_seq'                list[2D numpy.ndarray]               identity info
    #   'image_base64_mask_trajs' list[list[str x3 (base64)]]          plot results

    # Feature 3: gap filling (imputation).
    parameters3 = {"loc_size": 3,
                   'traj_len': 100,
                   'dim_in': 256,
                   'dim_out': 256,
                   'n_heads': 5,
                   'n_layers': 4,
                   'dropout': 0.2,
                   'scale': 1,
                   'max_pair_targets': 15,
                   'position_embedding_size': 100,
                   'predict_model': 'weights/run_26/best_state.pt',
                   'norm_file_path': r'D:/pythonProject/粒子滤波/Bert/result.csv',
                   'imp_save_path': r'D:\pythonProject\粒子滤波\补盲\插补'}
    result = imp_and_visualize_trajectories(parameters3, mask_and_vis_result)
    print(result['RMS'])
    print(result['MAE'])
    print(result['MDE'])
    # Result keys:
    #   'image_base64_impu_trajs' list[list[str x3 (base64)]]  plot results
    #   'RMS'                     tensor[a1]                   root-mean-square error
    #   'MAE'                     tensor[a1, a2, a3]           mean absolute error (lat, lon, alt)
    #   'MDE'                     tensor[a1, a2, a3]           mean relative error (lat, lon, alt)


















