import io
import os
import torch
import base64
import pandas as pd
import torch.nn as nn
import numpy as np
from tempo import *
from tempo0 import *  # 含有setParameters等函数
from bert_liner_svm_nonpair import BERT, masked_position
from dataproc_mask_sanwei import TraceSet, load_yaml
import matplotlib.dates as mdates
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
from fb_utils import read_mean_variance_from_csvfile, set_and_load_parameters_imp_1, set_and_load_parameters_imp_2, \
    set_and_load_parameters_imp_3, sliding_window


def load_and_visualize_trajectory_imp(parameters):
    """Feature 1: load a trajectory file and render it as base64 images.

    Args:
        parameters: raw parameter dict containing at least "trajdata_path".

    Returns:
        dict with 'traj_data' (the trimmed raw ndarray) and
        'image_base64_original' (list of three base64-encoded plot images).
    """
    # Resolve feature-1 parameters from the raw dict
    params = set_and_load_parameters_imp_1(parameters)

    # Load the trajectory data and trim it to a multiple of 100 rows
    raw = np.load(params["trajdata_path"], allow_pickle=True)
    usable = len(raw) - (len(raw) % 100)
    traj_data = raw[:usable]

    # Visualize the original data
    columns = ['time', 'icao24', 'lat', 'lon', 'geoaltitude', 'segment_id']
    traj_df = pd.DataFrame(traj_data, columns=columns)
    image_base64_original = plot_n_trajectory(
        [traj_df[['time', 'lat', 'lon', 'geoaltitude']]],
        ['Original'], ['red'], show=False)

    # Publish the rendered images to the UI layer
    setShowData({"image_base64": image_base64_original})

    return {'traj_data': traj_data, 'image_base64_original': image_base64_original}


def create_model(params, include_ptp=False):
    """Build a BERT model from a parameter dict.

    Args:
        params: dict with model hyper-parameters ('loc_size', 'dim_in',
            'dim_out', 'n_heads', 'n_layers', 'dropout', 'max_len',
            'scale', 'mask_value'; plus 'max_targets' and
            'position_embedding_size' when include_ptp is True).
        include_ptp: whether to enable the pairwise-target-prediction head.

    Returns:
        A configured BERT instance.
    """
    # Common kwargs shared by both configurations — avoids duplicating
    # the full argument list in each branch.
    kwargs = dict(
        loc_size=params['loc_size'],
        dim_in=params['dim_in'],
        dim_out=params['dim_out'],
        n_heads=params['n_heads'],
        n_layers=params['n_layers'],
        dropout=params['dropout'],
        max_len=params['max_len'],
        scale=params['scale'],
        mask_value=params['mask_value'],
        include_ptp=include_ptp,
    )
    if include_ptp:
        # PTP head needs the extra pairing parameters.
        # NOTE(review): the __main__ block supplies 'max_pair_targets',
        # not 'max_targets' — confirm the expected key before enabling PTP.
        kwargs['max_targets'] = params['max_targets']
        kwargs['position_embedding_size'] = params['position_embedding_size']
    return BERT(**kwargs)


def torch_cov(input_vec):
    """Compute per-sample covariance matrices for a batch.

    Args:
        input_vec: tensor of shape (bn, seq, 2).

    Returns:
        Tensor of shape (bn, 4): each sample's covariance matrix flattened.
    """
    # Center each sequence around its own mean
    centered = input_vec - input_vec.mean(dim=1, keepdim=True)
    # Cov = X^T X / (n - 1), computed batch-wise
    n = centered.shape[1]
    cov = centered.transpose(1, 2) @ centered / (n - 1)
    # Flatten each covariance matrix to one row per sample
    return cov.reshape(input_vec.shape[0], -1)


def plot_n_trajectory(groups, labels, colors, show=False, save_path=None):
    """Plot trajectory groups as three figures: 3D path, 2D scatter, altitude.

    Args:
        groups: list of DataFrames with 'time', 'lat', 'lon', 'geoaltitude'
            columns ('time' presumably in epoch seconds — converted with
            unit='s').
        labels: legend label per group.
        colors: matplotlib color per group.
        show: display each figure interactively.
        save_path: optional path prefix; each figure is also saved to
            "{save_path}_{i}.png".

    Returns:
        List of three base64-encoded PNG strings (3D, 2D, altitude).
    """
    image_base64_list = []

    # Three separate figures instead of subplots
    for i in range(3):
        fig = plt.figure(figsize=(8, 6))
        if i == 0:
            ax = fig.add_subplot(111, projection='3d')
        else:
            ax = fig.add_subplot(111)

        for group, label, color in zip(groups, labels, colors):
            # Epoch seconds -> matplotlib date numbers for the time axis
            times_dt = pd.to_datetime(group['time'], unit='s')
            times_num = np.array(mdates.date2num(times_dt), dtype=np.float64)

            # Coerce to float64 so None values become NaN
            latitudes = np.array(group['lat'].values, dtype=np.float64)
            longitudes = np.array(group['lon'].values, dtype=np.float64)
            geoAltitudes = np.array(group['geoaltitude'].values, dtype=np.float64)

            # Drop rows where any field is NaN
            valid_mask = (~np.isnan(latitudes)) & (~np.isnan(longitudes)) \
                & (~np.isnan(geoAltitudes)) & (~np.isnan(times_num))
            latitudes = latitudes[valid_mask]
            longitudes = longitudes[valid_mask]
            geoAltitudes = geoAltitudes[valid_mask]
            times_num = times_num[valid_mask]

            # Skip this group entirely if nothing survived the filter
            if len(latitudes) == 0:
                continue

            if i == 0:  # 3D trajectory
                ax.scatter3D(longitudes, latitudes, geoAltitudes, s=2, color=color, label=label)
                ax.set_title('3D trajectory')
                ax.set_xlabel('Longitude')
                ax.set_ylabel('Latitude')
                ax.set_zlabel('GeoAltitude')
            elif i == 1:  # 2D scatter plot
                ax.scatter(longitudes, latitudes, s=2, color=color, label=label)
                ax.set_title('2D scatter plot')
                ax.set_xlabel('Longitude')
                ax.set_ylabel('Latitude')
            else:  # Altitude plot
                ax.scatter(times_num, geoAltitudes, s=2, color=color, label=label)
                ax.set_title('Altitude plot')
                ax.set_xlabel('Time')
                ax.set_ylabel('GeoAltitude')
                ax.set_xticks(ax.get_xticks()[::2])  # keep every other tick
                ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))

        # Only draw a legend when something was actually plotted,
        # avoiding the "No artists with labels" warning on empty figures.
        handles, _ = ax.get_legend_handles_labels()
        if handles:
            ax.legend()

        if show:
            plt.tight_layout()
            plt.show()

        # Save individual plot if save_path is provided
        if save_path is not None:
            fig.savefig(f"{save_path}_{i}.png")

        # Encode this figure (not the implicit "current" one) as base64
        buf = io.BytesIO()
        fig.savefig(buf, format='png')
        buf.seek(0)
        image_base64_list.append(base64.b64encode(buf.getvalue()).decode('utf-8'))
        plt.close(fig)

    return image_base64_list


def inverse_normalize_data(normalized_data, norm_file_path):
    """Undo z-score normalization on the lat/lon/altitude columns.

    Args:
        normalized_data: 2D array-like; columns 2:5 hold normalized
            lat/lon/altitude values.
        norm_file_path: CSV file holding the per-column mean and variance.

    Returns:
        A new ndarray with columns 2:5 de-normalized; other columns copied.
    """
    mean, variance = read_mean_variance_from_csvfile(norm_file_path)

    # Work on an ndarray copy so the caller's data is never mutated
    if not isinstance(normalized_data, np.ndarray):
        normalized_data = np.array(normalized_data)
    denormalized_data = normalized_data.copy()

    # Same columns that were normalized originally (lat, lon, altitude)
    columns_to_denormalize = [0, 1, 4]
    selected_mean = mean[columns_to_denormalize]
    selected_std = variance[columns_to_denormalize] ** 0.5

    # Invert z-scoring: x = z * sqrt(variance) + mean
    denormalized_data[:, 2:5] = normalized_data[:, 2:5] * selected_std + selected_mean

    return denormalized_data


def save_denormalized_data(denormalized_data_df, save_path='./'):
    """Save a de-normalized trajectory DataFrame as "{flight}_{seq}.npy".

    Args:
        denormalized_data_df: DataFrame whose first row carries the flight
            id in column 1 and the sequence number in the last column.
        save_path: target directory (created if missing).
    """
    # Identify the file from the first row: flight id + sequence number
    flight_id = denormalized_data_df.iloc[0, 1]
    sequence_num = denormalized_data_df.iloc[0, -1]

    # exist_ok avoids the check-then-create race of the former
    # os.path.exists() / os.makedirs() pair
    os.makedirs(save_path, exist_ok=True)

    filename = os.path.join(save_path, f"{flight_id}_{int(sequence_num)}.npy")

    # Persist the raw values (not the DataFrame wrapper)
    np.save(filename, denormalized_data_df.values)


def generate_data(seq_list, length, stride=1, mean=None, variance=None):
    """Sample fixed-length windows from a collection of trajectories.

    Args:
        seq_list: iterable of 2D arrays; columns 2:5 hold lat/lon/altitude.
            NOTE: arrays are normalized IN PLACE when mean/variance are given
            (original behavior preserved).
        length: window length passed to sliding_window.
        stride: window stride (default 1).
        mean: optional per-column means for z-score normalization.
        variance: optional per-column variances for z-score normalization.

    Returns:
        List of windows produced by sliding_window over every trajectory.
    """
    seqs = []

    # Bug fix: the original indexed mean[...] unconditionally, so calling
    # with the default mean=None raised TypeError before the None check.
    normalize = mean is not None and variance is not None
    if normalize:
        columns_to_normalize = [0, 1, 4]
        mean_select = mean[columns_to_normalize]
        std_select = variance[columns_to_normalize] ** 0.5

    for data in seq_list:
        if normalize:
            # z-score the position columns (in place, as before)
            data[:, 2:5] = (data[:, 2:5] - mean_select) / std_select
        # Sliding window over each trajectory to multiply the samples
        for window in sliding_window(data, length, stride):
            seqs.append(window)

    return seqs


def merge_seq_data(data_seq, char_seq, mask_label=None):
    """Merge position values with identity columns into 6-column rows.

    Args:
        data_seq: per-trajectory positions, shape [n, rows, 3]
            (tensor or ndarray) — presumably lat/lon/altitude, matching
            the column layout used elsewhere in this module.
        char_seq: per-trajectory identity values, shape [n, rows, 3]:
            (time, flight id, segment id).
        mask_label: optional; mask_label[i][0] is a 0/1 vector over rows.
            Rows with mask 1 keep their position values; others stay None.

    Returns:
        List of object ndarrays of shape [rows, 6]:
        [time, icao24, lat, lon, geoaltitude, segment_id].
    """
    # Normalize tensors to numpy so the indexing below is uniform
    if torch.is_tensor(data_seq):
        data_seq = data_seq.numpy()
    if mask_label is not None and torch.is_tensor(mask_label):
        mask_label = mask_label.numpy()

    merged_data = []

    for i in range(len(char_seq)):
        current_data_seq = data_seq[i]
        current_char_seq = char_seq[i]
        n_rows = len(current_data_seq)

        # Object array initialized to None: masked-out cells stay None
        full_seq = np.full((n_rows, 6), None, dtype=object)

        # Identity columns: time (0), flight id (1), segment id (5)
        full_seq[:, 0] = current_char_seq[:, 0]
        full_seq[:, 1] = current_char_seq[:, 1]
        full_seq[:, 5] = current_char_seq[:, 2]

        if mask_label is None:
            # No mask: take every position row
            full_seq[:, 2:5] = current_data_seq
        else:
            # Keep positions only where the mask is 1. Vectorized fancy
            # indexing replaces the original O(rows^2) "j in valid_indices"
            # membership loop.
            valid_indices = np.where(mask_label[i][0] == 1)[0]
            if len(valid_indices):
                full_seq[valid_indices, 2:5] = current_data_seq[valid_indices]

        merged_data.append(full_seq)

    return merged_data


def merge_same_flight(merged_data):
    """Concatenate trajectory segments that share a flight code.

    Segments with the same flight code (column 1 of the first row) are
    sorted by their first timestamp (column 0 of the first row) and
    concatenated. Output order follows each flight code's first
    appearance, matching the original pairwise-scan behavior, but the
    grouping is done in one O(n) pass instead of O(n^2) comparisons.

    Args:
        merged_data: list of 2D arrays as produced by merge_seq_data.

    Returns:
        List with one (possibly concatenated) array per flight code.
    """
    # Group segments by flight code; dict preserves first-seen order
    groups = {}
    for trajectory in merged_data:
        flight_code = trajectory[0, 1]
        groups.setdefault(flight_code, []).append(trajectory)

    merged_result = []
    for segments in groups.values():
        if len(segments) > 1:
            # Order the segments chronologically by first timestamp
            segments.sort(key=lambda t: t[0, 0])
            merged_result.append(np.concatenate(segments, axis=0))
        else:
            # Single segment: pass the original array through unchanged
            merged_result.append(segments[0])

    return merged_result


def imp_and_visualize_mask_trajs(parameters, mask_traj_list):
    """Feature 2: impute masked trajectory points with BERT and visualize.

    Args:
        parameters: raw parameter dict (see set_and_load_parameters_imp_3).
        mask_traj_list: list of trajectory arrays with masked/missing rows.

    Returns:
        dict with:
            'image_base64_impu_trajs': per-flight list of three base64
                plot images (imputed vs. observed),
            'denormalized_datas': per-flight de-normalized arrays.
    """
    params = set_and_load_parameters_imp_3(parameters)

    # Build the dataset and pull all masked windows at once
    dataset = TraceSet(mask_traj_list, params)
    masked_sentence, masked_label, char_seq = dataset.get_all_data()

    # Inference-only model: no PTP head; strict=False skips any PTP
    # weights present in the checkpoint
    model = create_model(params, include_ptp=False)
    state = torch.load(params['predict_model'])
    model.load_state_dict(state['module'], strict=False)
    model.eval()

    mtms, _ = model(tokens=masked_sentence, mask=masked_label)

    # Splice model predictions into the masked positions, keep observed
    # values elsewhere (the unused mtm_o = mtms[0] local was removed)
    posi = masked_position(masked_label)
    combined_tensors = mtms.detach() * posi + masked_sentence * ~posi

    merged_combineds = merge_seq_data(combined_tensors, char_seq)
    final_merged_combineds = merge_same_flight(merged_combineds)

    # Predictions alone: keep only positions that were masked
    reversed_masked_label = 1 - masked_label
    merged_mtms = merge_seq_data(mtms.detach(), char_seq, reversed_masked_label)
    final_merged_mtms = merge_same_flight(merged_mtms)

    # Observed input alone: keep only positions that were NOT masked
    merged_sequences = merge_seq_data(masked_sentence, char_seq, masked_label)
    final_merged_sequences = merge_same_flight(merged_sequences)

    columns = ['time', 'icao24', 'lat', 'lon', 'geoaltitude', 'segment_id']
    image_base64_impu_trajs = []
    denormalized_datas = []
    # NOTE(review): masked_label is per-window while the merged lists are
    # per-flight; zip stops at the shortest — confirm the intended pairing.
    for final_merged_mtm, final_merged_sequence, mask_label, final_merged_combined in zip(
            final_merged_mtms, final_merged_sequences, masked_label, final_merged_combineds):
        imp_df = pd.DataFrame(final_merged_mtm, columns=columns)
        show_df = pd.DataFrame(final_merged_sequence, columns=columns)
        # Red = imputed points, blue = observed points
        image_base64_impu_traj = plot_n_trajectory(
            [imp_df[['time', 'lat', 'lon', 'geoaltitude']],
             show_df[['time', 'lat', 'lon', 'geoaltitude']]],
            ['imp', 'shou'], ['red', 'blue'], show=False)
        image_base64_impu_trajs.append(image_base64_impu_traj)

        # De-normalize the spliced trajectory and persist it to disk
        denormalized_data = inverse_normalize_data(final_merged_combined, params['norm_file_path'])
        denormalized_datas.append(denormalized_data)
        denormalized_data_df = pd.DataFrame(denormalized_data, columns=columns)
        save_denormalized_data(denormalized_data_df, save_path=params['imp_save_path'])

    return {'image_base64_impu_trajs': image_base64_impu_trajs, 'denormalized_datas': denormalized_datas}


if __name__ == "__main__":
    # Feature 1: load one trajectory file and visualize it
    parameters_1 = {"trajdata_path": r'D:\pythonProject\粒子滤波\补盲\缺失航迹\3c64f4_18115.npy'}
    load_and_visualize_result = load_and_visualize_trajectory_imp(parameters_1)
    # load_and_visualize_result = {'traj_data': ..., 'image_base64_original': ...}
    # traj_data                2D numpy.ndarray                           un-normalized data, for map display
    # image_base64_original    list[str1(base64), str2, str3]             images of all trajectories

    # Load every masked-trajectory file in the folder
    folder_path = r'D:\pythonProject\粒子滤波\补盲_用户界面\缺失航迹'
    lines = [folder_path + '\\' + name for name in os.listdir(folder_path)]

    tra_data_all_mask = []
    for file_path in lines:
        result = load_and_visualize_trajectory_imp({"trajdata_path": file_path})
        tra_data_all_mask.append(result['traj_data'])

    # Feature 2: impute the masked points
    parameters3 = {"loc_size": 3,
                   'traj_len': 100,
                   'max_len': 100,
                   'dim_in': 256,
                   'dim_out': 256,
                   'n_heads': 5,
                   'n_layers': 4,
                   'dropout': 0.2,
                   'scale': 1,
                   'max_pair_targets': 15,
                   'position_embedding_size': 100,
                   'mask_value': -9999,
                   'predict_model': 'weights/run_26/best_state.pt',
                   'norm_file_path': r'D:/pythonProject/粒子滤波/Bert/result.csv',
                   'imp_save_path': r'D:\pythonProject\粒子滤波\补盲_用户界面\插补'}
    result = imp_and_visualize_mask_trajs(parameters3, tra_data_all_mask)
    # 'image_base64_impu_trajs'    list[list[str1(base64), str2, str3]]    plot results
    # 'denormalized_datas'         list[np.array]                          inverse-normalized results