""" 数据处理 """
import io
import os
from tempo import *
from tempo0 import *  # provides setParameters, setShowData and related helpers
import base64
import yaml
import torch
import matplotlib.dates as mdates
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from glob import glob
from tqdm import tqdm
from typing import List
from itertools import groupby
from span_mask_test import PairWithSpanMaskingScheme
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from sklearn.model_selection import train_test_split
from utils import read_mean_variance_from_csvfile, set_and_load_parameters_imp_1, set_and_load_parameters_imp_2


def plot_n_trajectory(groups, labels, colors, show=False, save_path=None):
    """Plot trajectories and return three base64-encoded PNG images.

    Three independent figures are produced: a 3D trajectory, a 2D
    lon/lat scatter, and an altitude-vs-time plot.

    Args:
        groups: iterable of DataFrames, each with 'time' (unix seconds),
            'lat', 'lon' and 'geoaltitude' columns.
        labels: one legend label per group.
        colors: one matplotlib color per group.
        show: when True, display each figure interactively.
        save_path: when given, each figure is also written to
            f"{save_path}_{i}.png" (i = 0, 1, 2).

    Returns:
        list[str]: three base64 strings in the order
        (3D trajectory, 2D scatter, altitude plot).
    """
    image_base64_list = []

    for i in range(3):
        fig = plt.figure(figsize=(8, 6))
        # Only the first figure needs a 3D projection.
        if i == 0:
            ax = fig.add_subplot(111, projection='3d')
        else:
            ax = fig.add_subplot(111)

        for group, label, color in zip(groups, labels, colors):
            latitudes = group.lat.values
            longitudes = group.lon.values
            geoAltitudes = group.geoaltitude.values

            if i == 0:  # 3D trajectory
                ax.scatter3D(longitudes, latitudes, geoAltitudes, s=2, color=color, label=label)
                ax.set_title('3D trajectory')
                ax.set_xlabel('Longitude')
                ax.set_ylabel('Latitude')
                ax.set_zlabel('GeoAltitude')
            elif i == 1:  # 2D scatter plot
                ax.scatter(longitudes, latitudes, s=2, color=color, label=label)
                ax.set_title('2D scatter plot')
                ax.set_xlabel('Longitude')
                ax.set_ylabel('Latitude')
            else:  # Altitude plot
                # Convert unix timestamps to matplotlib date numbers so the
                # x axis can be formatted as HH:MM:SS.
                times_dt = pd.to_datetime(group['time'], unit='s')
                times_num = mdates.date2num(times_dt)
                ax.plot(times_num, geoAltitudes, color=color, label=label)
                ax.set_title('Altitude plot')
                ax.set_xlabel('Time')
                ax.set_ylabel('GeoAltitude')
                ax.set_xticks(ax.get_xticks()[::2])  # keep every other tick
                ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))

        ax.legend()

        if show:
            plt.tight_layout()
            plt.show()

        # Save individual plot if save_path is provided.
        if save_path is not None:
            fig.savefig(f"{save_path}_{i}.png")

        # Encode the figure as base64 PNG. Use fig.savefig/plt.close(fig)
        # instead of the implicit "current figure" APIs: after plt.show()
        # some backends change or drop the current figure, so plt.savefig
        # could capture (and plt.close could close) the wrong figure.
        buf = io.BytesIO()
        fig.savefig(buf, format='png')
        buf.seek(0)
        image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')
        image_base64_list.append(image_base64)
        plt.close(fig)

    return image_base64_list


def load_and_visualize_trajectory(parameters):
    """Feature 1: load a trajectory file and visualize the raw track.

    Args:
        parameters: dict consumed by set_and_load_parameters_imp_1;
            must resolve a "trajdata_path" entry.

    Returns:
        dict with the trimmed raw array ('traj_data') and the base64
        images of the original track ('image_base64_original').
    """
    params = set_and_load_parameters_imp_1(parameters)

    # Load the raw trajectory array and trim it so the row count is a
    # multiple of 100.
    raw = np.load(params["trajdata_path"], allow_pickle=True)
    usable = len(raw) - (len(raw) % 100)
    raw = raw[:usable]

    # Visualize the original data.
    frame = pd.DataFrame(raw, columns=['time', 'icao24', 'lat', 'lon', 'geoaltitude', 'segment_id'])
    images = plot_n_trajectory([frame[['time', 'lat', 'lon', 'geoaltitude']]], ['Original'], ['red'])

    # Publish the rendered images.
    setShowData({"image_base64": images})

    return {'traj_data': raw, 'image_base64_original': images}


def load_yaml(yaml_path: str = 'configs.yaml'):
    """Read a YAML configuration file (defaults to configs.yaml)."""
    with open(yaml_path, 'r', encoding='utf-8') as stream:
        return yaml.safe_load(stream)


def load_LT(csv_path: str):
    """Load columns 2, 3, 13 and 21 from the CSV file at *csv_path*."""
    return pd.read_csv(csv_path, usecols=[2, 3, 13, 21])


def load_npy(npy_path, usecols):
    """Load a .npy array and keep only the requested columns."""
    array = np.load(npy_path, allow_pickle=True)
    return array[:, usecols]


def read_mean_variance_from_file(input_file):
    """Parse per-column statistics from a text file.

    Each line has the form ``<column> - Mean: <m>, Variance: <v>``.
    The first entry (the timestamp's mean/variance) is discarded.

    Returns:
        (mean_array, variance_array): two 1-D numpy arrays.
    """
    means = []
    variances = []

    with open(input_file, 'r') as fh:
        for raw_line in fh:
            # Split off the column name, then the two "key: value" parts.
            _, stats = raw_line.strip().split(' - ')
            mean_part, variance_part = stats.split(', ')
            means.append(float(mean_part.split(': ')[1]))
            variances.append(float(variance_part.split(': ')[1]))

    # Drop index 0: the timestamp statistics are not used downstream.
    mean_array = np.array(means)[1:]
    variance_array = np.array(variances)[1:]

    return mean_array, variance_array


def generate_data(seq_list, length, stride=1, mean=None, variance=None):
    """Sample fixed-length windows from a collection of trajectories.

    Args:
        seq_list: iterable of 2-D arrays; columns 2:5 hold the numeric
            features to normalize (presumably lat, lon, geoaltitude —
            TODO confirm against the loader's column order).
        length: sliding-window length.
        stride: sliding-window step.
        mean, variance: per-column statistics arrays (with the timestamp
            entry already removed); indices [0, 1, 4] correspond to data
            columns 2:5. When either is None, normalization is skipped.

    Returns:
        list of windows yielded by sliding_window() over every sequence.
    """
    seqs = []
    # Bug fix: the original indexed mean/variance unconditionally, so the
    # documented defaults (mean=None, variance=None) raised TypeError.
    # Select the matching statistics only when both are provided.
    normalize = mean is not None and variance is not None
    if normalize:
        columns_to_normalize = [0, 1, 4]
        mean_select = mean[columns_to_normalize]
        variance_select = variance[columns_to_normalize]

    for data in seq_list:
        if normalize:
            # Z-score normalize the feature columns in place.
            data[:, 2:5] = (data[:, 2:5] - mean_select) / (variance_select ** 0.5)
        # Slide a window over the trajectory to multiply the sample count.
        for window in sliding_window(data, length, stride):
            seqs.append(window)

    return seqs


def padding(data: np.ndarray, length: int, value: int):
    """Pad *data* along axis 0 with *value* so it has *length* rows.

    Assumes len(data) <= length (callers only pad short windows).
    """
    pad_shape = list(data.shape)
    pad_shape[0] = length - pad_shape[0]
    filler = np.full(pad_shape, value, dtype=np.float64)
    return np.concatenate([data, filler])


def sliding_window(data, length, stride=1):
    """ 滑窗, 对一条轨迹进行采样 """
    if len(data) < length:
        yield data
        return

    for i in range(0, len(data), stride): #间隔一进行采样
        if i + length > len(data):
            yield data[i:]
            break
        else:
            yield data[i:i+length]


class TraceSet(Dataset):
    """Dataset of fixed-length trajectory windows with span-mask targets.

    Each item is one normalized window of ``traj_len`` rows plus the
    masking artifacts produced by ``PairWithSpanMaskingScheme`` (BERT-style
    span masking over the numeric trajectory features).
    """

    def __init__(self, data_list, parameters):
        """Build the windowed dataset from raw trajectory arrays.

        Args:
            data_list: list of raw trajectory arrays (one per source file);
                columns are assumed to follow the loader's order
                [time, icao24, lat, lon, geoaltitude, segment_id] —
                TODO confirm with the caller.
            parameters: dict consumed by set_and_load_parameters_imp_2;
                must provide 'mask_factor', 'max_pair_targets' and
                'norm_file_path'.
        """
        super(TraceSet, self).__init__()
        self.params = set_and_load_parameters_imp_2(parameters)
        self.data_list = data_list
        self.traj_len = 100  # window length (rows per sample)
        self.use_norm = True
        self.stride = 100  # window step; equal to traj_len, so windows do not overlap
        self.mask_factor = self.params['mask_factor']
        self.mask_value = -9999  # sentinel for masked positions
        self.max_pair_targets = self.params['max_pair_targets']
        self.dim_in = 256  # NOTE(review): presumably the model's hidden size — confirm
        self.position_embedding_size = 100
        # Per-column normalization statistics (timestamp entry removed).
        self.mean_value, self.variance_value = read_mean_variance_from_csvfile(self.params['norm_file_path'])

        # Normalize every trajectory and slice it into windows.
        self.datas = generate_data(self.data_list, self.traj_len, self.stride, self.mean_value, self.variance_value)

    def __len__(self):
        """Number of windows in the dataset."""
        return len(self.datas)

    def __getitem__(self, index):
        """Return one masked window and its training targets.

        Returns:
            (masked_sentence, target_sentence, pairs, pair_lengths,
             masked_label, pair_targets, char_seq) — tensors except for
            pair_lengths (int) and char_seq (numpy array of the
            non-numeric columns [time, icao24, segment_id]).
        """
        seq = self.datas[index]  # already normalized by generate_data
        # Split numeric features (cols 2:5) from identifier columns, then
        # generate the span mask over the numeric part.
        data_seq = seq[:, 2:5].astype(np.float32)
        char_seq = seq[:, [0, 1, 5]]
        scheme = PairWithSpanMaskingScheme(self.params)
        target_sentence, masked_sentence, pair_targets, masked_label = scheme.mask(data_seq)
        # Each pair_targets item: first 4 entries are the pair indices,
        # entry 4 is the regression/classification target.
        pairs = [item[:4] for item in pair_targets]
        pair_targets = [item[4] for item in pair_targets]
        # pair_targets = [item[4:] for item in pair_targets]
        pair_lengths = len(pairs)

        # Pad short (tail) windows up to traj_len; masked_label is padded
        # with 1 so padded rows count as "masked"/ignored positions —
        # TODO confirm the downstream loss treats label 1 that way.
        if masked_sentence.shape[0] < self.traj_len:
            masked_sentence = padding(masked_sentence, self.traj_len, 0)
            target_sentence = padding(target_sentence, self.traj_len, 0)
            masked_label = padding(masked_label, self.traj_len, 1)
            char_seq = padding(char_seq, self.traj_len, 0)

        # to tensor
        masked_sentence = torch.Tensor(masked_sentence)
        target_sentence = torch.Tensor(target_sentence)
        pairs = torch.Tensor(pairs)
        pair_targets = np.array(pair_targets)
        pair_targets = torch.Tensor(pair_targets)
        masked_label = torch.LongTensor(masked_label).unsqueeze(0)

        return masked_sentence, target_sentence, pairs, pair_lengths, masked_label, pair_targets, char_seq

    def get_all_data(self):
        """Materialize the whole dataset as padded batch tensors.

        Returns the padded/stacked fields of every item plus the derived
        masks: length_indicator (per-pair span lengths), binary_mask
        (position-vs-span-length mask) and pairs_mask (valid-pair mask).
        """
        # Yield items one at a time to avoid holding raw copies twice.
        def data_generator():
            for idx in range(len(self)):
                yield self.__getitem__(idx)
        # Create the generator object.
        data_gen = data_generator()
        # Accumulate each field in its own list.
        masked_sentence, target, pairs, pair_lengths = [], [], [], []
        masked_label, pair_targets, char_seq = [], [], []

        # Collect fields item by item.
        for item in data_gen:
            masked_sentence.append(item[0])
            target.append(item[1])
            pairs.append(item[2])
            pair_lengths.append(item[3])
            masked_label.append(item[4])
            pair_targets.append(item[5])
            char_seq.append(item[6])

        # Pad variable-length fields to a common length across samples.
        masked_sentence = pad_sequence(masked_sentence, batch_first=True)
        target = pad_sequence(target, batch_first=True)
        pairs = pad_sequence(pairs, batch_first=True)
        # char_seq = pad_sequence(char_seq, batch_first=True)

        # Span length of each pair: (end - start - 1), clamped at 0 so
        # zero-padded pairs contribute no positions.
        length_indicator = (pairs[:, :, 2] - pairs[:, :, 1] - 1).clamp(min=0).view(-1, 1)

        bs, num_pairs, _ = pairs.size()
        # Broadcast position indices against span lengths; the last dim
        # (dim_in * 4 + position_embedding_size) presumably matches the
        # width of the pair representation downstream — TODO confirm.
        mask = torch.arange(self.max_pair_targets).unsqueeze(0).unsqueeze(2).expand(bs * num_pairs,
                                                                                    self.max_pair_targets,
                                                                                    self.dim_in * 4 + self.position_embedding_size)
        length_indicator_expanded = length_indicator.unsqueeze(2).expand(bs * num_pairs, self.max_pair_targets,
                                                                         self.dim_in * 4 +
                                                                         self.position_embedding_size)
        # True where the position index falls inside the span.
        binary_mask = mask < length_indicator_expanded
        length_indicator = length_indicator.view(-1)
        # length_indicator = length_indicator != 0

        pair_targets = pad_sequence(pair_targets, batch_first=True)
        pair_lengths = torch.Tensor(pair_lengths)

        # bs, num_pairs, _ = pairs.size()
        # Mask out pairs that were added by padding (index >= real count).
        seq_len = torch.arange(num_pairs)
        pair_lengths = pair_lengths.unsqueeze(-1)
        pairs_mask = seq_len < pair_lengths

        masked_label = torch.stack(masked_label)

        # pair_targets = pad_sequence(pair_targets, batch_first=True)
        return masked_sentence, target, pairs, length_indicator, binary_mask, pairs_mask, masked_label, pair_targets, char_seq


if __name__ == "__main__":
    # Load every split .npy trajectory file, visualize each one, then
    # build the masked dataset and print the tensors it produces.
    folder_path = r'D:\pythonProject\粒子滤波\ADS-B\拆分-npy'
    file_paths = [folder_path + '\\' + entry for entry in os.listdir(folder_path)]

    tra_data_all = []
    for file_path in file_paths:
        result = load_and_visualize_trajectory({"trajdata_path": file_path})
        tra_data_all.append(result['traj_data'])

    parameters2 = {
        "max_pair_targets": 15,
        'span_lower': 5,
        'span_upper': 10,
        'mask_factor': 0.3,
        'geometric_p': 0.5,
        'norm_file_path': r'D:/pythonProject/粒子滤波/Bert/result.csv',
    }

    dataset = TraceSet(tra_data_all, parameters2)
    (masked_sentence, target, pairs, length_indicator, binary_mask,
     pairs_mask, masked_label, pair_targets, char_seq) = dataset.get_all_data()

    # Print full frames/tensors instead of truncated previews.
    for option in ('display.max_rows', 'display.max_columns',
                   'display.width', 'display.max_colwidth'):
        pd.set_option(option, None)

    print("Masked sentence:", masked_sentence.shape, masked_sentence.dtype)
    print("Target:", target.shape, target.dtype)
    print("Pairs:", pairs.shape)
    print(pairs)
    print('length_indicator', length_indicator.size())
    print(length_indicator)
    print('binary_mask,', binary_mask.size())
    print(binary_mask)
    print("pairs_mask:", pairs_mask.shape)
    print(pairs_mask)
    print("mask_label", masked_label.shape)
    print(masked_label)
    print('char_seq', len(char_seq), type(char_seq[0]), char_seq[0].shape)
    print(char_seq[0][0])
    print(char_seq[1][0])
    print(char_seq[2][0])
    print(char_seq[3][0])


