import pandas as pd
import numpy as np
import os
from fastdtw import fastdtw
from matplotlib import pyplot as plt
from scipy.spatial.distance import euclidean
from concurrent.futures import ProcessPoolExecutor #这种方法可以利用多核处理器的优势，同时计算多对轨迹之间的距离。
from shapely.geometry import LineString
from sklearn.cluster import KMeans
from sklearn.impute import KNNImputer
from dtw import *


def process_csj(file_path):
    """Load one CSJ AIS text file (space-separated, no header) into a DataFrame.

    file_path: path to a .txt file whose fields are
               MMSI, Timestamp (YYYYMMDDHHMMSS), TimeDelta, LATITUDE,
               LONGITUDE, SOG, COG.
    Returns a DataFrame with columns
    ['MMSI', 'Timestamp', 'LONGITUDE', 'LATITUDE', 'SOG', 'COG'] and the
    TimeDelta field dropped.
    """
    field_names = ['MMSI', 'Timestamp', 'TimeDelta', 'LATITUDE', 'LONGITUDE', 'SOG', 'COG']
    frame = pd.read_csv(file_path, sep=' ', names=field_names, usecols=field_names)
    # Raw timestamps are compact YYYYMMDDHHMMSS integers.
    frame['Timestamp'] = pd.to_datetime(frame['Timestamp'], format='%Y%m%d%H%M%S')
    # Reorder columns and drop TimeDelta.
    return frame[['MMSI', 'Timestamp', 'LONGITUDE', 'LATITUDE', 'SOG', 'COG']]

def process_zs(file_path, mmsi, subdir):
    """Load one ZS AIS csv file into a normalized DataFrame.

    file_path: csv with header Date_Time (seconds), SOG, COG, LONGITUDE, LATITUDE.
    mmsi: vessel identifier (taken from the file name by the caller).
    subdir: day folder name; used to shift timestamps for the second day.
    Returns a DataFrame with columns
    ['MMSI', 'Timestamp', 'LONGITUDE', 'LATITUDE', 'SOG', 'COG'].
    """
    frame = pd.read_csv(file_path, usecols=['Date_Time', 'SOG', 'COG', 'LONGITUDE', 'LATITUDE'])
    # Files in the 2018-4-24 folder count seconds from the previous day,
    # so shift them by one full day.
    if subdir == "2018-4-24":
        frame['Date_Time'] = frame['Date_Time'] + 86400
    frame = frame.rename(columns={'Date_Time': 'Timestamp'})
    # Seconds-since 2018-04-23 -> absolute datetimes.
    frame['Timestamp'] = pd.to_datetime(frame['Timestamp'], unit='s', origin=pd.Timestamp('2018-04-23'))
    frame['MMSI'] = mmsi
    return frame[['MMSI', 'Timestamp', 'LONGITUDE', 'LATITUDE', 'SOG', 'COG']]

def process_cfd(file_path, mmsi, subdir):
    """Load one CFD AIS csv file (headerless) into a normalized DataFrame.

    file_path: headerless csv; the file name is the MMSI, so there is no
               MMSI column in the data itself.
    mmsi: vessel identifier (the file name, supplied by the caller).
    subdir: folder named like '2018-06-XX'; its day number anchors the
            per-file second offsets to an absolute date.
    Returns a DataFrame with columns
    ['MMSI', 'Timestamp', 'LONGITUDE', 'LATITUDE', 'SOG', 'COG'].
    """
    # Day of month -> seconds elapsed since 2018-06-01.
    day_of_month = int(subdir.split('-')[-1])
    base_seconds = (day_of_month - 1) * 86400
    raw_columns = ['Timestamp', 'TimeDelta', 'LONGITUDE', 'LATITUDE', 'SOG',  'COG', 'WhateverOG Idontknow' ]
    frame = pd.read_csv(file_path, names=raw_columns, usecols=['TimeDelta', 'LONGITUDE', 'LATITUDE', 'SOG', 'COG'])
    # Shift the in-day offsets by the folder's day, then convert to datetimes.
    frame['TimeDelta'] = frame['TimeDelta'] + base_seconds
    frame['Timestamp'] = pd.to_datetime(frame['TimeDelta'], unit='s', origin=pd.Timestamp('2018-06-01'))
    # Every row of this file belongs to the vessel named by the file.
    frame['MMSI'] = mmsi
    return frame[['MMSI', 'Timestamp', 'LONGITUDE', 'LATITUDE', 'SOG', 'COG']]

# DTW similarity between MMSIs: each vessel's track is treated as one time
# series with longitude, latitude and SOG as features. A subset of MMSIs can
# be analyzed first to validate the approach before running the full set.

# Douglas-Peucker compression. The key is tuning `tolerance`: too large and
# important waypoints get dropped, too small and nothing is compressed —
# visualize sample tracks at several tolerances to pick a value.
def downsample_trajectory(df, tolerance=0.0001):
    """Compress a trajectory with the Douglas-Peucker algorithm.

    df: trajectory DataFrame with 'LONGITUDE' and 'LATITUDE' columns.
    tolerance: DP simplification tolerance in degrees.
    Returns the subset of rows of `df` kept by the simplification. A
    trajectory with fewer than two points is returned unchanged (the previous
    implementation fell through and returned None, which crashed or silently
    dropped such vessels downstream).
    """
    points = df[['LONGITUDE', 'LATITUDE']].values
    # A LineString requires at least two coordinates.
    if len(points) < 2:
        return df
    line = LineString(points)
    simplified_line = line.simplify(tolerance, preserve_topology=False)
    simplified_points = np.array(simplified_line.coords)
    # Map each surviving vertex back to the first row of df with the same
    # coordinates, so the returned rows keep all original columns.
    indices = [df.index[(df['LONGITUDE'] == point[0]) & (df['LATITUDE'] == point[1])][0]
               for point in simplified_points]
    return df.loc[indices]

# fastdtw distance between two trajectories (exact dtw was too slow, so
# fastdtw is used — a little less accurate, much faster).
def calculate_dtw_distance(pair):
    """Return the fastdtw distance between a pair of trajectory DataFrames.

    pair: 2-tuple of DataFrames, each with LONGITUDE/LATITUDE/SOG/COG columns.
    Returns the accumulated DTW cost, or None when either trajectory is
    missing (e.g. filtered out earlier).
    """
    first, second = pair
    if first is None or second is None:
        # None marks an uncomputable pair; callers may treat it as invalid.
        return None
    feature_columns = ['LONGITUDE', 'LATITUDE', 'SOG', 'COG']
    series_a = first[feature_columns].to_numpy()
    series_b = second[feature_columns].to_numpy()
    # Each sample is a point in 4-D feature space; the pointwise cost is the
    # Euclidean distance between those 4-D points.
    distance, _ = fastdtw(series_a, series_b, dist=euclidean)

    # Exact dtw alternative, if compute budget allows:
    # distance, cost_matrix, acc_cost_matrix, path = dtw(series_a, series_b,
    #                                                    dist=lambda x, y: np.linalg.norm(x - y))
    return distance

# Parallel driver: the single-pair function above is fanned out over a
# process pool so several trajectory pairs are scored at once.
def compute_dtw_distances_concurrently(selected_mmsis, all_trajectories):
    """Compute DTW distances for every unordered pair of MMSIs in parallel.

    selected_mmsis: list of MMSI keys to compare.
    all_trajectories: dict mapping MMSI -> trajectory DataFrame.
    Returns a dict mapping (mmsi1, mmsi2) -> DTW distance (or None).
    """
    dtw_distances = {}
    with ProcessPoolExecutor(max_workers=8) as executor:
        pending = {}
        # Submit each unordered pair exactly once; slicing from i+1 avoids
        # self-comparisons and duplicate (a, b)/(b, a) work.
        for i, first in enumerate(selected_mmsis):
            for second in selected_mmsis[i + 1:]:
                task = executor.submit(
                    calculate_dtw_distance,
                    (all_trajectories[first], all_trajectories[second]))
                pending[task] = (first, second)
        # Collect the results, keyed by the MMSI pair.
        for task, mmsi_pair in pending.items():
            dtw_distances[mmsi_pair] = task.result()

    return dtw_distances

# Helper for plotting every trajectory currently loaded.
def visualize_all_trajectories(all_trajectories, output_file):
    """Plot every non-empty trajectory on one figure and save it to output_file."""
    plt.figure(figsize=(10, 6))
    for mmsi, trajectory in all_trajectories.items():
        if trajectory.empty:
            continue
        plt.plot(trajectory['LONGITUDE'], trajectory['LATITUDE'], label=f'MMSI {mmsi}')
    plt.xlabel('Longitude')
    plt.ylabel('Latitude')
    plt.title('All Trajectories')
    plt.legend()
    plt.savefig(output_file)
    plt.close()

# The next two helpers build per-trajectory statistics for K-means
# clustering; that approach has since been abandoned.
def extract_features(df):
    """Return summary statistics (speed stats + path length) for one trajectory."""
    speeds = df['SOG']
    return {
        'mean_speed': speeds.mean(),
        'max_speed': speeds.max(),
        'min_speed': speeds.min(),
        'std_dev_speed': speeds.std(),
        'trajectory_length': LineString(df[['LONGITUDE', 'LATITUDE']].values).length,
    }

def prepare_trajectories_features(all_trajectories):
    """Build the (feature matrix, MMSI labels) pair consumed by K-means."""
    feature_rows = []
    kept_mmsis = []
    for mmsi, trajectory in all_trajectories.items():
        # Empty trajectories carry no usable statistics.
        if trajectory.empty:
            continue
        stats = extract_features(trajectory)
        feature_rows.append(list(stats.values()))
        kept_mmsis.append(mmsi)
    return np.array(feature_rows), kept_mmsis

def perform_kmeans_clustering(data, all_trajectories, mmsi_labels, n_clusters=6, output_dir='./Data/DTW/CSJ/Trajectories'):
    """
    Run K-means clustering, plot each cluster's trajectories, and return the labels.

    data: per-trajectory feature matrix used for clustering.
    all_trajectories: all trajectory DataFrames, used for plotting.
    mmsi_labels: MMSI label for each row of `data`.
    n_clusters: number of clusters.
    output_dir: directory that receives one PNG per cluster.
    """
    # Fit and label in one step; fixed random_state keeps runs reproducible.
    labels = KMeans(n_clusters=n_clusters, random_state=0).fit_predict(data)

    # Make sure the output directory exists before saving figures.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # One figure per cluster, each containing all of its member trajectories.
    for cluster_id in range(n_clusters):
        plt.figure(figsize=(10, 6))
        members = [m for m, lab in zip(mmsi_labels, labels) if lab == cluster_id]
        for member in members:
            trajectory = all_trajectories[member]
            plt.plot(trajectory['LONGITUDE'], trajectory['LATITUDE'], label=f'MMSI {member}')
        plt.xlabel('Longitude')
        plt.ylabel('Latitude')
        plt.title(f'Cluster {cluster_id} Trajectories')
        # Keep the (potentially large) legend outside the axes.
        plt.legend(loc='upper left', bbox_to_anchor=(1,1))
        plt.savefig(f"{output_dir}/cluster_{cluster_id}_trajectories.png", bbox_inches='tight')
        plt.close()

    return labels

# Results were poor: after discussion the conclusion was that the raw data
# needs better preprocessing. This cluster+length-threshold filter did not
# work well either, so it is unused — the cleanup was ultimately done by
# manual screening instead.
def filter_short_trajectories(all_trajectories, labels, mmsi_labels, fraction=0.25):
    """Drop trajectories shorter than `fraction` of their cluster's mean length."""
    # Collect the point count of every trajectory, grouped by cluster.
    cluster_lengths = {cluster: [] for cluster in range(max(labels) + 1)}
    for cluster, mmsi in zip(labels, mmsi_labels):
        cluster_lengths[cluster].append(len(all_trajectories[mmsi]))

    # Per-cluster minimum length: fraction of the cluster's mean length.
    thresholds = {cluster: np.mean(lengths) * fraction
                  for cluster, lengths in cluster_lengths.items()}

    # Keep only trajectories that reach their cluster's threshold.
    return {mmsi: all_trajectories[mmsi]
            for cluster, mmsi in zip(labels, mmsi_labels)
            if len(all_trajectories[mmsi]) >= thresholds[cluster]}

# Resampling + point selection. This function IS in active use — it is the
# project's key statistical preprocessing step.
def resample_and_select(df, resample_period='1S', selection_period='5S'):
    """Resample a trajectory onto a uniform time grid and keep evenly spaced points.

    df: trajectory DataFrame with a 'Timestamp' column. NOTE: mutated — its
        index is replaced by the timestamps.
    resample_period: fine grid used for interpolation (default: one second).
    selection_period: spacing of the points kept from the resampled series
        (default: five seconds); must end in 's'/'S' seconds units, since the
        numeric part is parsed with `selection_period[:-1]`.
    Returns the selected rows with a fresh RangeIndex.
    """
    # Index by time so .resample() can operate.
    df.index = df['Timestamp']
    # Upsample to the fine grid, then fill the gaps by linear interpolation.
    # NOTE(review): the 'Timestamp' column is still present at this point; how
    # .mean()/.interpolate() treat non-numeric columns is pandas-version
    # dependent — confirm against the pandas version actually in use.
    resampled = df.resample(resample_period).mean().interpolate()

    # Starting from the earliest timestamp, find the first aligned pick time.
    min_time = df['Timestamp'].min()
    offset = pd.Timedelta(seconds=min_time.second % int(selection_period[:-1]))  # seconds past the previous aligned boundary
    # NOTE(review): when min_time is already aligned (offset == 0) this still
    # jumps one full period ahead, skipping the first aligned sample — confirm
    # this is intended.
    selection_start_time = min_time + pd.Timedelta(seconds=int(selection_period[:-1]) - offset.seconds)  # snap forward to the next 5-second boundary

    # From the corrected start time on, keep one point per selection_period.
    selected = resampled[selection_start_time:].resample(selection_period).first().dropna()
    return selected.reset_index(drop=True)

# def resample_and_select(df, resample_period='1S', selection_period='5S'):
#     """对DataFrame进行重采样和选点"""
#     # 将时间设置为索引
#     df.index = df['Timestamp']
#     # 从DataFrame中移除时间列
#     df = df.drop(columns=['Timestamp'])
#     # 重采样
#     resampled = df.resample(resample_period).mean()
#
#     # 使用KNN填充缺失值
#     imputer = KNNImputer(n_neighbors=5)  # 设置KNN插值的邻居数
#     interpolated = pd.DataFrame(imputer.fit_transform(resampled), columns=resampled.columns, index=resampled.index)
#
#     # 从数据的最小时间开始计算，确定第一个选取点的时间
#     min_time = df.index.min()
#     offset = pd.Timedelta(seconds=min_time.second % int(selection_period[:-1]))  # 计算偏移量
#     selection_start_time = min_time + pd.Timedelta(seconds=int(selection_period[:-1]) - offset.seconds)  # 校正到下一个最近的5秒处
#
#     # 以校正后的起始时间为基准，每隔selection_period选择一个点
#     selected = interpolated[selection_start_time:].resample(selection_period).first().dropna()
#     return selected.reset_index(drop=True)

def visualize_trajectory(df):
    """Show one trajectory in 2-D (lon/lat) and 3-D (lon/lat/SOG) side by side."""
    fig, axes = plt.subplots(1, 2, figsize=(12, 6))

    # 2-D view.
    axes[0].plot(df['LONGITUDE'], df['LATITUDE'], marker='o', markersize=2, linestyle='-')
    axes[0].set_title('2D Trajectory')
    axes[0].set_xlabel('Longitude')
    axes[0].set_ylabel('Latitude')

    # 3-D view, with speed on the vertical axis.
    axes3d = plt.subplot(122, projection='3d')
    axes3d.plot3D(df['LONGITUDE'], df['LATITUDE'], df['SOG'], marker='o', markersize=2)
    axes3d.set_title('3D Trajectory (Including Speed)')
    axes3d.set_xlabel('Longitude')
    axes3d.set_ylabel('Latitude')
    axes3d.set_zlabel('Speed')

    plt.tight_layout()
    plt.show()

# def main_clustering_and_filtering(all_trajectories):
#     # 准备数据
#     data, mmsi_labels = prepare_trajectories_features(all_trajectories)
#
#     # 执行聚类
#     labels = perform_kmeans_clustering(data, all_trajectories, mmsi_labels)
#
#     # 过滤短轨迹
#     filtered_trajectories = filter_short_trajectories(all_trajectories, labels, mmsi_labels)
#
#     return filtered_trajectories

# After manual screening identified the trajectories to delete, their MMSIs
# are collected and removed here. (Earlier debug/test statements have been
# removed — they had no effect.)
def remove_specified_mmsis(all_trajectories, mmsis_to_remove):
    """
    Remove the specified MMSIs from the trajectory dictionary, in place.

    The dictionary is keyed by integer MMSIs while the hand-curated removal
    list is written as strings, so every entry is normalized to int before
    lookup. Missing MMSIs are reported rather than raising.

    all_trajectories (dict): MMSI -> trajectory DataFrame; mutated in place.
    mmsis_to_remove (iterable): MMSI values (str or int) to delete.
    """
    print(f"Initial count: {len(all_trajectories)}")  # size before removal
    removed_count = 0
    for mmsi in (int(m) for m in mmsis_to_remove):
        if mmsi in all_trajectories:
            del all_trajectories[mmsi]
            removed_count += 1
        else:
            print(f"MMSI {mmsi} not found in the dataset.")  # report misses
    print(f"Removed {removed_count} trajectories from the dataset.")
    print(f"Final count: {len(all_trajectories)}")  # size after removal

# Last-resort approach: carve out the eight sailing patterns with geographic
# fences on latitude/longitude. Scientifically this should come from real
# clustering instead — achieving that is the hardest remaining goal of the
# project.
def visualize_trajectories_by_geofence_0(all_trajectories, lat_min, lat_max, lon_min, lon_max, output_path):
    """
    Plot every trajectory whose start AND end points fall inside the given
    lat/lon box, save the figure to output_path, and write the matching MMSIs
    to Data/DTW/CSJ/New_Human_Trajectories/cluster_0.csv.

    all_trajectories (dict): MMSI -> DataFrame with 'LATITUDE'/'LONGITUDE'.
    lat_min/lat_max (float): latitude bounds of the box.
    lon_min/lon_max (float): longitude bounds of the box.
    output_path (str): PNG destination for the plot.
    """
    plt.figure(figsize=(6, 6))
    saved_mmsis = []

    # Exclude trajectories confined to the 122.72-122.82 longitude band,
    # then those confined to the 37.5-37.8 latitude band.
    candidates = {
        mmsi: df for mmsi, df in all_trajectories.items()
        if not df['LONGITUDE'].between(122.72, 122.82).all()
    }
    candidates = {
        mmsi: df for mmsi, df in candidates.items()
        if not df['LATITUDE'].between(37.5, 37.8).all()
    }

    for mmsi, df in candidates.items():
        start, end = df.iloc[0], df.iloc[-1]
        # Both endpoints must lie inside the requested box.
        if (lat_min <= start['LATITUDE'] <= lat_max
                and lon_min <= start['LONGITUDE'] <= lon_max
                and lat_min <= end['LATITUDE'] <= lat_max
                and lon_min <= end['LONGITUDE'] <= lon_max):
            plt.plot(df['LONGITUDE'], df['LATITUDE'], marker='o', linestyle='-', markersize=2,
                     label=f'MMSI {mmsi}')
            saved_mmsis.append(mmsi)

    plt.title('Trajectories within Specified Geographical Boundaries')
    plt.xlabel('Longitude')
    plt.ylabel('Latitude')
    plt.legend()
    plt.grid(True)
    plt.savefig(output_path)
    plt.close()

    pd.DataFrame(saved_mmsis, columns=['MMSI']).to_csv(
        'Data/DTW/CSJ/New_Human_Trajectories/cluster_0.csv', index=False)
# Same approach as above, applied to a different region.
def visualize_trajectories_by_geofence_1(all_trajectories, lat_min, lat_max, lon_min, lon_max, output_path):
    """
    Plot every trajectory whose start AND end points fall inside the given
    lat/lon box, after several geographic exclusion filters, save the figure
    to output_path, and write the matching MMSIs to
    Data/DTW/CSJ/New_Human_Trajectories/cluster_6.csv.

    all_trajectories (dict): MMSI -> DataFrame with 'LATITUDE'/'LONGITUDE'.
    lat_min/lat_max (float): latitude bounds of the box.
    lon_min/lon_max (float): longitude bounds of the box.
    output_path (str): PNG destination for the plot.
    """
    plt.figure(figsize=(10, 6))
    saved_mmsis = []

    # Successive exclusion filters: drop trajectories confined to certain
    # bands, and finally any trajectory that even touches the 37.2-37.44
    # latitude band (note the .any() on the last filter).
    candidates = {
        mmsi: df for mmsi, df in all_trajectories.items()
        if not df['LATITUDE'].between(37.55, 37.8).all()
    }
    candidates = {
        mmsi: df for mmsi, df in candidates.items()
        if not df['LONGITUDE'].between(122.725, 123.2).all()
    }
    candidates = {
        mmsi: df for mmsi, df in candidates.items()
        if not df['LONGITUDE'].between(122.6, 123.15).all()
    }
    candidates = {
        mmsi: df for mmsi, df in candidates.items()
        if not df['LATITUDE'].between(37.2, 37.44).any()
    }

    for mmsi, df in candidates.items():
        start, end = df.iloc[0], df.iloc[-1]
        # Both endpoints must lie inside the requested box.
        if (lat_min <= start['LATITUDE'] <= lat_max
                and lon_min <= start['LONGITUDE'] <= lon_max
                and lat_min <= end['LATITUDE'] <= lat_max
                and lon_min <= end['LONGITUDE'] <= lon_max):
            plt.plot(df['LONGITUDE'], df['LATITUDE'], marker='o', linestyle='-', markersize=2,
                     label=f'MMSI {mmsi}')
            saved_mmsis.append(mmsi)

    plt.title('Trajectories within Specified Geographical Boundaries')
    plt.xlabel('Longitude')
    plt.ylabel('Latitude')
    plt.legend()
    plt.grid(True)
    plt.savefig(output_path)
    plt.close()

    pd.DataFrame(saved_mmsis, columns=['MMSI']).to_csv(
        'Data/DTW/CSJ/New_Human_Trajectories/cluster_6.csv', index=False)
# Same approach again for yet another region; additionally splits the matches
# into two subclasses by the longitude of each trajectory's last point.
def visualize_trajectories_by_geofence_012(all_trajectories, lat_min, lat_max, lon_min, lon_max, output_path):
    """
    Split boundary-matching trajectories into two subclasses by the last
    point's longitude (threshold 122.7525), plot both on one figure, and save
    the figure plus one MMSI CSV per subclass.

    all_trajectories (dict): MMSI -> DataFrame with 'LATITUDE'/'LONGITUDE'.
    lat_min/lat_max (float): latitude bounds of the box.
    lon_min/lon_max (float): longitude bounds of the box.
    output_path (str): base path; '.png' and '_class_0_*.csv' are appended.
    """
    plt.figure(figsize=(4, 6))
    class_0_1_mmsis = []  # last point's longitude > 122.7525 (drawn in blue)
    class_0_2_mmsis = []  # last point's longitude <= 122.7525 (drawn in red)

    # Same exclusion bands as the cluster-0 fence.
    candidates = {
        mmsi: df for mmsi, df in all_trajectories.items()
        if not df['LONGITUDE'].between(122.72, 122.82).all()
    }
    candidates = {
        mmsi: df for mmsi, df in candidates.items()
        if not df['LATITUDE'].between(37.5, 37.8).all()
    }

    for mmsi, df in candidates.items():
        start, end = df.iloc[0], df.iloc[-1]
        # Both endpoints must lie inside the requested box.
        if not (lat_min <= start['LATITUDE'] <= lat_max
                and lon_min <= start['LONGITUDE'] <= lon_max
                and lat_min <= end['LATITUDE'] <= lat_max
                and lon_min <= end['LONGITUDE'] <= lon_max):
            continue
        if end['LONGITUDE'] > 122.752500:
            plt.plot(df['LONGITUDE'], df['LATITUDE'], marker='o', linestyle='-', markersize=2,
                     label='Class 0_1', color='blue')
            class_0_1_mmsis.append(mmsi)
        else:
            plt.plot(df['LONGITUDE'], df['LATITUDE'], marker='o', linestyle='-', markersize=2,
                     label='Class 0_2', color='red')
            class_0_2_mmsis.append(mmsi)

    plt.title('Trajectories within Specified Geographical Boundaries')
    plt.xlabel('Longitude')
    plt.ylabel('Latitude')
    plt.legend()
    plt.grid(True)
    plt.savefig(f"{output_path}.png")
    plt.close()

    # One MMSI list per subclass.
    pd.DataFrame(class_0_1_mmsis, columns=['MMSI']).to_csv(f"{output_path}_class_0_1.csv", index=False)
    pd.DataFrame(class_0_2_mmsis, columns=['MMSI']).to_csv(f"{output_path}_class_0_2.csv", index=False)
# Same splitting logic, but driven by the MMSI list saved in cluster_0.csv.
def visualize_trajectories_by_geofence_01(all_trajectories, output_path):
    """
    Re-plot the trajectories listed in cluster_0.csv, split into two
    subclasses by the last point's longitude (threshold 122.7525), and save
    the figure plus one MMSI CSV per subclass.

    all_trajectories (dict): MMSI -> DataFrame with 'LATITUDE'/'LONGITUDE'.
    output_path (str): PNG destination; also used as the CSV base path.
    """
    # MMSIs selected by the earlier cluster-0 geofence run.
    cluster_df = pd.read_csv('Data/DTW/CSJ/New_Human_Trajectories/cluster_0.csv')
    mmsis = cluster_df['MMSI'].tolist()

    plt.figure(figsize=(4, 6))
    class_0_1_mmsis = []  # last point's longitude > 122.7525 (drawn in blue)
    class_0_2_mmsis = []  # last point's longitude <= 122.7525 (drawn in red)

    for mmsi in mmsis:
        trajectory = all_trajectories.get(mmsi)
        if trajectory is None or trajectory.empty:
            continue
        if trajectory.iloc[-1]['LONGITUDE'] > 122.752500:
            plt.plot(trajectory['LONGITUDE'], trajectory['LATITUDE'], linestyle='-', markersize=2, color='blue')
            class_0_1_mmsis.append(mmsi)
        else:
            plt.plot(trajectory['LONGITUDE'], trajectory['LATITUDE'], linestyle='-', markersize=2, color='red')
            class_0_2_mmsis.append(mmsi)

    plt.title('Trajectories Classified by Last Longitude Point')
    plt.xlabel('Longitude')
    plt.ylabel('Latitude')
    plt.grid(True)
    plt.savefig(output_path)
    plt.close()

    # One MMSI list per subclass.
    pd.DataFrame(class_0_1_mmsis, columns=['MMSI']).to_csv(f"{output_path}_class_0_1.csv", index=False)
    pd.DataFrame(class_0_2_mmsis, columns=['MMSI']).to_csv(f"{output_path}_class_0_2.csv", index=False)
# Visualize the eight derived sailing patterns, one color per cluster file.
def visualize_clusters_from_files(all_trajectories, folder_path, output_path):
    """
    Plot the trajectories of every cluster CSV in folder_path on one figure,
    one color per cluster, and save the result to output_path.

    all_trajectories (dict): MMSI -> DataFrame with 'LATITUDE'/'LONGITUDE'.
    folder_path (str): folder of per-cluster CSVs, each with an 'MMSI' column.
    output_path (str): PNG destination for the combined plot.
    """
    plt.figure(figsize=(10, 6))
    palette = ['#A56B45', '#CDBB71', '#9E5648', '#BD8A99', '#6A8864', '#B7C685', '#4E606E', '#B5BCC2']

    csv_files = [name for name in os.listdir(folder_path) if name.endswith('.csv')]
    for position, file_name in enumerate(csv_files):
        mmsis = pd.read_csv(os.path.join(folder_path, file_name))['MMSI'].tolist()
        # Wrap around if there are more cluster files than palette colors.
        color = palette[position % len(palette)]
        for mmsi in mmsis:
            trajectory = all_trajectories.get(mmsi)
            if trajectory is not None and not trajectory.empty:
                plt.plot(trajectory['LONGITUDE'], trajectory['LATITUDE'], linestyle='-', markersize=2,
                         color=color)

    plt.title('Visualized Trajectories by Clusters')
    plt.xlabel('Longitude')
    plt.ylabel('Latitude')
    plt.grid(True)
    plt.savefig(output_path)
    plt.close()

def main():
    """Entry point: load, clean, compress, and visualize AIS trajectories.

    Pipeline (several stages are commented out because they were one-off
    steps whose results are already cached on disk):
      1. Parse the raw AIS files of each water area into per-MMSI DataFrames.
      2. Merge and time-sort each vessel's records.
      3. Remove the manually blacklisted MMSIs.
      4. Compress every trajectory with the Douglas-Peucker algorithm.
      5. Visualize the manually derived cluster files.
    """
    dirs = {
        './Data/CSJ': process_csj,  # Only the CSJ area is handled for now; other areas still need manual preprocessing to fit the downstream code (not hard)
        # './Data/ZS': process_zs,
        # './Data/CFD': process_cfd
    }
    all_trajectories = {}

    for root_dir, process_func in dirs.items():
        if root_dir.endswith('CSJ'):  # CSJ layout: a flat folder of .txt files
            for file_name in os.listdir(root_dir):
                if file_name.endswith('.txt'):
                    file_path = os.path.join(root_dir, file_name)
                    df = process_csj(file_path)
                    # Key all_trajectories by the MMSI stored inside the file (not
                    # the file name), taken from the first data row.
                    if not df.empty:
                        # MMSI is assumed to be the first column of the DataFrame
                        mmsi = df.iloc[0]['MMSI']
                        all_trajectories.setdefault(mmsi, []).append(df)

        elif root_dir.endswith('CFD'):
            for subdir in os.listdir(root_dir):
                subdir_path = os.path.join(root_dir, subdir)
                if os.path.isdir(subdir_path):
                    for file_name in os.listdir(subdir_path):
                        if file_name.endswith('.csv'):
                            file_path = os.path.join(subdir_path, file_name)
                            mmsi = file_name.split('.')[0]
                            df = process_cfd(file_path, mmsi, subdir)
                            all_trajectories.setdefault(mmsi, []).append(df)

        else:  # ZS layout: one sub-folder per day
            for subdir in os.listdir(root_dir):
                subdir_path = os.path.join(root_dir, subdir)
                if os.path.isdir(subdir_path):
                    for file_name in os.listdir(subdir_path):
                        if file_name.endswith('.csv'):
                            file_path = os.path.join(subdir_path, file_name)
                            mmsi = file_name.split('.')[0]
                            df = process_func(file_path, mmsi, subdir)
                            all_trajectories.setdefault(mmsi, []).append(df)

    # Merge the DataFrames that share an MMSI and order them in time.
    # NOTE(review): sort_values runs after reset_index, so the integer index of
    # the merged frame is left unsorted — confirm downstream code does not rely
    # on positional order of the index.
    for mmsi, dfs in all_trajectories.items():
        all_trajectories[mmsi] = pd.concat(dfs).reset_index(drop=True).sort_values(by='Timestamp')

    # Visualize all trajectories
    # visualize_all_trajectories(all_trajectories, './Data/DTW/CSJ/Trajectories/all_trajectories_plot.png')

    # # Filter short trajectories
    # all_trajectories = main_clustering_and_filtering(all_trajectories)

    # MMSIs that manual screening marked for removal
    mmsis_to_remove = [
        '412750680', '412752690', '412755580', '412760760', '412760730', '412760710', '412761180', '412760920',
        '412761310', '412762850', '412762120', '412762270', '412762960', '412764160', '412765000', '412764150',
        '412766460', '412766750', '412766390', '412765690', '412765670', '412765470', '412766810', '412767240',
        '412848000', '412898000', '412914000', '413003000', '210933000', '413021140', '413037000', '413058000',
        '413065000', '255805566', '413111000', '413130000', '413126000', '413169000', '413185000', '413170000',
        '413189000', '413200380', '413201770', '413201050', '413200880', '413202970', '413203080', '413203840',
        '413204350', '413203980', '211777500', '413204020', '413249870', '413270330', '413254000', '413270770',
        '413271010', '413271210', '413270860', '413271090', '413271250', '413271420', '413271990', '413272940',
        '413272980', '413272770', '413273230', '413300180', '413273240', '413301460', '413301850', '413301420',
        '413302000', '413302230', '413302320', '413303030', '413303060', '413303370', '413303720', '413303990',
        '413304520', '413304040', '413306080', '413322820', '413320390', '413322930', '413323670', '259409000',
        '413324150', '413324230', '413324020', '413324580', '413324740', '413324850', '413324840', '413325020',
        '413324930', '413325480', '413326010', '413325840', '413325920', '413326110', '413326330', '413326570',
        '271042511', '413327340', '413327220', '271042638', '413329010', '413330120', '413330220', '413332020',
        '413331780', '413332160', '413349000', '413350370', '413352060', '413350440', '413352380', '413352520',
        '413355670', '413355590', '413355890', '413356240', '413356830', '413356720', '413356840', '413357940',
        '413358570', '413359170', '413359620', '413359710', '413361160', '413360730', '413361090', '413362430',
        '413363310', '413363710', '413364530', '413365710', '413368380', '413368610', '413367920', '413369440',
        '413369230', '413370020', '413373220', '413371110', '413370000', '413375820', '413376010', '413376610',
        '413376570', '413376930', '413377440', '413377170', '413377720', '413378160', '413378790', '413378250',
        '413379630', '413379640', '413379890', '413380220', '413380150', '413400620', '413401270', '413402310',
        '413405370', '413405560', '413405160', '413406690', '413406830', '413408230', '413412990', '413411330',
        '413414090', '413414040', '413414530', '413417630', '413422620', '413422660', '413422310', '413425240',
        '413427020', '413379570', '413432000', '413432110', '413431350', '413432920', '413433740', '413434530',
        '413435280', '413435310', '413435690', '413436560', '413436820', '312016000', '312072000', '312209000',
        '312415000', '312437000', '312521000', '312771000', '312897000', '312987000', '339300580', '351153000',
        '351743000', '351797000', '351619000', '352773000', '353187000', '353924000', '354709000', '355652000',
        '356088000', '356237000', '357573000', '357379000', '357856000', '371277000', '371453260', '229127000',
        '372229000', '371800000', '373672000', '229387000', '374835000', '229531000', '412004000', '412018000',
        '412028000', '412079000', '412078000', '229602000', '412203480', '412206350', '412207000', '412208000',
        '412208780', '412209130', '412209280', '412272040', '412274000', '412300360', '412302050', '412302240',
        '412302940', '412302420', '412328830', '412328230', '412326550', '412328810', '412330850', '412331340',
        '412332070', '412350620', '412351340', '412352210', '412355160', '412353000', '412355880', '412355680',
        '412355810', '412356350', '412356480', '412356540', '412361460', '412358980', '412359190', '412362910',
        '412371680', '412376430', '412378740', '412378820', '412378560', '412376580', '412379260', '412380160',
        '412381030', '412401350', '412411350', '412402730', '412417160', '412415630', '412415960', '412419230',
        '412420770', '412422580', '412424350', '412428930', '412428030', '412427630', '412432190', '412432690',
        '412434740', '412436270', '412437440', '412437620', '412439580', '412449920', '412452000', '241256000',
        '412458440', '412457460', '412457440', '412459870', '412470260', '412469310', '412464970', '412473450',
        '412499000', '412501710', '412502330', '412502620', '412502540', '412502780', '412521070', '412523890',
        '412554120', '412554490', '412703760', '412702210', '412658000', '412703360', '412705880', '412567000',
        '412705340', '412701450', '247276200', '412704040', '412704420', '248278000', '412570370', '412741000',
        '412592260', '412705160', '412704720', '412633000', '412613000', '412704670', '412735000', '412733000',
        '247360700', '412714000'
    ]

    # Remove the blacklisted MMSIs
    remove_specified_mmsis(all_trajectories, mmsis_to_remove)

    # visualize_all_trajectories(all_trajectories, './Data/DTW/CSJ/Trajectories/all_trajectories_plot_after_preprocessing.png')

    # Sample output
    # for mmsi, df in list(all_trajectories.items())[:1]:
    #     print(f"MMSI: {mmsi}, Data: \n{df.head(200)}\n")

    sorted_mmsis = sorted(all_trajectories.keys(), key=lambda x: int(x))
    selected_mmsis = sorted_mmsis  # the MMSIs chosen for analysis (currently: all of them)

    # # Interpolate and select points for every trajectory. This only needs to
    # # run once and has already been executed, hence commented out.
    # for mmsi in selected_mmsis:
    #     df = all_trajectories[mmsi]
    #     df = resample_and_select(df)
    #     all_trajectories[mmsi] = df

    # for mmsi in list(all_trajectories.keys())[:5]:  # first five MMSIs
    #     df = all_trajectories[mmsi]
    #     print(f"MMSI: {mmsi}, DataFrame Head:\n{df.head()}\n")

    # # Visualize the first MMSI's trajectory to compare before/after the DP
    # # compression (the report has side-by-side pictures to go with this code).
    # if selected_mmsis:
    #     visualize_trajectory(all_trajectories[selected_mmsis[0]])

    # Compress every trajectory with the Douglas-Peucker algorithm
    for mmsi in selected_mmsis:
        df = all_trajectories[mmsi]
        # print(f"MMSI: {mmsi}, Data:{df.head(20)}")
        # print(df[['LONGITUDE', 'LATITUDE']].values)
        all_trajectories[mmsi] = downsample_trajectory(df, tolerance=0.0000005)

    # # Visualize the first MMSI's trajectory after DP compression
    # if selected_mmsis:
    #     visualize_trajectory(all_trajectories[selected_mmsis[0]])

    # Identify the different sailing patterns by lat/lon ranges
    # visualize_trajectories_by_geofence_01(all_trajectories, output_path='Data/DTW/CSJ/New_Human_Trajectories/trajectories_area_01.png')

    # Combined plot of all clusters
    visualize_clusters_from_files(all_trajectories, "Data/DTW/CSJ/New_Human_Trajectories/", "Data/DTW/CSJ/New_Human_Trajectories/visualization.png")

    # The DTW-distance computation below has already been run and its results
    # are cached; the active code above is mainly the geographic
    # visualization, but these steps were all necessary earlier.

    # dtw_distances = compute_dtw_distances_concurrently(selected_mmsis, all_trajectories)
    #
    # # Records of the DTW distances
    # dtw_distances_records = []
    #
    # # Collect the DTW distance of every selected MMSI pair
    # for key, distance in dtw_distances.items():
    #     dtw_distances_records.append({
    #         'MMSI1': key[0],
    #         'MMSI2': key[1],
    #         'DTW_Distance': distance
    #     })
    #
    # # Convert the records to a DataFrame
    # dtw_distances_df = pd.DataFrame(dtw_distances_records)
    #
    # # Show the first records
    # print(dtw_distances_df.head())
    #
    # # Save to a CSV file
    # dtw_distances_df.to_csv('./Data/DTW/CSJ/dtw_distances3.csv', index=False)

    # Large DTW values are expected: DTW accumulates pointwise differences over
    # long sequences, so big numbers reflect large relative differences between
    # trajectories, not an error in the method.

if __name__ == "__main__":
    main()

