import logging
import os
import random

import folium
import mysql.connector
import numpy as np
import pandas as pd
from geopy.distance import great_circle
from simplification.cutil import simplify_coords_idx
from sklearn.cluster import DBSCAN

# Logging configuration: timestamped INFO-level messages on the root logger.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)

# Database connection settings for mysql.connector.connect(**DB_CONFIG).
# NOTE(review): credentials are hard-coded in source — consider moving them
# to environment variables or a config file before deployment.
DB_CONFIG = {
    'user': 'root',
    'password': '123456',
    'host': '192.168.124.8',
    'database': 'vanna_ai',
    'port': 3333
}

# Tuning parameters for trajectory compression, clustering and anomaly detection.
ALGORITHM_CONFIG = {
    'cluster_radius_km': 2000,  # DBSCAN neighborhood radius (km) — NOTE(review): 2000 km is very large; confirm intent
    'min_cluster_points': 1000,  # DBSCAN min_samples (minimum points per cluster)
    'anomaly_threshold_km': 2500,  # distance from cluster centroid beyond which a point is anomalous (km)
    'simplify_tolerance': 0.0001,  # tolerance for trajectory simplification (degrees)
    'earth_radius_km': 6371  # mean Earth radius (km), used to convert km to radians for haversine
}

class AviationAnalyzer:
    """Pipeline for civil-aircraft trajectory analysis.

    For each aircraft the pipeline: fetches raw trace points from MySQL,
    compresses the trajectory, clusters it with haversine DBSCAN, flags
    anomalous points, stores anomaly periods, and stores a sample of
    "usual route" points. Designed to be used as a context manager so the
    database connection is always released.
    """

    def __init__(self, db_config, algorithm_config):
        """Store configuration; no connection is opened until connect_db().

        Args:
            db_config: kwargs dict for mysql.connector.connect().
            algorithm_config: tuning parameters (see ALGORITHM_CONFIG keys).
        """
        self.db_config = db_config
        self.algorithm_config = algorithm_config
        self.connection = None
        self.cursor = None

    def __enter__(self):
        """Open the database connection on context entry."""
        self.connect_db()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close the database connection on context exit."""
        self.close_db()

    def connect_db(self):
        """Establish the MySQL connection and cursor.

        Raises:
            Exception: any connector error is logged and re-raised.
        """
        try:
            self.connection = mysql.connector.connect(**self.db_config)
            self.cursor = self.connection.cursor()
            logging.info("数据库连接成功")
        except Exception as e:
            logging.error(f"数据库连接失败: {str(e)}")
            raise

    def close_db(self):
        """Close cursor and connection if they were opened."""
        if self.cursor:
            self.cursor.close()
        if self.connection:
            self.connection.close()
        logging.info("数据库连接已关闭")

    def get_aircraft_list(self):
        """Return all distinct aircraft registration ids, or [] on error."""
        query = "SELECT DISTINCT reg_id FROM civil_aircraft"
        try:
            self.cursor.execute(query)
            return [row[0] for row in self.cursor.fetchall()]
        except Exception as e:
            logging.error(f"查询航空器列表失败: {str(e)}")
            return []

    def fetch_trajectory_data(self, reg_id):
        """Fetch up to 100000 trace points for one aircraft, ordered by time.

        Args:
            reg_id: aircraft registration id.

        Returns:
            DataFrame with columns [timestamp, longitude, latitude];
            empty (same columns) when there is no data or on query error.
        """
        # Defined before the try so the except path can still build an empty
        # frame (previously this raised NameError when the query itself failed,
        # because `columns` was only assigned after cursor.execute()).
        columns = ['timestamp', 'longitude', 'latitude']
        query = """
            SELECT timestamp , longitude, latitude 
            FROM civil_aircraft_trace 
            WHERE reg_id = %s 
            ORDER BY timestamp
            LIMIT 100000
        """
        try:
            self.cursor.execute(query, (reg_id,))
            traces = self.cursor.fetchall()
            logging.debug(f"查询到 {reg_id} 的轨迹数据: {traces}")
            if not traces:
                logging.warning(f"没有找到 {reg_id} 的轨迹数据")
                return pd.DataFrame(columns=columns)
            return pd.DataFrame(traces, columns=columns)
        except Exception as e:
            logging.error(f"获取{reg_id}轨迹数据失败: {str(e)}")
            return pd.DataFrame(columns=columns)

    @staticmethod
    def preprocess_data(df, tolerance):
        """Convert epoch-second timestamps to datetime and compress the track.

        Args:
            df: DataFrame with [timestamp, longitude, latitude] columns;
                timestamp is assumed to be epoch seconds.
            tolerance: simplification tolerance passed to simplify_coords_idx.

        Returns:
            DataFrame reduced to the simplified point set (index reset),
            or the input unchanged when it has no points.
        """
        # Seconds-since-epoch -> pandas datetime.
        df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')

        points = df[['longitude', 'latitude']].to_numpy(dtype=np.float64)

        # simplify_coords_idx requires a C-contiguous buffer.
        if not points.flags['C_CONTIGUOUS']:
            points = np.ascontiguousarray(points)

        if len(points) > 0:
            indices = simplify_coords_idx(points, tolerance)
            return df.iloc[indices].reset_index(drop=True)
        return df

    def perform_clustering(self, df):
        """Cluster trajectory points with haversine DBSCAN.

        Adds a 'cluster' column to df (label -1 = noise) and returns it.
        """
        # Haversine metric expects coordinates in radians, (lat, lon) order.
        coords_rad = np.radians(df[['latitude', 'longitude']])

        # eps in radians = radius_km / earth_radius_km.
        epsilon = self.algorithm_config['cluster_radius_km'] / self.algorithm_config['earth_radius_km']

        dbscan = DBSCAN(
            eps=epsilon,
            min_samples=self.algorithm_config['min_cluster_points'],
            metric='haversine',
            algorithm='ball_tree'
        )
        df['cluster'] = dbscan.fit_predict(coords_rad)
        return df

    def detect_anomalies(self, df):
        """Two-pass anomaly flagging; adds an 'anomaly' column (0/1).

        Pass 1 marks DBSCAN noise points; pass 2 marks cluster members
        farther than anomaly_threshold_km from their cluster centroid.
        """
        df['anomaly'] = 0

        # Pass 1: points DBSCAN labeled as noise.
        df.loc[df['cluster'] == -1, 'anomaly'] = 1

        # Pass 2: points that stray too far from their own cluster center.
        for cluster_id in df['cluster'].unique():
            if cluster_id == -1:
                continue

            cluster_points = df[df['cluster'] == cluster_id]
            centroid = (
                cluster_points['latitude'].mean(),
                cluster_points['longitude'].mean()
            )

            # Great-circle distance (km) of each member to the centroid.
            distances = cluster_points.apply(
                lambda x: great_circle(
                    (x['latitude'], x['longitude']), centroid
                ).kilometers,
                axis=1
            )

            anomaly_mask = (distances > self.algorithm_config['anomaly_threshold_km'])
            df.loc[anomaly_mask.index[anomaly_mask], 'anomaly'] = 1

        return df

    @staticmethod
    def _extract_anomaly_periods(reg_id, df):
        """Collapse consecutive anomalous rows into [start, end] periods.

        Iterates positionally and remembers the previous row's timestamp,
        so the end of a period is always the chronologically preceding
        point. (The old code indexed `iloc` with `iterrows()` labels,
        which is wrong whenever labels differ from positions after
        sort_values.)

        Args:
            reg_id: aircraft registration id copied into each record.
            df: DataFrame with 'timestamp' (datetime) and 'anomaly' (0/1).

        Returns:
            List of dicts with epoch-second 'start_time'/'end_time' and
            'duration_hours' (whole hours, floor division).
        """
        sorted_df = df.sort_values('timestamp').reset_index(drop=True)
        periods = []
        current_start = None
        prev_ts = None

        for _, row in sorted_df.iterrows():
            if row['anomaly'] and current_start is None:
                current_start = row['timestamp']
            elif not row['anomaly'] and current_start is not None:
                # Period ended on the previous (anomalous) point.
                periods.append((current_start, prev_ts))
                current_start = None
            prev_ts = row['timestamp']

        # Trailing open period: the last data point itself is anomalous.
        if current_start is not None:
            periods.append((current_start, sorted_df.iloc[-1]['timestamp']))

        # Convert to epoch seconds for storage.
        return [
            {
                'reg_id': reg_id,
                'start_time': start.timestamp(),
                'end_time': end.timestamp(),
                'duration_hours': (end - start).total_seconds() // 3600
            }
            for start, end in periods
        ]

    def generate_report(self, reg_id, df):
        """Extract anomaly periods and persist them to the database.

        No-op when df is empty or contains no anomaly periods; on insert
        failure the transaction is rolled back and the error logged.
        """
        if df.empty:
            return

        anomalies = self._extract_anomaly_periods(reg_id, df)
        if not anomalies:
            return

        insert_sql = """
            INSERT INTO civil_aircraft_exceptional_event 
            (reg_id, start_time, end_time, duration_hours)
            VALUES (%s, %s, %s, %s)
        """

        try:
            self.cursor.executemany(insert_sql, [
                (str(a['reg_id']), str(a['start_time']), str(a['end_time']), str(a['duration_hours']))
                for a in anomalies
            ])
            self.connection.commit()
            logging.info(f"{reg_id}异常报告已存储，共{len(anomalies)}条记录")
        except Exception as e:
            logging.error(f"存储异常报告失败: {str(e)}")
            self.connection.rollback()

    def visualize_trajectory(self, df, reg_id):
        """Render an interactive HTML map of the trajectory.

        Draws one blue polyline per cluster (noise skipped) and a red
        circle marker for every anomalous point, then saves the map under
        ./异常检测/.
        """
        if df.empty:
            return

        map_center = [df['latitude'].mean(), df['longitude'].mean()]
        m = folium.Map(location=map_center, zoom_start=8)

        # One polyline per real cluster; noise (-1) is not drawn here.
        for cluster_id in df['cluster'].unique():
            if cluster_id != -1:
                cluster_df = df[df['cluster'] == cluster_id]
                folium.PolyLine(
                    cluster_df[['latitude', 'longitude']].values,
                    color='blue',
                    weight=2,
                    tooltip=f'Cluster {cluster_id}'
                ).add_to(m)

        # Red markers for the flagged points.
        anomaly_df = df[df['anomaly'] == 1]
        for _, row in anomaly_df.iterrows():
            folium.CircleMarker(
                location=[row['latitude'], row['longitude']],
                radius=5,
                color='red',
                fill=True,
                tooltip=f"异常时间: {row['timestamp'].strftime('%Y-%m-%d %H:%M')}"
            ).add_to(m)

        # m.save() does not create directories, so make sure the output
        # folder exists; log the real path (was a literal placeholder).
        output_dir = "./异常检测"
        os.makedirs(output_dir, exist_ok=True)
        filename = f"{output_dir}/{reg_id}_anomaly_map.html"
        m.save(filename)
        logging.info(f"可视化地图已保存至 {filename}")

    def sample_usually_trace(self, df):
        """Sample up to 100 representative points from each real cluster.

        Noise points (cluster -1) are excluded; clusters smaller than 100
        contribute all of their points.
        NOTE(review): DataFrame.sample() is used without random_state, so
        the selection is nondeterministic — confirm that is intended.

        Returns:
            List of row dicts (all df columns preserved).
        """
        usually_trace = []
        for cluster_id in df['cluster'].unique():
            if cluster_id == -1:
                continue

            cluster_points = df[df['cluster'] == cluster_id]
            if len(cluster_points) >= 100:
                sampled_points = cluster_points.sample(n=100)
            else:
                sampled_points = cluster_points  # fewer than 100 points: take them all

            usually_trace.extend(sampled_points.to_dict(orient='records'))
        return usually_trace

    def insert_usually_trace(self, reg_id, usually_trace):
        """Persist sampled usual-route points; rollback and log on failure.

        Args:
            reg_id: aircraft registration id.
            usually_trace: list of dicts with 'latitude'/'longitude' keys.
        """
        insert_sql = """
            INSERT INTO civil_aircraft_usually_trace (reg_id, latitude, longitude)
            VALUES (%s, %s, %s)
        """
        try:
            self.cursor.executemany(insert_sql, [(reg_id, point['latitude'], point['longitude']) for point in usually_trace])
            self.connection.commit()
            logging.info(f"{reg_id}通常轨迹已存储，共{len(usually_trace)}条记录")
        except Exception as e:
            logging.error(f"存储通常轨迹失败: {str(e)}")
            self.connection.rollback()

    def analyze_all_aircraft(self):
        """Run the full analysis pipeline for every known aircraft."""
        aircraft_list = self.get_aircraft_list()
        logging.info(f"共发现{len(aircraft_list)}个航空器需要分析")

        for reg_id in aircraft_list:
            logging.info(f"正在分析 {reg_id}...")

            raw_df = self.fetch_trajectory_data(reg_id)
            if raw_df.empty:
                logging.warning(f"{reg_id} 没有轨迹数据，跳过分析")
                continue

            # Use the injected instance config (previously read the module
            # global ALGORITHM_CONFIG, defeating dependency injection).
            processed_df = self.preprocess_data(raw_df, self.algorithm_config['simplify_tolerance'])

            clustered_df = self.perform_clustering(processed_df)
            final_df = self.detect_anomalies(clustered_df)

            self.generate_report(reg_id, final_df)
            # self.visualize_trajectory(final_df, reg_id)  # optional map output

            # Sample and store representative "usual route" points.
            usually_trace = self.sample_usually_trace(final_df)
            self.insert_usually_trace(reg_id, usually_trace)

if __name__ == "__main__":
    # Context manager guarantees the DB connection is closed even on error.
    with AviationAnalyzer(DB_CONFIG, ALGORITHM_CONFIG) as analyzer:
        analyzer.analyze_all_aircraft()
