import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats

def load_and_preprocess_data(file_path):
    """
    Load the raw CSV and apply the preprocessing pipeline.

    Steps: keep only Asia/Dubai rows, drop rows missing any key field,
    round coordinates to 3 decimals, and derive Mbps rate columns from
    the kbps bandwidth columns.

    Args:
        file_path: Path to the input CSV file.

    Returns:
        The preprocessed DataFrame, or None if any step fails.
    """
    try:
        frame = pd.read_csv(file_path)
        print(f"原始数据条数：{len(frame)}")

        # Keep only records located in Dubai.
        frame = frame[frame['attr_location'] == 'Asia/Dubai']
        print(f"Dubai地区数据条数：{len(frame)}")

        # Rows missing any of these fields cannot be aggregated later.
        required_fields = (
            'attr_location_latitude',
            'attr_location_longitude',
            'attr_connection_downstream_bandwidth_kbps',
            'attr_connection_upstream_bandwidth_kbps',
            'num_packet_loss_received',
            'id_device',
        )
        frame = frame.dropna(subset=list(required_fields))
        print(f"删除缺失数据后条数：{len(frame)}")

        # Normalize coordinate precision so nearby devices group together.
        for coord in ('attr_location_latitude', 'attr_location_longitude'):
            frame[coord] = frame[coord].round(3)

        # Convert bandwidth from kbps to Mbps for readability downstream.
        frame['downstream_mbps'] = frame['attr_connection_downstream_bandwidth_kbps'] / 1000
        frame['upstream_mbps'] = frame['attr_connection_upstream_bandwidth_kbps'] / 1000

        return frame
    except Exception as e:
        print(f"数据加载和预处理出错：{str(e)}")
        return None

def aggregate_data(df):
    """
    Aggregate device records by rounded (latitude, longitude) cell.

    For each coordinate pair, compute the device count and the median
    downstream rate, upstream rate, and received packet loss.

    Args:
        df: Preprocessed DataFrame (output of load_and_preprocess_data).

    Returns:
        Aggregated DataFrame with columns latitude, longitude,
        id_device_count, median_DL_mbps, median_UL_mbps,
        median_packet_loss — or None on failure.
    """
    try:
        aggregated = (
            df.groupby(['attr_location_latitude', 'attr_location_longitude'])
              .agg(
                  id_device_count=('id_device', 'count'),
                  median_DL_mbps=('downstream_mbps', 'median'),
                  median_UL_mbps=('upstream_mbps', 'median'),
                  median_packet_loss=('num_packet_loss_received', 'median'),
              )
              .reset_index()
              .rename(columns={
                  'attr_location_latitude': 'latitude',
                  'attr_location_longitude': 'longitude',
              })
        )
        return aggregated
    except Exception as e:
        print(f"数据汇聚出错：{str(e)}")
        return None

def calculate_correlation(data):
    """
    Compute Pearson correlations between device count and quality metrics.

    Args:
        data: Aggregated DataFrame containing id_device_count,
              median_DL_mbps, median_UL_mbps, and median_packet_loss.

    Returns:
        A Series with keys corr_DL, corr_UL, and corr_packet_loss,
        or None on failure.
    """
    try:
        metric_columns = {
            'corr_DL': 'median_DL_mbps',
            'corr_UL': 'median_UL_mbps',
            'corr_packet_loss': 'median_packet_loss',
        }
        device_counts = data['id_device_count']
        # pearsonr returns (coefficient, p-value); keep the coefficient only.
        return pd.Series({
            key: stats.pearsonr(device_counts, data[column])[0]
            for key, column in metric_columns.items()
        })
    except Exception as e:
        print(f"相关系数计算出错：{str(e)}")
        return None

def create_analysis_table(data, sort_column, ascending=False, n=100):
    """
    Build the top-n ranking table for one metric and print its correlations.

    Args:
        data: Aggregated DataFrame.
        sort_column: Column to rank by.
        ascending: Sort direction; False keeps the largest values first.
        n: Number of rows to keep.

    Returns:
        The top-n DataFrame, or None on failure.
    """
    try:
        top_rows = data.sort_values(sort_column, ascending=ascending).iloc[:n]
        # Correlations are computed over the selected subset only.
        corr_summary = calculate_correlation(top_rows)
        print(f"\n=== {sort_column} 分析结果 ===")
        print(f"相关系数：\n{corr_summary}\n")
        return top_rows
    except Exception as e:
        print(f"分析表格创建出错：{str(e)}")
        return None

def plot_correlation(data, x_col, y_col, title, output_dir):
    """
    Plot y_col against x_col as a scatter chart and save it as a PNG.

    The file name is the title with spaces replaced by underscores,
    written into output_dir.

    Args:
        data: DataFrame holding both columns.
        x_col: Column for the x axis.
        y_col: Column for the y axis.
        title: Chart title, also used to derive the file name.
        output_dir: Directory the PNG is saved into.
    """
    try:
        fig, ax = plt.subplots(figsize=(10, 6))
        sns.scatterplot(data=data, x=x_col, y=y_col, ax=ax)
        ax.set_title(title)
        ax.set_xlabel(x_col)
        ax.set_ylabel(y_col)
        safe_name = title.replace(' ', '_')
        fig.savefig(f"{output_dir}/{safe_name}.png")
        plt.close(fig)
    except Exception as e:
        print(f"图表绘制出错：{str(e)}")

def save_results(data, filename, output_dir):
    """
    Save an analysis DataFrame as a CSV file under output_dir.

    Args:
        data: DataFrame to persist.
        filename: Target file name (e.g. "top_devices_analysis.csv").
        output_dir: Directory the file is written into.
    """
    try:
        # Bug fix: the original built the path as f"{output_dir}/(unknown)",
        # ignoring `filename` entirely, so every analysis overwrote the same
        # "(unknown)" file. Use the caller-supplied filename instead.
        output_path = f"{output_dir}/{filename}"
        data.to_csv(output_path, index=False)
        print(f"结果已保存至：{output_path}")
    except Exception as e:
        print(f"结果保存出错：{str(e)}")

def main():
    """
    Interactive entry point: load, aggregate, rank, save, and plot.

    Prompts for the input CSV path and an output directory, then runs
    the full analysis pipeline, writing one CSV per ranking table and
    one PNG per correlation chart.
    """
    csv_path = input("请输入CSV文件路径：")
    out_dir = input("请输入输出目录路径：")

    raw = load_and_preprocess_data(csv_path)
    if raw is None:
        return

    aggregated = aggregate_data(raw)
    if aggregated is None:
        return

    # (label, sort column, ascending?) for each ranking table.
    table_specs = [
        ('top_devices', 'id_device_count', False),
        ('lowest_dl', 'median_DL_mbps', True),
        ('lowest_ul', 'median_UL_mbps', True),
        ('highest_packet_loss', 'median_packet_loss', False),
    ]
    for label, sort_col, asc in table_specs:
        table = create_analysis_table(aggregated, sort_col, asc)
        if table is not None:
            save_results(table, f"{label}_analysis.csv", out_dir)

    # (x column, y column, chart title) for each scatter plot.
    plot_specs = [
        ('id_device_count', 'median_DL_mbps', '设备数量与下行速率关系'),
        ('id_device_count', 'median_UL_mbps', '设备数量与上行速率关系'),
        ('id_device_count', 'median_packet_loss', '设备数量与丢包率关系'),
    ]
    for x_name, y_name, chart_title in plot_specs:
        plot_correlation(aggregated, x_name, y_name, chart_title, out_dir)

# Run the interactive analysis pipeline only when executed as a script.
if __name__ == "__main__":
    main() 