#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
昆明地铁数据分析与可视化 - 第零部分
本脚本负责处理原始数据，清洗和预处理地铁站点、线路和客流量数据
"""

import os
import pandas as pd
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
import warnings
from pathlib import Path
import glob

# 忽略警告
warnings.filterwarnings('ignore')

def create_directories():
    """Create the directory structure required by the pipeline.

    Creates both the processed-data directory and the analysis-results
    directory (idempotent: existing directories are left untouched).

    Returns:
        Path: the processed-data output directory that cleaned files
        are written to.
    """
    # Output directory for cleaned/processed data.
    # Path.mkdir(parents=True, exist_ok=True) is the pathlib-idiomatic
    # equivalent of os.makedirs(..., exist_ok=True).
    output_dir = Path("data/processed/cleaned_data")
    output_dir.mkdir(parents=True, exist_ok=True)

    # Output directory for analysis artifacts (created for its side
    # effect only; not returned).
    analysis_dir = Path("analysis_results/kunming_analysis")
    analysis_dir.mkdir(parents=True, exist_ok=True)

    return output_dir

def clean_flow_data(raw_data_dir, output_dir):
    """Clean the passenger-flow dataset and persist it to ``output_dir``.

    Searches ``raw_data_dir`` for an Excel/CSV file whose name contains
    the flow keyword, converts the wide "指标名称" export layout into a
    tidy date-by-city table when detected, and writes the result as
    ``cleaned_flow_data.csv``.

    Args:
        raw_data_dir (Path): directory holding the raw data files.
        output_dir (Path): directory the cleaned CSV is written into.

    Returns:
        DataFrame or None: the cleaned data, or None when no file is
        found or any error occurs (errors are printed, not raised).
    """
    print("正在处理客流量数据...")

    try:
        # Locate the raw flow file; the first pattern with a hit wins.
        patterns = ("*客流量*.xlsx", "*客流量*.xls", "*客流量*.csv")
        flow_file = next(
            (hits[0] for p in patterns if (hits := list(raw_data_dir.glob(p)))),
            None,
        )

        if not flow_file:
            print("未找到客流量数据文件")
            return None

        print(f"找到客流量数据文件: {flow_file}")

        # Dispatch the reader on the file extension.
        if flow_file.suffix.lower() in ('.xlsx', '.xls'):
            flow_data = pd.read_excel(flow_file, engine='openpyxl')
        else:
            flow_data = pd.read_csv(flow_file, encoding='utf-8-sig')

        print(f"原始客流量数据形状: {flow_data.shape}")

        # Wide export layout: the "指标名称" column carries the dates and
        # each city has its own "<city>:地铁客流量" column.
        if '指标名称' in flow_data.columns:
            print("检测到特殊格式客流量数据，进行转换...")

            city_columns = [c for c in flow_data.columns if '地铁客流量' in c]
            if not city_columns:
                print("无法找到城市客流量列")
                return None

            # Rows 0-2 are header metadata (units, source, ...); the
            # actual observations start at row index 3.
            tidy = pd.DataFrame()
            tidy['日期'] = pd.to_datetime(
                flow_data['指标名称'].iloc[3:], errors='coerce'
            )

            for city_col in city_columns:
                try:
                    # "昆明:地铁客流量" -> column name "昆明"
                    tidy[city_col.split(':')[0]] = pd.to_numeric(
                        flow_data[city_col].iloc[3:], errors='coerce'
                    )
                except Exception as e:
                    print(f"处理{city_col}时出错: {str(e)}")

            # Discard rows whose date failed to parse.
            flow_data = tidy.dropna(subset=['日期'])

        # Persist the cleaned table.
        flow_data.to_csv(
            output_dir / "cleaned_flow_data.csv", index=False, encoding='utf-8-sig'
        )
        print(f"客流量数据清洗完成，包含{len(flow_data)}条记录，{len(flow_data.columns)}列")

        return flow_data

    except Exception as e:
        print(f"处理客流量数据时出错: {str(e)}")
        return None

def clean_station_data(raw_data_dir, output_dir, year=2023):
    """Clean the metro-station shapefile for ``year``.

    Tries several filename patterns in order (the recursive ones also
    search subdirectories), fills missing values, de-duplicates
    stations, and writes both a shapefile and a CSV copy.

    Args:
        raw_data_dir (Path): directory holding the raw shapefiles.
        output_dir (Path): directory the cleaned outputs go into.
        year (int): dataset year used in the filename patterns.

    Returns:
        GeoDataFrame or None: the cleaned stations, or None on failure
        (errors are printed, not raised).
    """
    print(f"\n正在处理{year}年地铁站点数据...")

    try:
        # Candidate filename patterns, most specific first; the glob
        # patterns with "**/" extend the search into subdirectories.
        search_order = (
            f"*{year}*站点*.shp",
            f"*{year}*站*.shp",
            f"*{year}*地铁站*.shp",
            f"**/*{year}*站点*.shp",
            f"**/*{year}*站*.shp",
        )
        station_files = []
        for pattern in search_order:
            station_files = list(raw_data_dir.glob(pattern))
            if station_files:
                break

        if not station_files:
            print(f"未找到{year}年站点数据文件")
            return None

        print(f"找到{year}年站点数据文件: {station_files[0]}")

        stations_df = gpd.read_file(station_files[0], encoding='utf-8')
        print(f"原始站点数据形状: {stations_df.shape}")

        # Guarantee a city column exists so later filtering cannot fail.
        has_city = (
            '城市' in stations_df.columns
            or 'city' in stations_df.columns.str.lower()
        )
        if not has_city:
            print("警告: 站点数据缺少城市字段")
            # Simplified handling: just add a placeholder column.
            stations_df['城市'] = '未知'

        # Fill missing values: text columns get a placeholder, numeric
        # columns get zero; geometry is left alone.
        for col in stations_df.columns:
            if col == 'geometry':
                continue
            fill = '未知' if stations_df[col].dtype == 'object' else 0
            stations_df[col] = stations_df[col].fillna(fill)

        # De-duplicate on (station name, city) when a known name column
        # is present; otherwise fall back to full-row de-duplication.
        for name_col in ('station_name', '站点名称'):
            if name_col in stations_df.columns and '城市' in stations_df.columns:
                stations_df = stations_df.drop_duplicates(subset=[name_col, '城市'])
                break
        else:
            stations_df = stations_df.drop_duplicates()

        # Persist both geo and tabular versions of the cleaned data.
        stations_df.to_file(output_dir / f"cleaned_stations_{year}.shp", encoding='utf-8')
        stations_df.drop('geometry', axis=1).to_csv(
            output_dir / f"cleaned_stations_{year}.csv",
            index=False,
            encoding='utf-8-sig',
        )

        print(f"{year}年站点数据清洗完成，包含{len(stations_df)}个站点")

        return stations_df

    except Exception as e:
        print(f"处理{year}年站点数据时出错: {str(e)}")
        return None

def clean_line_data(raw_data_dir, output_dir, year=2023):
    """Clean the metro-line shapefile for ``year``.

    Tries several filename patterns in order (the recursive ones also
    search subdirectories), fills missing values, de-duplicates lines,
    and writes both a shapefile and a CSV copy.

    Args:
        raw_data_dir (Path): directory holding the raw shapefiles.
        output_dir (Path): directory the cleaned outputs go into.
        year (int): dataset year used in the filename patterns.

    Returns:
        GeoDataFrame or None: the cleaned lines, or None on failure
        (errors are printed, not raised).
    """
    print(f"\n正在处理{year}年地铁线路数据...")

    try:
        # Candidate filename patterns, most specific first; the glob
        # patterns with "**/" extend the search into subdirectories.
        search_order = (
            f"*{year}*线路*.shp",
            f"*{year}*线*.shp",
            f"*{year}*地铁线*.shp",
            f"**/*{year}*线路*.shp",
            f"**/*{year}*线*.shp",
        )
        line_files = []
        for pattern in search_order:
            line_files = list(raw_data_dir.glob(pattern))
            if line_files:
                break

        if not line_files:
            print(f"未找到{year}年线路数据文件")
            return None

        print(f"找到{year}年线路数据文件: {line_files[0]}")

        lines_df = gpd.read_file(line_files[0], encoding='utf-8')
        print(f"原始线路数据形状: {lines_df.shape}")

        # Guarantee a city column exists so later filtering cannot fail.
        has_city = (
            '城市' in lines_df.columns
            or 'city' in lines_df.columns.str.lower()
        )
        if not has_city:
            print("警告: 线路数据缺少城市字段")
            # Simplified handling: just add a placeholder column.
            lines_df['城市'] = '未知'

        # Fill missing values: text columns get a placeholder, numeric
        # columns get zero; geometry is left alone.
        for col in lines_df.columns:
            if col == 'geometry':
                continue
            fill = '未知' if lines_df[col].dtype == 'object' else 0
            lines_df[col] = lines_df[col].fillna(fill)

        # De-duplicate on (line name, city) when a known name column is
        # present; otherwise fall back to full-row de-duplication.
        for name_col in ('line_name', '线路名称'):
            if name_col in lines_df.columns and '城市' in lines_df.columns:
                lines_df = lines_df.drop_duplicates(subset=[name_col, '城市'])
                break
        else:
            lines_df = lines_df.drop_duplicates()

        # Persist both geo and tabular versions of the cleaned data.
        lines_df.to_file(output_dir / f"cleaned_lines_{year}.shp", encoding='utf-8')
        lines_df.drop('geometry', axis=1).to_csv(
            output_dir / f"cleaned_lines_{year}.csv",
            index=False,
            encoding='utf-8-sig',
        )

        print(f"{year}年线路数据清洗完成，包含{len(lines_df)}条线路")

        return lines_df

    except Exception as e:
        print(f"处理{year}年线路数据时出错: {str(e)}")
        return None

def _build_kunming_flow(flow_df, flow_column, data_dir):
    """Build, persist and feature-enrich the Kunming flow table.

    Extracts ``flow_column`` from ``flow_df`` as the Kunming passenger
    flow, saves the raw extract to ``data_dir``, then coerces types,
    drops unparseable rows and adds calendar features.

    Args:
        flow_df (DataFrame): cleaned flow table with a '日期' column.
        flow_column (str): name of the column holding Kunming's flow.
        data_dir (Path): directory the CSV extract is written into.

    Returns:
        DataFrame: the enriched Kunming flow table.
    """
    kunming_flow = pd.DataFrame({
        '日期': flow_df['日期'],
        '客流量': flow_df[flow_column]
    })

    # Persist the raw extract before type coercion, matching the
    # historical on-disk format of kunming_flow.csv.
    kunming_flow.to_csv(data_dir / "kunming_flow.csv", index=False, encoding='utf-8-sig')

    # Normalize types, then drop rows that failed to parse.
    kunming_flow['日期'] = pd.to_datetime(kunming_flow['日期'])
    kunming_flow['客流量'] = pd.to_numeric(kunming_flow['客流量'], errors='coerce')
    kunming_flow = kunming_flow.dropna()

    # Calendar features for downstream analysis; dayofweek >= 5 means
    # Saturday/Sunday.
    kunming_flow['年份'] = kunming_flow['日期'].dt.year
    kunming_flow['月份'] = kunming_flow['日期'].dt.month
    kunming_flow['日'] = kunming_flow['日期'].dt.day
    kunming_flow['星期'] = kunming_flow['日期'].dt.dayofweek
    kunming_flow['是否周末'] = kunming_flow['星期'].apply(lambda x: 1 if x >= 5 else 0)

    print(f"昆明客流量数据处理完成，共{len(kunming_flow)}条有效记录")
    print(f"客流量数据时间范围: {kunming_flow['日期'].min()} 至 {kunming_flow['日期'].max()}")
    return kunming_flow

def extract_kunming_data(stations_df, lines_df, flow_df):
    """Extract the Kunming subset of stations, lines and flow data.

    Filters the station/line tables by the '城市' column, finds the
    Kunming flow column (exact name first, then the full export name,
    then any column containing '昆明'), and writes each non-empty
    subset to CSV under data/processed/cleaned_data.

    Args:
        stations_df (GeoDataFrame or None): cleaned station data.
        lines_df (GeoDataFrame or None): cleaned line data.
        flow_df (DataFrame or None): cleaned flow data.

    Returns:
        tuple: (kunming_stations, kunming_lines, kunming_flow); each
        element is None when extraction was not possible.
    """
    print("\n提取昆明地铁数据...")

    try:
        # Output directory for the extracted subsets.
        data_dir = Path("data/processed/cleaned_data")

        # --- Kunming stations ---
        if stations_df is not None and '城市' in stations_df.columns:
            kunming_stations = stations_df[stations_df['城市'] == '昆明']
            print(f"找到{len(kunming_stations)}个昆明地铁站点")
            if not kunming_stations.empty:
                kunming_stations.to_csv(data_dir / "kunming_stations.csv", index=False, encoding='utf-8-sig')
        else:
            print("无法提取昆明站点数据")
            kunming_stations = None

        # --- Kunming lines ---
        if lines_df is not None and '城市' in lines_df.columns:
            kunming_lines = lines_df[lines_df['城市'] == '昆明']
            print(f"找到{len(kunming_lines)}条昆明地铁线路")
            if not kunming_lines.empty:
                kunming_lines.to_csv(data_dir / "kunming_lines.csv", index=False, encoding='utf-8-sig')
        else:
            print("无法提取昆明线路数据")
            kunming_lines = None

        # --- Kunming flow ---
        # The three historical branches differed only in which column
        # they read; the shared pipeline lives in _build_kunming_flow.
        if flow_df is not None:
            print(f"客流量数据列: {flow_df.columns.tolist()}")

            if '昆明' in flow_df.columns:
                print(f"找到{len(flow_df)}条昆明客流量记录")
                kunming_flow = _build_kunming_flow(flow_df, '昆明', data_dir)
            elif '昆明:地铁客流量' in flow_df.columns:
                # Some exports keep the full "<city>:<metric>" name.
                print(f"使用'昆明:地铁客流量'列，找到{len(flow_df)}条昆明客流量记录")
                kunming_flow = _build_kunming_flow(flow_df, '昆明:地铁客流量', data_dir)
            else:
                # Last resort: any column mentioning Kunming.
                kunming_cols = [col for col in flow_df.columns if '昆明' in col]
                if kunming_cols:
                    print(f"找到可能的昆明客流量列: {kunming_cols}")
                    print(f"使用'{kunming_cols[0]}'列，找到{len(flow_df)}条昆明客流量记录")
                    kunming_flow = _build_kunming_flow(flow_df, kunming_cols[0], data_dir)
                else:
                    print("在客流量数据中未找到昆明相关列")
                    kunming_flow = None
        else:
            print("无法提取昆明客流量数据：客流量数据为空")
            kunming_flow = None

        # Normalize an empty DataFrame to None so callers only need a
        # single None check.
        if kunming_flow is not None and kunming_flow.empty:
            print("警告：昆明客流量数据为空DataFrame")
            kunming_flow = None

        return kunming_stations, kunming_lines, kunming_flow

    except Exception as e:
        print(f"提取昆明数据时出错: {str(e)}")
        import traceback
        traceback.print_exc()
        return None, None, None

def run_data_cleaning():
    """Run the full metro data-cleaning pipeline.

    Cleans the flow, station and line datasets, extracts the Kunming
    subset (preferring 2024 geodata over 2023), prints summary
    statistics and saves an enhanced flow CSV.

    Returns:
        tuple: (kunming_stations, kunming_lines, kunming_flow); each
        element may be None when the corresponding step failed.
    """
    print("=== 开始地铁数据清洗流程 ===")

    # Create the output directory tree.
    output_dir = create_directories()

    # Raw inputs must exist before anything else runs.
    raw_data_dir = Path("data/raw")
    if not raw_data_dir.exists():
        print(f"错误：原始数据目录 {raw_data_dir} 不存在")
        # Bug fix: return an unpackable triple instead of a bare None,
        # so callers doing `a, b, c = run_data_cleaning()` don't crash.
        return None, None, None

    # Clean each dataset; any failure yields None and is tolerated below.
    flow_df = clean_flow_data(raw_data_dir, output_dir)
    stations_2023_df = clean_station_data(raw_data_dir, output_dir, year=2023)
    lines_2023_df = clean_line_data(raw_data_dir, output_dir, year=2023)
    stations_2024_df = clean_station_data(raw_data_dir, output_dir, year=2024)
    lines_2024_df = clean_line_data(raw_data_dir, output_dir, year=2024)

    # Prefer the newest (2024) geodata; fall back to 2023 otherwise.
    if stations_2024_df is not None and not stations_2024_df.empty:
        print("使用2024年数据提取昆明信息")
        kunming_stations, kunming_lines, kunming_flow = extract_kunming_data(
            stations_2024_df, lines_2024_df, flow_df
        )
    else:
        print("使用2023年数据提取昆明信息")
        kunming_stations, kunming_lines, kunming_flow = extract_kunming_data(
            stations_2023_df, lines_2023_df, flow_df
        )

    # --- Summary / feature report ---
    print("\n=== 昆明地铁数据特征分析 ===")
    if kunming_stations is not None and not kunming_stations.empty:
        print(f"昆明地铁站点数量: {len(kunming_stations)}")
        print("\n站点数据特征:")
        print(kunming_stations.dtypes)

    if kunming_lines is not None and not kunming_lines.empty:
        print(f"昆明地铁线路数量: {len(kunming_lines)}")
        print("\n线路数据特征:")
        print(kunming_lines.dtypes)

    if kunming_flow is not None and not kunming_flow.empty:
        print(f"昆明地铁客流量记录数: {len(kunming_flow)}")
        print("\n客流量数据基本统计:")
        print(kunming_flow['客流量'].describe())

        # The calendar features (年份/月份/日/星期/是否周末) were already
        # added by extract_kunming_data on every non-None path, so the
        # old duplicate recomputation here has been dropped.
        kunming_flow.to_csv(output_dir / "kunming_flow_enhanced.csv", index=False, encoding='utf-8-sig')

    print("\n数据清洗与处理完成!")
    return kunming_stations, kunming_lines, kunming_flow

if __name__ == "__main__":
    # 设置全局变量
    output_dir = create_directories()
    
    # 运行数据清洗流程
    kunming_stations, kunming_lines, kunming_flow = run_data_cleaning() 