import pandas as pd
from sqlalchemy import create_engine, types, text
from sqlalchemy.types import VARCHAR
import json # 用于处理 routes.csv 中的 stops 列
import traceback # 新增导入
import pymysql # 新增导入
import re

# MySQL database connection settings.
# TODO: replace the placeholders below with your actual MySQL username and password!
DB_CONFIG = {
    'host': 'localhost',
    'user': 'root',   # <-- replace with your MySQL username
    'password': '1234', # <-- replace with your MySQL password
    'database': 'harbin_transport_analysis'
    # NOTE(review): there is no 'port' key here, yet the engine URL below
    # hard-codes port 3307 (MySQL's default is 3306) — confirm intended.
}

# 尝试使用不同的编码读取CSV文件
def read_csv_with_encoding(filepath):
    """Read a CSV file, trying several common encodings in turn.

    Tries utf-8, then gbk, then gb2312. Returns the DataFrame on the
    first successful read, or None when every encoding fails or a
    non-encoding error (e.g. missing file) occurs.
    """
    for codec in ('utf-8', 'gbk', 'gb2312'):
        try:
            frame = pd.read_csv(filepath, encoding=codec)
        except UnicodeDecodeError:
            # Wrong encoding — fall through to the next candidate.
            print(f"使用 {codec} 编码读取 {filepath} 失败，尝试下一种...")
        except Exception as e:
            # Anything else is not an encoding problem; stop retrying.
            print(f"读取 {filepath} 时发生未知错误：{e}")
            break
        else:
            print(f"成功使用 {codec} 编码读取 {filepath}")
            return frame

    print(f"无法使用已知编码读取 {filepath}，请手动检查文件编码或数据问题。")
    return None

# Create the SQLAlchemy database engine.
# NOTE: create_engine() is lazy — it does not open a real connection here,
# so this try/except only catches malformed-URL type errors; genuine
# connectivity failures surface on first use of the engine.
try:
    engine = create_engine(
        f"mysql+pymysql://{DB_CONFIG['user']}:{DB_CONFIG['password']}"
        # Port is now taken from DB_CONFIG when present; the default stays
        # 3307 for backward compatibility — TODO confirm (MySQL default is 3306).
        f"@{DB_CONFIG['host']}:{DB_CONFIG.get('port', 3307)}/{DB_CONFIG['database']}"
        "?charset=utf8mb4"
    )
    print("成功连接到 MySQL 数据库引擎。")
except Exception as e:
    print(f"连接 MySQL 数据库引擎失败：{e}")
    exit() # Abort the whole script if the engine cannot even be built.

# --- Import the MapReduce analysis results ---
print("\n--- 正在导入 MapReduce 分析结果 ---")

def import_data(filepath, table_name, custom_engine, columns=None, use_tab_sep=False):
    """Import one MapReduce result file into the MySQL table `table_name`.

    The dispatch-suggestion file gets a regex-based parse (its key field
    embeds underscores); every other file is read as a headerless TSV /
    whitespace-separated file using the caller-supplied `columns`.

    Parameters
    ----------
    filepath : str
        Path to the MapReduce output file.
    table_name : str
        Destination table; it is dropped and recreated (if_exists='replace').
    custom_engine : sqlalchemy.Engine
        Engine used for the import.
    columns : list[str] | None
        Column names for headerless files (None for the dispatch file).
    use_tab_sep : bool
        True -> split strictly on tabs; False -> split on whitespace runs.
    """
    print(f"\n--- 正在处理 {filepath} ---")
    try:
        df = None
        # --- Special parsing for dispatch_suggestion_analysis.tsv ---
        if "dispatch_suggestion_analysis.tsv" in filepath:
            print("对 dispatch_suggestion_analysis.tsv 进行特殊解析...")
            parsed_data = []
            # Non-greedy first group lets the route id itself contain
            # underscores; the station name is the last '_'-free segment.
            pattern = re.compile(r'(.+?)_([^_]+)_(\d+)\s+([\d\.]+)\s+([\d\.]+)')
            with open(filepath, 'r', encoding='utf-8') as f:
                for line in f:
                    match = pattern.match(line.strip())
                    if match:
                        route_id, station, hour, onboard, avg_crowd_level = match.groups()
                        # BUGFIX: the onboard group pattern ([\d\.]+) may
                        # match a decimal like "12.5"; int() alone would
                        # raise ValueError and abort the whole import, so
                        # go through float() first.
                        parsed_data.append([
                            route_id,
                            station,
                            int(hour),
                            int(float(onboard)),
                            float(avg_crowd_level),
                        ])

            if not parsed_data:
                print(f"警告：在 {filepath} 中没有找到匹配的数据。")
                return

            df = pd.DataFrame(parsed_data, columns=['route_id', 'station_id', 'hour', 'onboard', 'avg_crowd_level'])
        else:
            # Generic path: headerless file, caller supplies the column names.
            if use_tab_sep:
                df = pd.read_csv(filepath, sep='\t', header=None, names=columns, encoding='utf-8')
            else:
                df = pd.read_csv(filepath, sep=r'\s+', header=None, names=columns, encoding='utf-8', engine='python')

        if df is None or df.empty:
            print(f"警告：处理 {filepath} 后没有生成数据。")
            return

        print(f"\n{table_name} head:")
        print(df.head().to_string())

        # Map object (string) columns to VARCHAR(255): MySQL TEXT columns
        # cannot be part of a primary key without a length prefix.
        dtype_mapping = {col: VARCHAR(255) for col in df.columns if df[col].dtype == 'object'}

        df.to_sql(table_name, con=custom_engine, if_exists='replace', index=False, dtype=dtype_mapping)
        print(f"数据成功导入到表 '{table_name}'。")

        # Add the composite primary key once the dispatch data is loaded.
        if "dispatch_suggestion_analysis.tsv" in filepath:
            print(f"为表 {table_name} 添加复合主键...")
            with custom_engine.connect() as con:
                # BUGFIX: use the table_name parameter instead of the
                # previously hard-coded 'dispatch_suggestion_data' literal.
                con.execute(text(
                    f'ALTER TABLE `{table_name}` ADD PRIMARY KEY (`route_id`, `station_id`, `hour`);'
                ))
            print("复合主键添加成功。")

    except Exception as e:
        print(f"导入 {filepath} 到 {table_name} 失败: {e}")
        traceback.print_exc()

# Every MapReduce output file with its destination table name, its column
# names (None -> the file gets custom parsing inside import_data), and a
# boolean telling whether the file is strictly tab-separated (True) or
# split on arbitrary whitespace (False).
mapreduce_files_config = [
    ('hourly_traffic_trends.tsv', 'hourly_traffic_trends', ['hour', 'onboard', 'offboard'], True),
    ('station_passenger_ranking.tsv', 'station_passenger_ranking', ['station_name', 'onboard', 'offboard'], True),
    ('direction_crowd_output.tsv', 'direction_crowd', ['direction', 'onboard', 'offboard', 'avg_crowd_level'], False),
    ('route_passenger_mobility_analysis.tsv', 'route_passenger_mobility', ['route_id', 'total_passengers', 'avg_passengers_per_stop'], False),
    ('station_crowd_ranking.tsv', 'station_crowd_ranking', ['station_name', 'avg_crowd_level'], False),
    ('bus_rush_hour_traffic.tsv', 'bus_rush_hour_traffic', ['station_name', 'time', 'onboard', 'offboard'], False),
    ('subway_transfer_pressure.tsv', 'subway_transfer_pressure', ['station_name', 'pressure_index'], False),
    ('bus_stop_frequency_usage_analysis.tsv', 'bus_stop_frequency_usage', ['station_name', 'total_stops', 'onboard', 'offboard'], True),
    ('dispatch_suggestion_analysis.tsv', 'dispatch_suggestion_data', None, False)
]

# Import every configured analysis file.
for filename, tablename, columns, use_tab in mapreduce_files_config:
    # BUGFIX: the path previously ignored the `filename` loop variable
    # (a fixed literal), so every iteration targeted the same nonexistent
    # file and nothing was imported.
    path = f'mapreduce_jobs/data/{filename}'
    import_data(path, tablename, engine, columns=columns, use_tab_sep=use_tab)

# Special case: the route-funnel output arrives in two parts that are
# concatenated before being loaded into a single table.
print("\n--- 正在处理 route_funnel 文件 ---")
try:
    funnel_columns = ['route_id', 'station_name', 'onboard', 'offboard']
    part_paths = (
        'mapreduce_jobs/data/route_funnel_part1.tsv',
        'mapreduce_jobs/data/route_funnel_part2.tsv',
    )
    parts = [
        pd.read_csv(p, sep='\t', header=None, names=funnel_columns, encoding='utf-8')
        for p in part_paths
    ]
    route_funnel_df = pd.concat(parts, ignore_index=True)

    print("\nroute_funnel head:")
    print(route_funnel_df.head().to_string())

    # String columns are stored as VARCHAR(255), consistent with the other tables.
    string_types = {
        col: VARCHAR(255)
        for col in route_funnel_df.columns
        if route_funnel_df[col].dtype == 'object'
    }
    route_funnel_df.to_sql('route_funnel', con=engine, if_exists='replace', index=False, dtype=string_types)
    print("数据成功合并并导入到表 'route_funnel'。")
except FileNotFoundError as e:
    print(f"错误：漏斗文件未找到 {e.filename}")
except Exception as e:
    print(f"导入 route_funnel 失败：{e.__class__.__name__} - {e.args}")
    traceback.print_exc()

print("\n--- 所有文件导入尝试完成 ---") 