'''将处理后的数据加载到数据库中'''

import os
from pathlib import Path
import pandas as pd
import geopandas as gpd
from sqlalchemy import create_engine, text
from shapely.geometry import Point
import warnings
import json
import io
import csv
import math

# --- Configuration ---
# Load environment variables from the .env file
from dotenv import load_dotenv
load_dotenv()

# SQLAlchemy database URL; the loaders below require a PostGIS-enabled
# PostgreSQL database (ST_* functions, geometry columns, jsonb).
DB_URL = os.getenv("DB_URL")
if not DB_URL:
    raise ValueError("请在 .env 文件中设置 DB_URL 环境变量")

# Data directories, resolved relative to this file's parent directory:
#   <repo>/data/raw        — input CSVs (orders, weather)
#   <repo>/data/processed  — derived artifacts (community GeoJSON)
BASE_DIR = Path(__file__).parents[1] / "data"
RAW_DIR = BASE_DIR / "raw"
PROCESSED_DIR = BASE_DIR / "processed"

# --- 社区数据导入 ---
def write_communities_to_postgis(engine, geojson_path):
    """Load community boundary polygons from a GeoJSON file into PostGIS.

    Replaces the ``communities`` table, adds an approximate area column,
    simplifies geometries for faster rendering, and ensures a GIST index.

    Args:
        engine: SQLAlchemy engine pointing at a PostGIS-enabled database.
        geojson_path: Path to the community-boundary GeoJSON file.
    """
    print("--- 1. 开始导入社区边界数据 ---")
    frame = gpd.read_file(geojson_path)

    # Normalise everything to WGS84 lon/lat (EPSG:4326).
    if frame.crs is None or frame.crs.to_epsg() != 4326:
        frame = frame.to_crs(epsg=4326)

    # Drop rows whose geometry is missing or topologically invalid.
    keep = frame.geometry.notnull() & frame.geometry.is_valid
    if not keep.all():
        print(f"警告: {len(frame[~keep])} 个无效的社区几何图形将被跳过。")
        frame = frame[keep]

    # Area is computed in Web Mercator (EPSG:3857), so it is only an
    # approximation — hence the column name. Simplify AFTER measuring
    # so the area reflects the original shape.
    frame["area_sqkm_approx"] = frame.to_crs(epsg=3857).area / 1e6
    frame["geometry"] = frame.geometry.simplify(tolerance=0.0001)

    frame.to_postgis("communities", engine, if_exists="replace", index=False)

    # Spatial index for the ST_Within joins performed later.
    with engine.begin() as conn:
        conn.execute(text("CREATE INDEX IF NOT EXISTS idx_communities_geom ON communities USING GIST (geometry);"))

    print("✅ 社区数据导入成功！")

# --- 订单数据导入 (最高效方案) ---
def _bulk_insert_orders_fast(engine, records):
    """Bulk-load order records into ``bike_orders`` via COPY + a temp table.

    Each record is a dict with keys: order_id, start_time, end_time,
    start_wkt, end_wkt (strings or None) and raw (a JSON-serializable dict).

    Fixes over the previous implementation:
      * Fields are escaped per PostgreSQL's COPY text format instead of
        written with ``csv.writer`` — COPY does not understand CSV quoting,
        so tabs/newlines/backslashes in the data used to corrupt the load.
      * The raw JSON travels in the same COPY payload and is inserted in a
        single statement. The old two-phase approach paired
        ``RETURNING id`` rows positionally with ``records``, but PostgreSQL
        does not guarantee that ordering, so raw JSON could be attached to
        the wrong rows.

    Args:
        engine: SQLAlchemy engine (psycopg2 driver; raw_connection is used).
        records: list of record dicts; no-op when empty.

    Raises:
        Re-raises any database error after rolling back.
    """
    if not records:
        return

    def _esc(value):
        # COPY text-format escaping. None -> empty field, which the
        # NULLIF(..., '') calls below turn into SQL NULL.
        if value is None:
            return ''
        return (str(value)
                .replace('\\', '\\\\')
                .replace('\t', '\\t')
                .replace('\n', '\\n')
                .replace('\r', '\\r'))

    raw_conn = engine.raw_connection()
    cursor = raw_conn.cursor()

    try:
        # 1. Build the COPY payload — raw JSON included, one row per record.
        output = io.StringIO()
        for r in records:
            output.write('\t'.join([
                _esc(r["order_id"]),
                _esc(r["start_time"]),
                _esc(r["end_time"]),
                _esc(r["start_wkt"]),
                _esc(r["end_wkt"]),
                _esc(json.dumps(r["raw"])),
            ]) + '\n')
        output.seek(0)

        # 2. Staging table; dropped automatically at commit.
        cursor.execute("""
            CREATE TEMP TABLE temp_orders (
                order_id TEXT,
                start_time TEXT,
                end_time TEXT,
                start_wkt TEXT,
                end_wkt TEXT,
                raw_json TEXT
            ) ON COMMIT DROP;
        """)

        # 3. COPY the payload into the staging table.
        cursor.copy_from(output, 'temp_orders', sep='\t', null='')

        # 4. Single INSERT into the real table, casting/parsing as we go.
        cursor.execute("""
            INSERT INTO bike_orders(order_id, start_time, end_time, start_location, end_location, raw)
            SELECT
                NULLIF(order_id, ''),
                CAST(NULLIF(start_time, '') AS timestamp),
                CAST(NULLIF(end_time, '') AS timestamp),
                ST_SetSRID(ST_GeomFromText(NULLIF(start_wkt, '')), 4326),
                ST_SetSRID(ST_GeomFromText(NULLIF(end_wkt, '')), 4326),
                CAST(NULLIF(raw_json, '') AS jsonb)
            FROM temp_orders;
        """)
        raw_conn.commit()

        print(f"  成功插入并更新 {len(records)} 条订单记录")

    except Exception as e:
        raw_conn.rollback()
        print(f"  批量插入时出错: {e}")
        raise
    finally:
        cursor.close()
        raw_conn.close()

def import_orders_and_assign_communities(engine):
    """Load every ``bike_orders_*.csv`` from RAW_DIR into ``bike_orders``.

    Creates the target table if missing, normalises column names and
    timestamps, converts coordinates to WKT, and streams rows to the
    database in batches of 5000 via :func:`_bulk_insert_orders_fast`.

    Fix: rows with a missing longitude or latitude now yield a None WKT
    (loaded as NULL geometry). Previously ``Point(nan, nan).wkt`` produced
    'POINT (nan nan)', which ST_GeomFromText rejects, failing the whole batch.

    Args:
        engine: SQLAlchemy engine for the target PostGIS database.
    """
    print("--- 2. 开始导入订单数据 ---")

    # Ensure the target table exists before the first batch arrives.
    with engine.begin() as conn:
        conn.execute(text("""
        CREATE TABLE IF NOT EXISTS bike_orders (
            id SERIAL PRIMARY KEY,
            order_id TEXT,
            start_time TIMESTAMP,
            end_time TIMESTAMP,
            start_location geometry(Point,4326),
            end_location geometry(Point,4326),
            raw jsonb
        );
        """))

    csv_files = sorted(RAW_DIR.glob("bike_orders_*.csv"))
    if not csv_files:
        print("警告: 未在 'raw' 目录中找到 'bike_orders_*.csv' 文件。")
        return

    def _point_wkt(row, lng_col, lat_col):
        # NULL geometry for rows with missing coordinates; NaN coordinates
        # would otherwise produce WKT that PostGIS cannot parse.
        lng, lat = row[lng_col], row[lat_col]
        if pd.isna(lng) or pd.isna(lat):
            return None
        return Point(lng, lat).wkt

    for csv_path in csv_files:
        print(f"处理文件: {csv_path.name}")
        df = pd.read_csv(csv_path)

        # --- Cleaning: normalise column names and parse timestamps ---
        columns_map = {
            "START_LNG": "start_lng",
            "START_LAT": "start_lat",
            "END_LNG": "end_lng",
            "END_LAT": "end_lat",
            "START_TIME": "start_time",
            "END_TIME": "end_time",
        }
        df.rename(columns=columns_map, inplace=True)
        df["start_time"] = pd.to_datetime(df["start_time"], errors='coerce')
        df["end_time"] = pd.to_datetime(df["end_time"], errors='coerce')

        df["start_geom_wkt"] = df.apply(_point_wkt, axis=1, args=("start_lng", "start_lat"))
        df["end_geom_wkt"] = df.apply(_point_wkt, axis=1, args=("end_lng", "end_lat"))

        # --- Build batches for the bulk loader ---
        records = []
        for _, row in df.iterrows():
            raw_dict = row.to_dict()
            # Replace NaT/NaN and Timestamps so the dict is JSON-serializable.
            clean_dict = {}
            for k, v in raw_dict.items():
                if pd.isna(v):
                    clean_dict[k] = None
                elif isinstance(v, pd.Timestamp):
                    clean_dict[k] = v.isoformat()
                else:
                    clean_dict[k] = v

            records.append({
                "order_id": clean_dict.get("order_id") or clean_dict.get("id"),
                "start_time": clean_dict.get("start_time"),
                "end_time": clean_dict.get("end_time"),
                "start_wkt": clean_dict.get("start_geom_wkt"),
                "end_wkt": clean_dict.get("end_geom_wkt"),
                "raw": clean_dict
            })

            # Flush in chunks to bound memory usage.
            if len(records) >= 5000:
                _bulk_insert_orders_fast(engine, records)
                records = []

        if records:
            _bulk_insert_orders_fast(engine, records)

    print("✅ 订单数据导入完成！")

# --- 空间关联 ---
def perform_spatial_join(engine):
    """Assign each order's start/end point to its containing community.

    Adds ``start_community_id`` / ``end_community_id`` columns to
    ``bike_orders`` (idempotent) and fills them with the id of the
    community polygon containing the point, via ST_Within against the
    ``communities`` table. Only rows still NULL are updated, so reruns
    resume where they left off.

    Args:
        engine: SQLAlchemy engine for the target PostGIS database.
    """
    print("--- 3. 开始进行空间关联 (将订单分配到社区) ---")
    with engine.begin() as conn:
        # Key fix: make sure the columns exist before running the join.
        print("  - 检查并添加 'start_community_id' 列...")
        conn.execute(text("ALTER TABLE bike_orders ADD COLUMN IF NOT EXISTS start_community_id INTEGER;"))
        
        print("  - 检查并添加 'end_community_id' 列...")
        conn.execute(text("ALTER TABLE bike_orders ADD COLUMN IF NOT EXISTS end_community_id INTEGER;"))

        # NULL start_location rows are skipped implicitly: ST_Within(NULL, ...)
        # evaluates to NULL, so the WHERE clause never matches them.
        print("  - 分配起点社区...")
        conn.execute(text("""
        UPDATE bike_orders SET start_community_id = c.id
        FROM communities c
        WHERE bike_orders.start_community_id IS NULL 
          AND ST_Within(bike_orders.start_location, c.geometry);
        """))
        print("  - 分配终点社区...")
        conn.execute(text("""
        UPDATE bike_orders SET end_community_id = c.id
        FROM communities c
        WHERE bike_orders.end_community_id IS NULL 
          AND bike_orders.end_location IS NOT NULL
          AND ST_Within(bike_orders.end_location, c.geometry);
        """))
    
    print("✅ 空间关联完成！")

# --- 天气数据导入 ---
def import_weather_csv(engine):
    """Load the raw weather observations CSV into the ``weather`` table.

    Parses the compact DDATETIME stamps into a ``dt`` timestamp column,
    keeps a fixed subset of measurement columns (lower-cased), replaces
    the table, and indexes it on ``dt``.

    Args:
        engine: SQLAlchemy engine for the target database.
    """
    print("--- 4. 开始导入天气数据 ---")
    csv_path = RAW_DIR / "weather_20210501_20210514.csv"
    if not csv_path.exists():
        print(f"警告: 天气文件未找到: {csv_path}")
        return

    frame = pd.read_csv(csv_path)
    # DDATETIME arrives as e.g. 20210501120000; unparseable values become NaT.
    frame["dt"] = pd.to_datetime(frame["DDATETIME"], format="%Y%m%d%H%M%S", errors="coerce")

    wanted = ["dt", "WINDDIRECT", "WINDSPEED", "MINTEMP", "HUMIDITY", "AIR", "AIRLEVEL", "MAXTEMP", "RAIN"]
    present = [name for name in wanted if name in frame.columns]
    weather = frame[present].copy()
    weather.columns = [name.lower() for name in weather.columns]

    weather.to_sql("weather", engine, if_exists="replace", index=False)
    # Index supports time-range joins against order timestamps.
    with engine.begin() as conn:
        conn.execute(text("CREATE INDEX IF NOT EXISTS idx_weather_dt ON weather (dt);"))

    print("✅ 天气数据导入成功！")

# --- 主程序 ---
def _run_pipeline():
    """Run the four loading steps in order: communities, orders, join, weather.

    Individual steps can be commented out on reruns once their data is loaded.
    """
    engine = create_engine(DB_URL)

    # Step 1: community boundaries.
    print(">>> 正在执行步骤 1: 导入社区数据")
    geojson_path = PROCESSED_DIR / "shenzhen_voronoi_communities.geojson"
    if not geojson_path.exists():
        raise SystemExit(f"错误: GeoJSON 文件未找到: {geojson_path}")
    write_communities_to_postgis(engine, geojson_path)

    # Step 2: order records.
    print("\n>>> 正在执行步骤 2: 导入订单数据")
    import_orders_and_assign_communities(engine)

    # Step 3: spatial join (assign orders to communities).
    print("\n>>> 正在执行步骤 3: 空间关联")
    perform_spatial_join(engine)

    # Step 4: weather observations.
    print("\n>>> 正在执行步骤 4: 导入天气数据")
    import_weather_csv(engine)

    print("\n所有请求的操作已成功完成！")


if __name__ == "__main__":
    _run_pipeline()
