"""
预计算并缓存每个社区在各时间槽的净流量到 Redis。

注：该操作与实际运行业务不符，实际业务中需实时处理上一个时间槽的流量数据，计算净流量后存入 Redis。这里一次性预计算仅为方便演示。
"""


import os
import redis
from sqlalchemy import create_engine, text
from dotenv import load_dotenv
from datetime import datetime

# --- Configuration ---
# Load environment variables from a local .env file (if present).
load_dotenv()
DB_URL = os.getenv("DB_URL")
# Redis connection settings. Overridable via environment for flexibility;
# the defaults match the original hard-coded local demo instance.
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = int(os.getenv("REDIS_PORT", "6379"))

def precompute_and_cache_net_flows(batch_size: int = 5000) -> None:
    """Compute per-community net flow for every 30-minute slot and cache it in Redis.

    Net flow for a (community, time slot) pair is ``returns - borrows`` over
    that slot, computed in SQL from the ``bike_orders`` table. Results are
    stored in Redis hashes keyed ``community:{id}:netflow`` with the ISO-8601
    slot timestamp as the hash field and the net flow as the value.

    Args:
        batch_size: Number of HSET commands to buffer in the pipeline before
            flushing to Redis. Bounds client-side memory for large result
            sets (the original implementation buffered *all* rows in a single
            pipeline before one giant execute).

    Returns:
        None. Prints progress to stdout; returns early on connection failure.
    """
    print("开始连接数据库和 Redis...")
    try:
        db_engine = create_engine(DB_URL)
        redis_client = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=0, decode_responses=True)
        # Verify the Redis connection up front so we fail fast.
        redis_client.ping()
        print("数据库和 Redis 连接成功！")
    except Exception as e:
        # Best-effort script: report and bail rather than raise.
        print(f"连接失败: {e}")
        return

    # Pipeline batches Redis commands into fewer round trips; we flush it
    # every `batch_size` commands to keep memory usage bounded.
    redis_pipeline = redis_client.pipeline()

    # --- SQL query ---
    # Buckets each order's start/end time into 30-minute slots, counts
    # borrows (by start community) and returns (by end community), then
    # joins the two so net_flow = returns - borrows per community per slot.
    query = text("""
        WITH borrows AS (
            SELECT
                start_community_id AS community_id,
                date_trunc('hour', start_time) + floor(extract(minute from start_time) / 30) * interval '30 minute' AS time_slot,
                COUNT(*) AS borrow_count
            FROM bike_orders
            WHERE start_community_id IS NOT NULL
            GROUP BY 1, 2
        ),
        returns AS (
            SELECT
                end_community_id AS community_id,
                date_trunc('hour', end_time) + floor(extract(minute from end_time) / 30) * interval '30 minute' AS time_slot,
                COUNT(*) AS return_count
            FROM bike_orders
            WHERE end_community_id IS NOT NULL
            GROUP BY 1, 2
        )
        SELECT
            COALESCE(b.community_id, r.community_id) AS community_id,
            COALESCE(b.time_slot, r.time_slot) AS time_slot,
            (COALESCE(r.return_count, 0) - COALESCE(b.borrow_count, 0)) AS net_flow
        FROM borrows b
        FULL OUTER JOIN returns r ON b.community_id = r.community_id AND b.time_slot = r.time_slot
        WHERE COALESCE(b.time_slot, r.time_slot) IS NOT NULL;
    """)

    print("开始执行数据库查询，这可能需要一些时间...")
    with db_engine.connect() as connection:
        result = connection.execute(query)
        count = 0
        print("查询完成，开始将数据写入 Redis...")
        for row in result:
            row_dict = dict(row._mapping)
            community_id = row_dict['community_id']
            time_slot_str = row_dict['time_slot'].isoformat()
            net_flow = row_dict['net_flow']

            # Hash layout: key = community, field = time slot, value = net flow.
            redis_pipeline.hset(f"community:{community_id}:netflow", time_slot_str, net_flow)
            count += 1
            # Flush periodically so the pipeline never holds more than
            # `batch_size` pending commands in memory.
            if count % batch_size == 0:
                redis_pipeline.execute()

    print(f"共处理 {count} 条数据，正在执行 Redis 写入...")
    # Flush any remaining buffered commands (a no-op if none are pending).
    redis_pipeline.execute()
    print("预计算完成！所有净流量数据已成功缓存到 Redis。")

# Run the one-off precomputation when executed as a script.
if __name__ == "__main__":
    precompute_and_cache_net_flows()