import math, joblib
from pyhive import hive
from datetime import datetime, timedelta
from collections import defaultdict

# ====== 模型加载 ======
class LogisticRegressionSTD:
    """Minimal linear classifier: intercept + dot(weights, features), thresholded at 0."""

    def __init__(self):
        # coef[0] is the intercept; coef[1:] are the per-feature weights.
        # Must be assigned (e.g. by load_model) before predict() is called.
        self.coef = None

    def predict(self, X):
        """Return a 0/1 label for each feature row in X."""
        labels = []
        for sample in X:
            score = self.coef[0]
            for weight, value in zip(self.coef[1:], sample):
                score += weight * value
            labels.append(int(score >= 0))
        return labels

def load_model(path="lr_std.model"):
    """Load a LogisticRegressionSTD model from *path*.

    Accepts either a pickled LogisticRegressionSTD instance, or a bare
    coefficient sequence (list — generalized here to also allow tuple)
    saved directly, in which case a fresh model is wrapped around it.

    Returns the model, or None on any load failure (best-effort: the
    caller decides whether a missing model is fatal).
    """
    try:
        obj = joblib.load(path)
        if isinstance(obj, LogisticRegressionSTD):
            return obj
        # Generalized: accept tuple as well as list of raw coefficients.
        if isinstance(obj, (list, tuple)):
            m = LogisticRegressionSTD()
            m.coef = list(obj)
            return m
        raise ValueError("不支持的模型对象类型！")
    except Exception as e:
        # Deliberate broad catch: any deserialization problem degrades to
        # a warning + None rather than crashing here.
        print(f"[警告] 无法加载 {path}: {e}")
        return None

# Fail fast: nothing below can run without a usable model.
if (model := load_model()) is None:
    raise RuntimeError("模型加载失败，退出！")

# ====== Hive connection ======
conn = hive.connect(host="hadoop01", port=10000, database="gd_qg", auth="NONE")
cur = conn.cursor()

# ====== Hive session tuning ======
# Force jobs onto YARN (no local MR), raise task memory, and disable
# map-side joins to avoid OOM on the wide joins below.
for stmt in (
    "SET hive.exec.mode.local.auto=false",
    "SET mapreduce.map.memory.mb=4096",
    "SET mapreduce.reduce.memory.mb=4096",
    "SET hive.auto.convert.join=false",
):
    cur.execute(stmt)

# ====== Resolve the 31-day pull window ======
mode = "fixed"  # switch to "dynamic" to track the most recent 31 days
if mode == "dynamic":
    today = datetime.today()
    start_str, end_str = (
        (today - timedelta(days=30)).strftime("%Y-%m-%d"),
        today.strftime("%Y-%m-%d"),
    )
else:
    # Fixed window for reproducible runs.
    start_str, end_str = "2025-05-01", "2025-05-31"

print(f">>> 拉取数据范围: {start_str} ~ {end_str}")

# ====== Pull joined feature rows for the window ======
# One row per comment: ids, text/title lengths, three category ids, two
# user-profile aggregates, and the partition date dt (selected last).
# NOTE(review): the inner join on dws_product_profile_wide (alias `pr`)
# selects no columns — it acts purely as a filter, and can duplicate rows
# if (product_id, dt) is not unique there; confirm it is intentional.
# NOTE(review): start_str/end_str are spliced into the SQL text — safe only
# while they come from the trusted literals above, never from user input.
cur.execute(f"""
    SELECT c.data_id, c.user_id, c.product_id,
           length(c.comment_content) AS txt_len,
           length(c.comment_title) AS title_len,
           p.cate1_id, p.cate2_id, p.cate3_id,
           u.user_day30_cnt, u.user_pos_rate,
           c.dt
    FROM gd_qg.dwd_jd_comment_flat c
    JOIN gd_qg.dwd_jd_product_flat p 
         ON c.product_id=p.product_id AND c.dt=p.dt
    JOIN gd_qg.dws_user_profile_wide u 
         ON c.user_id=u.user_id AND c.dt=u.dt
    JOIN gd_qg.dws_product_profile_wide pr 
         ON c.product_id=pr.product_id AND c.dt=pr.dt
    WHERE c.dt BETWEEN '{start_str}' AND '{end_str}'
""")
# Materializes the full 31-day result set in memory — assumes it fits; TODO confirm.
rows = cur.fetchall()

# ====== Python 端预测 ======
def to_float(x):
    """Coerce *x* to float; None and unparseable values map to 0.0.

    Used to sanitize nullable/dirty Hive columns before prediction.
    """
    if x is None:
        return 0.0
    try:
        return float(x)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures fall back;
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        return 0.0

# ====== Local prediction over the fetched rows ======
pred_records = []
for row_no, row in enumerate(rows):
    # Columns 3..9 are the numeric features, in model order:
    # txt_len, title_len, cate1_id, cate2_id, cate3_id,
    # user_day30_cnt, user_pos_rate.
    features = [to_float(v) for v in row[3:10]]

    # Debug aid: show the first five feature vectors.
    if row_no < 5:
        print(f"[调试] features[{row_no}] = {features}")

    label = model.predict([features])[0]
    # Tuple layout: (data_id, user_id, product_id, prediction, dt) — dt last.
    pred_records.append((row[0], row[1], row[2], label, row[10]))

# ====== Write predictions back to Hive, one partition per dt ======
# BUG FIX: the original duplicated this whole grouping + write section
# *inside* the outer partition loop, so every partition's INSERT OVERWRITE
# ran once per partition (N^2 Hive jobs). Flattened to a single pass.
partitioned = defaultdict(list)
for rec in pred_records:
    partitioned[rec[4]].append(rec)  # rec[4] is dt

cur.execute("SET hive.exec.dynamic.partition=true")
cur.execute("SET hive.exec.dynamic.partition.mode=nonstrict")

for dt, recs in partitioned.items():
    print(f">>> 批量回写 {dt} 共 {len(recs)} 条记录")

    # Build one INSERT ... SELECT per partition via UNION ALL.
    # NOTE(review): values are spliced into SQL text; a single quote inside
    # data_id/product_id would break the statement — confirm IDs are clean
    # or switch to parameterized inserts.
    select_list = [
        f"SELECT '{r[0]}' AS data_id, {r[1]} AS user_id, "
        f"'{r[2]}' AS product_id, {r[3]} AS rating_pred"
        for r in recs
    ]
    union_sql = " UNION ALL ".join(select_list)

    insert_sql = f"""
        INSERT OVERWRITE TABLE gd_qg.dws_comment_pred PARTITION (dt='{dt}')
        {union_sql}
    """
    cur.execute(insert_sql)
    conn.commit()

# ====== Compute per-day "micro-F1" inside Hive ======
# NOTE(review): the formula is exact-match accuracy (correct/total per dt);
# for single-label classification this equals micro-F1 — confirm that is
# the intended definition, and that rating_true exists in dws_comment_pred.
cur.execute(f"""
    INSERT OVERWRITE TABLE gd_qg.dws_model_eval PARTITION (dt)
    SELECT 'lr_std' AS model_name,
           sum(if(rating_true=rating_pred,1,0))*1.0/count(*) AS micro_f1,
           dt
    FROM gd_qg.dws_comment_pred
    WHERE dt BETWEEN '{start_str}' AND '{end_str}'
    GROUP BY dt
""")
conn.commit()

# ====== Publish predictions to the ADS submission table ======
cur.execute(f"""
    INSERT OVERWRITE TABLE gd_qg.ads_comment_pred_submit PARTITION (dt)
    SELECT data_id, rating_pred, dt
    FROM gd_qg.dws_comment_pred
    WHERE dt BETWEEN '{start_str}' AND '{end_str}'
""")
conn.commit()

# ====== Publish per-day micro-F1 to the ADS submission table ======
cur.execute(f"""
    INSERT OVERWRITE TABLE gd_qg.ads_micro_f1_submit PARTITION (dt)
    SELECT micro_f1, dt
    FROM gd_qg.dws_model_eval
    WHERE dt BETWEEN '{start_str}' AND '{end_str}'
""")
conn.commit()

# NOTE(review): cur and conn are never closed — consider cur.close();
# conn.close() (or contextlib.closing) at the end of the script.
print(">>> ✅ 31 天预测 + Micro-F1 已完成 Hive 回写！")
