# predict_to_hive_overwrite.py
#
# Score one month of JD comment data with a pre-trained logistic-regression
# weight vector and write the predicted labels back into a Hive table,
# one partition per day.
import joblib, math
from pyhive import hive

# Load model weights. Layout used below (see predict): coef[0] is the
# intercept, coef[1:] are per-feature weights — TODO confirm against the
# training script that produced lr_std.model.
coef = joblib.load('lr_std.model')

# Hive connection (unauthenticated) to the gd_qg database
conn = hive.connect(host='hadoop01', port=10000, database='gd_qg', auth='NONE')
cur = conn.cursor()

# Sigmoid scoring bucketed into discrete labels
def predict(row):
    """Score one feature row with the loaded logistic-regression weights.

    Computes z = intercept + w . row (coef[0] is the intercept, coef[1:]
    the feature weights), squashes z through a numerically stable sigmoid,
    then maps the probability onto a label by fixed thresholds.

    FIX: the naive 1/(1 + exp(-z)) raises OverflowError once -z exceeds
    ~709; the two-branch form below is mathematically identical but never
    exponentiates a positive argument.

    NOTE(review): the buckets return 5/4/3/2/0 — label 1 is never
    produced. If these are meant to be 1-5 ratings, the final branch
    probably should return 1; confirm intent before changing behavior.

    :param row: iterable of numeric feature values, same order/length as
                coef[1:] (extra features are silently ignored by zip)
    :return: int label in {0, 2, 3, 4, 5}
    """
    z = coef[0] + sum(c * v for c, v in zip(coef[1:], row))
    if z >= 0:
        prob = 1.0 / (1.0 + math.exp(-z))
    else:
        ez = math.exp(z)
        prob = ez / (1.0 + ez)
    # Threshold the probability into a label
    if prob >= 0.8:
        return 5
    elif prob >= 0.6:
        return 4
    elif prob >= 0.4:
        return 3
    elif prob >= 0.2:
        return 2
    else:
        return 0

# Safe conversion to float
def safe_float(x):
    """Convert *x* to float, returning 0.0 when conversion is impossible.

    FIX: catches only TypeError/ValueError — the two exceptions float()
    raises for bad input — instead of the original bare ``except:``, which
    also swallowed KeyboardInterrupt and SystemExit.

    :param x: any value (str, number, None, ...)
    :return: float(x), or 0.0 if x is not convertible
    """
    try:
        return float(x)
    except (TypeError, ValueError):
        return 0.0

# Batch size: keeps each INSERT statement's VALUES list bounded so the
# generated SQL does not grow unmanageably long.
BATCH_SIZE = 1000

def _sql_quote(s):
    """Escape embedded single quotes so id values cannot break the VALUES SQL."""
    return str(s).replace("'", "''")

# Loop over the 31 days of May 2025, predicting one partition per day
for dt in [f'2025-05-{i:02d}' for i in range(1, 32)]:
    print('>>> Predicting', dt)

    # Fetch the day's comments joined with the product / user-profile /
    # product-profile wide tables. dt is internally generated (not user
    # input), so interpolating it into the query is safe here.
    sql = f"""
        SELECT 
            c.data_id,
            c.user_id,
            c.product_id,
            length(c.comment_content) AS txt_len,
            length(c.comment_title)   AS title_len,
            p.cate1_id,
            p.cate2_id,
            p.cate3_id,
            u.user_day30_cnt,
            pr.pos_rate,
            c.rating
        FROM gd_qg.dwd_jd_comment_flat c
        JOIN gd_qg.dwd_jd_product_flat p ON c.product_id = p.product_id AND c.dt = p.dt
        JOIN gd_qg.dws_user_profile_wide u ON c.user_id = u.user_id AND c.dt = u.dt
        JOIN gd_qg.dws_product_profile_wide pr ON c.product_id = pr.product_id AND c.dt = pr.dt
        WHERE c.dt = '{dt}'
    """
    cur.execute(sql)
    rows = cur.fetchall()
    if not rows:
        continue

    # Build (data_id, user_id, product_id, pred_label, rating) tuples.
    preds = []
    for r in rows:
        # Columns 3..9: txt_len, title_len, cate1~3, user_day30_cnt, pos_rate
        features = [safe_float(r[i]) for i in range(3, 10)]
        preds.append((r[0], r[1], r[2], predict(features), r[10]))

    # Write back in batches.
    # BUG FIX: the original issued INSERT OVERWRITE for *every* batch, so
    # each batch wiped the partition and only the final <=BATCH_SIZE rows
    # survived. Overwrite the partition with the first batch only, then
    # append the remaining batches with INSERT INTO.
    for i in range(0, len(preds), BATCH_SIZE):
        batch = preds[i:i + BATCH_SIZE]
        values_sql = ",".join(
            # rating (r[4]) is interpolated unquoted — assumed numeric and
            # non-null, as in the original; TODO confirm against the schema.
            f"('{_sql_quote(r[0])}','{_sql_quote(r[1])}','{_sql_quote(r[2])}',{r[3]},{r[4]})"
            for r in batch
        )
        verb = "INSERT OVERWRITE TABLE" if i == 0 else "INSERT INTO TABLE"
        insert_sql = f"""
            {verb} gd_qg.dws_comment_pred PARTITION (dt='{dt}')
            VALUES {values_sql}
        """
        cur.execute(insert_sql)
        conn.commit()  # no-op for Hive (no transactions); kept for driver compatibility

# FIX: release the Hive session instead of leaking it at interpreter exit
cur.close()
conn.close()

print('>>> 31 天预测完成！')
