from datetime import datetime

import joblib
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import  cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sqlalchemy import create_engine


from db_config import user, password, host, port, database

# Fetch labeled user training data as of a reference date
def get_training_data(ref_date):
    """Load a labeled training snapshot of per-user features as of ``ref_date``.

    Builds one row per sufficiently-active user (>= 7 active days, registered
    before ``ref_date``) with lifetime totals, recent-7d vs previous-7d
    activity, a lifecycle segment, and the label ``is_churn_next_30d``
    (1 when the user has no activity rows in the 30 days after ``ref_date``).

    Args:
        ref_date: Snapshot date string in strict ``YYYY-MM-DD`` form.

    Returns:
        pandas.DataFrame with one row per (platform_id, platform).

    Raises:
        ValueError: if ``ref_date`` is not a valid ``YYYY-MM-DD`` date.
    """
    # ref_date is interpolated directly into the SQL text below, so validate
    # it strictly first — this rejects malformed input and blocks SQL
    # injection through this parameter.
    datetime.strptime(ref_date, '%Y-%m-%d')

    engine = create_engine(f'mysql+pymysql://{user}:{password}@{host}:{port}/{database}')

    sql = f"""
    WITH user_activity_days AS (
        SELECT platform_id, platform, COUNT(DISTINCT create_date) AS active_days
        FROM tra_user_date_summary
        WHERE create_date < '{ref_date}' AND platform_id > 0
        GROUP BY platform_id, platform
    )

    SELECT 
        u.platform_id,
        u.platform,
        u.view_count AS total_views,
        IFNULL(u.stay_ms / NULLIF(u.view_count, 0), 0) AS avg_stay_ms_per_view,
        u.comment_count,
        u.avg_score,
        u.reward_total,
        u.sales_amount,
        u.contact_count,
        u.share_count,
        IFNULL(recent.view_count, 0) AS recent_7d_views,
        IFNULL(recent.stay_ms, 0) AS recent_7d_stay_ms,
        IFNULL(recent.comment_count, 0) AS recent_7d_comments,
        IFNULL(old.view_count, 0) AS previous_7d_views,
        IFNULL(old.stay_ms, 0) AS previous_7d_stay_ms,
        DATEDIFF('{ref_date}', u.create_time) AS days_since_register,
        a.active_days,
        CASE 
            WHEN DATEDIFF('{ref_date}', u.create_time) <= 14 AND a.active_days <= 3 THEN 'cold_start'
            WHEN (DATEDIFF('{ref_date}', u.create_time) BETWEEN 15 AND 60 OR a.active_days BETWEEN 4 AND 20) THEN 'growth'
            WHEN DATEDIFF('{ref_date}', u.create_time) > 60 AND a.active_days >= 21 THEN 'mature'
            ELSE 'unknown'
        END AS user_lifecycle_segment,
        CASE WHEN NOT EXISTS (
            SELECT 1 FROM tra_user_date_summary 
            WHERE platform_id = u.platform_id 
              AND platform = u.platform
              AND create_date BETWEEN DATE_ADD('{ref_date}', INTERVAL 1 DAY)
                                  AND DATE_ADD('{ref_date}', INTERVAL 30 DAY)
        ) THEN 1 ELSE 0 END AS is_churn_next_30d

    FROM tra_user_summary u

    JOIN user_activity_days a 
      ON u.platform_id = a.platform_id AND u.platform = a.platform

    LEFT JOIN (
        SELECT platform_id, platform, 
               SUM(view_count) AS view_count, 
               SUM(stay_ms) AS stay_ms,
               SUM(comment_count) AS comment_count
        FROM tra_user_date_summary
        WHERE create_date BETWEEN DATE_SUB('{ref_date}', INTERVAL 6 DAY) AND '{ref_date}'
        GROUP BY platform_id, platform
    ) recent ON u.platform_id = recent.platform_id AND u.platform = recent.platform

    LEFT JOIN (
        SELECT platform_id, platform, 
               SUM(view_count) AS view_count, 
               SUM(stay_ms) AS stay_ms
        FROM tra_user_date_summary
        WHERE create_date BETWEEN DATE_SUB('{ref_date}', INTERVAL 13 DAY) 
                              AND DATE_SUB('{ref_date}', INTERVAL 7 DAY)
        GROUP BY platform_id, platform
    ) old ON u.platform_id = old.platform_id AND u.platform = old.platform

    WHERE a.active_days >= 7 and u.create_time < '{ref_date}';
    """

    try:
        return pd.read_sql(sql, engine)
    finally:
        # Release the SQLAlchemy connection pool even if the query fails.
        engine.dispose()


def train_and_save_model(df, model_path='data/logistic_model.pkl'):
    """Pick the best churn classifier by cross-validated F1 and persist it.

    Args:
        df: Training snapshot (e.g. from ``get_training_data``) containing the
            feature columns listed below plus the ``is_churn_next_30d`` label.
        model_path: Destination path for the fitted pipeline; any missing
            parent directories are created before saving.

    Returns:
        The best-scoring sklearn Pipeline, refit on the full training set.
    """
    from pathlib import Path  # stdlib; used to ensure the output dir exists

    features = [
        'total_views', 'avg_stay_ms_per_view', 'comment_count', 'avg_score',
        'reward_total', 'sales_amount', 'contact_count', 'share_count',
        'recent_7d_views', 'recent_7d_stay_ms', 'recent_7d_comments',
        'previous_7d_views', 'previous_7d_stay_ms', 'days_since_register', 'active_days'
    ]
    X = df[features]
    y = df['is_churn_next_30d']
    print("训练特征:\n", features)
    print("训练集流失标签分布：\n", y.value_counts(normalize=True))

    # Candidate pipelines. Imputation comes first so both models tolerate
    # missing values; class_weight='balanced' compensates for the skewed
    # churn/no-churn label distribution.
    pipelines = {
        "Logistic + mean impute": make_pipeline(
            SimpleImputer(strategy='mean'),
            StandardScaler(),
            LogisticRegression(max_iter=1000, class_weight='balanced')
        ),
        "RandomForest": make_pipeline(
            SimpleImputer(strategy='mean'),
            # random_state pins the forest so model selection (and the saved
            # artifact) is reproducible run-to-run.
            RandomForestClassifier(n_estimators=100, class_weight='balanced',
                                   random_state=42)
        )
    }

    best_name = None
    best_score = -1.0
    best_pipeline = None

    for name, pipe in pipelines.items():
        scores = cross_val_score(pipe, X, y, cv=3, scoring='f1')
        mean_score = scores.mean()
        print(f"{name} 的平均 F1 分数: {mean_score:.4f}")
        if mean_score > best_score:
            best_score = mean_score
            best_pipeline = pipe
            best_name = name

    print(f"选择表现最好的模型：{best_name}，平均 F1 分数为 {best_score:.4f}")

    # Refit the winning pipeline on the entire training set.
    best_pipeline.fit(X, y)

    # Create the target directory ('data/' by default) if it does not exist;
    # joblib.dump would otherwise fail with FileNotFoundError.
    Path(model_path).parent.mkdir(parents=True, exist_ok=True)
    joblib.dump(best_pipeline, model_path)
    print(f"模型已保存至 {model_path}")

    return best_pipeline

# Script entry point: build the labeled snapshot, then fit and persist the model.
if __name__ == '__main__':
    training_df = get_training_data("2025-01-01")
    trained_model = train_and_save_model(training_df)

