import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import xgboost as xgb
from pyecharts.charts import Radar
from sklearn.model_selection import train_test_split

class HighValueUserAnalyzer:
    """RFM-style high-value user analysis pipeline.

    Provides feature engineering (R/F/M + activity ratio + device diversity),
    KMeans clustering, an XGBoost high-value classifier, and a per-cluster
    radar-chart profile.

    The input DataFrame must contain the columns:
        last_login, registration_date, purchase_history, login_history, income
    """

    def __init__(self, ddf, analysis_date='2025-04-20'):
        """Initialize the analyzer with a tz-naive reference date.

        :param ddf: input DataFrame; must contain the required columns above.
        :param analysis_date: reference date for recency/tenure calculations.
            Localized to tz-naive so it can be subtracted from the (also
            tz-naive) timestamps produced by :meth:`_convert_datetime`.
        :raises KeyError: if any required column is missing.
        """
        self.required_columns = ['last_login', 'registration_date', 'purchase_history', 'login_history', 'income']
        self._validate_columns(ddf)

        # Work on a copy so the caller's DataFrame is never mutated.
        self.ddf = ddf.copy()
        # Keep the reference date tz-naive to match _convert_datetime output;
        # mixing naive and aware timestamps would raise on subtraction.
        self.analysis_date = pd.to_datetime(analysis_date).tz_localize(None)
        self.feature_cols = []

    def _validate_columns(self, df):
        """Raise KeyError listing missing required columns plus likely candidates.

        The candidate list surfaces columns whose names contain the usual
        keywords, to help the caller spot a simple naming mismatch.
        """
        missing = set(self.required_columns) - set(df.columns)
        if missing:
            keywords = ('login', 'purchase', 'regist', 'income')
            suggest = [col for col in df.columns
                       if any(kw in col.lower() for kw in keywords)]
            raise KeyError(f"Missing required columns: {missing}\nCandidate columns: {suggest}")

    def _convert_datetime(self, series, timezone='UTC'):
        """Parse *series* as UTC timestamps, convert to *timezone*, drop tz info.

        Returning tz-naive values keeps all datetime arithmetic in this class
        on a single (naive) footing.
        """
        return (
            pd.to_datetime(series, utc=True)
            .dt.tz_convert(timezone)
            .dt.tz_localize(None)
        )

    @staticmethod
    def _parse_device(history):
        """Count distinct device platforms (iOS/Android) in a login-history string.

        Unparseable input yields 0 rather than propagating an error.
        """
        try:
            devices = set()
            for record in str(history).split('|'):
                if 'iPhone' in record:
                    devices.add('iOS')
                elif 'Android' in record:
                    devices.add('Android')
            return len(devices)
        except Exception:
            return 0

    def feature_engineering(self):
        """Build the R/F/M/L/device feature matrix.

        Adds the feature columns to ``self.ddf`` and returns the subset
        DataFrame ``self.ddf[self.feature_cols]``.

        :raises ValueError: wrapping any failure during feature construction.
        """
        try:
            # Timestamp columns normalized to tz-naive for safe arithmetic.
            self.ddf['last_active'] = self._convert_datetime(self.ddf['last_login'])
            self.ddf['reg_date'] = self._convert_datetime(self.ddf['registration_date'])

            # Recency and tenure in whole days (both operands tz-naive).
            self.ddf['R_days'] = (self.analysis_date - self.ddf['last_active']).dt.days
            self.ddf['reg_days'] = (self.analysis_date - self.ddf['reg_date']).dt.days

            # Frequency: count non-empty ';'-separated purchase records.
            # Filtering empty tokens avoids counting '' as one purchase.
            self.ddf['F_count'] = self.ddf['purchase_history'].apply(
                lambda x: len([p for p in str(x).split(';') if p]) if pd.notnull(x) else 0
            )

            # Monetary: each user's own income. BUG FIX: the original used
            # .cumsum(), which produced a running total over row order instead
            # of a per-user value, corrupting clustering and the high-value label.
            self.ddf['M_total'] = self.ddf['income'].fillna(0)

            # Activity: count non-empty '|'-separated login records.
            self.ddf['active_days'] = self.ddf['login_history'].apply(
                lambda x: len([r for r in str(x).split('|') if r]) if pd.notnull(x) else 0
            )
            # Guard against division by zero for same-day registrations.
            self.ddf['L_ratio'] = self.ddf['active_days'] / self.ddf['reg_days'].replace(0, 1e-6)

            # Device diversity (distinct platforms seen in login history).
            self.ddf['device_types'] = self.ddf['login_history'].apply(self._parse_device)

            self.feature_cols = ['R_days', 'F_count', 'M_total', 'L_ratio', 'device_types']
            return self.ddf[self.feature_cols]

        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise ValueError(f"特征工程失败: {str(e)}") from e

    def cluster_users(self, n_clusters=5):
        """Standardize the features and assign KMeans cluster labels.

        :param n_clusters: number of clusters.
        :return: ``self.ddf`` with a new 'cluster' column.
        """
        features = self.feature_engineering()
        X_scaled = StandardScaler().fit_transform(features)

        # n_init pinned explicitly: the sklearn default changed to 'auto' in
        # 1.4; 10 reproduces the long-standing behavior deterministically.
        kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=42)
        self.ddf['cluster'] = kmeans.fit_predict(X_scaled)
        return self.ddf

    def train_prediction_model(self, test_size=0.2):
        """Train an XGBoost high-value classifier with early stopping.

        "High value" is defined as the top 20% of users by M_total.

        :param test_size: held-out fraction for the evaluation set.
        :return: the trained xgboost Booster (also stored as ``self.model``).
        """
        self.feature_engineering()

        # Label: top 20% by monetary value.
        self.ddf['high_value'] = (
            self.ddf['M_total'] >= self.ddf['M_total'].quantile(0.8)
        ).astype(int)

        # random_state added for reproducible splits — the model params were
        # already seeded but the split was not, making runs irreproducible.
        X_train, X_test, y_train, y_test = train_test_split(
            self.ddf[self.feature_cols],
            self.ddf['high_value'],
            test_size=test_size,
            stratify=self.ddf['high_value'],
            random_state=42
        )

        params = {
            'objective': 'binary:logistic',
            'eval_metric': 'auc',
            'max_depth': 5,
            'learning_rate': 0.1,
            'subsample': 0.8,
            'colsample_bytree': 0.7,
            'gamma': 0.5,
            'random_state': 42
        }

        dtrain = xgb.DMatrix(X_train, label=y_train)
        dtest = xgb.DMatrix(X_test, label=y_test)

        # Early stopping on the held-out AUC caps the 1000 boosting rounds.
        self.model = xgb.train(
            params, dtrain,
            num_boost_round=1000,
            early_stopping_rounds=50,
            evals=[(dtest, 'test')],
            verbose_eval=False
        )
        return self.model

    def generate_radar_chart(self, output_path='cluster_radar.html'):
        """Render a radar chart of per-cluster feature means to an HTML file.

        :param output_path: destination path for the rendered chart.
        :raises KeyError: if :meth:`cluster_users` has not been run yet.
        """
        if 'cluster' not in self.ddf.columns:
            raise KeyError("'cluster' column missing - call cluster_users() before generating the radar chart")

        cluster_profile = self.ddf.groupby('cluster')[self.feature_cols].mean()

        # float() strips numpy scalar types, which pyecharts' JSON
        # serialization may not handle.
        radar_data = [
            {'name': f'Cluster {idx}', 'value': [float(v) for v in row]}
            for idx, row in cluster_profile.iterrows()
        ]

        radar = (
            Radar()
            .add_schema(schema=[
                # Dynamic axis range with 20% headroom per feature.
                {"name": col, "max": float(cluster_profile[col].max()) * 1.2}
                for col in self.feature_cols
            ])
            .add("", radar_data)
        )
        radar.render(output_path)