import pandas as pd
import numpy as np
from datetime import timedelta
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sns
import os
import json
from typing import Dict, List, Optional, Union
import logging

class RFMAnalyzer:
    """
    RFM analysis helper: computes Recency/Frequency/Monetary metrics,
    segments users with K-Means and builds user-portrait tag data.

    RFM is a customer-segmentation technique that scores each user on how
    recently they bought (Recency), how often they bought (Frequency) and
    how much they spent (Monetary), so targeted marketing strategies can be
    derived per segment.

    Expensive results are cached as CSV/JSON files under ``self.cache_dir``
    and transparently reloaded on subsequent calls.
    """

    # Keys of the dict produced by segment_rfm(); _load_cached_result relies
    # on this exact list to reassemble a dict-shaped cached result.
    _DICT_CACHE_KEYS = ('rfm_data', 'cluster_analysis', 'cluster_labels', 'strategies')

    def __init__(self, data_processor):
        """
        Args:
            data_processor: any object exposing a ``data`` DataFrame with at
                least the columns user_id, item_id, behavior_type, datetime.

        Raises:
            ValueError: when required columns are missing or 'datetime'
                cannot be parsed.
        """
        self.data_processor = data_processor
        self.data = data_processor.data
        self._validate_data()

        # One logger per analyzer. Assigning it here fixes the AttributeError
        # previously raised by get_user_list()/_filter_by_date_range(), which
        # used self.logger without it ever being set.
        if not logging.getLogger().hasHandlers():
            logging.basicConfig(
                level=logging.INFO,
                format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            )
        self.logger = logging.getLogger('rfm_analysis')
        self.logger.setLevel(logging.INFO)

        # Keep only purchase events; all RFM math works on this subset.
        self.purchase_data = self.data[self.data['behavior_type'] == 'buy'].copy()
        self._handle_price_data()
        self._handle_datetime_data()

        # Directory for cached intermediate results.
        self.cache_dir = 'cache'
        os.makedirs(self.cache_dir, exist_ok=True)

    def _validate_data(self):
        """Raise ValueError if the input DataFrame misses a required column."""
        required_cols = ['user_id', 'item_id', 'behavior_type', 'datetime']
        missing_cols = [col for col in required_cols if col not in self.data.columns]

        if missing_cols:
            raise ValueError(f"数据缺少必要的列: {', '.join(missing_cols)}")

    def _handle_price_data(self):
        """Synthesize a 'price' column when absent (same price per item_id)."""
        if 'price' not in self.purchase_data.columns:
            # NOTE(review): prices are random and unseeded, so they differ
            # between runs unless the derived results are cached.
            unique_items = self.purchase_data['item_id'].unique()
            item_prices = {item: np.random.uniform(10, 200) for item in unique_items}
            self.purchase_data['price'] = self.purchase_data['item_id'].map(item_prices)

    def _handle_datetime_data(self):
        """Coerce the 'datetime' column to datetime64, raising on failure."""
        if not pd.api.types.is_datetime64_any_dtype(self.purchase_data['datetime']):
            try:
                self.purchase_data['datetime'] = pd.to_datetime(self.purchase_data['datetime'])
            except Exception as e:
                raise ValueError(f"无法将'datetime'列转换为日期时间格式: {str(e)}")

    def _get_cache_path(self, func_name: str) -> str:
        """Return the cache-file path prefix for a given cache identifier."""
        return os.path.join(self.cache_dir, func_name)

    def _cache_result(self, func_name: str, result: Union[pd.DataFrame, Dict, List]) -> None:
        """
        Persist a computed result under the cache directory.

        DataFrames become CSV, dicts/lists become JSON, anything else is
        stringified; a dict result is exploded into one file per key.

        Args:
            func_name: cache identifier (usually the producing method name).
            result: the data to cache.
        """
        cache_path = self._get_cache_path(func_name)

        if isinstance(result, dict):
            for key, value in result.items():
                if isinstance(value, pd.DataFrame):
                    value.to_csv(f'{cache_path}_{key}.csv', index=False)
                elif isinstance(value, (dict, list)):
                    with open(f'{cache_path}_{key}.json', 'w', encoding='utf-8') as f:
                        json.dump(value, f, ensure_ascii=False, indent=2)
                else:
                    with open(f'{cache_path}_{key}.txt', 'w', encoding='utf-8') as f:
                        f.write(str(value))
        elif isinstance(result, pd.DataFrame):
            result.to_csv(f'{cache_path}.csv', index=False)
        elif isinstance(result, (dict, list)):
            with open(f'{cache_path}.json', 'w', encoding='utf-8') as f:
                json.dump(result, f, ensure_ascii=False, indent=2)

    def _load_cached_result(self, func_name: str) -> Optional[Union[pd.DataFrame, Dict, List]]:
        """
        Load a previously cached result.

        Args:
            func_name: cache identifier used when the result was stored.

        Returns:
            The cached data, or None when no complete cache exists.
        """
        cache_path = self._get_cache_path(func_name)

        # Dict-shaped cache (written by segment_rfm): detected via its
        # rfm_data CSV; only returned when every expected part is present.
        if os.path.exists(f'{cache_path}_rfm_data.csv'):
            result = {}
            for key in self._DICT_CACHE_KEYS:
                if key in ('rfm_data', 'cluster_analysis'):
                    file = f'{cache_path}_{key}.csv'
                    if not os.path.exists(file):
                        break
                    result[key] = pd.read_csv(file)
                    # Restore the datetime dtype lost by the CSV round trip.
                    if 'datetime' in result[key].columns:
                        result[key]['datetime'] = pd.to_datetime(result[key]['datetime'])
                else:
                    file = f'{cache_path}_{key}.json'
                    if not os.path.exists(file):
                        break
                    with open(file, 'r', encoding='utf-8') as f:
                        result[key] = json.load(f)
            else:
                # No break: every part was found.
                return result

        # Single-DataFrame cache.
        if os.path.exists(f'{cache_path}.csv'):
            df = pd.read_csv(f'{cache_path}.csv')
            if 'datetime' in df.columns:
                df['datetime'] = pd.to_datetime(df['datetime'])
            return df

        # Plain JSON cache.
        if os.path.exists(f'{cache_path}.json'):
            with open(f'{cache_path}.json', 'r', encoding='utf-8') as f:
                return json.load(f)

        return None

    def clear_cache(self) -> None:
        """Delete every file in the cache directory (best-effort)."""
        for filename in os.listdir(self.cache_dir):
            file_path = os.path.join(self.cache_dir, filename)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                print(f"清除缓存文件 {file_path} 时出错: {e}")

    def calculate_rfm(self, force_recalculate: bool = False) -> pd.DataFrame:
        """
        Compute RFM metrics and 1-5 scores for every buyer.

        Args:
            force_recalculate: when True the cache is ignored and everything
                is recomputed.

        Returns:
            DataFrame with user_id, Recency, Frequency, Monetary, the three
            per-metric scores and the combined RFM_Score (R*100 + F*10 + M).

        Raises:
            ValueError: when there is no purchase data to work with.
        """
        logger = self.logger
        logger.info("===== 开始计算RFM指标 =====")

        if force_recalculate:
            logger.info("强制重新计算RFM指标，忽略缓存")
        else:
            # Only touch the cache when we are actually allowed to use it.
            cached_result = self._load_cached_result('calculate_rfm')
            if cached_result is not None:
                logger.info("使用缓存的RFM计算结果")
                return cached_result
            logger.info("未找到有效缓存，将重新计算RFM指标")

        if self.purchase_data is None or self.purchase_data.empty:
            logger.error("购买数据为空，无法计算RFM指标")
            raise ValueError("购买数据为空，无法计算RFM指标")

        logger.info(f"使用购买数据计算RFM，数据量: {len(self.purchase_data)} 条记录")

        # Reference date = day after the latest purchase, so the most recent
        # buyer gets Recency == 1 rather than 0.
        try:
            max_date = self.purchase_data['datetime'].max()
            current_date = max_date + timedelta(days=1)
            logger.info(f"计算基准日期: {current_date} (最大购买日期: {max_date})")
        except KeyError as e:
            logger.error(f"购买数据中缺少'datetime'列: {str(e)}")
            raise
        except Exception as e:
            logger.error(f"计算基准日期时发生错误: {str(e)}")
            raise

        # One row per user: days since last purchase, purchase count, spend.
        try:
            rfm = self.purchase_data.groupby('user_id').agg({
                'datetime': lambda x: (current_date - x.max()).days,  # Recency
                'item_id': 'count',  # Frequency
                'price': 'sum'  # Monetary
            }).reset_index()

            rfm.columns = ['user_id', 'Recency', 'Frequency', 'Monetary']
            logger.info(f"RFM指标计算完成，共 {len(rfm)} 个用户")
        except Exception as e:
            logger.error(f"计算RFM指标时发生错误: {str(e)}")
            raise

        # Log the raw distribution of each metric (R/F as integers, M with
        # two decimals, matching the original report format).
        logger.info("\n===== RFM原始数据分布统计 =====")
        self._log_distribution(rfm['Recency'], "Recency(最近购买天数)分布:", 'Recency', '.0f')
        self._log_distribution(rfm['Frequency'], "\nFrequency(购买频率)分布:", 'Frequency', '.0f')
        self._log_distribution(rfm['Monetary'], "\nMonetary(消费金额)分布:", 'Monetary', '.2f')

        # Score each metric on a 1-5 scale. Recency scores are reversed
        # (smaller = better); Frequency/Monetary score ascending.
        for col, source_col, ascending, lead in (
            ('R_Score', 'Recency', False, '\n'),
            ('F_Score', 'Frequency', True, ''),
            ('M_Score', 'Monetary', True, ''),
        ):
            letter = col[0]
            try:
                rfm[col] = self._calculate_rfm_score(rfm[source_col], ascending=ascending, q=5)
                logger.info(f"{lead}{letter}得分计算完成，值范围: {rfm[col].min()} - {rfm[col].max()}")
            except Exception as e:
                logger.error(f"计算{letter}得分时发生错误: {str(e)}")
                raise

        # Combine into a single 3-digit score, e.g. R=5,F=3,M=1 -> 531.
        try:
            rfm['R_Score'] = rfm['R_Score'].astype(int)
            rfm['F_Score'] = rfm['F_Score'].astype(int)
            rfm['M_Score'] = rfm['M_Score'].astype(int)
            rfm['RFM_Score'] = rfm['R_Score'] * 100 + rfm['F_Score'] * 10 + rfm['M_Score']
            logger.info(f"RFM组合得分计算完成，值范围: {rfm['RFM_Score'].min()} - {rfm['RFM_Score'].max()}")
        except Exception as e:
            logger.error(f"计算RFM组合得分时发生错误: {str(e)}")
            raise

        # Caching is best-effort: a failure must not lose the computation.
        try:
            self._cache_result('calculate_rfm', rfm)
            logger.info("RFM计算结果已缓存")
        except Exception as e:
            logger.warning(f"缓存RFM结果时发生错误: {str(e)}")

        logger.info("===== RFM指标计算完成 =====")
        return rfm

    def _log_distribution(self, series: pd.Series, title: str,
                          metric_name: str, value_fmt: str) -> None:
        """
        Log describe() statistics for one RFM metric.

        Args:
            series: the metric values.
            title: exact heading line to log (may carry a leading newline).
            metric_name: metric name used in the error message.
            value_fmt: format spec for min/max/median (mean/std are always .2f).
        """
        logger = self.logger
        try:
            stats = series.describe()
            logger.info(title)
            logger.info(f"  样本数: {stats['count']:.0f}")
            logger.info(f"  最小值: {stats['min']:{value_fmt}}")
            logger.info(f"  最大值: {stats['max']:{value_fmt}}")
            logger.info(f"  平均值: {stats['mean']:.2f}")
            logger.info(f"  中位数: {stats['50%']:{value_fmt}}")
            logger.info(f"  标准差: {stats['std']:.2f}")
        except Exception as e:
            logger.error(f"计算{metric_name}分布时发生错误: {str(e)}")

    def _calculate_rfm_score(self, series: pd.Series, ascending: bool, q: int) -> pd.Series:
        """
        Bin a metric into 1..q scores via quantiles.

        Args:
            series: values to score.
            ascending: True when larger values deserve higher scores.
            q: requested number of bins (capped at the number of distinct values).

        Returns:
            Categorical Series of integer-like score labels.
        """
        def qcut_scores(bins: int) -> pd.Series:
            labels = list(range(1, bins + 1))
            if not ascending:
                labels = labels[::-1]  # reversed labels: small value -> high score
            # rank(method='first') breaks ties so qcut never sees duplicates.
            return pd.qcut(
                series.rank(method='first'),
                bins,
                labels=labels,
                duplicates='drop'
            )

        q = min(q, series.nunique())
        try:
            return qcut_scores(q)
        except ValueError:
            # Binning failed: fall back to a coarser 3-way (or fewer) split.
            return qcut_scores(min(3, series.nunique()))

    def segment_rfm(self, n_clusters: int = 3) -> Dict:
        """
        Segment users with K-Means on the standardized RFM features.

        Args:
            n_clusters: number of clusters (must be an int >= 2).

        Returns:
            dict with:
            - rfm_data: RFM table with 'Cluster' and '用户标签' columns
            - cluster_analysis: per-cluster mean/std/size statistics
            - cluster_labels: cluster id -> human-readable label
            - strategies: label -> suggested marketing action

        Raises:
            ValueError: for an invalid n_clusters.
        """
        if not isinstance(n_clusters, int) or n_clusters < 2:
            raise ValueError("聚类数量必须是大于等于2的整数")

        cache_key = f'segment_rfm_{n_clusters}'
        cached_result = self._load_cached_result(cache_key)
        if cached_result is not None:
            return cached_result

        rfm_data = self.calculate_rfm()

        # Z-score standardization so no single metric dominates the distance.
        features = rfm_data[['Recency', 'Frequency', 'Monetary']]
        features_normalized = (features - features.mean()) / features.std()

        kmeans = KMeans(
            n_clusters=n_clusters,
            random_state=42,
            n_init=10,  # multiple restarts for a stable clustering
            max_iter=300
        )
        rfm_data['Cluster'] = kmeans.fit_predict(features_normalized)

        # Per-cluster statistics.
        cluster_analysis = rfm_data.groupby('Cluster').agg({
            'Recency': ['mean', 'std'],
            'Frequency': ['mean', 'std'],
            'Monetary': ['mean', 'std'],
            'user_id': 'count'
        }).round(2)

        cluster_analysis.columns = [
            'Recency_mean', 'Recency_std',
            'Frequency_mean', 'Frequency_std',
            'Monetary_mean', 'Monetary_std',
            '用户数量'
        ]

        # Segment share of total users.
        total = cluster_analysis['用户数量'].sum()
        cluster_analysis['占比'] = (cluster_analysis['用户数量'] / total).round(4)

        # Human-readable labels ordered by cluster value.
        cluster_labels = self._generate_cluster_labels(cluster_analysis, n_clusters)
        rfm_data['用户标签'] = rfm_data['Cluster'].map(cluster_labels)

        strategies = self._generate_strategies(list(cluster_labels.values()))

        result = {
            'rfm_data': rfm_data,
            'cluster_analysis': cluster_analysis,
            'cluster_labels': cluster_labels,
            'strategies': strategies
        }

        self._cache_result(cache_key, result)
        return result

    def _generate_cluster_labels(self, cluster_analysis: pd.DataFrame, n_clusters: int) -> Dict:
        """Map each cluster id to a label, ranked by customer value."""
        # Rank clusters by value: recent buyers first, then frequent buyers.
        sorted_clusters = cluster_analysis.sort_values(
            by=['Recency_mean', 'Frequency_mean'],
            ascending=[True, False]
        ).index

        # Label sets tuned per cluster count; generic names beyond 5.
        if n_clusters == 2:
            labels = ['高价值客户', '低价值客户']
        elif n_clusters == 3:
            labels = ['高价值客户', '中价值客户', '低价值客户']
        elif n_clusters == 4:
            labels = ['高价值客户', '潜力客户', '一般价值客户', '低价值客户']
        elif n_clusters == 5:
            labels = ['高价值客户', '重要发展客户', '重要保持客户', '一般价值客户', '低价值客户']
        else:
            labels = [f'客户群{i + 1}' for i in range(n_clusters)]

        # int() keeps keys JSON-serializable: the index holds numpy ints,
        # which json.dump rejects as object keys. Plain ints still match the
        # numpy values in the 'Cluster' column when used with Series.map.
        return {int(cluster_id): labels[i] for i, cluster_id in enumerate(sorted_clusters)}

    def _generate_strategies(self, labels: List[str]) -> Dict:
        """Return a marketing-strategy suggestion for each segment label."""
        base_strategies = {
            '高价值客户': '提供VIP服务、专属优惠，保持互动，提高忠诚度',
            '中价值客户': '通过促销活动提高购买频率，推荐相关产品',
            '低价值客户': '发送唤醒优惠券，推送热门产品，尝试重新激活',
            '潜力客户': '提供入门优惠，引导增加购买频率',
            '重要发展客户': '提升客户满意度，增加互动频率',
            '重要保持客户': '防止流失，提供会员福利',
            '一般价值客户': '适当促销，提高购买金额'
        }

        # Unknown labels (generic cluster names) get a generic strategy.
        return {
            label: base_strategies.get(label, '根据客户行为特征制定个性化营销策略')
            for label in labels
        }

    def get_user_segments(self, n_clusters: int = 3) -> pd.DataFrame:
        """
        Build a per-segment summary table.

        Args:
            n_clusters: number of clusters.

        Returns:
            DataFrame with one row per segment: label, size, share, price
            sensitivity, activity and strategy suggestion.
        """
        cache_key = f'get_user_segments_{n_clusters}'
        cached_result = self._load_cached_result(cache_key)
        if cached_result is not None:
            return cached_result

        segment_result = self.segment_rfm(n_clusters=n_clusters)
        rfm_data = segment_result['rfm_data']
        strategies = segment_result['strategies']

        # Portrait metrics (price sensitivity / activity) per segment.
        segment_metrics = rfm_data.groupby('用户标签').apply(
            self._calculate_segment_portrait
        ).reset_index()
        segment_metrics.columns = ['用户标签', '价格敏感度', '活跃度']

        # Segment sizes and shares.
        segment_counts = rfm_data['用户标签'].value_counts().reset_index()
        segment_counts.columns = ['用户标签', '用户数量']
        total = segment_counts['用户数量'].sum()
        segment_counts['占比'] = (segment_counts['用户数量'] / total).round(4)

        segments = pd.merge(segment_counts, segment_metrics, on='用户标签')
        segments['策略建议'] = segments['用户标签'].map(strategies)

        self._cache_result(cache_key, segments)
        return segments

    def _calculate_segment_portrait(self, group: pd.DataFrame) -> pd.Series:
        """
        Compute portrait features for one user segment.

        Args:
            group: the RFM rows belonging to a single segment.

        Returns:
            Series with price sensitivity (1 - segment avg price / overall
            avg price) and activity (segment purchase rate / overall rate).
        """
        user_ids = group['user_id'].unique()
        user_data = self.purchase_data[self.purchase_data['user_id'].isin(user_ids)]

        # Price sensitivity: cheaper-than-average baskets -> more sensitive.
        avg_price = user_data['price'].mean()
        overall_avg_price = self.purchase_data['price'].mean()
        if overall_avg_price == 0:
            price_sensitivity = 0.0  # guard against division by zero
        else:
            price_sensitivity = 1 - (avg_price / overall_avg_price)

        # Activity: segment purchase rate relative to the overall rate.
        activity_score = self._calculate_purchase_rate(user_data)
        overall_purchase_rate = self._calculate_overall_purchase_rate()
        if overall_purchase_rate <= 0:
            activity = 0.0  # guard against division by zero
        else:
            activity = activity_score / overall_purchase_rate

        return pd.Series({
            '价格敏感度': round(price_sensitivity, 4),
            '活跃度': round(activity, 4)
        })

    def _calculate_purchase_rate(self, data: pd.DataFrame) -> float:
        """
        Average purchases-per-active-day over users with 2+ purchases.

        Returns 0.0 when no user has more than one purchase.
        """
        spans = data.groupby('user_id')['datetime'].agg(['min', 'max', 'count'])
        # A rate needs at least two purchases; drop one-off buyers.
        spans = spans[spans['count'] > 1]

        if len(spans) == 0:
            return 0.0

        spans['days_active'] = (spans['max'] - spans['min']).dt.days
        # Same-day repeat purchases would give a 0-day span; floor at 1 day.
        spans['days_active'] = spans['days_active'].replace(0, 1)
        spans['purchase_rate'] = spans['count'] / spans['days_active']

        return spans['purchase_rate'].mean()

    def _calculate_overall_purchase_rate(self) -> float:
        """
        Overall average purchase rate across all buyers.

        Returns 0.1 instead of 0 when no repeat buyers exist, so callers
        dividing by this value never hit zero.
        """
        rate = self._calculate_purchase_rate(self.purchase_data)
        return rate if rate > 0 else 0.1

    def analyze_user_portrait(self, n_clusters: int = 3) -> Dict:
        """
        Build user-portrait tag data for a front-end tag cloud.

        Args:
            n_clusters: number of clusters.

        Returns:
            dict with 'price_tags', 'activity_tags' and their concatenation
            under 'all_tags'; each tag is {'text': ..., 'size': ...}.
        """
        cache_key = f'analyze_user_portrait_{n_clusters}'
        cached_result = self._load_cached_result(cache_key)
        if cached_result is not None:
            return cached_result

        segments = self.get_user_segments(n_clusters)

        price_tags = self._generate_sensitivity_tags(segments)
        activity_tags = self._generate_activity_tags(segments)

        result = {
            'price_tags': price_tags,
            'activity_tags': activity_tags,
            'all_tags': price_tags + activity_tags
        }

        self._cache_result(cache_key, result)
        return result

    def _build_tags(self, segments: pd.DataFrame, value_col: str, describe) -> List[Dict]:
        """
        Turn one segment metric into tag-cloud entries.

        Args:
            segments: per-segment summary table.
            value_col: column whose value is classified into tag text.
            describe: callable mapping the metric value to a description.
        """
        tags = []
        for _, row in segments.iterrows():
            tag_text = f"{row['用户标签']}: {describe(row[value_col])}"
            # Tag size scales with segment size, clamped to [10, 30].
            size = min(10 + row['用户数量'] * 0.01, 30)
            tags.append({'text': tag_text, 'size': round(size, 2)})
        return tags

    def _generate_sensitivity_tags(self, segments: pd.DataFrame) -> List[Dict]:
        """Tag-cloud entries classifying each segment's price sensitivity."""
        def describe(sensitivity):
            if sensitivity < 0.3:
                return '价格不敏感'
            elif sensitivity < 0.7:
                return '价格中等敏感'
            return '价格敏感'

        return self._build_tags(segments, '价格敏感度', describe)

    def _generate_activity_tags(self, segments: pd.DataFrame) -> List[Dict]:
        """Tag-cloud entries classifying each segment's activity level."""
        def describe(activity):
            if activity < 0.5:
                return '低活跃度'
            elif activity < 1.5:
                return '中等活跃度'
            return '高活跃度'

        return self._build_tags(segments, '活跃度', describe)

    def get_user_list(self, n_clusters: int = 3, date_range: str = '180') -> list:
        """
        Build a per-user listing with segment info.

        Args:
            n_clusters: number of clusters.
            date_range: look-back window in days (string), or 'all'.

        Returns:
            list of dicts: user_id, name, segment, purchase_count,
            total_amount, last_purchase (ISO string or None).

        Raises:
            ValueError: when required data/columns are missing.
        """
        try:
            if self.purchase_data is None or self.purchase_data.empty:
                raise ValueError("购买数据为空，无法生成用户列表")

            filtered_data = self._filter_by_date_range(self.purchase_data, date_range)

            if filtered_data.empty:
                self.logger.warning(f"在日期范围 {date_range} 内没有找到数据")
                return []

            segment_result = self.segment_rfm(n_clusters=n_clusters)
            rfm_data = segment_result.get('rfm_data')

            if rfm_data is None or rfm_data.empty:
                raise ValueError("分群结果为空，无法生成用户列表")

            # NOTE(review): 'order_id' is not part of _validate_data's
            # required columns — datasets without it fail here; confirm the
            # upstream schema actually provides it.
            required_columns = ['user_id', 'datetime', 'order_id', 'price']
            missing_columns = [col for col in required_columns if col not in filtered_data.columns]
            if missing_columns:
                raise ValueError(f"过滤后的数据缺少必要的列: {missing_columns}")

            user_agg = filtered_data.groupby('user_id').agg({
                'datetime': 'max',  # last purchase time
                'order_id': 'nunique',  # distinct orders
                'price': 'sum'  # total spend
            }).reset_index()

            if 'user_id' not in rfm_data.columns or '用户标签' not in rfm_data.columns:
                raise ValueError("分群数据缺少必要的列: user_id或用户标签")

            # Left join keeps users even when segmentation missed them.
            user_agg = user_agg.merge(
                rfm_data[['user_id', '用户标签']],
                on='user_id',
                how='left'
            )

            user_list = []
            for _, row in user_agg.iterrows():
                last_purchase = row['datetime'].isoformat() if pd.notna(row['datetime']) else None

                user_list.append({
                    'user_id': row['user_id'],
                    'name': f'用户{row["user_id"]}',  # placeholder; join a user table in production
                    'segment': row['用户标签'] if pd.notna(row['用户标签']) else '未分类',
                    'purchase_count': int(row['order_id']) if pd.notna(row['order_id']) else 0,
                    'total_amount': float(row['price']) if pd.notna(row['price']) else 0.0,
                    'last_purchase': last_purchase
                })

            return user_list

        except Exception as e:
            self.logger.error(f"获取用户列表失败: {str(e)}", exc_info=True)
            raise  # re-raise for the API layer to handle

    def _filter_by_date_range(self, data: pd.DataFrame, date_range: str) -> pd.DataFrame:
        """
        Restrict data to the last N days (N = int(date_range)).

        'all' or an unparsable value keeps everything (copy of the input).
        """
        if date_range == 'all':
            return data.copy()

        try:
            days = int(date_range)
        except ValueError:
            self.logger.warning(f"无效的日期范围: {date_range}, 使用全部数据")
            return data.copy()

        # Work on a copy when a dtype fix is needed so the caller's frame is
        # never mutated in place.
        if not pd.api.types.is_datetime64_any_dtype(data['datetime']):
            data = data.copy()
            data['datetime'] = pd.to_datetime(data['datetime'])

        cutoff_date = pd.Timestamp.now() - pd.Timedelta(days=days)
        filtered = data[data['datetime'] >= cutoff_date]
        self.logger.info(f"日期范围过滤: 保留 {len(filtered)} 条记录 (从 {cutoff_date} 至今)")
        return filtered