import os
import matplotlib.pyplot as plt
import seaborn as sns
from mlxtend.frequent_patterns import apriori, association_rules
import pandas as pd
import numpy as np
from datetime import datetime
from collections import defaultdict
import networkx as nx

# 创建保存图片的目录
def ensure_dir(directory='./figs'):
    """Ensure *directory* exists, creating it (with parents) if missing.

    Args:
        directory: Path of the directory to guarantee. Defaults to './figs'.

    Returns:
        The same *directory* path, so callers can chain it.
    """
    if not os.path.exists(directory):
        # exist_ok=True closes the TOCTOU race: another process may create
        # the directory between the exists() check and makedirs().
        os.makedirs(directory, exist_ok=True)
        print(f"创建目录: {directory}")
    return directory

# 保存图片的通用函数
def save_fig(fig, filename, figs_dir='./figs'):
    """Write *fig* to <figs_dir>/<filename> at 300 dpi, then close it.

    The target directory is created on demand via ensure_dir(); closing
    the figure afterwards releases matplotlib's memory so long batch
    runs do not accumulate open figures.
    """
    destination = os.path.join(ensure_dir(figs_dir), filename)
    fig.savefig(destination, bbox_inches='tight', dpi=300)
    plt.close(fig)
    print(f"图片已保存: {destination}")


def handle_task1(new_df):
    """Task 1: mine association rules between product main-categories.

    Args:
        new_df: DataFrame with a 'purchase_history' column of dicts that
            carry an 'items' list whose entries have a 'main_category'
            key (inferred from the access pattern below -- confirm
            against the data loader).

    Side effects: prints the analysis summary and saves two figures
    ('task1_category_support.png', 'task1_rules_scatter.png') via save_fig.
    """
    # 1. Convert each purchase_history into a transaction: the set of
    # distinct main categories bought in that order.
    def extract_categories(purchase_history):
        """Return the deduplicated set of main categories in one order."""
        return {item['main_category'] for item in purchase_history['items']}

    # One transaction (a set of categories) per row.
    transactions = new_df['purchase_history'].apply(extract_categories)

    # 2. Build the one-hot transaction x category matrix.
    # Sort the category universe so the column order -- and therefore the
    # itemset/rule output order -- is reproducible across runs (plain set
    # iteration order varies with string hash randomization).
    all_categories = set()
    for cats in transactions:
        all_categories.update(cats)
    all_categories = sorted(all_categories)

    # Boolean cells: set membership is O(1) per cell, and a bool matrix
    # is the input dtype mlxtend's apriori expects (0/1 ints are
    # deprecated there).
    one_hot_df = pd.DataFrame(
        [[category in transaction for category in all_categories]
         for transaction in transactions],
        columns=all_categories)

    # 3. Frequent itemsets via the Apriori algorithm.
    frequent_itemsets = apriori(one_hot_df,
                            min_support=0.02,  # minimum support 0.02
                            use_colnames=True)

    # 4. Association rules, filtered by confidence.
    rules = association_rules(frequent_itemsets,
                            metric="confidence",
                            min_threshold=0.5)  # minimum confidence 0.5

    # 5. Round lift for readable output and rank rules by it
    # (association_rules already supplies lift/conviction columns).
    rules["lift"] = rules["lift"].round(4)
    rules = rules.sort_values("lift", ascending=False)

    # 6. Focus on rules mentioning the electronics category ('电子产品')
    # on either side.
    electronic_rules = rules[
        (rules['antecedents'].apply(lambda x: '电子产品' in x) |
        rules['consequents'].apply(lambda x: '电子产品' in x))
    ]

    # Print the analysis summary.
    print("频繁项集数量:", len(frequent_itemsets))
    print("\n所有类别的支持度:")
    # Column mean of a boolean matrix == per-category support.
    support_by_category = one_hot_df.mean().sort_values(ascending=False)
    print(support_by_category)

    print("\n前10个最强关联规则:")
    print(rules.head(10)[['antecedents', 'consequents', 'support', 'confidence', 'lift']])

    print("\n与电子产品相关的规则:")
    print(electronic_rules[['antecedents', 'consequents', 'support', 'confidence', 'lift']])

    # 7. Visualization.

    # Per-category support distribution.
    fig1 = plt.figure(figsize=(10, 6))
    support_by_category.plot(kind='bar')
    plt.title('各商品类别的支持度分布')
    plt.xticks(rotation=45)
    plt.tight_layout()
    save_fig(fig1, 'task1_category_support.png')

    # Support vs. confidence scatter over all rules.
    fig2 = plt.figure(figsize=(10, 6))
    plt.scatter(rules['support'], rules['confidence'], alpha=0.5)
    plt.xlabel('Support')
    plt.ylabel('Confidence')
    plt.title('关联规则支持度-置信度分布')
    plt.tight_layout()
    save_fig(fig2, 'task1_rules_scatter.png')

    # 8. Headline findings.
    print("\n主要发现：")
    print("1. 最常见的商品类别组合：")
    top_frequent = frequent_itemsets.sort_values('support', ascending=False).head(5)
    print(top_frequent)

    print("\n2. 最强的关联规则：")
    top_rules = rules.sort_values('lift', ascending=False).head(5)
    print(top_rules[['antecedents', 'consequents', 'support', 'confidence', 'lift']])

    print("\n3. 电子产品的主要关联类别：")
    electronic_insights = electronic_rules.sort_values('lift', ascending=False).head(5)
    print(electronic_insights[['antecedents', 'consequents', 'support', 'confidence', 'lift']])

def handle_task2(new_df):
    """Task 2: relate payment methods to product categories.

    Args:
        new_df: DataFrame with a 'purchase_history' column of dicts that
            carry 'payment_method' and an 'items' list whose entries have
            'main_category' and 'price' keys (inferred from the access
            pattern below -- confirm against the data loader).

    Side effects: prints rule/distribution summaries and saves up to two
    figures ('task2_payment_distribution.png', 'task2_high_value_payment.png').
    """
    # 1. For each order collect its category tokens, its payment method,
    # and the categories of any high-value (>5000) items.
    def extract_payment_category_info(row):
        """Return category tokens, payment method and high-value categories for one row."""
        purchase_data = row['purchase_history']
        payment_method = purchase_data['payment_method']
        categories = []
        high_value_categories = []  # categories of items priced above 5000

        for item in purchase_data['items']:
            category = f"类别_{item['main_category']}"
            categories.append(category)

            # Flag high-value items.
            if item['price'] > 5000:
                high_value_categories.append(item['main_category'])

        # The payment method joins the itemset as its own token so that
        # Apriori can surface payment <-> category rules.
        categories.append(f"支付_{payment_method}")

        return {
            'categories': categories,
            'payment_method': payment_method,
            'high_value_categories': high_value_categories
        }

    # Row-wise extraction; yields a Series of dicts.
    transaction_data = new_df.apply(extract_payment_category_info, axis=1)

    # 2. One-hot matrix over the union of category and payment tokens.
    # Sorting makes the column order deterministic across runs (plain set
    # iteration order varies with string hash randomization).
    all_items = set()
    for data in transaction_data:
        all_items.update(data['categories'])
    all_items = sorted(all_items)

    # Pre-build one token set per transaction (O(1) membership per cell);
    # a boolean matrix is the input dtype mlxtend's apriori expects.
    transaction_sets = [set(trans['categories']) for trans in transaction_data]
    one_hot_df = pd.DataFrame(
        [[item in token_set for item in all_items]
         for token_set in transaction_sets],
        columns=all_items)

    # 3. Frequent itemsets via the Apriori algorithm.
    frequent_itemsets = apriori(one_hot_df,
                            min_support=0.01,
                            use_colnames=True)

    # 4. Association rules, filtered by confidence.
    rules = association_rules(frequent_itemsets,
                            metric="confidence",
                            min_threshold=0.6)

    # 5. Collect the payment methods used for high-value purchases.
    def analyze_high_value_transactions():
        """Return a DataFrame of (payment_method, categories) for orders with >5000 items."""
        high_value_payments = []
        for data in transaction_data:
            if data['high_value_categories']:
                high_value_payments.append({
                    'payment_method': data['payment_method'],
                    'categories': data['high_value_categories']
                })

        return pd.DataFrame(high_value_payments)

    high_value_df = analyze_high_value_transactions()

    # 6. Print the analysis results.
    print("1. 支付方式与商品类别的关联规则：")
    # Keep only rules that involve a payment token on either side.
    payment_rules = rules[
        (rules['antecedents'].apply(lambda x: any('支付_' in str(i) for i in x))) |
        (rules['consequents'].apply(lambda x: any('支付_' in str(i) for i in x)))
    ]
    print("\n前10个最强支付方式关联规则：")
    print(payment_rules.sort_values('lift', ascending=False).head(10)[
        ['antecedents', 'consequents', 'support', 'confidence', 'lift']
    ])

    print("\n2. 高价值商品(>5000)的支付方式分析：")
    if not high_value_df.empty:
        payment_distribution = high_value_df['payment_method'].value_counts()
        print("\n支付方式分布：")
        print(payment_distribution)

        print("\n高价值商品类别分布：")
        category_distribution = pd.Series([cat for cats in high_value_df['categories']
                                        for cat in cats]).value_counts()
        print(category_distribution)

    # 7. Visualization (plt is imported at module level).

    # Overall payment-method distribution.
    fig1 = plt.figure(figsize=(10, 6))
    payment_methods = [data['payment_method'] for data in transaction_data]
    pd.Series(payment_methods).value_counts().plot(kind='bar')
    plt.title('所有交易的支付方式分布')
    plt.xticks(rotation=45)
    plt.tight_layout()
    save_fig(fig1, 'task2_payment_distribution.png')

    # Payment-method distribution restricted to high-value orders.
    if not high_value_df.empty:
        fig2 = plt.figure(figsize=(10, 6))
        payment_distribution.plot(kind='bar')
        plt.title('高价值商品(>5000)的支付方式分布')
        plt.xticks(rotation=45)
        plt.tight_layout()
        save_fig(fig2, 'task2_high_value_payment.png')

    # 8. Headline findings.
    print("\n主要发现：")
    print("1. 最常用的支付方式：")
    print(pd.Series(payment_methods).value_counts().head())

    if not high_value_df.empty:
        print("\n2. 高价值商品的首选支付方式：")
        print(payment_distribution.head())

    print("\n3. 最强的支付方式-类别关联规则：")
    print(payment_rules.sort_values('lift', ascending=False).head(5)[
        ['antecedents', 'consequents', 'support', 'confidence', 'lift']
    ])

    # 9. Conditional payment-method distribution given a high-value category.
    print("\n支付方式选择的条件概率（给定商品类别）：")
    for category in set(cat for data in transaction_data
                    for cat in data['high_value_categories']):
        category_data = high_value_df[high_value_df['categories'].apply(lambda x: category in x)]
        if not category_data.empty:
            print(f"\n{category}类商品的支付方式分布：")
            print(category_data['payment_method'].value_counts(normalize=True))

def handle_task3(new_df):
    """Task 3: temporal analysis of purchases -- seasonality, per-category
    timing patterns, and user-level purchase-sequence patterns.

    Expects new_df rows to carry a 'purchase_history' dict (with
    'purchase_date' and an 'items' list of dicts having 'main_category')
    plus an 'id' column used as the user identifier -- inferred from the
    access pattern below; confirm against the data loader.

    Side effects: prints summaries and saves three figures via save_fig.
    """
    # 1. Build one time-series record per order.
    def extract_time_category_info(row):
        """Flatten one row into date, categories, user id and calendar fields."""
        purchase_data = row['purchase_history']
        date = pd.to_datetime(purchase_data['purchase_date'])
        categories = [item['main_category'] for item in purchase_data['items']]
        return {
            'date': date,
            'categories': categories,
            'user_id': row['id'],
            'quarter': date.quarter,
            'month': date.month,
            'weekday': date.weekday(),  # 0 = Monday ... 6 = Sunday
            'year': date.year
        }

    # Convert the whole frame row-wise (iterrows is fine at this scale).
    time_data = pd.DataFrame([extract_time_category_info(row) for _, row in new_df.iterrows()])

    # 2. Seasonality: order counts by quarter / month / weekday.
    def analyze_seasonality():
        """Plot and return order counts grouped by quarter, month and weekday."""
        # Count by quarter
        quarterly_stats = time_data.groupby('quarter').size()
        
        # Count by month
        monthly_stats = time_data.groupby('month').size()
        
        # Count by weekday
        weekday_stats = time_data.groupby('weekday').size()
        
        # Three stacked bar charts in one tall figure.
        fig = plt.figure(figsize=(8, 16))
        fig.add_subplot(311)
        quarterly_stats.plot(kind='bar')
        plt.title('季度购物分布')
        plt.xlabel('季度')
        
        fig.add_subplot(312)
        monthly_stats.plot(kind='bar')
        plt.title('月度购物分布')
        plt.xlabel('月份')
        
        fig.add_subplot(313)
        weekday_stats.plot(kind='bar')
        plt.title('星期购物分布')
        plt.xlabel('星期')
        
        plt.tight_layout()
        save_fig(fig, 'task3_time_distribution.png')
        
        return quarterly_stats, monthly_stats, weekday_stats

    # 3. Time distribution of individual product categories.
    def analyze_category_time_patterns():
        """Heatmap of (category x month) purchase counts; returns the pivot table."""
        # Explode: one record per (order, category) pair.
        category_time_data = []
        for _, row in time_data.iterrows():
            for category in row['categories']:
                category_time_data.append({
                    'date': row['date'],
                    'category': category,
                    'quarter': row['quarter'],
                    'month': row['month'],
                    'weekday': row['weekday']
                })
        
        category_df = pd.DataFrame(category_time_data)
        
        # Counts by category and month, pivoted to a category x month grid.
        category_month_stats = category_df.groupby(['category', 'month']).size().unstack()
        
        # Visualize as a heatmap.
        fig = plt.figure(figsize=(15, 8))
        sns.heatmap(category_month_stats, cmap='YlOrRd', annot=True, fmt='g')
        plt.title('商品类别的月度分布热力图')
        save_fig(fig, 'task3_category_month_heatmap.png')
        
        return category_month_stats
    
    # 4. Purchase-sequence patterns (which category tends to follow which).
    def analyze_sequential_patterns():
        """Count category->category transitions between consecutive purchases per user."""
        # Group purchases by user id.
        user_sequences = defaultdict(list)
        for _, row in time_data.iterrows():
            user_sequences[row['user_id']].append({
                'date': row['date'],
                'categories': set(row['categories'])
            })
        
        # Sort each user's purchases chronologically.
        for user_id in user_sequences:
            user_sequences[user_id].sort(key=lambda x: x['date'])
        
        # Count transitions: every category of purchase i paired with
        # every category of purchase i+1.
        sequence_patterns = defaultdict(int)
        for user_id, purchases in user_sequences.items():
            for i in range(len(purchases)-1):
                for cat1 in purchases[i]['categories']:
                    for cat2 in purchases[i+1]['categories']:
                        sequence_patterns[(cat1, cat2)] += 1
        
        # Convert to a DataFrame; 'probability' is each pair's share of
        # all observed transitions (not a conditional probability).
        patterns_df = pd.DataFrame([
            {'first_category': k[0], 
            'next_category': k[1], 
            'count': v} 
            for k, v in sequence_patterns.items()
        ])
        
        if not patterns_df.empty:
            patterns_df['probability'] = patterns_df['count'] / patterns_df['count'].sum()
            patterns_df = patterns_df.sort_values('count', ascending=False)
        
        return patterns_df

    # Run the analyses.
    print("正在分析季节性模式...")
    quarterly_stats, monthly_stats, weekday_stats = analyze_seasonality()

    print("\n各时间维度的购物分布：")
    print("\n季度分布：")
    print(quarterly_stats)
    print("\n月份分布：")
    print(monthly_stats)
    print("\n星期分布：")
    print(weekday_stats)

    print("\n正在分析商品类别的时间分布...")
    category_month_stats = analyze_category_time_patterns()

    print("\n正在分析序列模式...")
    sequence_patterns = analyze_sequential_patterns()
    print("\n最常见的购买序列模式（前10个）：")
    if not sequence_patterns.empty:
        print(sequence_patterns.head(10))

    # 5. Extra analysis: per-category seasonality index
    # (monthly count divided by that category's monthly mean; 1.0 = average month).
    def calculate_seasonality_index():
        """Compute, plot and return the (category x month) seasonality index table."""
        category_time_data = []
        for _, row in time_data.iterrows():
            for category in row['categories']:
                category_time_data.append({
                    'date': row['date'],
                    'category': category,
                    'month': row['month']
                })
        
        category_df = pd.DataFrame(category_time_data)
        
        # Raw counts per category and month.
        seasonal_index = category_df.groupby(['category', 'month']).size().unstack()
        if not seasonal_index.empty:
            # Monthly mean per category...
            category_means = seasonal_index.mean(axis=1)
            # ...then normalize each row by its mean to get the index.
            seasonal_index = seasonal_index.div(category_means, axis=0)
            
            # Visualize, centered on 1.0 (the "average month").
            fig = plt.figure(figsize=(15, 8))
            sns.heatmap(seasonal_index, cmap='RdYlBu', center=1, annot=True, fmt='.2f')
            plt.title('商品类别的季节性指数')
            save_fig(fig, 'task3_seasonality_index.png')
        
        return seasonal_index

    print("\n计算季节性指数...")
    seasonality_index = calculate_seasonality_index()

    # 6. Headline findings.
    print("\n主要发现：")
    print("1. 购物高峰期：")
    peak_month = monthly_stats.idxmax()
    print(f"- 最受欢迎的购物月份：第 {peak_month} 月")
    peak_weekday = weekday_stats.idxmax()
    # +1 converts the 0-based weekday (Monday=0) to a 1-based label.
    print(f"- 最受欢迎的购物星期：星期 {peak_weekday + 1}")

    if not sequence_patterns.empty:
        print("\n2. 最强的购买序列模式：")
        top_sequence = sequence_patterns.iloc[0]
        print(f"- {top_sequence['first_category']} → {top_sequence['next_category']}")
        print(f"- 出现次数：{top_sequence['count']}")
        print(f"- 概率：{top_sequence['probability']:.2%}")

    print("\n3. 类别季节性特征：")
    if not seasonality_index.empty:
        for category in seasonality_index.index:
            peak_month = seasonality_index.loc[category].idxmax()
            peak_value = seasonality_index.loc[category].max()
            print(f"- {category}: 峰值在第 {peak_month} 月 (指数: {peak_value:.2f})")

def handle_task4(new_df):    
    """Task 4: mine category combinations that co-occur in refunded orders.

    Expects new_df rows to carry a 'purchase_history' dict with
    'payment_status' and an 'items' list of dicts having 'main_category'
    -- inferred from the access pattern below; confirm against the loader.

    Side effects: prints rule summaries and, when rules are found, saves
    five figures via save_fig.
    """
    # 1. Extract refund-relevant fields from each order.
    def extract_refund_info(row):
        """Return the order's category list and whether it was (partially) refunded."""
        purchase_data = row['purchase_history']
        payment_status = purchase_data['payment_status']
        
        # An order counts as a refund when its status is "refunded" or
        # "partially refunded".
        is_refund = payment_status in ['已退款', '部分退款']
        
        return {
            'categories': [item['main_category'] for item in purchase_data['items']],
            'is_refund': is_refund
        }

    # Convert the whole frame row-wise.
    refund_data = pd.DataFrame([extract_refund_info(row) for _, row in new_df.iterrows()])

    # 2. Mine category-combination rules within refunded orders only.
    def analyze_refund_patterns():
        """Run Apriori over refunded orders; return (rules, one_hot_df)."""
        # Keep refunded orders only.
        refund_orders = refund_data[refund_data['is_refund']]
        
        # Build the one-hot transaction x category matrix.
        all_categories = set()
        for cats in refund_orders['categories']:
            all_categories.update(cats)
        
        one_hot_df = pd.DataFrame([
            [1 if cat in order_cats else 0 for cat in all_categories]
            for order_cats in refund_orders['categories']
        ], columns=list(all_categories))
        
        # Frequent itemsets via Apriori.
        frequent_itemsets = apriori(one_hot_df,
                                min_support=0.005,  # minimum support 0.005
                                use_colnames=True)
        
        # Association rules filtered by confidence.
        rules = association_rules(frequent_itemsets,
                                metric="confidence",
                                min_threshold=0.4)  # minimum confidence 0.4
        
        return rules, one_hot_df

    # Run the analysis.
    print("分析退款模式...")
    rules, one_hot_df = analyze_refund_patterns()

    if not rules.empty:
        # Composite rule-strength score (support * confidence * lift).
        rules['strength'] = rules['support'] * rules['confidence'] * rules['lift']
        
        print("\n1. 退款订单总数:", len(one_hot_df))
        
        print("\n2. 最强的退款商品类别组合规则（按规则强度排序）：")
        top_rules = rules.sort_values('strength', ascending=False).head(10)
        for _, rule in top_rules.iterrows():
            print(f"\n组合: {set(rule['antecedents'])} → {set(rule['consequents'])}")
            print(f"支持度: {rule['support']:.3f}")
            print(f"置信度: {rule['confidence']:.3f}")
            print(f"提升度: {rule['lift']:.3f}")
        
        print("\n3. 最频繁的退款商品类别（按支持度排序）：")
        # Column mean of the 0/1 matrix == per-category support.
        category_support = one_hot_df.mean().sort_values(ascending=False)
        print(category_support.head())
        
        print("\n4. 规则统计：")
        print(f"发现的规则总数: {len(rules)}")
        print(f"平均支持度: {rules['support'].mean():.3f}")
        print(f"平均置信度: {rules['confidence'].mean():.3f}")
        print(f"平均提升度: {rules['lift'].mean():.3f}")
        
        # Flag the riskiest category combinations.
        high_risk_rules = rules[rules['lift'] > 2]  # rules with lift above 2
        if not high_risk_rules.empty:
            print("\n5. 高风险商品类别组合（提升度 > 2）：")
            for _, rule in high_risk_rules.head().iterrows():
                print(f"\n组合: {set(rule['antecedents'])} → {set(rule['consequents'])}")
                print(f"提升度: {rule['lift']:.3f}")
    else:
        print("未发现满足条件的退款规则（支持度 ≥ 0.005、置信度 ≥ 0.4）")

    # Visualization helper for the refund-rule analysis above.
    def visualize_refund_patterns(rules, one_hot_df, figs_dir='./figs'):
        """Save five figures summarizing the refund rules and category co-occurrence."""
        plt.style.use('default')
        
        # 1. Rule scatter: support vs. confidence, colored/sized by lift.
        fig = plt.figure(figsize=(10, 6))
        plt.scatter(rules['support'], rules['confidence'], 
                    c=rules['lift'], cmap='YlOrRd',
                    s=rules['lift']*100)
        plt.colorbar(label='Lift')
        plt.xlabel('Support')
        plt.ylabel('Confidence')
        plt.title('退款规则分布')
        plt.tight_layout()
        save_fig(fig, 'refund_rules_scatter.png', figs_dir=figs_dir)

        # 2. Bar chart: how often each category appears in refunded orders.
        fig = plt.figure(figsize=(12, 6))
        category_support = one_hot_df.mean().sort_values(ascending=False)
        category_support.plot(kind='bar')
        plt.title('商品类别在退款订单中的出现频率')
        plt.xlabel('商品类别')
        plt.ylabel('频率')
        plt.xticks(rotation=45, ha='right')
        plt.tight_layout()
        save_fig(fig, 'category_refund_frequency.png', figs_dir=figs_dir)

        # 3. Directed network of the 10 strongest (by lift) rules; multi-item
        # sides are collapsed into a single '+'-joined node label.
        fig = plt.figure(figsize=(15, 10))
        G = nx.DiGraph()
        top_rules = rules.sort_values('lift', ascending=False).head(10)
        for _, rule in top_rules.iterrows():
            antecedents = list(rule['antecedents'])[0] if len(rule['antecedents']) == 1 else '+'.join(list(rule['antecedents']))
            consequents = list(rule['consequents'])[0] if len(rule['consequents']) == 1 else '+'.join(list(rule['consequents']))
            G.add_edge(antecedents, consequents, 
                       weight=rule['lift'],
                       confidence=rule['confidence'],
                       support=rule['support'])
        pos = nx.spring_layout(G, k=2)
        nx.draw_networkx_nodes(G, pos, node_color='lightblue', 
                               node_size=2000, alpha=0.7)
        edges = G.edges()
        # Edge width encodes lift.
        weights = [G[u][v]['weight'] for u, v in edges]
        nx.draw_networkx_edges(G, pos, edge_color='gray',
                               width=[w/2 for w in weights],
                               arrowsize=20)
        nx.draw_networkx_labels(G, pos, font_size=8)
        plt.title('Top 10 退款规则关联网络')
        plt.axis('off')
        plt.tight_layout()
        save_fig(fig, 'refund_rules_network.png', figs_dir=figs_dir)

        # 4. Heatmap of category co-occurrence counts, normalized by the
        # global maximum so cells fall in [0, 1].
        fig = plt.figure(figsize=(12, 10))
        cooccurrence = one_hot_df.T.dot(one_hot_df)
        cooccurrence_norm = cooccurrence.div(cooccurrence.max().max())
        sns.heatmap(cooccurrence_norm, cmap='YlOrRd', 
                    annot=True, fmt='.2f', square=True)
        plt.title('商品类别共现频率热力图')
        plt.xticks(rotation=45, ha='right')
        plt.yticks(rotation=0)
        plt.tight_layout()
        save_fig(fig, 'category_cooccurrence_heatmap.png', figs_dir=figs_dir)

        # 5. Histogram of the composite rule-strength score.
        fig = plt.figure(figsize=(10, 6))
        rules['strength'] = rules['support'] * rules['confidence'] * rules['lift']
        plt.hist(rules['strength'], bins=30, edgecolor='black')
        plt.title('规则强度分布')
        plt.xlabel('规则强度')
        plt.ylabel('频次')
        plt.tight_layout()
        save_fig(fig, 'rule_strength_distribution.png', figs_dir=figs_dir)

    # Produce the figures only when rules were found.
    if not rules.empty:
        print("\n生成可视化图表...")
        visualize_refund_patterns(rules, one_hot_df)
        
        # Explain what each saved figure shows.
        print("\n可视化说明：")
        print("1. 规则散点图：展示了支持度、置信度和提升度三个维度的关系")
        print("2. 商品类别频率图：展示了各类别在退款订单中的出现频率")
        print("3. 规则网络图：展示了最强的10个关联规则之间的关系")
        print("4. 共现热力图：展示了商品类别之间的共现频率")
        print("5. 规则强度分布：展示了规则强度的整体分布情况")
