import pandas as pd
import numpy as np
from mlxtend.frequent_patterns import apriori, association_rules
import os
import logging
from typing import Tuple, Optional
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class AssociationRuleMiner:
    """Mine association rules between sentiment levels and engagement levels.

    Typical pipeline:
        load_and_discretize -> mine_frequent_itemsets -> generate_rules
        -> visualize_rules / save_results
    """

    def __init__(self, min_support: float = 0.1, min_confidence: float = 0.6,
                 like_threshold: int = 5, reply_threshold: int = 1,
                 sentiment_cutoff: float = 0.3):
        """
        Initialize the association rule miner.

        Args:
            min_support: minimum support passed to the Apriori algorithm.
            min_confidence: minimum confidence for generated rules.
            like_threshold: like counts >= this value are labeled high
                (default 5 — chosen because ~75% of the observed data has <= 1 like).
            reply_threshold: reply counts >= this value are labeled high
                (default 1 — ~75% of the observed data has 0 replies).
            sentiment_cutoff: scores above +cutoff are positive, below -cutoff
                negative, otherwise neutral (default 0.3).
        """
        self.min_support = min_support
        self.min_confidence = min_confidence
        self.like_threshold = like_threshold
        self.reply_threshold = reply_threshold
        self.sentiment_cutoff = sentiment_cutoff
        self.frequent_itemsets = None  # populated by mine_frequent_itemsets()
        self.rules = None              # populated by generate_rules()

    def load_and_discretize(self, file_path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """
        Load sentiment-analysis results and discretize them:
        - sentiment label: positive / negative / neutral (by sentiment_cutoff)
        - like count: high / low (by like_threshold)
        - reply count: high / low (by reply_threshold)

        Args:
            file_path: CSV path; expects columns 'sentiment_score',
                'like_counts', 'reply_counts'.

        Returns:
            Tuple[pd.DataFrame, pd.DataFrame]: one-hot encoded boolean frame
            and the raw (label-valued) transaction frame.

        Raises:
            Exception: any load/processing error is logged and re-raised.
        """
        try:
            df = pd.read_csv(file_path)
            logger.info(f"成功加载数据，共{len(df)}条记录")

            # Log distribution statistics so the threshold choices can be
            # sanity-checked against the actual data.
            like_stats = df['like_counts'].describe()
            reply_stats = df['reply_counts'].describe()

            logger.info("\n点赞数统计信息：")
            logger.info(like_stats)
            logger.info("\n转发数统计信息：")
            logger.info(reply_stats)

            logger.info(f"点赞阈值: {self.like_threshold}")
            logger.info(f"转发阈值: {self.reply_threshold}")

            cutoff = self.sentiment_cutoff

            def sentiment_level(score: float) -> str:
                # Three-way split around +/- cutoff; everything in between
                # counts as neutral.
                if score > cutoff:
                    return '情感正向'
                elif score < -cutoff:
                    return '情感负向'
                return '情感中立'

            df['sentiment_level'] = df['sentiment_score'].apply(sentiment_level)
            # Vectorized binary splits (faster than per-row apply).
            df['like_level'] = np.where(
                df['like_counts'] >= self.like_threshold, '点赞高', '点赞低')
            df['reply_level'] = np.where(
                df['reply_counts'] >= self.reply_threshold, '转发高', '转发低')

            # Each row is one transaction over the three discretized items.
            transactions = df[['sentiment_level', 'level', 'reply_level'] if False else ['sentiment_level', 'like_level', 'reply_level']]

            # One-hot encode; apriori expects a boolean frame, so cast
            # explicitly instead of relying on get_dummies' dtype default.
            onehot = pd.get_dummies(transactions).astype(bool)

            logger.info("离散化结果统计：")
            logger.info(f"情感分布：\n{df['sentiment_level'].value_counts()}")
            logger.info(f"点赞分布：\n{df['like_level'].value_counts()}")
            logger.info(f"转发分布：\n{df['reply_level'].value_counts()}")

            return onehot, transactions

        except Exception as e:
            logger.error(f"数据加载和预处理失败: {str(e)}")
            raise

    def mine_frequent_itemsets(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Mine frequent itemsets with Apriori.

        Args:
            df: one-hot encoded boolean transaction frame.

        Returns:
            pd.DataFrame: frequent itemsets (also cached on the instance).

        Raises:
            Exception: mining errors are logged and re-raised.
        """
        try:
            self.frequent_itemsets = apriori(df, min_support=self.min_support, use_colnames=True)
            logger.info(f"发现{len(self.frequent_itemsets)}个频繁项集")
            return self.frequent_itemsets
        except Exception as e:
            logger.error(f"频繁项集挖掘失败: {str(e)}")
            raise

    def generate_rules(self) -> pd.DataFrame:
        """
        Generate association rules from the mined frequent itemsets.

        Returns:
            pd.DataFrame: rules filtered by the confidence threshold
            (also cached on the instance).

        Raises:
            ValueError: if mine_frequent_itemsets has not been run yet.
            Exception: generation errors are logged and re-raised.
        """
        if self.frequent_itemsets is None:
            raise ValueError("请先执行频繁项集挖掘")

        try:
            self.rules = association_rules(
                self.frequent_itemsets,
                metric="confidence",
                min_threshold=self.min_confidence
            )
            logger.info(f"生成{len(self.rules)}条关联规则")
            return self.rules
        except Exception as e:
            logger.error(f"关联规则生成失败: {str(e)}")
            raise

    def visualize_rules(self, output_dir: str, top_n: int = 10):
        """
        Visualize the rules: support/confidence scatter plot and a bar chart
        of the top-N rules by lift.

        Args:
            output_dir: directory the PNG files are written to (created if absent).
            top_n: number of highest-lift rules shown in the bar chart.

        Raises:
            ValueError: if generate_rules has not been run yet.
            Exception: plotting errors are logged and re-raised.
        """
        if self.rules is None:
            raise ValueError("请先执行关联规则生成")

        if self.rules.empty:
            # Nothing to draw; plotting an empty frame would only raise.
            logger.warning("没有可供可视化的关联规则")
            return

        try:
            output_path = Path(output_dir)
            output_path.mkdir(parents=True, exist_ok=True)

            # Support vs. confidence scatter, point size encodes lift.
            plt.figure(figsize=(10, 6))
            sns.scatterplot(data=self.rules, x='support', y='confidence', size='lift')
            plt.title('关联规则支持度-置信度散点图')
            plt.savefig(output_path / 'rules_scatter.png')
            plt.close()

            # BUG FIX: previously the DataFrame's integer index was used as
            # the bar labels, which is meaningless to a reader. Build
            # "antecedents => consequents" labels from the rule frozensets.
            top_rules = self.rules.nlargest(top_n, 'lift')
            labels = [
                f"{', '.join(sorted(a))} => {', '.join(sorted(c))}"
                for a, c in zip(top_rules['antecedents'], top_rules['consequents'])
            ]
            plt.figure(figsize=(12, 6))
            sns.barplot(x=top_rules['lift'].to_numpy(), y=labels)
            plt.title(f'前{top_n}条规则的提升度')
            plt.savefig(output_path / 'top_rules_lift.png')
            plt.close()

            logger.info("可视化完成")

        except Exception as e:
            logger.error(f"可视化失败: {str(e)}")
            raise

    def save_results(self, output_dir: str):
        """
        Persist frequent itemsets, rules, and a text summary.

        Args:
            output_dir: output directory (created if absent).

        Raises:
            ValueError: if the full mining pipeline has not been run yet.
            Exception: save errors are logged and re-raised.
        """
        if self.frequent_itemsets is None or self.rules is None:
            raise ValueError("请先执行完整的关联规则挖掘流程")

        try:
            output_path = Path(output_dir)
            output_path.mkdir(parents=True, exist_ok=True)

            self.frequent_itemsets.to_csv(output_path / 'frequent_itemsets.csv', index=False)
            self.rules.to_csv(output_path / 'association_rules.csv', index=False)

            with open(output_path / 'rules_summary.txt', 'w', encoding='utf-8') as f:
                f.write(f"频繁项集数量: {len(self.frequent_itemsets)}\n")
                f.write(f"关联规则数量: {len(self.rules)}\n")
                # Guard against NaN averages when no rules were generated.
                if not self.rules.empty:
                    f.write(f"平均支持度: {self.rules['support'].mean():.4f}\n")
                    f.write(f"平均置信度: {self.rules['confidence'].mean():.4f}\n")
                    f.write(f"平均提升度: {self.rules['lift'].mean():.4f}\n")

            logger.info(f"结果已保存到{output_dir}")

        except Exception as e:
            logger.error(f"结果保存失败: {str(e)}")
            raise

def main():
    """Run the full association-rule mining pipeline end to end.

    Loads the sentiment CSV, discretizes it, mines frequent itemsets,
    derives rules, and writes plots + result files to the output directory.

    Raises:
        Exception: any pipeline failure is logged and re-raised.
    """
    # Pipeline configuration; adjust paths/thresholds here.
    config = {
        'data_path': '../sentiment/analysis_results/sentiment_analysis_results.csv',
        'output_dir': 'results',
        'min_support': 0.1,
        'min_confidence': 0.6,
        'visualize_top_n': 10
    }

    try:
        miner = AssociationRuleMiner(
            min_support=config['min_support'],
            min_confidence=config['min_confidence']
        )

        # Load and discretize, then mine itemsets and derive rules.
        # (The miner caches intermediate results on the instance, so the
        # previously unused local bindings have been dropped.)
        onehot, _transactions = miner.load_and_discretize(config['data_path'])
        miner.mine_frequent_itemsets(onehot)
        miner.generate_rules()

        # Persist plots and CSV/summary outputs.
        miner.visualize_rules(config['output_dir'], config['visualize_top_n'])
        miner.save_results(config['output_dir'])

    except Exception as e:
        logger.error(f"程序执行失败: {str(e)}")
        raise

# Script entry point: run the full mining pipeline when executed directly.
if __name__ == "__main__":
    main() 