import json
import logging
from io import StringIO

import mysql.connector
import numpy as np
import pandas as pd
from apyori import apriori
from mysql.connector import Error

class BehaviorAnalyzer:
    """Analyzes user-behavior data and caches computed results in MySQL.

    Relies on a ``data_processor`` that exposes:
      - ``data``: a pandas DataFrame of behavior records (expected columns
        include user_id, behavior_type, item_id, category_id, datetime,
        hour, day_of_week — TODO confirm against the data processor)
      - ``db_config``: a dict with MySQL connection settings
        (host/user/password/database/port)
    """

    def __init__(self, data_processor):
        self.data_processor = data_processor
        # Shared reference to the processor's behavior DataFrame.
        self.data = data_processor.data

    def _get_db_connection(self):
        """Open a MySQL connection using the processor's db_config.

        Returns:
            An open connection, or None when the connection attempt fails.
        """
        try:
            return mysql.connector.connect(
                host=self.data_processor.db_config['host'],
                user=self.data_processor.db_config['user'],
                password=self.data_processor.db_config['password'],
                database=self.data_processor.db_config['database'],
                port=self.data_processor.db_config['port']
            )
        except Error as e:
            # logging (not print) so failures reach configured log sinks,
            # consistent with get_user_behavior_timeline.
            logging.error(f"数据库连接错误: {e}")
            return None

    def _cache_result(self, func_name, result):
        """Persist a computed result into the data_cache table.

        Accepts a bare DataFrame, or a dict whose values are DataFrames or
        JSON-serializable dicts/lists; dict values of any other type are
        silently skipped (matching the original behavior). Any previous
        cache entry for func_name is deleted first.
        """
        connection = self._get_db_connection()
        if not connection:
            return

        cursor = None  # guard: cursor() may raise before assignment
        try:
            cursor = connection.cursor()

            # Replace any previous cache entry for this function.
            cursor.execute(
                "DELETE FROM data_cache WHERE func_name = %s", (func_name,)
            )

            if isinstance(result, dict):
                for key, value in result.items():
                    if isinstance(value, pd.DataFrame):
                        data_type = "dataframe"
                        data_str = value.to_json(orient='records')
                    elif isinstance(value, (dict, list)):
                        data_type = "json"
                        data_str = json.dumps(value)
                    else:
                        continue  # unsupported value type: skip

                    cursor.execute(
                        """
                        INSERT INTO data_cache (func_name, key_name, data_type, data)
                        VALUES (%s, %s, %s, %s)
                        """,
                        (func_name, key, data_type, data_str),
                    )
            elif isinstance(result, pd.DataFrame):
                cursor.execute(
                    """
                    INSERT INTO data_cache (func_name, data_type, data)
                    VALUES (%s, %s, %s)
                    """,
                    (func_name, "dataframe", result.to_json(orient='records')),
                )

            connection.commit()

        except Error as e:
            logging.error(f"缓存数据到数据库失败: {e}")
            connection.rollback()
        finally:
            if connection.is_connected():
                if cursor is not None:
                    cursor.close()
                connection.close()

    @staticmethod
    def _decode_cached_row(row):
        """Deserialize one data_cache row back into a DataFrame or object.

        Returns None for an unrecognized data_type.
        """
        if row['data_type'] == 'dataframe':
            # StringIO avoids pandas' deprecation of literal JSON strings.
            return pd.read_json(StringIO(row['data']))
        if row['data_type'] == 'json':
            return json.loads(row['data'])
        return None

    def _load_cached_result(self, func_name):
        """Load a previously cached result for func_name.

        Returns:
            A DataFrame or JSON object for a single unkeyed row, a dict of
            key_name -> decoded value for keyed rows, or None on a cache
            miss or any database error.
        """
        connection = self._get_db_connection()
        if not connection:
            return None

        cursor = None  # guard: cursor() may raise before assignment
        try:
            cursor = connection.cursor(dictionary=True)
            cursor.execute(
                "SELECT * FROM data_cache WHERE func_name = %s", (func_name,)
            )
            rows = cursor.fetchall()

            if not rows:
                return None

            # A single row with no key_name means a non-dict result was cached.
            if len(rows) == 1 and rows[0]['key_name'] is None:
                decoded = self._decode_cached_row(rows[0])
                if decoded is not None:
                    return decoded

            # Keyed rows: rebuild the dict result, skipping unknown types.
            result_dict = {}
            for row in rows:
                decoded = self._decode_cached_row(row)
                if decoded is not None:
                    result_dict[row['key_name']] = decoded
            return result_dict

        except Error as e:
            logging.error(f"从数据库加载缓存失败: {e}")
            return None
        finally:
            if connection.is_connected():
                if cursor is not None:
                    cursor.close()
                connection.close()

    def get_behavior_distribution(self):
        """Return counts and percentage share of each behavior type.

        Returns:
            DataFrame with columns 行为类型 / 次数 / 比例, where 比例 is a
            string formatted like "12.34%".
        """
        cached = self._load_cached_result('behavior_distribution')
        if cached is not None:
            return cached

        dist = self.data['behavior_type'].value_counts().reset_index()
        dist.columns = ['行为类型', '次数']
        total = dist['次数'].sum()
        dist['比例'] = (dist['次数'] / total).map(lambda x: f"{x:.2%}")

        self._cache_result('behavior_distribution', dist)
        return dist

    def get_user_active_time(self):
        """Return (hourly_activity, weekday_activity) DataFrames.

        Recomputes (instead of raising KeyError) when the cached dict is
        missing either expected key, e.g. after a partially written cache.
        """
        cached = self._load_cached_result('user_active_time')
        if isinstance(cached, dict) and 'hourly' in cached and 'weekday' in cached:
            return cached['hourly'], cached['weekday']

        hourly_activity = (
            self.data.groupby('hour')['user_id'].count().reset_index(name='行为次数')
        )

        # Assumes day_of_week is 0..6 with 0 == Monday — TODO confirm
        # against the data processor.
        day_names = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']
        daily = self.data.groupby('day_of_week')['user_id'].count().reset_index()
        daily.columns = ['day_of_week', '行为次数']
        daily['周别'] = daily['day_of_week'].map(lambda x: day_names[x])
        weekday_activity = daily[['周别', '行为次数']]

        self._cache_result(
            'user_active_time',
            {'hourly': hourly_activity, 'weekday': weekday_activity},
        )
        return hourly_activity, weekday_activity

    def get_user_behavior_timeline(self, user_id):
        """Return the time-ordered behavior timeline for one user.

        Args:
            user_id: the user to look up.

        Returns:
            DataFrame with columns datetime, behavior_type, item_id,
            category_id, behavior_desc, sorted by datetime ascending;
            an empty DataFrame with the same columns on error.
        """
        try:
            user_behavior = self.data[self.data['user_id'] == user_id].copy()

            # Coerce only when needed so already-parsed data is not reconverted.
            if not pd.api.types.is_datetime64_any_dtype(user_behavior['datetime']):
                user_behavior['datetime'] = pd.to_datetime(user_behavior['datetime'])

            timeline = (
                user_behavior
                .sort_values('datetime')
                [['datetime', 'behavior_type', 'item_id', 'category_id']]
                .reset_index(drop=True)
            )

            # Human-readable labels for the front end; unknown codes fall
            # back to the raw behavior_type value.
            behavior_map = {
                'view': '浏览',
                'buy': '购买',
                'cart': '加入购物车',
                'fav': '收藏'
            }
            timeline['behavior_desc'] = (
                timeline['behavior_type'].map(behavior_map)
                .fillna(timeline['behavior_type'])
            )

            return timeline
        except Exception as e:
            logging.error(f"获取用户{user_id}的行为时间线失败: {str(e)}")
            return pd.DataFrame(columns=['datetime', 'behavior_type', 'item_id', 'category_id', 'behavior_desc'])

    def find_item_association_rules(self, min_support=0.01, min_confidence=0.5):
        """Mine item association rules from purchase transactions.

        Args:
            min_support: minimum support threshold passed to apriori.
            min_confidence: minimum confidence threshold passed to apriori.

        Returns:
            DataFrame with columns 前项 / 后项 / 支持度 / 置信度 / 提升度.
            The column schema is stable even when no rule is found.
        """
        cache_key = f'association_rules_support_{min_support}_confidence_{min_confidence}'
        cached = self._load_cached_result(cache_key)
        if cached is not None:
            return cached

        # One transaction per user: the list of items that user bought.
        transactions = (
            self.data[self.data['behavior_type'] == 'buy']
            .groupby('user_id')['item_id']
            .apply(list)
            .tolist()
        )

        rules = apriori(
            transactions, min_support=min_support, min_confidence=min_confidence
        )

        records = []
        for rule in rules:
            for stat in rule.ordered_statistics:
                records.append({
                    '前项': list(stat.items_base),
                    '后项': list(stat.items_add),
                    '支持度': rule.support,
                    '置信度': stat.confidence,
                    '提升度': stat.lift
                })

        # Pin columns so an empty result still exposes the expected schema.
        rules_df = pd.DataFrame(
            records, columns=['前项', '后项', '支持度', '置信度', '提升度']
        )

        self._cache_result(cache_key, rules_df)
        return rules_df