import numpy as np
import random
from collections import deque


class OutfitRLAgent:
    """Tabular Q-learning agent that personalizes outfit recommendations.

    States are string summaries of a user's preferences (plus optional
    context) and actions are canonical string summaries of an outfit's
    composition, so Q-values live in a plain dict keyed by
    ``(state, action)`` tuples.
    """

    # Per-call floor applied to the exploration probability in
    # select_outfit, so recommendations stay diverse even when the
    # configured rate is small.
    MIN_EXPLORATION = 0.3

    def __init__(self, outfit_generator, learning_rate=0.1, discount_factor=0.9, exploration_rate=0.2):
        """
        Initialize the reinforcement-learning agent.

        Args:
            outfit_generator: Outfit generator instance; only its
                ``evaluate_outfit(outfit, context)`` method is used here,
                as a tie-breaker between outfits with equal Q-values.
            learning_rate: Step size for the Q-value update.
            discount_factor: Discount factor. Stored for future use; the
                current update rule is single-step (no bootstrapping), so
                it is not applied yet.
            exploration_rate: Base probability of picking a random outfit.
        """
        self.outfit_generator = outfit_generator
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_rate = exploration_rate
        self.q_values = {}  # (state, action) -> estimated value

        # Experience replay buffer of (state, action, reward) triples;
        # bounded so old experiences age out.
        self.replay_buffer = deque(maxlen=1000)

    def _get_state_representation(self, user_profile, context=None):
        """
        Build a string state key from user preferences and context.

        Args:
            user_profile: User profile; must expose ``preferences`` (a dict
                with "color" and "subcategory" entries) and
                ``get_top_preferences(kind, n)`` returning (value, weight)
                pairs.
            context: Optional context with ``weather``, ``style_theme``
                and ``get_season()``.

        Returns:
            A state key such as "color_red_blue_subcat_dress_season_summer",
            or the sentinel states "new_user" / "neutral_user".
        """
        # A user with no recorded preferences at all is a distinct
        # "cold start" state.
        if not user_profile.preferences["color"] and not user_profile.preferences["subcategory"]:
            return "new_user"

        # Summarize the user's strongest preferences.
        top_colors = user_profile.get_top_preferences("color", n=2)
        top_subcategories = user_profile.get_top_preferences("subcategory", n=2)

        state_parts = []

        if top_colors:
            state_parts.append("color_" + "_".join(color for color, _ in top_colors))

        if top_subcategories:
            state_parts.append("subcat_" + "_".join(subcat for subcat, _ in top_subcategories))

        # Fold contextual information into the state key.
        if context:
            if context.weather:
                state_parts.append(f"weather_{context.weather}")

            if context.style_theme:
                state_parts.append(f"style_{context.style_theme}")

            state_parts.append(f"season_{context.get_season()}")

        # Preferences exist but yielded no parts (and no context given).
        if not state_parts:
            return "neutral_user"

        return "_".join(state_parts)

    def _get_action_representation(self, outfit):
        """
        Build a canonical string action key for an outfit.

        Categories and subcategories are each sorted alphabetically so
        outfits with the same composition map to the same action
        regardless of item order.

        Args:
            outfit: Outfit whose ``items`` expose ``category`` and
                ``subcategory`` attributes.

        Returns:
            "<sorted categories>|<sorted subcategories>", underscore-joined.
        """
        categories = sorted(item.category for item in outfit.items)
        subcategories = sorted(item.subcategory for item in outfit.items)
        return "_".join(categories) + "|" + "_".join(subcategories)

    def select_outfit(self, user_profile, candidate_outfits, context=None):
        """
        Pick the best outfit from the candidates via epsilon-greedy selection.

        Args:
            user_profile: User profile.
            candidate_outfits: List of candidate outfits.
            context: Optional context information.

        Returns:
            The selected outfit, or None if there are no candidates.
        """
        if not candidate_outfits:
            return None

        # Apply a per-call exploration floor for diversity. The previous
        # implementation overwrote self.exploration_rate here, silently
        # discarding the rate passed to __init__; keep the floor but leave
        # the configured attribute untouched.
        epsilon = max(self.MIN_EXPLORATION, self.exploration_rate)

        state = self._get_state_representation(user_profile, context)

        # Explore: random candidate. random.choice is used instead of
        # np.random.choice, which coerces the list into a NumPy object
        # array and can fail for arbitrary (e.g. iterable) outfit objects.
        if random.random() < epsilon:
            return random.choice(candidate_outfits)

        # Exploit: highest Q-value; unseen (state, action) pairs default
        # to 0. Ties are broken by the generator's outfit score.
        max_q_value = float('-inf')
        best_outfit = None

        for outfit in candidate_outfits:
            action = self._get_action_representation(outfit)
            q_value = self.q_values.get((state, action), 0)

            if q_value > max_q_value:
                max_q_value = q_value
                best_outfit = outfit
            elif q_value == max_q_value and best_outfit is not None:
                outfit_score = self.outfit_generator.evaluate_outfit(outfit, context)
                best_outfit_score = self.outfit_generator.evaluate_outfit(best_outfit, context)
                if outfit_score > best_outfit_score:
                    best_outfit = outfit

        # Defensive fallback; with a non-empty candidate list the loop
        # always sets best_outfit, but keep the original safety net.
        return best_outfit if best_outfit is not None else random.choice(candidate_outfits)

    def update_q_values(self, user_profile, outfit, rating, context=None):
        """
        Update Q-values from a user's rating of an outfit.

        Args:
            user_profile: User profile.
            outfit: The outfit the user rated.
            rating: Rating on a 1-5 scale.
            context: Optional context information.
        """
        state = self._get_state_representation(user_profile, context)
        action = self._get_action_representation(outfit)

        # Map the 1-5 rating onto a symmetric reward in [-1, 1].
        reward = (rating - 3) / 2

        # Store the experience for replay, then apply an immediate update.
        self.replay_buffer.append((state, action, reward))
        self._apply_update(state, action, reward)

        # Replay a random mini-batch to reinforce past experience.
        self._batch_update()

    def _apply_update(self, state, action, reward):
        """Single Q-learning step toward the observed reward (no bootstrap)."""
        key = (state, action)
        old_q = self.q_values.get(key, 0)
        self.q_values[key] = old_q + self.learning_rate * (reward - old_q)

    def _batch_update(self, batch_size=10):
        """Replay a random sample of buffered experiences through the update rule."""
        if len(self.replay_buffer) < batch_size:
            return

        for state, action, reward in random.sample(self.replay_buffer, batch_size):
            self._apply_update(state, action, reward)

    def get_top_outfits(self, state, n=5):
        """Return the n (action, q_value) pairs with the highest Q in `state`."""
        matches = [(action, q) for (s, action), q in self.q_values.items() if s == state]

        # Highest Q-value first.
        matches.sort(key=lambda pair: pair[1], reverse=True)

        return matches[:n]