import numpy as np
import torch
import torch.nn.functional as F
from sentence_transformers import SentenceTransformer
from torch import optim
import torch.nn as nn
import json
from typing import List, Dict

# Global compute device (CUDA if available). NOTE(review): nothing below
# appears to move tensors/models to this device — confirm intended usage.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ====================tools=======================
def load_id_to_word(json_path='id_to_word.json'):
    """Load the id-to-word mapping from a JSON file (keys remain strings)."""
    with open(json_path, 'r', encoding='utf-8') as handle:
        mapping = json.load(handle)
    return mapping

# Load the vocabulary once at import time (reads id_to_word.json from CWD).
# JSON object keys are always strings, so convert them to int so that
# integer token IDs can be looked up directly.
id_map = load_id_to_word()
id_map = {int(k): v for k, v in id_map.items()}

def ints_to_string(int_array, id_to_word=None):
    """
    Decode a batch of integer ID sequences into strings.

    int_array: 2D array/tensor of shape (batchSize, text_len).
    id_to_word: mapping from int ID to word; defaults to the global id_map.

    Returns a list of decoded strings, one per row. Entries that are None,
    not convertible to int, or absent from the mapping are silently skipped.
    """
    mapping = id_map if id_to_word is None else id_to_word

    # Reject anything that is not (batchSize, text_len).
    if int_array.ndim != 2:
        raise ValueError("输入必须是2D数组/张量，形状为 (batchSize, text_len)")

    decoded = []
    for row in int_array:
        pieces = []
        for token in row:
            if token is None:
                continue
            try:
                key = int(token)
            except (ValueError, TypeError):
                continue  # unparseable token: skip, same as original behavior
            word = mapping.get(key)
            if word is not None:
                pieces.append(word)
        decoded.append(''.join(pieces))
    return decoded

# ================================================
class TextEmbedder:
    """Sentence-embedding wrapper around SentenceTransformer with an in-memory cache."""

    def __init__(self, model_name: str = "BAAI/bge-small-zh-v1.5"):
        """
        model_name (str): Model name (default: "BAAI/bge-small-zh-v1.5")
        Device is auto-detected: "cuda" if available, else "cpu".
        """
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = SentenceTransformer(model_name, device=self.device)
        # Cache maps text -> embedding. NOTE(review): keyed on text only, not
        # on normalize_embeddings — mixing normalized and unnormalized calls
        # would return stale entries; confirm callers always use one setting.
        self.cache: Dict[str, np.ndarray] = {}
        self.embedding_dim = self.model.get_sentence_embedding_dimension()

    def embed_batch(self, texts: List[str], normalize_embeddings: bool = True, batch_size: int = 64) -> np.ndarray:
        """
        Embed a batch of texts, reusing cached embeddings where possible.

        texts: List of input texts
        normalize_embeddings: Normalize to unit vectors if True
        batch_size: Processing batch size

        Returns:
            np.ndarray: Embeddings array (len(texts), embedding_dim)
        """
        if not texts:
            return np.array([])

        # BUGFIX: the original recomputed ALL embeddings whenever any single
        # text was uncached, defeating the cache. Encode only the texts that
        # are missing (deduplicated, input order preserved).
        missing = list(dict.fromkeys(t for t in texts if t not in self.cache))
        if missing:
            new_embeddings = self.model.encode(
                missing,
                batch_size=batch_size,
                normalize_embeddings=normalize_embeddings,
                convert_to_numpy=True,
                device=self.device
            )
            for text, emb in zip(missing, new_embeddings):
                self.cache[text] = emb

        # Assemble results in input order from the (now complete) cache.
        return np.stack([self.cache[text] for text in texts])

    def clear_cache(self) -> None:
        """Clear the entire cache."""
        self.cache.clear()


class DuelingQnet(nn.Module):
    """Dueling Q-network: a shared MLP trunk feeding separate value and advantage heads."""

    def __init__(self, state_dim, embedding_dim, hidden_dim, action_dim, num_layers=2):
        super(DuelingQnet, self).__init__()
        # Enforce a minimum trunk depth of two linear layers.
        num_layers = max(num_layers, 2)

        # Shared trunk: input projection followed by (num_layers - 1) hidden layers.
        widths = [state_dim + embedding_dim] + [hidden_dim] * num_layers
        self.shared_layers = nn.ModuleList(
            nn.Linear(w_in, w_out) for w_in, w_out in zip(widths, widths[1:])
        )

        # State-value head: scalar V(s).
        self.value_stream = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim, 1)
        )

        # Advantage head: A(s, a) per action.
        self.advantage_stream = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim, action_dim)
        )

    def forward(self, state, embedding):
        """Return Q-values of shape (..., action_dim) for concatenated (state, embedding)."""
        features = torch.cat([state, embedding], dim=-1)

        for linear in self.shared_layers:
            features = F.leaky_relu(linear(features))

        value = self.value_stream(features)
        advantage = self.advantage_stream(features)

        # Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); centering the advantages
        # keeps the V/A decomposition identifiable.
        return value + (advantage - advantage.mean(dim=-1, keepdim=True))


class DqnMaster(object):
    """DQN agent over observations that mix numeric state with trailing text token IDs."""

    def __init__(self, state_dim, hidden_dim, action_dim, lr, gamma, tau=0.005, text_len=30):
        """
        state_dim: dimension of the numeric part of the observation
        hidden_dim: hidden width of the Q-networks
        action_dim: number of discrete actions
        lr: Adam learning rate
        gamma: discount factor (stored for use by training code)
        tau: soft-update interpolation coefficient for the target network
        text_len: number of trailing observation columns holding text token IDs
        """
        self.embed = TextEmbedder()
        self.q_net = DuelingQnet(state_dim, self.embed.embedding_dim, hidden_dim, action_dim)
        self.optimizer = optim.Adam(self.q_net.parameters(), lr=lr)

        # Target network starts as an exact copy of the online network.
        self.target_net = DuelingQnet(state_dim, self.embed.embedding_dim, hidden_dim, action_dim)
        self.target_net.load_state_dict(self.q_net.state_dict())

        self.tau = tau
        self.gamma = gamma
        self.text_len = text_len

    def soft_update(self, net, target_net):
        """Polyak-average net's parameters into target_net: θ' ← (1 - τ)·θ' + τ·θ."""
        for param_target, param in zip(target_net.parameters(), net.parameters()):
            param_target.data.copy_(param_target.data * (1.0 - self.tau) + param.data * self.tau)

    def take_action(self, observations: np.ndarray, is_train: bool):
        """
        Select actions for a batch of observations.

        :param observations: (batchSize, obs_dim); the last text_len columns are
            integer word IDs, the rest are numeric state features.
        :param is_train: if True, sample from a tempered softmax over Q-values
            (exploration); otherwise pick the greedy argmax action.
        :return: np.ndarray of shape (batch_size,) with chosen action indices.
        """
        if observations.ndim != 2:
            raise ValueError("observations must be a 2D array (batch_size, obs_dim)")

        obs_dim = observations.shape[1]
        if self.text_len >= obs_dim:
            raise ValueError(f"self.text_len ({self.text_len}) must be < obs_dim ({obs_dim})")

        # Split into numeric state features and text token IDs.
        states = observations[:, :-self.text_len]  # (batch_size, obs_dim - text_len)
        text_codes = observations[:, -self.text_len:]  # (batch_size, text_len)
        texts = ints_to_string(text_codes)
        embeddings = self.embed.embed_batch(texts)

        # BUGFIX: cast to float32 explicitly — numpy observations are typically
        # float64, and torch.tensor() would keep that dtype, mismatching the
        # network's float32 weights at the first Linear layer.
        states = torch.as_tensor(states, dtype=torch.float32)
        embeddings = torch.as_tensor(embeddings, dtype=torch.float32)

        # Action selection needs no autograd graph.
        with torch.no_grad():
            q_values = self.q_net(states, embeddings)

            if is_train:
                # Training: sample from a softmax over Q-values for exploration.
                temperature = 2.0  # higher => more uniform / more exploratory
                probs = F.softmax(q_values / temperature, dim=-1)  # (batch_size, action_dim)
                actions = torch.multinomial(probs, num_samples=1).cpu().numpy()  # (batch_size, 1)
            else:
                # Evaluation: greedy argmax action.
                actions = q_values.argmax(dim=-1).cpu().numpy()  # (batch_size,)

        # BUGFIX: reshape(-1), not squeeze() — squeeze() collapsed a batch of
        # size 1 to a 0-d scalar instead of the documented (batch_size,) shape.
        return actions.reshape(-1)





if __name__ == '__main__':
    # Smoke test: IDs missing from the vocabulary (e.g. 9999) are dropped.
    demo = np.array([[1, 2, 3, 4], [9999, 3333, 4444, 777]])
    print(ints_to_string(demo))

