from typing import Any, Dict, List, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F


class TimeEmbedding(nn.Module):
    """Legacy fixed-slice time embedding, kept for compatibility.

    Output layout along the last dim:
    slot(3), day(3), cont_left(4), util(3), plan(3), cont_right(3) -> 19 total.
    The trailing 9 dims (util + plan + cont_right) form the semantic portion.
    """

    def __init__(self, transforms=None):
        super().__init__()
        # A day holds 96 fifteen-minute slots.
        self.embed_slot = nn.Embedding(4 * 24, 3)
        self.embed_day = nn.Embedding(7, 3)  # day of the week
        self.embed_util = nn.Embedding(10, 3)  # utilization type
        self.embed_plan = nn.Embedding(36, 3)  # geolocation
        self.transforms = transforms or []
        self._out_dim = 19
        self._semantic_dim = 9

    @property
    def out_dim(self) -> int:
        return self._out_dim

    @property
    def semantic_dim(self) -> int:
        return self._semantic_dim

    def forward(self, x):
        """Embed one covariate slice.

        Args:
            x: the ``X[..., 1:]`` slice, shape ``[b, t, n, Cin-1]``.
                Columns: 0=slot, 1=day, 6=util, 7=plan; 2:6 and 8:11 are
                continuous covariates.

        Returns:
            Tensor of shape ``[b, t, n, 19]``.
        """

        def _lookup(layer, col):
            # Clamp out-of-range codes so the embedding lookup cannot fail.
            idx = torch.clamp(x[..., col].long(), 0, layer.num_embeddings - 1)
            return layer(idx)

        emb_slot = _lookup(self.embed_slot, 0)
        emb_day = _lookup(self.embed_day, 1)
        emb_util = _lookup(self.embed_util, 6)
        emb_plan = _lookup(self.embed_plan, 7)

        # Continuous covariates come from columns 2:6 and 8:11.
        cont = torch.cat((x[..., 2:6].float(), x[..., 8:11].float()), dim=-1)
        # Optional feature transforms act on the continuous part only.
        for transform in self.transforms:
            cont = transform(cont)
        # Keep the original split widths stable: 4 on the left, 3 on the right.
        left, right = cont[..., :4], cont[..., 4:7]
        return torch.cat((emb_slot, emb_day, left, emb_util, emb_plan, right), dim=-1)


class SchemaTimeEmbedding(nn.Module):
    """Time embedding driven by a dynamic feature schema.

    - Builds categorical embeddings and continuous groups from a YAML-style
      schema dict and concatenates them following ``output_order``.
    - ``semantic_dim`` is the sum of the widths referenced by
      ``semantic_sources``.  Those items must form the tail of
      ``output_order`` so downstream models can slice them with
      ``out[..., -semantic_dim:]``; this is validated at construction time.
    - The default schema reproduces the legacy 19/9 layout of
      ``TimeEmbedding``.

    Args:
        schema: schema dict with keys ``categorical``, ``continuous_groups``,
            ``output_order`` and ``semantic_sources``.
        transforms: optional callables applied, in order, to continuous
            groups whose schema entry sets ``normalize: true``.

    Raises:
        KeyError: if an ``output_order`` or ``semantic_sources`` item is not
            declared in the schema.
        ValueError: if the semantic sources are not the tail of
            ``output_order``.
    """

    def __init__(self, schema: Dict[str, Any], transforms: Optional[List[Any]] = None):
        super().__init__()
        self.schema = schema or {}
        self.transforms = transforms or []
        # Index schema sections by name for O(1) lookup.
        self._cat_defs = {c['name']: c for c in self.schema.get('categorical', [])}
        self._cont_defs = {c['name']: c for c in self.schema.get('continuous_groups', [])}
        self._order = list(self.schema.get('output_order', []))
        self._sem_sources = self.schema.get('semantic_sources', [])

        # One embedding layer per categorical feature.
        self._cat_layers = nn.ModuleDict()
        for name, cfg in self._cat_defs.items():
            self._cat_layers[name] = nn.Embedding(int(cfg['cardinality']), int(cfg['emb_dim']))

        self._out_dim = sum(self._item_dim(n) for n in self._order)

        # Sum the semantic widths and remember the source names so we can
        # verify they really sit at the end of the output.
        sem_names: List[str] = []
        self._semantic_dim = 0
        for src in self._sem_sources:
            kind = src.get('type')
            name = src.get('name')
            if kind == 'categorical' and name in self._cat_defs:
                self._semantic_dim += int(self._cat_defs[name]['emb_dim'])
            elif kind == 'cont_group' and name in self._cont_defs:
                self._semantic_dim += len(self._cont_defs[name]['indices'])
            else:
                raise KeyError(f"semantic source not found: {src}")
            sem_names.append(name)
        # The docstring promises the semantic dims are the tail of the
        # output; enforce it instead of silently mis-slicing later.
        if sem_names and self._order[-len(sem_names):] != sem_names:
            raise ValueError(
                f"semantic_sources {sem_names} must be the tail of "
                f"output_order {self._order}"
            )

    def _item_dim(self, item_name: str) -> int:
        """Return the output width contributed by one ``output_order`` item."""
        if item_name in self._cat_defs:
            return int(self._cat_defs[item_name]['emb_dim'])
        if item_name in self._cont_defs:
            return len(self._cont_defs[item_name]['indices'])
        raise KeyError(f"item '{item_name}' not found in schema")

    @property
    def out_dim(self) -> int:
        return self._out_dim

    @property
    def semantic_dim(self) -> int:
        return self._semantic_dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Embed one covariate slice.

        Args:
            x: the ``X[..., 1:]`` slice, shape ``[b, t, n, Cin-1]``.  All
                column indices in the schema refer to this slice (matching
                the YAML).

        Returns:
            Tensor of shape ``[b, t, n, out_dim]``, concatenated in
            ``output_order``.
        """
        parts: List[torch.Tensor] = []
        for name in self._order:
            if name in self._cat_defs:
                cfg = self._cat_defs[name]
                # Clamp out-of-range codes so the embedding lookup cannot fail.
                idx = torch.clamp(
                    x[..., int(cfg['index'])].long(), 0, int(cfg['cardinality']) - 1
                )
                parts.append(self._cat_layers[name](idx))
            elif name in self._cont_defs:
                cfg = self._cont_defs[name]
                group = x[..., list(cfg['indices'])].float()
                # Transforms apply only where the schema opts in.
                if bool(cfg.get('normalize', False)):
                    for t in self.transforms:
                        group = t(group)
                parts.append(group)
            else:
                raise KeyError(f"order item '{name}' not found in schema")
        return torch.cat(parts, dim=-1)
