from typing import Dict, List, Optional, Any
import logging
import torch


def _decode_fused_to_orig_indices(fused_val: int, orig_val_to_idx: Dict[int, int]) -> List[int]:
    """Given an integer fused_val (sum of powers of two), return list of original indices.
    orig_val_to_idx maps original id value (power of two) -> original node index.
    """
    res = []
    v = int(fused_val)
    while v:
        lowbit = v & -v
        idx = orig_val_to_idx.get(int(lowbit))
        if idx is None:
            # if original mapping doesn't include this power, infer bit position
            bitpos = (lowbit.bit_length() - 1)
            idx = bitpos
        res.append(int(idx))
        v = v & (v - 1)
    return res


def _int_to_bitlist(val: int, W: int) -> List[int]:
    return [ (int(val) >> j) & 1 for j in range(W) ]


def edge_mask_from_ids(
    g,
    g1,
    aw1: Optional[torch.Tensor] = None,
    *,
    id_key: str = 'id',
    available_window_key: str = 'available_window',
    tmp: Any = None,
    debug: bool = False,
) -> torch.Tensor:
    """
    Decode fused node ids (bitmask sums) in `g` to original node indices in `g1`
    using `g1.ndata[id_key]` (expected powers-of-two). Collect original nodes'
    available_window from `aw1` (or `g1.ndata[available_window_key]`), convert
    each integer to its bit-vector (width determined by max aw value), and for each
    edge in `g` decide schedulability using `is_schedulable` (port from tmp.py).

    Args:
        g: fused graph; must expose ``edges(form='all')`` returning
            ``(src, dst, eids)`` tensors and ``ndata[id_key]`` with fused ids
            (DGL-style interface — assumed, confirm against callers).
        g1: original graph; ``ndata[id_key]`` must hold power-of-two ids and,
            unless ``aw1`` is supplied, ``ndata[available_window_key]`` the
            per-node available-window integers.
        aw1: optional per-original-node available-window integers; a tensor,
            or any sequence convertible to a long tensor.
        id_key: key into ``ndata`` for node ids (both graphs).
        available_window_key: key into ``g1.ndata`` used when ``aw1`` is None.
        tmp: unused by this function; kept only for interface compatibility.
        debug: when True, emits ``logging.debug``/``print`` diagnostics.

    Returns boolean mask (torch.bool) over edges returned by `g.edges(form='all')`.
    True means the edge should be REMOVED (not schedulable).

    Raises:
        KeyError: if ``g1.ndata`` lacks the aw/id keys, or ``g.ndata`` lacks
            the fused id key.
    """
    # prepare aw1: fall back to g1.ndata, drop a trailing singleton dim,
    # and coerce any non-tensor sequence to a long tensor
    if aw1 is None:
        if available_window_key not in g1.ndata:
            raise KeyError(f"g1.ndata must contain '{available_window_key}' or aw1 must be provided")
        aw1 = g1.ndata[available_window_key]
    if isinstance(aw1, torch.Tensor) and aw1.dim() > 1 and aw1.size(-1) == 1:
        aw1 = aw1.squeeze(-1)
    if not isinstance(aw1, torch.Tensor):
        aw1 = torch.tensor(list(aw1), dtype=torch.long)
    aw1 = aw1.to(torch.long)

    # determine bitwidth W from max value (at least 1 so empty/zero aw still
    # yields non-empty bit-vectors)
    max_val = int(torch.max(aw1).item()) if aw1.numel() > 0 else 1
    W = max(1, max_val.bit_length())

    # build mapping from original id value to original node index in g1
    if id_key not in g1.ndata:
        raise KeyError(f"g1.ndata must contain '{id_key}' with power-of-two ids")
    orig_id_vals = g1.ndata[id_key]
    if isinstance(orig_id_vals, torch.Tensor):
        orig_id_vals_list = [int(x.item()) for x in orig_id_vals]
    else:
        orig_id_vals_list = [int(x) for x in orig_id_vals]
    orig_val_to_idx: Dict[int, int] = {int(val): idx for idx, val in enumerate(orig_id_vals_list)}

    # precompute bitlists for aw1 entries (LSB-first rows of width W,
    # indexed by original node index)
    aw_bits: List[List[int]] = []
    for v in aw1.tolist():
        aw_bits.append(_int_to_bitlist(int(v), W))

    # edges on fused graph
    src_nodes, dst_nodes, eids = g.edges(form='all')
    device = src_nodes.device
    num_edges = src_nodes.numel()

    # collect fused id values for all nodes involved in edges
    # (only endpoints of edges are looked up, not every node of g)
    all_nodes = torch.cat([src_nodes, dst_nodes]).unique()
    fused_ids = {}
    if id_key not in g.ndata:
        raise KeyError(f"g.ndata must contain fused '{id_key}' values")
    g_id_data = g.ndata[id_key]
    for n in all_nodes.tolist():
        fused_val = int(g_id_data[int(n)].item()) if isinstance(g_id_data, torch.Tensor) else int(g_id_data[n])
        fused_ids[int(n)] = fused_val

    # map fused nodes to list of original indices
    fused_to_orig_idxs: Dict[int, List[int]] = {}
    for nid, fused_val in fused_ids.items():
        fused_to_orig_idxs[nid] = _decode_fused_to_orig_indices(fused_val, orig_val_to_idx)
        if debug:
            logging.debug('node %s -> original indices %s', nid, fused_to_orig_idxs[nid])
    if debug:
        print('fused_to_orig_idxs:', fused_to_orig_idxs)

    # result mask lives on the same device as the edge tensors
    mask_remove = torch.zeros(num_edges, dtype=torch.bool, device=device)

    for i in range(num_edges):
        s = int(src_nodes[i].item())
        d = int(dst_nodes[i].item())
        s_idxs = fused_to_orig_idxs.get(s, [])
        d_idxs = fused_to_orig_idxs.get(d, [])
        if debug:
            print(f'edge {i}: ({s},{d}) -> s_idxs={s_idxs}, d_idxs={d_idxs}')
        if len(s_idxs) == 0 or len(d_idxs) == 0:
            # conservative: remove
            mask_remove[i] = True
            continue
        # build combined masks list
        combined_masks: List[List[int]] = []
        # Previously we skipped scheduling checks when both endpoints mapped to
        # a single original node; that special-case preserved all edges among
        # original (non-fused) nodes. We remove that shortcut so every edge is
        # decided by the original-node aw values decoded from ids (g1 / aw1).
        # This keeps decision logic consistent: always decode ids -> aw (from g1)
        # and run is_schedulable over the resulting mask rows.
        for idx in s_idxs:
            if idx < 0 or idx >= len(aw_bits):
                continue
            combined_masks.append(aw_bits[idx])
        for idx in d_idxs:
            if idx < 0 or idx >= len(aw_bits):
                continue
            combined_masks.append(aw_bits[idx])
        # if no masks, remove
        if len(combined_masks) == 0:
            mask_remove[i] = True
            continue
        sched = is_schedulable(combined_masks)
        if not sched:
            mask_remove[i] = True

    return mask_remove


def _to_list_masks(masks) -> List[List[int]]:
    """Support torch.Tensor or python list; return List[List[int]] of 0/1 rows."""
    if isinstance(masks, torch.Tensor):
        m = masks.detach().cpu()
        if m.dim() == 1:
            return [[int(x) for x in m.tolist()]]
        return [[int(x) for x in row] for row in m.tolist()]
    # assume sequence of sequences
    return [list(map(int, row)) for row in masks]


def is_schedulable(masks) -> bool:
    """
    Determine if a collection of binary masks (k rows x W slots) is schedulable:
    there exists a matching assigning each row (node) to a distinct slot where
    its mask has a 1 bit.

    Accepts a torch.Tensor (1-D or 2-D) or a sequence of rows; a flat sequence
    of scalars is treated as a single row (previously only tensors got this
    treatment and a flat list raised TypeError). Empty input is trivially
    schedulable.
    """
    # Normalize input to List[List[int]] rows of 0/1 (self-contained so the
    # function does not depend on callers pre-normalizing).
    if isinstance(masks, torch.Tensor):
        t = masks.detach().cpu()
        rows = t.tolist()
        if t.dim() == 1:
            rows = [rows]
    else:
        rows = list(masks)
        if rows and not hasattr(rows[0], '__iter__'):
            rows = [rows]
    mat = [[int(b) for b in row] for row in rows]

    k = len(mat)
    if k == 0:
        return True
    W = len(mat[0])

    # quick necessary condition: number of available slots (OR) >= nodes
    or_bits = [0] * W
    for row in mat:
        for j, b in enumerate(row):
            or_bits[j] |= (1 if b else 0)
    if sum(or_bits) < k:
        return False

    # Kuhn (DFS augmenting path) bipartite matching: nodes -> slots.
    # match_slot[s] is the node occupying slot s, or -1 when free
    # (annotation fixed: the sentinel is -1, never None).
    match_slot: List[int] = [-1] * W

    def try_assign(u: int, seen: List[bool]) -> bool:
        for s, allowed in enumerate(mat[u]):
            if not allowed or seen[s]:
                continue
            seen[s] = True
            # slot free, or its current occupant can move elsewhere
            if match_slot[s] == -1 or try_assign(match_slot[s], seen):
                match_slot[s] = u
                return True
        return False

    for u in range(k):
        seen = [False] * W
        if not try_assign(u, seen):
            return False
    return True


def can_merge(cluster_masks, new_mask) -> bool:
    """Return True when adding *new_mask* to *cluster_masks* keeps the combined
    set of rows schedulable (each row still gets its own slot)."""
    combined = _to_list_masks(cluster_masks)
    combined.extend(_to_list_masks(new_mask))
    return is_schedulable(combined)


# Replace bottom test block with new tests
if __name__ == '__main__':
    # Minimal stand-in for a DGL-like graph: exposes edges(form='all') and ndata,
    # which is all edge_mask_from_ids needs.
    class SimpleGraph:
        def __init__(self, num_nodes, edges, ndata=None):
            self._num_nodes = num_nodes
            self._src = [s for s, d in edges]
            self._dst = [d for s, d in edges]
            self.ndata = ndata or {}
        def edges(self, form='all'):
            src = torch.tensor(self._src, dtype=torch.long)
            dst = torch.tensor(self._dst, dtype=torch.long)
            eids = torch.arange(len(self._src), dtype=torch.long)
            return src, dst, eids
        def number_of_nodes(self):
            return self._num_nodes

    # Original graph g1: 4 nodes, ids are powers of two
    num_nodes = 4
    orig_ids = [1 << i for i in range(num_nodes)]  # [1,2,4,8]
    # available_window integers: nodes 0/3 have all four slots (15 = 0b1111),
    # nodes 1/2 only slot 0 (1 = 0b0001)
    aw_ints = [15, 1, 1, 15]
    aw1 = torch.tensor(aw_ints, dtype=torch.long)
    g1 = SimpleGraph(num_nodes, edges=[], ndata={'id': torch.tensor(orig_ids, dtype=torch.long), 'available_window': aw1})

    # Test 1: fused graph identical to original (no fusion), full directed graph
    src, dst = [], []
    for i in range(num_nodes):
        for j in range(num_nodes):
            if i != j:
                src.append(i); dst.append(j)
    g_full = SimpleGraph(num_nodes, edges=list(zip(src, dst)), ndata={'id': torch.tensor(orig_ids, dtype=torch.long)})

    mask_full = edge_mask_from_ids(g=g_full, g1=g1, aw1=aw1, id_key='id', available_window_key='available_window', debug=True)
    # With decisions always derived from g1's aw, some single-node edges may be
    # unschedulable (e.g., nodes with identical single-slot masks). Expect two
    # directed edges between node 1 and 2 to be removed in this test.
    print('full graph mask sum (expected 2):', int(mask_full.sum().item()))
    if int(mask_full.sum().item()) > 0:
        src_t, dst_t, eids_t = g_full.edges(form='all')
        flagged = []
        for ii in range(mask_full.numel()):
            if mask_full[ii].item():
                flagged.append((int(src_t[ii].item()), int(dst_t[ii].item())))
        print('flagged edges in full graph:', flagged)
    assert int(mask_full.sum().item()) == 2


    # Test 2: fused graph with two nodes: node0 merges [0,1] (id=3), node1 merges [2,3] (id=12)
    fused_ids = [ (1<<0) | (1<<1), (1<<2) | (1<<3) ]  # [3,12]
    # fused graph: only supply ids (do not give per-graph available_window)
    g_fused = SimpleGraph(2, edges=[(0,1), (1,0)], ndata={'id': fused_ids})
    mask_fused = edge_mask_from_ids(g=g_fused, g1=g1, aw1=aw1, id_key='id', available_window_key='available_window', debug=True)
    print('fused graph mask:', mask_fused)
    # expected: combined masks [15,1,1,15] unschedulable -> mask True for both directed edges
    assert mask_fused.numel() == 2
    # both directed edges must be flagged for removal
    assert mask_fused[0].item() and mask_fused[1].item()

    # Test 3: new scenario — original aw = [15,15,8,10], fuse (0,1) and (2,3), expect schedulable
    # BUG FIX: this previously used [14, 14, 10, 10], which contradicts the
    # comment above and is NOT schedulable (0b1110 | 0b1010 covers only slots
    # {1,2,3} for 4 nodes), so the assertions below failed. [15,15,8,10] covers
    # all four slots and admits a matching (2->3, 3->1, 0->0, 1->2).
    aw_ints_3 = [15, 15, 8, 10]
    aw1_3 = torch.tensor(aw_ints_3, dtype=torch.long)
    g1_3 = SimpleGraph(num_nodes, edges=[], ndata={'id': torch.tensor(orig_ids, dtype=torch.long), 'available_window': aw1_3})
    fused_ids_3 = [(1<<0) | (1<<1), (1<<2) | (1<<3)]
    g_fused_3 = SimpleGraph(2, edges=[(0,1), (1,0)], ndata={'id': fused_ids_3})
    mask_fused_3 = edge_mask_from_ids(g=g_fused_3, g1=g1_3, aw1=aw1_3, id_key='id', available_window_key='available_window', debug=True)
    print('fused graph mask (test 3):', mask_fused_3)
    # For aw rows [15,15,8,10] the merged rows are schedulable -> expect False (keep edges)
    assert mask_fused_3.numel() == 2
    assert not mask_fused_3[0].item() and not mask_fused_3[1].item()

    print('All tests passed')
