import numpy as np
import torch
import dgl

from fusion_vrp.models.position import PositionEncoder  # for type reference only
from fusion_vrp.core.fusion import fusion

# Public API of this module (note: 'inital_g' (sic) is the exported spelling).
__all__ = [
    'set_random_seed', 'create_random_nodes', 'create_graph_from_nodes', 'inital_g'
]

# NOTE(review): not used anywhere in this file; presumably a hook for other
# modules to stash a shared PositionEncoder — confirm before removing.
pos_encoder_global = None  # optional global if needed elsewhere


def set_random_seed(seed: int):
    """Seed the NumPy, PyTorch and DGL RNGs for reproducible runs."""
    # Same seeding calls as before, applied in the same order.
    for seeder in (np.random.seed, torch.manual_seed, dgl.random.seed):
        seeder(seed)


def create_random_nodes(num_nodes, coord_range=100.0):
    """Draw random integer node coordinates.

    Returns two arrays (x, y) of length ``num_nodes`` with values drawn
    uniformly from [0, int(coord_range)).  The bound is cast to int so the
    legacy numpy ``randint`` receives an integer and emits no dtype warning.
    """
    upper = int(coord_range)

    def _draw():
        return np.random.randint(0, upper, num_nodes)

    # Tuple elements evaluate left to right, so x is drawn before y —
    # identical RNG consumption order to two sequential randint calls.
    return _draw(), _draw()


def _generate_goods_sum(num_nodes: int, total: int, low: int = 1, high: int = 80) -> np.ndarray:
    """Generate integer goods with exact sum=total and per-node bounds [low, high].
    Strategy: start with low; allocate remaining via randomized proportional rounding under capacities.
    """
    if num_nodes <= 0:
        raise ValueError('num_nodes must be > 0')
    if low > high:
        raise ValueError('low must be <= high')
    # Base allocation
    base = np.full(num_nodes, low, dtype=np.int64)
    remaining = total - num_nodes * low
    if remaining < 0:
        raise ValueError(f'total={total} too small for num_nodes={num_nodes} with low={low}')
    cap = np.full(num_nodes, high - low, dtype=np.int64)
    if remaining > int(cap.sum()):
        raise ValueError(f'total={total} too large to fit within per-node max={high}')
    if remaining == 0:
        return base
    # Proportional randomized rounding within capacity
    rng = np.random.default_rng()
    p = rng.random(num_nodes)
    if not np.isfinite(p.sum()) or p.sum() <= 0:
        p = np.ones(num_nodes, dtype=np.float64)
    p = p / p.sum()
    alloc = np.floor(p * remaining).astype(np.int64)
    # Cap per node
    alloc = np.minimum(alloc, cap)
    current = base + alloc
    rem2 = total - int(current.sum())
    if rem2 > 0:
        # Distribute leftover one-by-one respecting capacity
        cap_rem = cap - alloc
        # Indices still with capacity
        idxs = np.where(cap_rem > 0)[0]
        if idxs.size == 0:
            # Should not happen given earlier capacity check; fallback: randomly decrement and reassign
            take_idxs = rng.choice(np.arange(num_nodes), size=rem2, replace=True)
            for k in take_idxs:
                current[k] += 1
        else:
            # Weighted by remaining capacity to reduce rejections
            weights = cap_rem[idxs].astype(np.float64)
            weights = weights / weights.sum()
            picks = rng.choice(idxs, size=rem2, replace=True, p=weights)
            for k in picks:
                current[k] += 1
    return current


def create_graph_from_nodes(x_coords, y_coords, pos_encoder: PositionEncoder):
    """Build a fully-connected directed DGL graph (no self loops) over the nodes.

    Node data set here: 'pos' (float32 coords), 'id' (one-hot bitmask 2**i),
    'feat'/'vector' (position-encoder output, shared tensor), 'goods'
    (int64, exact sum 10*num_nodes), 'available_window' (int64 in [1, 1023]).
    Edge data: 'dist' (float32 Euclidean length).
    """
    num_nodes = len(x_coords)
    # Vectorized edge list in i-major order — (0,1),(0,2),...,(1,0),... —
    # identical to the former double loop, without the O(n^2) Python work.
    idx = np.arange(num_nodes, dtype=np.int64)
    src = np.repeat(idx, num_nodes)
    dst = np.tile(idx, num_nodes)
    keep = src != dst  # drop self loops
    src, dst = src[keep], dst[keep]
    g = dgl.graph((torch.from_numpy(src), torch.from_numpy(dst)), num_nodes=num_nodes)

    # Run the encoder on its own device, then bring results back to CPU
    # (graph operations are currently assumed to run on CPU).
    model_device = next(pos_encoder.parameters()).device
    pos_device = torch.tensor(np.stack([x_coords, y_coords], axis=1), dtype=torch.float32, device=model_device)
    feat_device = pos_encoder(pos_device)
    g.ndata['pos'] = pos_device.detach().cpu()
    # node_id = [1, 2, 4, 8, ..., 2^(num_nodes-1)]: one bit per node so that
    # fused nodes can represent their members as an OR of bitmasks.
    node_id = [1 << i for i in range(num_nodes)]
    g.ndata['id'] = torch.tensor(node_id, dtype=torch.int64)
    feat_cpu = feat_device.detach().cpu()
    g.ndata['feat'] = feat_cpu
    g.ndata['vector'] = feat_cpu

    # Goods: exact total = 10 * num_nodes (e.g. 200 when n=20), with per-node
    # values in [-30, 80] (negative demand allowed, sum stays fixed).
    total_goods = 10 * num_nodes
    goods_int = _generate_goods_sum(num_nodes, total_goods, low=-30, high=80)
    # Integer tensor preserves exact counts (sum == total_goods); downstream
    # float computations should call .float() explicitly.
    g.ndata['goods'] = torch.from_numpy(goods_int.astype(np.int64))

    # Per-node integer time window of shape (num_nodes, 1), values in [1, 1023].
    g.ndata['available_window'] = torch.randint(1, 1024, (num_nodes, 1), dtype=torch.int64)

    # Vectorized Euclidean edge lengths, same edge order as (src, dst) above;
    # sqrt(dx^2 + dy^2) matches the former per-edge computation exactly.
    xs = np.asarray(x_coords, dtype=np.float64)
    ys = np.asarray(y_coords, dtype=np.float64)
    dist = np.sqrt((xs[src] - xs[dst]) ** 2 + (ys[src] - ys[dst]) ** 2)
    g.edata['dist'] = torch.tensor(dist, dtype=torch.float32)
    return g


def inital_g(num_nodes, pos_encoder: PositionEncoder, stop_num: int = 3):
    """Create a random fully-connected graph and its node groupings.

    Returns ``(graph, groups)``.  For ``stop_num`` in {0, 1} the freshly built
    graph is returned with a single group holding every one-hot node id.
    Otherwise ``fusion`` merges nodes (in place) and the groups are decoded
    from the fused bitmask ids, while a deep copy of the *pre-fusion* graph
    is returned.

    NOTE(review): the misspelled name ``inital_g`` is kept — it is the
    spelling exported via ``__all__``.
    """

    # set_random_seed(4321)
    # Infer the coordinate range from the position encoder: use the length
    # (max_len) of its positional-encoding table as the upper bound.
    # Fall back to 100 (the module default) if that attribute is unavailable.
    try:
        coord_range = int(getattr(pos_encoder, 'pos_encoding').pe.size(0))
    except Exception:
        coord_range = 100
    x_coords, y_coords = create_random_nodes(num_nodes, coord_range=coord_range)

    g = create_graph_from_nodes(x_coords, y_coords, pos_encoder)
    # With 0 or 1 stops there is nothing to fuse: return the original graph
    # and one group containing every node id.
    if stop_num == 1 or stop_num == 0:
        node_id = [1 << i for i in range(num_nodes)]
        groups=[]
        groups.append(node_id)
        return g, groups


    # Clone the graph structure and deep-copy node/edge tensors so later fusion (in-place) won't modify this original copy.
    g2 = g.clone()
    try:
        # deep-copy node features (best-effort: skip non-tensor entries,
        # swallow failures so cloning never aborts graph creation)
        for k, v in g2.ndata.items():
            if isinstance(v, torch.Tensor):
                g2.ndata[k] = v.clone()
    except Exception:
        pass
    try:
        # deep-copy edge features (best-effort, as above)
        for k, v in g2.edata.items():
            if isinstance(v, torch.Tensor):
                g2.edata[k] = v.clone()
    except Exception:
        pass

    g = fusion(g, stop_num=stop_num)
    node_ids = g.ndata['id'].cpu().numpy()
    groups = []
    # Each fused id is an OR of original one-hot ids (2**i); decode the set
    # bits back into the list of constituent node ids.
    for fused_id in node_ids:
        group = []
        i = 0
        while (1 << i) <= fused_id:
            if (fused_id >> i) & 1:
                group.append(1 << i)
            i += 1
        groups.append(group)
    return g2, groups
