import numpy as np
import torch
from torch import nn
from nanovllm.engine.metadata import SpecDecodeMetadata


class RejectionSampler(nn.Module):
    """Verify speculative (draft) tokens against the target model's logits.

    Two verification modes per sequence, chosen by its temperature:
      * temperature == 0 (greedy): a draft token is accepted iff it equals
        the target model's argmax at that position.
      * temperature > 0 (stochastic): draft token t is accepted with
        probability min(1, p_target(t) / p_draft(t)), implemented by
        comparing that ratio against a pre-drawn uniform sample.
    Acceptance stops at the first rejection or pad token.
    """

    def __init__(self):
        super().__init__()

    def forward(self, logits: torch.Tensor, temperatures: torch.Tensor, spec_metadatas: list[SpecDecodeMetadata], pad_token: int):
        """Return the accepted token ids per sequence.

        Args:
            logits: target-model logits; assumed shape
                (batch, num_spec_positions + 1, vocab) — TODO confirm against caller.
            temperatures: per-sequence temperatures, broadcastable to the
                token grid; ``temperatures[i][0] == 0`` selects greedy mode.
            spec_metadatas: per-sequence draft tokens and their draft-model
                probabilities; entries may be padded with ``pad_token``.
            pad_token: sentinel id marking unused draft slots.

        Returns:
            list[list[int]]: for each sequence, the accepted draft tokens,
            plus a bonus token when every draft was accepted, or the target
            model's own first-position sample when the first draft was
            rejected. Always non-empty per sequence.
        """
        # Count real (non-pad) draft tokens per sequence.
        num_draft_tokens = [
            sum(1 for item in spec_metadata.spec_token_ids if item != pad_token)
            for spec_metadata in spec_metadatas
        ]
        # One uniform(0,1) draw per real draft token, padded to -1 elsewhere.
        uniform_probs = generate_uniform_probs(
            sum(num_draft_tokens), num_draft_tokens, logits.shape[1] - 1
        )
        logits = logits.to(torch.float)
        greedy_tokens = logits.argmax(dim=-1)
        logits.div_(temperatures.unsqueeze(dim=-1))
        probs = torch.softmax(logits, dim=-1, dtype=torch.float)
        # Gumbel-max sampling: argmax of p / Exp(1) is a sample from p.
        epsilon = 1e-10  # guard against dividing by a zero exponential draw
        sample_tokens = probs.div(torch.empty_like(probs).exponential_(1) + epsilon).argmax(dim=-1)
        token_ids = torch.where(temperatures == 0, greedy_tokens, sample_tokens).tolist()

        accept_token_ids = []
        for i, spec_metadata in enumerate(spec_metadatas):
            accept_token_id = []
            n_slots = len(spec_metadata)
            for j in range(n_slots):
                draft = spec_metadata.spec_token_ids[j]
                # greedy rejection sample
                if temperatures[i][0] == 0:
                    if greedy_tokens[i][j] != draft or draft == pad_token:
                        break
                # random rejection sample
                else:
                    if (draft == pad_token
                            or uniform_probs[i][j] <= 0
                            or probs[i][j][draft] / spec_metadata.spec_token_probs[j] <= uniform_probs[i][j]):
                        break
                accept_token_id.append(draft)
            else:
                # Loop ran to completion: every draft was accepted, so the
                # bonus token sampled after the last draft is also emitted.
                # BUGFIX: the former `if j == len(...)` could never be true
                # (j stops at len-1) and raised NameError for empty drafts.
                if n_slots:
                    accept_token_id.append(token_ids[i][-1])
            if not accept_token_id:
                # First draft rejected (or no drafts at all): emit the target
                # model's own sample at the first position instead.
                accept_token_id.append(token_ids[i][0])
            accept_token_ids.append(accept_token_id)

        return accept_token_ids
    

def generate_uniform_probs(
    num_tokens: int,
    num_draft_tokens: list[int],
    max_spec_num: int,
) -> torch.Tensor:
    """Draw one uniform(0, 1) sample per draft token, laid out per sequence.

    Args:
        num_tokens: total number of draft tokens; must equal
            ``sum(num_draft_tokens)``.
        num_draft_tokens: draft-token count for each sequence in the batch.
        max_spec_num: number of columns in the output matrix.

    Returns:
        A float32 tensor of shape ``(len(num_draft_tokens), max_spec_num)``
        where row i holds that sequence's samples in its first slots and
        ``-1.0`` in every unused slot.
    """
    assert sum(num_draft_tokens) == num_tokens
    # One flat draw via torch (instead of np.random) keeps RNG consumption
    # identical regardless of how the batch is partitioned.
    flat_samples = torch.rand((num_tokens, ), dtype=torch.float32,)

    out = torch.full(
        (len(num_draft_tokens), max_spec_num), -1.0, dtype=torch.float32
    )
    offset = 0
    for row, count in enumerate(num_draft_tokens):
        take = min(count, max_spec_num)
        out[row, :take] = flat_samples[offset:offset + take]
        offset += count
    return out
