import torch
from dataset import causal_mask
def greed_decode(model, src, src_mask, tokenizer_src, tokenizer_tgt, max_len, device):
    """Greedily decode one source sequence with an encoder-decoder model.

    Runs the encoder once, then repeatedly feeds the growing target prefix to
    the decoder, appending the highest-probability next token each step until
    '[EOS]' is produced or the sequence reaches ``max_len`` tokens.

    Args:
        model: seq2seq model exposing ``encode``, ``decode`` and ``project``.
        src: source token-id tensor, expected shape (1, src_len) — batch of 1.
        src_mask: attention mask for the source sequence.
        tokenizer_src: source tokenizer (unused; kept for interface parity).
        tokenizer_tgt: target tokenizer, used to look up '[SOS]'/'[EOS]' ids.
        max_len: hard cap on decoded length, including the leading '[SOS]'.
        device: torch device on which decoding tensors are created.

    Returns:
        1-D tensor of decoded token ids (leading '[SOS]' included).
    """
    sos_idx = tokenizer_tgt.token_to_id('[SOS]')
    eos_idx = tokenizer_tgt.token_to_id('[EOS]')

    # Encode the source once; the result is reused at every decoding step.
    encoder_output = model.encode(src, src_mask)
    # Seed the target sequence with the [SOS] token; torch.full creates the
    # tensor with the right dtype/device directly (no uninitialized memory).
    decoder_input = torch.full((1, 1), sos_idx, dtype=src.dtype, device=device)
    while decoder_input.size(1) < max_len:
        # Causal mask so each position attends only to earlier positions.
        decoder_mask = causal_mask(decoder_input.size(1)).type_as(src_mask).to(device)
        out = model.decode(encoder_output, src_mask, decoder_input, decoder_mask)
        # Project only the last position and take the argmax (greedy search).
        prob = model.project(out[:, -1])
        # Extract the Python int once; used for both the append and the EOS
        # check (the original compared a tensor against an int for EOS).
        next_token = torch.max(prob, dim=1)[1].item()
        decoder_input = torch.cat(
            [decoder_input,
             torch.full((1, 1), next_token, dtype=src.dtype, device=device)],
            dim=1)
        if next_token == eos_idx:
            break
    return decoder_input.squeeze(0)

def run_validation(model, validation_dataset,
                    tokenizer_src, tokenizer_tgt,
                    max_len, device, global_state, writer, num_examples=2):
    """Greedily decode a few validation examples and print them for inspection.

    Puts the model in eval mode, decodes up to ``num_examples`` batches
    (each batch must contain exactly one example), and prints the source,
    reference and predicted sentences side by side.
    """
    model.eval()
    separator = '-' * 80  # console divider width

    sources, references, hypotheses = [], [], []
    processed = 0
    with torch.no_grad():
        for batch in validation_dataset:
            processed += 1
            encoder_input = batch['encoder_input'].to(device)
            encoder_mask = batch['encoder_mask'].to(device)
            assert encoder_input.size(0) == 1, "Batch size should be 1 for validation"

            prediction = greed_decode(model, encoder_input, encoder_mask,
                                      tokenizer_src, tokenizer_tgt, max_len, device)

            src_sentence = batch['src_text'][0]
            tgt_sentence = batch['tgt_text'][0]
            predicted_sentence = tokenizer_tgt.decode(prediction.detach().cpu().numpy())

            sources.append(src_sentence)
            references.append(tgt_sentence)
            hypotheses.append(predicted_sentence)

            print(separator)
            print(f'Source: {src_sentence}')
            print(f'Expected: {tgt_sentence}')
            print(f'Predicted: {predicted_sentence}')

            if processed >= num_examples:
                break
    

