import torch
import torch.nn as nn
from src.module import Transformer
from src.dataset import Vocabulary
import torch.nn.functional as F
from typing import List
from torch.nn.utils.rnn import pad_sequence


class Translator:
    """Inference wrapper around a trained seq2seq Transformer.

    Handles tokenisation, special-token bookkeeping, mask construction and
    autoregressive decoding (greedy or length-normalised beam search).
    """

    def __init__(self,
                 model: nn.Module,
                 src_vocab: 'Vocabulary',
                 tgt_vocab: 'Vocabulary',
                 device: str = 'cpu',
                 max_seq_len: int = 100):
        """Store the model (switched to eval mode), vocabularies and special indices.

        Args:
            model: trained Transformer exposing ``encode`` and ``decode``.
            src_vocab: source-language vocabulary with ``word2idx``.
            tgt_vocab: target-language vocabulary with ``word2idx``/``idx2word``.
            device: device on which the model and tensors are placed.
            max_seq_len: hard cap on the generated target length.
        """
        self.model = model.to(device)
        self.model.eval()  # inference only: disable dropout, etc.
        self.src_vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.device = device
        self.max_seq_len = max_seq_len
        self.pad_idx = self.src_vocab.word2idx['<pad>']
        # Target-side delimiters, used while generating / post-processing.
        self.sos_idx = self.tgt_vocab.word2idx['<sos>']
        self.eos_idx = self.tgt_vocab.word2idx['<eos>']
        # BUG FIX: <unk> is only ever substituted for *source* tokens in
        # preprocess(), so it must come from the source vocabulary (the
        # original read it from tgt_vocab, whose index need not match).
        self.unk_idx = self.src_vocab.word2idx['<unk>']
        # Source-side sentence delimiters; may differ from the target indices.
        self.src_sos_idx = self.src_vocab.word2idx['<sos>']
        self.src_eos_idx = self.src_vocab.word2idx['<eos>']

    def preprocess(self, text: str) -> torch.Tensor:
        """Tokenise ``text`` and return a (1, seq_len) LongTensor of source indices."""
        tokens = text.strip().lower().split()
        indices = [self.src_vocab.word2idx.get(token, self.unk_idx) for token in tokens]
        # BUG FIX: wrap with *source*-vocabulary <sos>/<eos>; the original
        # used the target-vocabulary indices, which need not match.
        indices = [self.src_sos_idx] + indices + [self.src_eos_idx]
        # BUG FIX: ``torch.longTensor`` does not exist (AttributeError);
        # the correct constructor is ``torch.LongTensor``.
        return torch.LongTensor(indices).unsqueeze(0).to(self.device)

    def postprocess(self, indices: List[int]) -> str:
        """Map target indices back to a space-joined sentence, dropping special tokens."""
        specials = {self.sos_idx, self.eos_idx, self.pad_idx}
        filtered = [idx for idx in indices if idx not in specials]
        return ' '.join([self.tgt_vocab.idx2word.get(idx, '<unk>') for idx in filtered])

    def create_mask(self, src: torch.Tensor) -> torch.Tensor:
        """Build a (batch, 1, 1, seq_len) source padding mask: True where not <pad>."""
        src_mask = (src != self.pad_idx).unsqueeze(1).unsqueeze(2)
        return src_mask.to(self.device)

    def translate(self,
                  text: str,
                  method: str = 'greedy',
                  beam_size: int = 5,
                  temperature: float = 1.0) -> str:
        """Translate a single sentence.

        Args:
            text: raw source sentence.
            method: ``'greedy'`` or ``'beam'``.
            beam_size: beam width when ``method == 'beam'``.
            temperature: currently unused; kept for interface compatibility.

        Raises:
            ValueError: if ``method`` is not supported.
        """
        src = self.preprocess(text)
        src_mask = self.create_mask(src)

        with torch.no_grad():
            memory = self.model.encode(src, src_mask)
            if method == 'greedy':
                result = self.greedy_decode(memory, src_mask)
            elif method == 'beam':
                result = self.beam_search_decode(memory, src_mask, beam_size)
            else:
                raise ValueError(f"Unsupported decode method:{method}")
        return self.postprocess(result)

    def greedy_decode(self,
                      memory: torch.Tensor,
                      src_mask: torch.Tensor) -> List[int]:
        """Greedy autoregressive decoding.

        Returns the token indices (including <sos>/<eos>) of the FIRST batch
        element only, so callers should pass a batch of one sentence.
        """
        batch_size = memory.size(0)
        ys = torch.ones(batch_size, 1).fill_(self.sos_idx).long().to(self.device)

        for _ in range(self.max_seq_len):
            tgt_mask = self.generate_square_subsequent_mask(ys.size(1)).to(self.device)
            out = self.model.decode(ys, memory, src_mask, tgt_mask)
            # Pick the highest-scoring token from the last time step.
            prob = out[:, -1, :]
            next_word = torch.argmax(prob, dim=-1).unsqueeze(1)
            ys = torch.cat([ys, next_word], dim=1)

            if (next_word == self.eos_idx).all():
                break

        return ys[0].tolist()

    def beam_search_decode(self,
                           memory: torch.Tensor,
                           src_mask: torch.Tensor,
                           beam_size: int = 5) -> List[int]:
        """Length-normalised beam search; assumes a batch of exactly one sentence.

        Returns the token list (including <sos>/<eos>) of the best hypothesis.
        """
        # NOTE: the original repeated ``memory``/``src_mask`` beam_size times
        # and sliced one copy per hypothesis; every copy is identical for a
        # single source sentence, so we reuse the originals directly.
        beam = [{'tokens': [self.sos_idx], 'score': 0.0, 'alive': True}]

        for _ in range(self.max_seq_len):
            candidates = []
            all_dead = True

            for hypo in beam:
                if not hypo['alive']:
                    # Finished hypotheses keep competing unchanged.
                    candidates.append(hypo)
                    continue

                tokens = hypo['tokens']
                ys = torch.LongTensor(tokens).unsqueeze(0).to(self.device)

                tgt_mask = self.generate_square_subsequent_mask(ys.size(1)).to(self.device)
                out = self.model.decode(ys, memory, src_mask, tgt_mask)
                log_prob = F.log_softmax(out[:, -1, :], dim=-1)
                topk_scores, topk_ids = log_prob.topk(beam_size, dim=-1)
                for i in range(beam_size):
                    next_id = topk_ids[0, i].item()
                    new_alive = next_id != self.eos_idx
                    if new_alive:
                        all_dead = False
                    candidates.append({
                        'tokens': tokens + [next_id],
                        'score': hypo['score'] + topk_scores[0, i].item(),
                        'alive': new_alive,
                    })
            # Keep the beam_size best hypotheses by length-normalised score.
            candidates.sort(key=lambda x: x['score'] / (len(x['tokens']) + 1e-9), reverse=True)
            beam = candidates[:beam_size]
            if all_dead:
                break
        best_hypo = max(beam, key=lambda x: x['score'] / (len(x['tokens']) + 1e-9))
        return best_hypo['tokens']

    def batch_translate(self,
                        texts: List[str],
                        batch_size: int = 32,
                        method: str = 'beam') -> List[str]:
        """Translate a list of sentences, encoding them in padded mini-batches.

        Raises:
            ValueError: if ``method`` is not supported.
        """
        results = []
        for start in range(0, len(texts), batch_size):
            batch = texts[start:start + batch_size]
            # preprocess() returns (1, L) tensors; squeeze so pad_sequence
            # receives (L,) sequences. BUG FIX: the original padded (1, L)
            # tensors, which pad_sequence rejects when lengths differ.
            processed = [self.preprocess(text).squeeze(0) for text in batch]
            src = pad_sequence(processed, batch_first=True, padding_value=self.pad_idx)
            src_mask = (src != self.pad_idx).unsqueeze(1).unsqueeze(2)

            with torch.no_grad():
                memory = self.model.encode(src, src_mask)
                # BUG FIX: both decoders return a single token list, so decode
                # one sentence at a time (the original treated the single list
                # as a batch of outputs and crashed in postprocess).
                for i in range(src.size(0)):
                    mem_i = memory[i:i + 1]
                    mask_i = src_mask[i:i + 1]
                    if method == 'greedy':
                        output = self.greedy_decode(mem_i, mask_i)
                    elif method == 'beam':
                        output = self.beam_search_decode(mem_i, mask_i)
                    else:
                        # BUG FIX: the original fell through here with
                        # ``outputs`` undefined (NameError).
                        raise ValueError(f"Unsupported decode method:{method}")
                    results.append(self.postprocess(output))
        return results

    @staticmethod
    def generate_square_subsequent_mask(sz: int) -> torch.Tensor:
        """Return an (sz, sz) additive causal mask: 0 on/below the diagonal, -inf above."""
        return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)


if __name__ == "__main__":
    # Demo entry point: rebuild the vocabularies, restore a checkpoint and
    # translate one example sentence.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Build source/target vocabularies from the training corpora.
    # NOTE(review): `load_data` is neither defined nor imported in this file,
    # so this raises NameError as written — presumably it lives in
    # src.dataset alongside Vocabulary; confirm and import it.
    src_vocab = Vocabulary()
    tgt_vocab = Vocabulary()
    src_data, tgt_data = load_data('data/train.en', 'data/train.de')
    src_vocab.build(src_data)
    tgt_vocab.build(tgt_data)

    # Model hyper-parameters — must match those of the saved checkpoint.
    config = {
        'model_dim': 512,
        'num_heads': 8,
        'num_layers': 6,
        'd_ff': 2048,
        'dropout': 0.1,
        'save_path': 'transformer_model.pth'
    }
    model = Transformer(
        len(src_vocab.word2idx),
        len(tgt_vocab.word2idx),
        d_model=config['model_dim'],
        num_heads=config['num_heads'],
        num_layers=config['num_layers'],
        d_ff=config['d_ff'],
        dropout=config['dropout']
    )

    # Restore the trained weights onto the chosen device.
    model.load_state_dict(torch.load(config['save_path'], map_location=device))
    print(f"Model loaded from {config['save_path']}")

    # Wrap the model for inference.
    translator = Translator(model, src_vocab, tgt_vocab, device=device)

    # Example translation using beam search.
    text = "Hello world"
    translated_text = translator.translate(text, method='beam', beam_size=5)
    print(f"Translated text: {translated_text}")
