import numpy as np
import torch
from torch.utils.data import Dataset
from collections import Counter
from preprocess import preprocess, postprocess
from typing import Dict, List


def load_glove_embeddings(glove_file):
    """Read a GloVe text file and return a word -> vector mapping.

    Each line is expected to look like ``word v1 v2 ... vd``; vectors are
    returned as float32 numpy arrays keyed by the word.

    :param glove_file: path to the GloVe embeddings text file
    :return: dict mapping each word to its embedding vector
    """
    vectors = {}
    with open(glove_file, "r", encoding="utf-8") as handle:
        for raw_line in handle:
            token, *components = raw_line.split()
            vectors[token] = np.asarray(components, dtype="float32")
    return vectors


class Vocabulary:
    """Token <-> index mapping with optional pretrained (GloVe) embeddings.

    Index layout: 0=<PAD>, 1=<SOS>, 2=<EOS>, 3=<UNK>; regular words start at 4.
    """

    PAD = "<PAD>"
    SOS = "<SOS>"
    EOS = "<EOS>"
    UNK = "<UNK>"

    def __init__(
        self,
        itos: Dict[int, str],
        stoi: Dict[str, int],
        embedding_dim: int = 100,
        pretrained_weight: Dict[str, List[float]] = None,
    ):
        """
        :param itos: index -> token mapping
        :param stoi: token -> index mapping (inverse of ``itos``)
        :param embedding_dim: dimensionality of the embedding vectors
        :param pretrained_weight: optional word -> vector mapping (e.g. GloVe);
            each vector must have exactly ``embedding_dim`` components
        :raises ValueError: if pretrained vectors do not match ``embedding_dim``
        """
        self.itos = itos
        self.stoi = stoi
        self.pretrained_weight = pretrained_weight
        self.special_tokens = [self.PAD, self.SOS, self.EOS, self.UNK]
        self.special_indices = [self.stoi[token] for token in self.special_tokens]

        # Validate with an explicit exception; `assert` is stripped under
        # `python -O` and must not guard input consistency.
        if pretrained_weight:
            sample = next(iter(pretrained_weight.values()))
            if len(sample) != embedding_dim:
                raise ValueError(
                    f"pretrained vectors have {len(sample)} components, "
                    f"expected embedding_dim={embedding_dim}"
                )

        self.embedding_dim = embedding_dim

    @classmethod
    def from_text(
        cls, sentence_list: List[str], embedding_dim: int, freq_threshold: int = 5
    ):
        """Build a vocabulary from raw sentences.

        Words occurring fewer than ``freq_threshold`` times are dropped
        (they will map to <UNK> at numericalization time).

        :param sentence_list: corpus sentences
        :param embedding_dim: dimensionality to use for the embedding layer
        :param freq_threshold: minimum occurrence count for a word to be kept
        """
        frequencies = Counter()
        for sentence in sentence_list:
            tokens = preprocess(sentence, return_str=False)
            frequencies.update(tokens)
        stoi = {cls.PAD: 0, cls.SOS: 1, cls.EOS: 2, cls.UNK: 3}
        idx = 4
        for word, freq in frequencies.items():
            if freq >= freq_threshold:
                stoi[word] = idx
                idx += 1
        itos = {v: k for k, v in stoi.items()}
        return cls(itos, stoi, embedding_dim)

    def create_embedding_layer(self) -> torch.nn.Embedding:
        """Create an ``nn.Embedding`` for this vocabulary.

        Without pretrained weights, returns a default (fully trainable) layer.
        With pretrained weights, GloVe rows are loaded and effectively frozen;
        only the special-token rows receive gradient updates.
        """
        embedding_dim = self.embedding_dim
        embedding_layer = torch.nn.Embedding(self.vocab_size, embedding_dim)

        if self.pretrained_weight is None:
            return embedding_layer

        weights_matrix = np.zeros((self.vocab_size, embedding_dim))
        # Place each pretrained vector at the index assigned to its word.
        # Looking the index up in `stoi` is robust even if this dict's
        # iteration order differs from the order indices were assigned
        # (the original `enumerate(..., 4)` silently assumed they matched).
        for word, vector in self.pretrained_weight.items():
            idx = self.stoi.get(word)
            if idx is not None:
                weights_matrix[idx] = vector

        # Special tokens get their own initialization:
        # <PAD> stays zero; <SOS>, <EOS>, <UNK> get small random vectors.
        weights_matrix[0] = np.zeros(embedding_dim)
        weights_matrix[1:4] = np.random.normal(scale=0.6, size=(3, embedding_dim))

        embedding_layer.weight = torch.nn.Parameter(
            torch.tensor(weights_matrix, dtype=torch.float32), requires_grad=True
        )

        # Train ONLY the special-token rows. The original code did
        # `weight[indices].requires_grad_(True)`, but advanced indexing yields
        # a temporary copy, so that call had no effect — and requires_grad is
        # tracked per Parameter, not per row. Instead, keep the whole
        # parameter trainable and zero out the gradient of every
        # non-special row via a hook.
        mask = torch.zeros(self.vocab_size, 1, dtype=torch.float32)
        mask[self.special_indices] = 1.0
        embedding_layer.weight.register_hook(lambda grad: grad * mask)
        return embedding_layer

    @classmethod
    def from_pretrained_glove(cls, glove_file: str, embedding_dim: int):
        """Build a vocabulary whose words come from a GloVe embeddings file.

        :param glove_file: path to the GloVe text file
        :param embedding_dim: dimensionality of the vectors in the file
        """
        embeddings = load_glove_embeddings(glove_file)
        itos = {0: cls.PAD, 1: cls.SOS, 2: cls.EOS, 3: cls.UNK}
        # BUG FIX: the original built {word: idx} and merged it into `itos`,
        # which maps index -> token; that corrupted itos and made the stoi
        # inversion below wrong. Build {idx: word} instead.
        itos.update({idx: word for idx, word in enumerate(embeddings, start=4)})
        stoi = {word: idx for idx, word in itos.items()}
        return cls(
            itos, stoi, embedding_dim=embedding_dim, pretrained_weight=embeddings
        )

    def numericalize(self, text, max_len=-1):
        """
        Convert a text sentence to a list of indices. Add <SOS> at the beginning and <EOS> at the end of the sequence.

        :param text: input sentence as a string
        :param max_len: max length of returned tokens (including <SOS>/<EOS>);
            values <= 0 mean no limit
        """
        indices = [self.stoi[self.SOS]]
        # Convert each word to the corresponding index. If word is not in vocab, use <UNK>
        for word in preprocess(text, return_str=False):
            indices.append(self.stoi.get(word, self.stoi[self.UNK]))
        if max_len > 0:
            # Keep room for <EOS> so the final length never exceeds max_len.
            indices = indices[: max_len - 1]
        indices.append(self.stoi[self.EOS])

        return indices

    def denumericalize(self, indices, remove_special_tokens=True):
        """Convert indices back to text via ``postprocess``.

        :param indices: iterable of token indices
        :param remove_special_tokens: drop <PAD>/<SOS>/<EOS>/<UNK> if True
        """
        tokens = [
            self.itos[i]
            for i in indices
            if i not in self.special_indices or not remove_special_tokens
        ]
        return postprocess(tokens)

    @property
    def vocab_size(self):
        # Number of entries, special tokens included.
        return len(self.stoi)


class Seq2SeqDataset(Dataset):
    """Paired source/target sentences, numericalized and sorted by target length."""

    def __init__(
        self,
        input_texts,
        output_texts,
        src_vocab: Vocabulary,
        tgt_vocab: Vocabulary,
        max_len=150,
    ):
        """
        Numericalize the sentence pairs and sort them by target length
        (shorter targets first), which keeps padding within a batch small.

        :param input_texts: list of source sentences
        :param output_texts: list of target sentences
        :param src_vocab: vocabulary used to numericalize the source side
        :param tgt_vocab: vocabulary used to numericalize the target side
        :param max_len: maximum sequence length passed to ``numericalize``
        """
        pairs = [
            (
                np.array(src_vocab.numericalize(src, max_len)),
                np.array(tgt_vocab.numericalize(tgt, max_len)),
            )
            for src, tgt in zip(input_texts, output_texts)
        ]
        # Sort by the length of the target sequence.
        pairs.sort(key=lambda pair: pair[1].shape[0])
        self.inputs = [src for src, _ in pairs]
        self.outputs = [tgt for _, tgt in pairs]

        self.src_vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.max_len = max_len

    def __len__(self):
        """Number of sentence pairs."""
        return len(self.inputs)

    def __getitem__(self, idx):
        """Return one pair as long tensors under the keys 'input'/'output'."""
        src, tgt = self.inputs[idx], self.outputs[idx]
        return {
            "input": torch.tensor(src, dtype=torch.long),
            "output": torch.tensor(tgt, dtype=torch.long),
        }


def data_collate_fn(batch):
    """Collate variable-length sequence pairs into padded batch tensors.

    :param batch: list of {'input': LongTensor, 'output': LongTensor} items
    :return: dict with batch-first padded 'input'/'output' tensors (pad
        value 0) and the original lengths under 'input_len'/'output_len'
    """
    pad = torch.nn.utils.rnn.pad_sequence
    inputs, outputs = [], []
    input_lengths, output_lengths = [], []
    for item in batch:
        inputs.append(item["input"])
        outputs.append(item["output"])
        input_lengths.append(item["input"].size(0))
        output_lengths.append(item["output"].size(0))

    return {
        "input": pad(inputs, batch_first=True),
        "output": pad(outputs, batch_first=True),
        "input_len": torch.tensor(input_lengths),
        "output_len": torch.tensor(output_lengths),
    }
