import abc
import csv
import os
from typing import Generic, Hashable, IO, Iterable, TypeVar

import numpy as np

# A vocabulary token may be any hashable value (str, int, tuple, ...).
# Hashability is required because tokens are used as dict keys in the
# token_to_index mappings below.
Token = TypeVar('Token', bound=Hashable)


class Vocab(Generic[Token], abc.ABC):
    """Abstract bidirectional mapping between tokens and dense integer indices.

    Indices are assigned contiguously from 0 in insertion order; CSV
    serialization writes tokens in index order so round-tripping preserves
    the assignment.
    """

    @abc.abstractmethod
    def __len__(self) -> int:
        """Return the number of distinct tokens."""

    @abc.abstractmethod
    def add_token(self, token: Token) -> int:
        """Register `token` (if new) and return its index."""

    @abc.abstractmethod
    def get_token(self, index: int) -> Token:
        """Return the token stored at `index`."""

    @abc.abstractmethod
    def index_token(self, token: Token) -> int:
        """Return the index of a known `token`."""

    # io

    def save_to_file(self, path: os.PathLike | str):
        """Write the vocabulary to `path` as a one-column CSV."""
        # newline='' lets the csv module control line endings itself.
        with open(path, 'wt', encoding='utf-8', newline='') as fp:
            self.write_to_stream(fp)

    @classmethod
    def load_from_file(cls, path: os.PathLike | str) -> 'SimpleVocab':
        """Load a vocabulary from the CSV file at `path`.

        Dispatches through `cls.read_from_stream` (the original hard-coded
        `SimpleVocab.read_from_stream`) so subclasses that override the
        stream reader are honored without having to override this method.
        """
        with open(path, 'rt', encoding='utf-8', newline='') as fp:
            return cls.read_from_stream(fp)

    def write_to_stream(self, io: IO):
        """Serialize the vocabulary as CSV with a single "token" column."""
        writer = csv.writer(io)
        writer.writerow(["token"])
        for index in range(len(self)):
            token = self.get_token(index)
            # encode_token renders the (possibly non-str) token as CSV text.
            token_str = self.encode_token(token)
            writer.writerow([token_str])

    @classmethod
    def read_from_stream(cls, io: IO) -> 'SimpleVocab':
        """Parse a CSV written by `write_to_stream` into a SimpleVocab."""
        reader = csv.reader(io)
        header = next(reader)
        token_col = header.index("token")

        vocab = SimpleVocab()
        for row in reader:
            token_str = row[token_col]
            token = cls.decode_token(token_str)
            vocab.add_token(token)
        return vocab

    def encode_token(self, token: Token) -> str:
        """Convert a token to its CSV string form; override for custom types."""
        return str(token)

    @classmethod
    def decode_token(cls, token_str: str) -> Token:
        """Inverse of `encode_token`.

        The default is the identity, so without overriding this pair only
        `str` tokens survive a save/load round trip unchanged.
        """
        return token_str


class SimpleVocab(Vocab[Token]):
    """In-memory vocabulary: a list for index->token, a dict for token->index."""

    def __init__(self,
        index_to_token: Iterable[Token] = None,
        token_to_index: dict[Token, int] = None
    ):
        # Copy the inputs so callers cannot alias our internal state.
        self.index_to_token: list[Token] = [] if index_to_token is None else list(index_to_token)
        self.token_to_index: dict[Token, int] = {} if token_to_index is None else dict(token_to_index)

    def __len__(self):
        return len(self.index_to_token)

    def add_token(self, token: Token) -> int:
        """Return the index of `token`, registering it first if unseen."""
        try:
            return self.token_to_index[token]
        except KeyError:
            fresh_index = len(self.index_to_token)
            self.index_to_token.append(token)
            self.token_to_index[token] = fresh_index
            return fresh_index

    def get_token(self, index: int) -> Token:
        """Look up the token stored at `index`."""
        return self.index_to_token[index]

    def index_token(self, token: Token) -> int:
        """Look up the index of a known `token` (raises KeyError if absent)."""
        return self.token_to_index[token]


class VocabWithCount(Vocab[Token], abc.ABC):
    """Vocabulary interface that also tracks an occurrence count per token."""

    # NOTE(fix): @abc.abstractmethod was missing here. Without it, this
    # no-op body silently overrode Vocab.add_token, so a subclass that
    # forgot to implement it would get a method returning None instead of
    # an instantiation-time error.
    @abc.abstractmethod
    def add_token(self, token: Token, count: int = 1) -> int:
        """Register `token`, adding `count` to its tally; return its index."""

    @abc.abstractmethod
    def get_token_count(self, index: int) -> int:
        """Return the accumulated occurrence count of the token at `index`."""


class SimpleVocabWithCount(SimpleVocab, VocabWithCount):
    """SimpleVocab that additionally tracks per-token occurrence counts."""

    def __init__(self,
        index_to_token: Iterable[Token] = None,
        index_to_count: Iterable[int] = None,
        token_to_index: dict[Token, int] = None
    ):
        super().__init__(index_to_token, token_to_index)
        # Parallel to index_to_token: index_to_count[i] is the count of token i.
        self.index_to_count: list[int] = list(index_to_count) if index_to_count is not None else []

    # __len__ is inherited from SimpleVocab; the previous duplicate
    # definition here was identical and has been removed.

    def add_token(self, token: Token, count: int = 1) -> int:
        """Register `token` (if new) and add `count` to its tally; return its index."""
        index = self.token_to_index.get(token, None)
        if index is None:
            index = len(self.index_to_token)
            self.index_to_token.append(token)
            self.index_to_count.append(count)
            self.token_to_index[token] = index
        else:
            self.index_to_count[index] += count
        return index

    def get_token_count(self, index: int) -> int:
        """Return the accumulated count of the token at `index`."""
        return self.index_to_count[index]

    def reindex(self, indices: Iterable[int]) -> 'SimpleVocabWithCount':
        """Return a new vocab whose token order follows `indices`.

        `indices` must not contain duplicates, otherwise the rebuilt
        token_to_index mapping would be inconsistent with the token list.
        """
        indices = tuple(indices)
        new_index_to_token = [self.index_to_token[index] for index in indices]
        new_index_to_count = [self.index_to_count[index] for index in indices]
        new_token_to_index = {token: index for index, token in enumerate(new_index_to_token)}
        return SimpleVocabWithCount(new_index_to_token, new_index_to_count, new_token_to_index)

    def truncate(self, size: int) -> 'SimpleVocabWithCount':
        """Return a vocab keeping only the first `size` tokens (self if small enough)."""
        # <= (was <) avoids a needless full copy when len(self) == size.
        if len(self) <= size:
            return self
        return self.reindex(range(size))

    def sort_by_count(self, reverse: bool = True) -> 'SimpleVocabWithCount':
        """Return a new vocab sorted by count, descending by default."""
        indices = np.argsort(self.index_to_count)
        if reverse:
            # Reversing argsort also reverses tie order; acceptable since no
            # tie-breaking is promised here.
            indices = indices[::-1]
        return self.reindex(indices)

    # io

    @classmethod
    def load_from_file(cls, path: os.PathLike | str) -> 'SimpleVocabWithCount':
        """Load a counted vocabulary from the CSV file at `path`."""
        # Dispatch through cls (was hard-coded SimpleVocabWithCount) so
        # further subclasses can override read_from_stream.
        with open(path, 'rt', encoding='utf-8', newline='') as fp:
            return cls.read_from_stream(fp)

    def write_to_stream(self, io: IO):
        """Serialize as CSV with "token" and "count" columns."""
        writer = csv.writer(io)
        writer.writerow(["token", "count"])
        for index in range(len(self)):
            token = self.get_token(index)
            token_str = self.encode_token(token)
            count = self.get_token_count(index)
            writer.writerow([token_str, count])

    @classmethod
    def read_from_stream(cls, io: IO) -> 'SimpleVocabWithCount':
        """Parse a CSV written by `write_to_stream`."""
        reader = csv.reader(io)
        header = next(reader)
        token_col = header.index("token")
        count_col = header.index("count")
        vocab = SimpleVocabWithCount()
        for row in reader:
            token_str = row[token_col]
            token = cls.decode_token(token_str)
            count = int(row[count_col])
            vocab.add_token(token, count)
        return vocab


def compute_vocab_base_accuracy(vocab: 'VocabWithCount') -> float:
    """Return the accuracy of always predicting the most frequent token.

    This is max_i(count_i) / sum_i(count_i), the best achievable accuracy
    of a constant predictor.

    Returns 0.0 for an empty vocabulary or an all-zero count vector
    (previously np.max raised on the empty array / the ratio was nan),
    and a plain Python float as the annotation promises (previously a
    np.float32 leaked out).
    """
    tokens_count = np.asarray([vocab.get_token_count(i) for i in range(len(vocab))], dtype=np.float32)
    total_count = np.sum(tokens_count)
    if tokens_count.size == 0 or total_count == 0:
        return 0.0
    return float(np.max(tokens_count) / total_count)


def compute_vocab_entropy(vocab: 'VocabWithCount') -> float:
    """Return the Shannon entropy (in bits) of the token count distribution.

    Zero-count tokens are excluded following the convention
    0 * log2(0) = 0; previously they produced nan (0 * -inf) and poisoned
    the whole sum. Returns 0.0 for an empty vocabulary, and a plain
    Python float as the annotation promises.
    """
    tokens_count = np.asarray([vocab.get_token_count(i) for i in range(len(vocab))], dtype=np.float32)
    total_count = np.sum(tokens_count)
    if tokens_count.size == 0 or total_count == 0:
        return 0.0
    tokens_prob = tokens_count / total_count
    # Drop zero probabilities: lim p->0 of p*log2(p) is 0, but numpy
    # evaluates 0 * log2(0) as nan.
    tokens_prob = tokens_prob[tokens_prob > 0]
    return float(-np.sum(tokens_prob * np.log2(tokens_prob)))
