#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
import json
import urllib.request

class BPETokenizer:
    """
    A Byte Pair Encoding (BPE) tokenizer.

    Loads a pre-trained vocabulary (vocab.json) and merge rules (merges.txt)
    when available; otherwise it falls back to a simple character-level
    vocabulary built from printable ASCII. Supports encoding text into token
    IDs, decoding IDs back into text, and adding new tokens at runtime.
    """
    def __init__(self, vocab_path=None, merges_path=None, special_tokens=None):
        """
        Initialize the BPE tokenizer.

        Args:
            vocab_path (str, optional): Path to the vocabulary file (vocab.json). Defaults to None.
            merges_path (str, optional): Path to the merges file (merges.txt). Defaults to None.
            special_tokens (list, optional): List of special tokens. Defaults to ["<s>", "</s>", "<unk>", "<pad>"].
        """
        self.vocab_path = vocab_path
        self.merges_path = merges_path
        if vocab_path and os.path.exists(vocab_path):
            # Load the token -> id mapping and build the inverse mapping.
            with open(vocab_path, "r", encoding="utf-8") as f:
                self.encoder = json.load(f)
            self.decoder = {v: k for k, v in self.encoder.items()}
        else:
            # Fallback: one token per printable ASCII character (32..126).
            base_tokens = [chr(i) for i in range(32, 127)]
            self.encoder = {tok: i for i, tok in enumerate(base_tokens)}
            self.decoder = {i: tok for tok, i in self.encoder.items()}
            print("❌\tNo vocab.json found, using dummy vocab.")
        # Map each mergeable symbol pair to its priority; a lower rank
        # means the pair is merged earlier.
        self.bpe_ranks = {}
        if merges_path and os.path.exists(merges_path):
            with open(merges_path, "r", encoding="utf-8") as f:
                # Skip comment lines (e.g. "#version: ...") and blanks.
                merges = [tuple(line.strip().split())
                          for line in f
                          if not line.startswith("#") and line.strip()]
            self.bpe_ranks = {pair: i for i, pair in enumerate(merges)}
        else:
            print("❌\tNo merges.txt found, using char-level BPE.")
        self.special_tokens = special_tokens or ["<s>", "</s>", "<unk>", "<pad>"]
        # Ensure every special token is present in both directions.
        for tok in self.special_tokens:
            if tok not in self.encoder:
                self.encoder[tok] = len(self.encoder)
                self.decoder[self.encoder[tok]] = tok
        # Cache the canonical special-token IDs.
        # NOTE: a custom special_tokens list that omits any of these four
        # will raise KeyError here (the default list always contains them).
        self.unk_id = self.encoder["<unk>"]
        self.pad_id = self.encoder["<pad>"]
        self.bos_id = self.encoder["<s>"]
        self.eos_id = self.encoder["</s>"]

    def __len__(self):
        """
        Returns the size of the vocabulary.

        Returns:
            int: The number of tokens in the vocabulary.
        """
        return len(self.encoder)

    def add_tokens(self, new_tokens):
        """
        Adds new tokens to the vocabulary.

        New tokens are also registered as special tokens so that `encode`
        treats them as atomic units.

        Args:
            new_tokens (list): List of new tokens to add.

        Returns:
            int: The number of tokens successfully added (duplicates skipped).
        """
        added_count = 0
        for token in new_tokens:
            if token not in self.encoder:
                new_id = len(self.encoder)
                self.encoder[token] = new_id
                self.decoder[new_id] = token
                if token not in self.special_tokens:
                    self.special_tokens.append(token)
                added_count += 1
        return added_count

    def save_pretrained(self, save_directory):
        """
        Saves tokenizer files (vocab, merges) to a directory.

        Args:
            save_directory (str): Directory path to save the tokenizer files.
        """
        os.makedirs(save_directory, exist_ok=True)
        # Save vocabulary as UTF-8 JSON (non-ASCII tokens kept readable).
        vocab_file = os.path.join(save_directory, "vocab.json")
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self.encoder, f, ensure_ascii=False, indent=2)
        # Copy the merges file alongside the vocabulary if we have one.
        if self.merges_path and os.path.exists(self.merges_path):
            import shutil
            merges_file = os.path.join(save_directory, "merges.txt")
            shutil.copyfile(self.merges_path, merges_file)

    def bpe(self, token):
        """
        Apply Byte Pair Encoding to a single token.

        Repeatedly merges the adjacent symbol pair with the lowest merge
        rank until no ranked pair remains.

        Args:
            token (str): The input token to be processed.

        Returns:
            list: The BPE sub-tokens of ``token``.
        """
        # Special tokens are atomic: never split or merged.
        if token in self.special_tokens:
            return [token]
        word = tuple(token)
        pairs = set(zip(word, word[1:]))
        # Empty or single-character tokens have nothing to merge.
        if not pairs:
            return [token]
        while True:
            # Find the pair with the lowest (highest-priority) merge rank.
            min_pair = None
            min_rank = float("inf")
            for pair in pairs:
                rank = self.bpe_ranks.get(pair, float("inf"))
                if rank < min_rank:
                    min_rank = rank
                    min_pair = pair
            # Stop when no remaining pair has a merge rule.
            if min_pair is None or min_pair not in self.bpe_ranks:
                break
            first, second = min_pair
            new_word = []
            i = 0
            # Merge every occurrence of (first, second) in the word.
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    # No further occurrence of `first`: copy the tail as-is.
                    new_word.extend(word[i:])
                    break
                new_word.extend(word[i:j])
                i = j
                if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            # A fully merged word cannot be merged further.
            if len(word) == 1:
                break
            pairs = set(zip(word, word[1:]))
        # Return a list so the return type is consistent across all paths.
        return list(word)

    def encode(self, text, return_tensors=None):
        """
        Encode text into a list of token IDs.

        Args:
            text (str): The input text to be encoded.
            return_tensors (str, optional): If "pt", return a PyTorch tensor
                of shape (1, seq_len). Defaults to None.

        Returns:
            list or torch.Tensor: A list of token IDs or a PyTorch tensor.
        """
        # Pad special tokens with spaces so they are isolated from
        # surrounding text before tokenization.
        for tok in self.special_tokens:
            text = text.replace(tok, f" {tok} ")
        # Match registered special tokens first (longest first) so that e.g.
        # "<s>" stays intact instead of being split into "<", "s", ">".
        # Ordinary "<" / ">" punctuation still tokenizes via [^\w\s].
        special_pat = "|".join(
            re.escape(t) for t in sorted(self.special_tokens, key=len, reverse=True)
        )
        tokens = re.findall(rf"{special_pat}|\w+|[^\w\s]", text, re.UNICODE)
        ids = []
        for token in tokens:
            # Only run BPE when merge rules were actually loaded.
            bpe_tokens = self.bpe(token) if self.bpe_ranks else [token]
            for bpe_tok in bpe_tokens:
                # Out-of-vocabulary sub-tokens map to <unk>.
                ids.append(self.encoder.get(bpe_tok, self.unk_id))
        if return_tensors == "pt":
            import torch
            return torch.tensor([ids], dtype=torch.long)
        return ids

    def encode_batch(self, texts, return_tensors=None):
        """
        Encode a batch of texts into lists of token IDs.

        Args:
            texts (list): A list of input texts to be encoded.
            return_tensors (str, optional): If "pt", return PyTorch tensors. Defaults to None.

        Returns:
            list: A list of lists of token IDs.
        """
        return [self.encode(t, return_tensors=None) for t in texts]

    def decode(self, ids, skip_special_tokens=True):
        """
        Decode a list of token IDs into text.

        Args:
            ids (list): A list of token IDs to be decoded.
            skip_special_tokens (bool, optional): Whether to skip special tokens. Defaults to True.

        Returns:
            str: The decoded text.
        """
        # Unknown IDs decode to the literal "<unk>" string.
        tokens = [self.decoder.get(i, "<unk>") for i in ids]
        if skip_special_tokens:
            tokens = [t for t in tokens if t not in self.special_tokens]
        text = " ".join(tokens)
        # Strip common sub-word markers (WordPiece "##", byte-level "Ġ").
        text = text.replace(" ##", "")
        text = text.replace("Ġ", "")
        return text.strip()

    @property
    def pad_token_id(self):
        """
        Get the ID of the padding token.

        Returns:
            int: The ID of the padding token.
        """
        return self.pad_id

    @property
    def eos_token_id(self):
        """
        Get the ID of the end-of-sequence token.

        Returns:
            int: The ID of the end-of-sequence token.
        """
        return self.eos_id

    @property
    def bos_token_id(self):
        """
        Get the ID of the beginning-of-sequence token.

        Returns:
            int: The ID of the beginning-of-sequence token.
        """
        return self.bos_id

    @property
    def unk_token_id(self):
        """
        Get the ID of the unknown token.

        Returns:
            int: The ID of the unknown token.
        """
        return self.unk_id

def download_if_missing(url, local_path):
    """
    Download a file from a URL unless it already exists locally.

    Args:
        url (str): The URL of the file to download.
        local_path (str): The local path to save the file.
    """
    # Guard clause: an existing local copy is never re-downloaded.
    if os.path.exists(local_path):
        return
    filename = os.path.basename(local_path)
    print(f"✅\tDownloading {filename} ...")
    urllib.request.urlretrieve(url, local_path)
    print(f"✅\tDownloaded {local_path}")

def get_tokenizer():
    """
    Build a BPETokenizer from vocab/merges files found on disk.

    Searches the 'tokenizer/' directory, the current directory, and the
    PISCES_VOCAB_PATH / PISCES_MERGES_PATH environment variables, in that
    order.

    Returns:
        BPETokenizer: A BPETokenizer instance initialized with found vocabulary and merges files.

    Raises:
        FileNotFoundError: If either vocab.json or merges.txt is not found.
    """
    def _locate(candidates):
        # Return the first existing candidate path, skipping unset (None) ones.
        for candidate in candidates:
            if candidate and os.path.exists(candidate):
                return candidate
        return None

    vocab_path = _locate([
        "tokenizer/vocab.json",
        "vocab.json",
        os.environ.get("PISCES_VOCAB_PATH"),
    ])
    merges_path = _locate([
        "tokenizer/merges.txt",
        "merges.txt",
        os.environ.get("PISCES_MERGES_PATH"),
    ])
    if vocab_path is None or merges_path is None:
        raise FileNotFoundError(
            "❌\tPisces BPETokenizer: vocab.json or merges.txt not found! "
            "Please put them in the 'tokenizer/' directory."
        )
    return BPETokenizer(vocab_path, merges_path)