import re
from typing import List, Union
import tiktoken
from nltk.tokenize import sent_tokenize


class TokenCounter:
    """
    Token counting utilities backed by tiktoken.

    - ``num_tokens_from_string``: count tokens in a single string.
    - ``num_tokens_from_list_string``: count total tokens across a list of strings.
    - ``text_truncation``: truncate text to a maximum token length.
    """

    def __init__(self) -> None:
        # Encoding used by gpt-3.5-turbo (cl100k_base).
        self.encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")

    def num_tokens_from_string(self, string: str) -> int:
        """
        Return the number of tokens in a single string.
        """
        try:
            return len(self.encoding.encode(string, disallowed_special=()))
        except Exception:
            # On an encoding error, strip the special end-of-text marker and retry.
            cleaned_string = string.replace('<|endoftext|>', '')
            return len(self.encoding.encode(cleaned_string, disallowed_special=()))

    def num_tokens_from_list_string(self, list_of_string: List[str]) -> int:
        """
        Return the total number of tokens across a list of strings.
        """
        return sum(self.num_tokens_from_string(s) for s in list_of_string)

    def text_truncation(self, text: str, max_len: int = 1000) -> str:
        """
        Truncate ``text`` to at most ``max_len`` tokens and decode back to a string.
        """
        encoded_id = self.encoding.encode(text, disallowed_special=())
        # Slicing already clamps to the sequence length; no min() needed.
        return self.encoding.decode(encoded_id[:max_len])


def tokenize_sentences(text: str) -> str:
    """
    Split the input text into sentences and format them as an indexed listing.

    :param text: raw input text
    :return: formatted string in which every sentence is tagged with
             "sen_id:{index}" and "sentence_text:{sentence content}"
    """
    tagged = []
    for idx, sentence in enumerate(sent_tokenize(text)):
        # Flatten any internal newlines so each sentence occupies one line.
        flat = sentence.replace("\n", " ")
        tagged.append(f"sen_id:{idx}\nsentence_text:{flat}")
    return "\n".join(tagged)


def chunking(chunk_text_list: Union[str, List[str]], reduce_length: int = 0, max_length: int = 3000) -> List[str]:
    """
    Shrink texts so each chunk stays within a maximum token count.

    Oversized chunks are first stripped of embedded ``<html>...</html>``
    blocks; if still too long, they are hard-truncated by token count.

    :param chunk_text_list: a single text or a list of texts
    :param reduce_length: amount subtracted from the truncation budget
    :param max_length: maximum allowed token count per chunk
    :return: list of processed chunks
    """
    token_counter = TokenCounter()
    # NOTE(review): the truncation budget is based on 2500, not max_length —
    # presumably headroom for prompt overhead; confirm against callers.
    truncation_length = 2500 - reduce_length
    # Compile once instead of re-parsing the pattern on every iteration.
    html_pattern = re.compile(r"<html>.*?</html>", flags=re.DOTALL)
    chunk_list: List[str] = []

    if isinstance(chunk_text_list, str):
        chunk_text_list = [chunk_text_list]

    for chunk_text in chunk_text_list:
        if token_counter.num_tokens_from_string(chunk_text) > max_length:
            # Drop embedded HTML first; only truncate if that is not enough.
            chunk_text = html_pattern.sub("", chunk_text)
            if token_counter.num_tokens_from_string(chunk_text) > max_length:
                chunk_text = token_counter.text_truncation(
                    chunk_text, truncation_length)
        chunk_list.append(chunk_text)

    return chunk_list


if __name__ == "__main__":
    # Smoke check: a short string fits within the limit and passes through unchanged.
    print(chunking("tiktoken is great!aaaaaa", 0, 3000))