import functools
import os

import tiktoken
from langchain_community.document_loaders import DirectoryLoader

from logger import MyLogging
from test_directoryloader import load_documents, load_documents_test



@functools.lru_cache(maxsize=None)
def _get_encoding(model: str):
    """Return the tiktoken encoding for *model*, cached across calls.

    ``tiktoken.encoding_for_model`` is comparatively expensive; the original
    code rebuilt the encoding on every call, which is wasteful when counting
    tokens for many documents in a loop.
    """
    return tiktoken.encoding_for_model(model)


def count_tokens(text: str) -> int:
    """Return the number of gpt-3.5-turbo tokens in *text*."""
    return len(_get_encoding("gpt-3.5-turbo").encode(text))

def load_and_count_tokens(directory: str = "./data", file_extension: str = ".md"):
    """Load documents from *directory* and count their gpt-3.5-turbo tokens.

    Args:
        directory: Root directory to scan for documents.
        file_extension: File extension filter passed to the loader (e.g. ".md").

    Returns:
        A ``(total_tokens, file_tokens)`` tuple: the token count across all
        loaded documents, and a dict mapping each source path to its token
        count.
    """
    documents = load_documents(directory, file_extension)
    total_tokens = 0
    file_tokens: dict[str, int] = {}

    for doc in documents:
        tokens = count_tokens(doc.page_content)
        total_tokens += tokens
        # Accumulate per source so that multiple documents/chunks coming from
        # the same file are summed instead of silently overwriting each other.
        source = doc.metadata['source']
        file_tokens[source] = file_tokens.get(source, 0) + tokens

    return total_tokens, file_tokens

# Usage example
if __name__ == "__main__":
    # Set up logging to a dedicated log file.
    logger = MyLogging(__name__, path='log/token_counter.log')

    data_directory = "./data"
    # Record the absolute path of the data directory for debugging.
    logger.info(f"data_directory absolute path: {os.path.abspath(data_directory)}")

    # Exercise document loading and splitting.
    chunks = load_documents_test(logger, "temp")

    total_tokens, file_tokens = load_and_count_tokens(data_directory)

    logger.info(f"Total tokens in all files: {total_tokens}")
    logger.info("\nTokens per file:")
    for path, n_tokens in file_tokens.items():
        logger.info(f"{path}: {n_tokens} tokens")