import sys
import os
import numpy as np

# 添加项目根目录到Python路径
sys.path.append(os.path.join(os.path.dirname(__file__), "."))

# 直接导入Tokenizer类
from cs336_basics.tokenizer import Tokenizer


def _flush_tokens(tokenizer, text, out_f):
    """Encode *text*, append the ids to *out_f* as raw native-endian uint16,
    and return how many tokens were written."""
    token_ids = tokenizer.encode(text)
    out_f.write(np.array(token_ids, dtype=np.uint16).tobytes())
    return len(token_ids)


def encode_large_file_with_special_token(
    tokenizer, input_path, output_path, special_token="", chunk_size=10 * 1024 * 1024
):
    """Stream-encode a large text file into a flat binary file of uint16 token ids.

    The input is read in chunks of ``chunk_size`` characters and buffered; the
    buffer is only encoded up to (and including) the last complete occurrence
    of ``special_token``, so a special token is never split across two
    tokenizer calls.

    Args:
        tokenizer: object exposing ``encode(str) -> list[int]``.
        input_path: UTF-8 text file to tokenize.
        output_path: destination file receiving raw uint16 token ids.
        special_token: document separator to keep intact across chunk
            boundaries (empty string means "flush every chunk").
        chunk_size: number of characters read per iteration.

    NOTE: ids are stored as uint16, so the tokenizer vocabulary must not
    exceed 65535 entries — larger ids would fail/overflow on conversion.
    """
    # Only create the directory when the path actually has one:
    # os.makedirs("") raises FileNotFoundError for bare filenames.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    print(f"Processing {input_path}...")

    with open(output_path, "wb") as out_f, open(
        input_path, "r", encoding="utf-8"
    ) as in_f:
        buffer = ""  # text read from disk but not yet encoded
        total_tokens = 0

        while chunk := in_f.read(chunk_size):
            buffer += chunk

            # Encode only up to the last complete special token so we never
            # cut one in half at a chunk boundary.
            last = buffer.rfind(special_token)
            if last != -1:
                cut = last + len(special_token)
                process_content, buffer = buffer[:cut], buffer[cut:]
                if process_content:
                    total_tokens += _flush_tokens(tokenizer, process_content, out_f)
                    print(f"Chunk processed, total tokens so far: {total_tokens}")
            elif len(buffer) > chunk_size * 2:
                # Special token is rare or absent: bound memory use by
                # force-flushing a chunk, even though it may split a token.
                process_content, buffer = buffer[:chunk_size], buffer[chunk_size:]
                total_tokens += _flush_tokens(tokenizer, process_content, out_f)
                print(
                    f"Forced chunk processed, total tokens so far: {total_tokens}"
                )

        # Flush trailing text that was never followed by a special token.
        if buffer:
            total_tokens += _flush_tokens(tokenizer, buffer, out_f)
            print(f"Final chunk processed, total tokens: {total_tokens}")

        print(f"Finished processing. Total tokens: {total_tokens}")
        print(f"Saved to {output_path}")


def main():
    """Tokenize the TinyStories train/valid text files into binary token dumps."""
    # Load the tokenizer trained on the TinyStories training split.
    tokenizer = Tokenizer.from_files(
        "./cs336_basics/out/ts-train-vocab.txt",
        "./cs336_basics/out/ts-train-merges.txt",
    )

    # (source text file, destination .bin file) pairs: train first, then valid.
    jobs = (
        (
            "C:/Users/cwj/Desktop/cs336/a1/assignment1-basics-main/cs336/data/TinyStoriesV2-GPT4-train.txt",
            r"C:\Users\cwj\Desktop\cs336\a1\assignment1-basics-main\cs336\cs336_basics\train_val_data\train.bin",
        ),
        (
            "C:/Users/cwj/Desktop/cs336/a1/assignment1-basics-main/cs336/data/TinyStoriesV2-GPT4-valid.txt",
            r"C:\Users\cwj\Desktop\cs336\a1\assignment1-basics-main\cs336\cs336_basics\train_val_data\valid.bin",
        ),
    )

    # Both splits use the document separator as the chunk-safe boundary token.
    for src, dst in jobs:
        encode_large_file_with_special_token(tokenizer, src, dst, "<|endoftext|>")


# Run the export only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
