import json
import os
import gc
from tokenizers import Tokenizer, models, pre_tokenizers, trainers, decoders
from tqdm import tqdm


def read_texts_from_jsonl(file_path, max_samples=None):
    """
    Stream training texts from a JSONL file one record at a time.

    Yields "title\\ncontent" strings lazily so the whole dataset never has
    to be held in memory at once.

    Args:
        file_path: path to the JSONL file; each line must be a JSON object
            with 'title' and 'content' keys.
        max_samples: yield at most this many samples (None = no limit).
            Useful for quick test runs.

    Yields:
        str: one "title\\ncontent" string per valid JSON line.
    """
    if not os.path.exists(file_path):
        print(f"错误：文件不存在 {file_path}")
        return

    count = 0
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in tqdm(f, desc="读取数据"):
            # `is not None` so that max_samples=0 yields nothing; the old
            # truthiness test treated 0 as "no limit" and read the whole file.
            if max_samples is not None and count >= max_samples:
                break

            try:
                data = json.loads(line.strip())
                text = f"{data['title']}\n{data['content']}"
                yield text
                count += 1

                # Periodically nudge the GC to keep memory bounded on huge files.
                if count % 10000 == 0:
                    gc.collect()

            except json.JSONDecodeError as e:
                print(f"JSON解析错误: {e}")
                continue
            except KeyError as e:
                # Record is valid JSON but lacks 'title' or 'content';
                # skip it instead of aborting the whole stream.
                print(f"处理错误: {e}")
                continue


def _build_tokenizer_config():
    """
    Build the tokenizer_config.json contents for a PreTrainedTokenizerFast.

    NOTE: the "0"/"1"/"2" ids in added_tokens_decoder assume the special
    tokens were given to the trainer in the order
    <|endoftext|>, <|im_start|>, <|im_end|>.
    """
    return {
        "add_bos_token": False,
        "add_eos_token": False,
        "add_prefix_space": False,
        "added_tokens_decoder": {
            "0": {
                "content": "<|endoftext|>",
                "lstrip": False,
                "normalized": False,
                "rstrip": False,
                "single_word": False,
                "special": True
            },
            "1": {
                "content": "<|im_start|>",
                "lstrip": False,
                "normalized": False,
                "rstrip": False,
                "single_word": False,
                "special": True
            },
            "2": {
                "content": "<|im_end|>",
                "lstrip": False,
                "normalized": False,
                "rstrip": False,
                "single_word": False,
                "special": True
            }
        },
        "additional_special_tokens": [],
        "bos_token": "<|im_start|>",
        "clean_up_tokenization_spaces": False,
        "eos_token": "<|im_end|>",
        "legacy": True,
        "model_max_length": 8192,
        "pad_token": "<|endoftext|>",
        "sp_model_kwargs": {},
        "spaces_between_special_tokens": False,
        "tokenizer_class": "PreTrainedTokenizerFast",
        "unk_token": "<|endoftext|>",
        # ChatML-style template: system turn first (defaulting to a generic
        # assistant persona), then alternating user/assistant turns.
        "chat_template": """
{% if messages[0]['role'] == 'system' %}
{% set system_message = messages[0]['content'] %}
{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}
{% else %}
{{ '<|im_start|>system\\nYou are a helpful assistant<|im_end|>\\n' }}
{% endif %}
{% for message in messages %}
{% set content = message['content'] %}
{% if message['role'] == 'user' %}
{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}
{% elif message['role'] == 'assistant' %}
{{ content + '<|im_end|>' + '\\n' }}
{% endif %}{% endfor %}
            """
    }


def train_tokenizer(data_path='./zhwiki_dataset.jsonl',
                    vocab_size=6400,
                    max_samples=None,
                    output_dir='./model/'):
    """
    Train a byte-level BPE tokenizer from a JSONL corpus and save it
    (tokenizer.json, BPE model files, tokenizer_config.json) to output_dir.

    Args:
        data_path: path to the JSONL training corpus.
        vocab_size: target vocabulary size.
        max_samples: cap on the number of training samples (for test runs).
        output_dir: directory the tokenizer artifacts are written to.

    Raises:
        Re-raises any exception occurring during training/saving after
        printing a failure message.
    """
    print("开始训练tokenizer...")
    print(f"数据文件: {data_path}")
    print(f"词汇表大小: {vocab_size}")
    # `is not None` so a (pointless but explicit) limit of 0 is not shown as "全部".
    print(f"最大样本数: {max_samples if max_samples is not None else '全部'}")

    # Bail out early if the corpus has not been generated yet.
    if not os.path.exists(data_path):
        print(f"错误：数据文件不存在 {data_path}")
        print("请先运行 create_dataset.py 生成JSONL文件")
        return

    os.makedirs(output_dir, exist_ok=True)

    # Byte-level BPE: no unknown tokens, works for any unicode text.
    tokenizer = Tokenizer(models.BPE())
    tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)

    # Order matters: ids 0/1/2 are mirrored in _build_tokenizer_config().
    special_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>"]

    trainer = trainers.BpeTrainer(
        vocab_size=vocab_size,
        special_tokens=special_tokens,
        show_progress=True,
        initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
        min_frequency=2  # drop one-off merges to reduce memory use
    )

    try:
        # Stream the corpus; nothing is fully materialized in memory.
        print("读取训练数据...")
        texts = read_texts_from_jsonl(data_path, max_samples)

        print("开始训练...")
        tokenizer.train_from_iterator(texts, trainer=trainer)

        # ByteLevel decoder matches the ByteLevel pre-tokenizer above.
        tokenizer.decoder = decoders.ByteLevel()

        print("保存tokenizer...")
        tokenizer.save(os.path.join(output_dir, "tokenizer.json"))
        tokenizer.model.save(output_dir)

        # Write the transformers-compatible config alongside the model.
        config_path = os.path.join(output_dir, "tokenizer_config.json")
        with open(config_path, "w", encoding="utf-8") as config_file:
            json.dump(_build_tokenizer_config(), config_file,
                      ensure_ascii=False, indent=4)

        print("✅ Tokenizer训练完成！")
        print(f"输出目录: {output_dir}")

        vocab_size_actual = len(tokenizer.get_vocab())
        print(f"实际词汇表大小: {vocab_size_actual}")

    except Exception as e:
        # Report, then propagate — callers should see training failures.
        print(f"❌ 训练失败: {e}")
        raise


def eval_tokenizer(model_dir='./model/'):
    """
    Smoke-test a trained tokenizer.

    Loads it with transformers, renders the chat template for a sample
    conversation, and checks that encoding followed by decoding reproduces
    the rendered prompt. Any failure is reported but not raised.
    """
    try:
        from transformers import AutoTokenizer

        print("测试tokenizer...")

        tok = AutoTokenizer.from_pretrained(model_dir)

        # A three-turn conversation exercising system/user/assistant roles.
        chat = [
            {"role": "system", "content": "你是一个优秀的聊天机器人，总是给我正确的回应！"},
            {"role": "user", "content": '你来自哪里？'},
            {"role": "assistant", "content": '我来自地球'},
        ]

        rendered = tok.apply_chat_template(chat, tokenize=False)
        print("聊天模板测试:")
        print(rendered)

        print(f'Tokenizer实际词表长度: {len(tok)}')

        # Encode the rendered prompt and report its token count.
        encoded = tok(rendered)
        ids = encoded['input_ids']
        print(f'编码长度: {len(ids)}')

        # Round-trip check: decoding (special tokens kept) should give back
        # exactly the rendered prompt.
        decoded = tok.decode(ids, skip_special_tokens=False)
        print(f'解码和原始文本是否一致: {decoded == rendered}')

        print("✅ Tokenizer测试通过！")

    except Exception as e:
        print(f"❌ Tokenizer测试失败: {e}")

if __name__ == "__main__":
    train_tokenizer(
        data_path='./zhwiki_dataset.jsonl',
        vocab_size=6400,
        max_samples=100000,  # cap at 100,000 samples for a quick test run
        output_dir='./model/'
    )
    
    # Sanity-check the freshly trained tokenizer.
    eval_tokenizer()