import os
import requests
from tokenizers import Tokenizer
"""BPE is Byte Pair Encoding"""
from tokenizers.models import BPE
from tokenizers.trainers import BpeTrainer
from tokenizers.pre_tokenizers import Whitespace
from transformers import AutoTokenizer, AutoModel
from datasets import load_dataset
"""
There are tow key thing about tokenizers on Huggingface website
1.Create a new tokenizer  manually from the head
2.After builded tokenizer, then use it.
"""

def download_wikitext_103_raw_v1_from_hf(
    dataset_id="iohadrubin/wikitext-103-raw-v1",
    save_dir="../datas/tokenizers",
):
    """
    Download a dataset from the Hugging Face Hub by its dataset id and
    save it to a local directory.

    Args:
        dataset_id: Hub id of the dataset,
            e.g. "iohadrubin/wikitext-103-raw-v1".
        save_dir: Local directory the downloaded dataset is written to.
    """
    dataset = load_dataset(dataset_id)
    dataset.save_to_disk(save_dir)

def load_wikitext():
    """
    Download the Wikitext-103 raw dataset and write each split out as a
    plain-text ``.raw`` file under ``../datas/wikitext-103-raw``.

    Blank lines are skipped, so the output files contain only non-empty
    text lines. Prints the path of each file as it is saved.
    """
    # 1. Download the wikitext-103-raw-v1 dataset from the Hub.
    dataset = load_dataset('wikitext', 'wikitext-103-raw-v1')

    # 2. Create the local directory that holds the raw files.
    data_dir = "../datas/wikitext-103-raw"
    os.makedirs(data_dir, exist_ok=True)

    # 3. Save each split to its own .raw file.
    # NOTE: the dataset's validation split is named "validation", not "valid".
    splits = ["test", "train", "validation"]

    for split in splits:
        # All text lines of this split.
        texts = dataset[split]["text"]

        # Write the lines into the matching .raw file.
        filename = os.path.join(data_dir, f"wiki.{split}.raw")
        with open(filename, "w", encoding="utf-8") as f:
            for text in texts:
                if text.strip():  # keep only non-empty lines
                    f.write(text + "\n")

        # BUG FIX: the original printed a literal "(unknown)" placeholder
        # instead of interpolating the saved file's path.
        print(f"已保存: {filename}")

    print("所有文件已准备就绪！")

def build_tokenizer():
    """
    Train a whitespace-pre-tokenized BPE tokenizer on the local
    Wikitext-103 raw files and save it as ``tokenizer-wiki.json``.
    """
    bpe_tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
    bpe_tokenizer.pre_tokenizer = Whitespace()
    bpe_trainer = BpeTrainer(
        special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
    )
    training_files = [
        f"../datas/wikitext-103-raw/wiki.{part}.raw"
        for part in ["test", "train", "validation"]
    ]
    print(f"files: {training_files}")
    bpe_tokenizer.train(training_files, bpe_trainer)
    bpe_tokenizer.save("../datas/wikitext-103-raw/tokenizer-wiki.json")

def get_tokenizer_from_files():
    """Load and return the previously trained BPE tokenizer from its JSON file."""
    return Tokenizer.from_file("../datas/wikitext-103-raw/tokenizer-wiki.json")

def save_tokenizer_to_local():
    """Fetch a pretrained model's tokenizer and save it to ../tokenizer_repo/."""
    pretrained = AutoTokenizer.from_pretrained('uer/roberta-base-finetuned-dianping-chinese')
    pretrained.save_pretrained("../tokenizer_repo/")

def get_tokenizer_from_local():
    """Load and return the tokenizer previously saved under ../tokenizer_repo/."""
    return AutoTokenizer.from_pretrained("../tokenizer_repo/")

sentence = "弱小的我也有大梦想"

def get_tokens():
    """
    Tokenize the module-level ``sentence`` with the locally saved tokenizer.

    Expected result (one token per Chinese character):
    ['弱', '小', '的', '我', '也', '有', '大', '梦', '想']
    """
    local_tokenizer = get_tokenizer_from_local()
    token_list = local_tokenizer.tokenize(sentence)
    print(f"tokens:{token_list}")
    return token_list

if __name__ == "__main__":
    os.environ['HTTP_PROXY'] = 'http://127.0.0.1:10792'
    os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:10792'
    # download_wikitext_103_raw_v1_from_hf()
    # load_wikitext()
    # download_with_requests()
    # build_tokenizer()

    tokenizer = get_tokenizer_from_files()
    output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
    print(f"tokens:{output.tokens}")
    print(f"ids:{output.ids}")
    print(f"offsets:{output.offsets}")

    save_tokenizer_to_local()
    # get_tokenizer_from_local()
    get_tokens()