import os
from transformers import AutoTokenizer, AutoModel, CONFIG_NAME, WEIGHTS_NAME


# Route Hugging Face Hub downloads through a local proxy (e.g. a VPN/clash
# client listening on port 10792). Remove or adjust if no proxy is needed.
os.environ['HTTP_PROXY'] = 'http://127.0.0.1:10792'
os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:10792'

def get_model():
    """Download the model from the Hub and cache it under ../models_repo.

    Re-running reuses the cached files automatically: ``from_pretrained``
    only re-downloads when ``force_download=True``. The previous
    ``download_mode="reuse_cache_if_exists"`` argument belongs to the
    ``datasets`` library, not ``transformers``, and was silently ignored,
    so it has been removed.

    Returns:
        The pretrained model instance.
    """
    model = AutoModel.from_pretrained(
        "uer/roberta-base-finetuned-dianping-chinese",
        cache_dir="../models_repo",
    )
    return model
def get_tokenizer_from_hf_model():
    """Fetch the tokenizer for the specified model from the Hugging Face Hub."""
    return AutoTokenizer.from_pretrained('uer/roberta-base-finetuned-dianping-chinese')

def get_tokenizer_from_local_model():
    """Load the tokenizer from the locally cached model directory."""
    return AutoTokenizer.from_pretrained("../models_repo/")

def save_tokenizer_to_local():
    """Download the model's tokenizer and persist it to a local directory."""
    hub_tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-finetuned-dianping-chinese')
    hub_tokenizer.save_pretrained("../tokenizer_repo/")

def get_tokenizer_from_local():
    """Load the tokenizer previously saved under ../tokenizer_repo/."""
    return AutoTokenizer.from_pretrained("../tokenizer_repo/")

# Shared demo sentence ("The weak me also has big dreams") used by the
# tokenization examples below.
sentence = "弱小的我也有大梦想"

def get_tokens():
    """Tokenize the module-level sentence and print the resulting tokens.

    Expected output:
    ['弱', '小', '的', '我', '也', '有', '大', '梦', '想']
    """
    local_tokenizer = get_tokenizer_from_local()
    pieces = local_tokenizer.tokenize(sentence)
    print(f"tokens:{pieces}")
    return pieces

def get_vocab():
    """Print and return the tokenizer's vocabulary mapping and its size.

    Example output:
    vocab:{'茜': 5752, '##門': 20328, '∽': 390, ..., '##嶺': 15384}
    vocab_size:21128
    """
    tok = AutoTokenizer.from_pretrained("../tokenizer_repo/")
    vocab, vocab_size = tok.vocab, tok.vocab_size
    print(f"vocab:{vocab}")
    print(f"vocab_size:{vocab_size}")
    return vocab, vocab_size

def tokens_convert_ids():
    """Demonstrate round-tripping between tokens, ids, and a plain string.

    Expected output:
    - tokens -> ids: [2483, 2207, 4638, 2769, 738, 3300, 1920, 3457, 2682]
    - ids -> tokens: ['弱', '小', '的', '我', '也', '有', '大', '梦', '想']
    - tokens -> string: 弱 小 的 我 也 有 大 梦 想
    """
    tok = get_tokenizer_from_local()
    pieces = tok.tokenize(sentence)
    ids = tok.convert_tokens_to_ids(pieces)
    print(f"ids:{ids}")
    print("==========================")
    round_tripped = tok.convert_ids_to_tokens(ids)
    print(f"tokens:{round_tripped}")
    joined = tok.convert_tokens_to_string(round_tripped)
    print(f"str_sen:{joined}")

def tokenizer_encode_decode():
    """Encode the sentence to ids (with special tokens) and decode it back.

    Expected output:
    ids of encode:[101, 2483, 2207, 4638, 2769, 738, 3300, 1920, 3457, 2682, 102]
    str of decode:[CLS] 弱 小 的 我 也 有 大 梦 想 [SEP]
    """
    tok = get_tokenizer_from_local()
    encoded = tok.encode(sentence, add_special_tokens=True)
    print(f"ids of encode:{encoded}")
    decoded = tok.decode(encoded, skip_special_tokens=False)
    print(f"str of decode:{decoded}")

def tokenizer_fill_truncation():
    """Demonstrate padding and truncation during encoding.

    The first call pads the encoded ids out to length 15; the second
    truncates the encoding to at most 5 ids (the special tokens added
    around the sentence count toward the limit).
    """
    tok = get_tokenizer_from_local()
    padded = tok.encode(sentence, padding="max_length", max_length=15)
    print(f"ids:{padded}")
    print("============================")
    truncated = tok.encode(sentence, max_length=5, truncation=True)
    print(f"ids:{truncated}")

def batch_tokenize():
    """Tokenize a batch of sentences in a single tokenizer call.

    The result is a dict with 'input_ids', 'token_type_ids' and
    'attention_mask', each holding one inner list per input sentence, e.g.:
    res:{'input_ids': [[101, 2483, ..., 102], [101, 3300, ..., 102], ...],
         'token_type_ids': [[0, 0, ...], ...],
         'attention_mask': [[1, 1, ...], ...]}
    """
    batch = [
        "弱小的我也有大梦想",
        "有梦想谁都了不起",
        "追逐梦想的心，比梦想本身，更可贵",
    ]
    tok = get_tokenizer_from_local()
    encoded_batch = tok(batch)
    print(f"res:{encoded_batch}")

def tokenizer_slow_fast():
    """Load both the fast (Rust-backed, default) and slow (pure-Python) tokenizers."""
    rust_tok = AutoTokenizer.from_pretrained("../tokenizer_repo/")
    print(f"fast_tokenizer:{rust_tok}")
    py_tok = AutoTokenizer.from_pretrained("../tokenizer_repo/", use_fast=False)
    print(f"slow_tokenizer:{py_tok}")

def tokenizer_special():
    """Load a tokenizer whose implementation ships inside the model repo.

    ``trust_remote_code=True`` allows running custom tokenizer code hosted in
    the Hugging Face repository; without it, only tokenizers in the standard
    transformers format can be loaded. Only enable this for repositories you
    trust, since it executes code fetched from the Hub.

    Returns:
        The loaded tokenizer. (Previously the function discarded it, making
        the call useless to callers.)
    """
    tokenizer = AutoTokenizer.from_pretrained("Skywork/Skywork-13B-base", trust_remote_code=True)
    return tokenizer

if __name__ == "__main__":
    # Demo entry point: uncomment exactly one of the calls below to run
    # that example (most require ../tokenizer_repo/ to exist; see
    # save_tokenizer_to_local).
    # get_tokens()
    # get_vocab()
    # tokens_convert_ids()
    # tokenizer_encode_decode()
    # batch_tokenize()
    tokenizer_slow_fast()