# Demo script: explore the Hugging Face tokenizer API
# (input_ids, attention_mask, token_type_ids).
# Uncomment the next two lines to route Hub downloads through a mirror
# (useful behind a firewall):
# import os
# os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
from transformers import BertTokenizer
import transformers
print(transformers.__version__)  # sanity-check which transformers version is active
import torch


# Download (or read from the local cache) the cased BERT WordPiece vocabulary.
# NOTE(review): requires network access on first run.
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

# Transformer's tokenizer - input_ids
# sequence_1 = "A Titan RTX has 24GB of VRAM"
# print("Original sequence: ", sequence_1)
# tokenized_sequence = tokenizer(sequence_1)
# print("Tokenized sequence: ", tokenized_sequence)
# encodings = tokenizer(sequence_1)
# encoded_sequence = encodings['input_ids']
# print("Encoded sequence: ", encoded_sequence)
# decoded_encodings = tokenizer.decode(encoded_sequence)
# print("Decoded sequence: ", decoded_encodings)


# Transformer's tokenizer - attention_mask
# Three raw sentences of different lengths, used below to show how padding
# and the attention mask line up when sentences are batched together.
sequence_a = "This is a short sequence."
sequence_b = "This is a rather long sequence. It is at least longer than the sequence A."
sequence_c = "Jack is a my cat."
# print("Sequence a: ",sequence_a)
# print("Sequence b: ",sequence_b)
# encoded_sequence_a = tokenizer(sequence_a)["input_ids"]
# encoded_sequence_b = tokenizer(sequence_b)["input_ids"]
# encoded_sequence_c = tokenizer(sequence_c)["input_ids"]
# print("A's encoding length={}. \nB's encoding length={}.\nC's encoding length={}".
#       format(len(encoded_sequence_a),len(encoded_sequence_b),len(encoded_sequence_c)))
# padded_sequence_ab = tokenizer([sequence_a,sequence_b,sequence_c],padding=True)
# print("Padded sequence(A,B):", padded_sequence_ab["input_ids"])
# print("Attention mask(A,B):", padded_sequence_ab["attention_mask"])

# Transformer's tokenizer - token type id
# Encode a sentence PAIR: token_type_ids is 0 for sequence_a's tokens (plus the
# surrounding [CLS]/[SEP]) and 1 for sequence_b's tokens.
# FIX: the original call passed three positional sentences,
# tokenizer(sequence_a, sequence_b, sequence_c). The third positional parameter
# of tokenizer.__call__ is `text_target`, so sequence_c was silently encoded as
# a target ("labels") and never appeared in input_ids / the decoded output.
# BERT's segment embeddings only distinguish a pair (segment 0/1), so encode
# exactly two sentences here.
encodings_ab = tokenizer(sequence_a, sequence_b)
print("Encoded sequence(AB):", len(encodings_ab["input_ids"]))
decoded_ab = tokenizer.decode(encodings_ab["input_ids"])
print("Decoded sequence(AB):", decoded_ab)
print("Token type ids(AB):", encodings_ab["token_type_ids"])


# Second, independent experiment: load a causal-LM checkpoint from the Hub.
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch  # redundant (already imported at the top of the file) but harmless
# SECURITY: trust_remote_code=True executes Python model code downloaded from
# the Hub repository — only use with repositories you trust.
# NOTE(review): this rebinds `tokenizer`, replacing the BERT tokenizer created above.
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Base", trust_remote_code=True)
# bfloat16 weights halve memory vs float32; .cuda() moves the model to the GPU
# and raises if no CUDA device is available.
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Base", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
