print('Started.')
import time
import redis
from util_mongo import get_history
import pymongo as pm
from common import MONGODB_NAME, VALUE, KEY, IO_PREFIX
from PyCmpltrtok.common import sep
print('Importing transformers ...')
from transformers import AutoConfig, GenerationConfig, AutoTokenizer, AutoModelForCausalLM
print('Importing over.')

print('-------------------------------------------------------')
print('正在加载模型……')  # "Loading the model..."
# Alternative checkpoints/paths kept for reference (earlier machines/setups):
# model_name = "THUDM/chatglm2-6b-int4"
# model_name = "/root/.cache/huggingface/hub/models--THUDM--chatglm2-6b-int4/snapshots/66ecaf1db3a5085714e133357ea4824b69698743"
# model_name = "Qwen/Qwen-1_8B-Chat"
# model_name = "/mnt/d/_dell7590_root/sync/1_usb/N1/large_sci.com.models/hf/Qwen-1_8B-Chat"  # WSL
# model_name = "/home/yunpeng/models/hf/Qwen-1_8B-Chat"  # ASUS NEW
# Local path (a symlink, per the note) to the HF checkpoint loaded below.
model_name = "models/hf/Qwen-1_8B-Chat"  # ASUS NEW, ln -s


def float_set(config, option):
    """Enable exactly one floating-point precision flag on *config*.

    Clears ``bf16``, ``fp16`` and ``fp32`` on *config*, then sets the one
    named by *option* to ``True``. On an unrecognized *option* all three
    flags are left ``False`` and a warning is printed (no exception).
    """
    flags = ("bf16", "fp16", "fp32")
    for flag in flags:
        setattr(config, flag, False)
    if option in flags:
        setattr(config, option, True)
    else:
        print("Invalid option. Please choose one from 'bf16', 'fp16' and 'fp32'.")
        
        
# Load the model config from the checkpoint; trust_remote_code is required
# because Qwen checkpoints ship their own custom modeling code.
config = AutoConfig.from_pretrained(
    model_name,
    trust_remote_code=True,
)
# NOTE: if you use the old version of model file, please remove the comments below
# config.use_flash_attn = False
float_set(config, "fp16")  # run inference in half precision
# Generation defaults (sampling params, special-token ids) bundled with the checkpoint.
generation_config = GenerationConfig.from_pretrained(
    model_name, trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    config=config,
    low_cpu_mem_usage=True,  # reduce peak host RAM while loading weights
    trust_remote_code=True
)
model = model.eval()  # inference mode (disables dropout etc.)
# When the checkpoint uses dynamic NTK rope scaling, allow a longer context.
if hasattr(model.config, "use_dynamic_ntk") and model.config.use_dynamic_ntk:
    model.config.max_sequence_length = 16384
tokenizer = AutoTokenizer.from_pretrained(
    model_name, trust_remote_code=True
)
# Make special-token ids consistent across tokenizer, model config and
# generation config (Qwen checkpoints may leave some of these unset).
tokenizer.eos_token_id = config.eos_token_id
tokenizer.bos_token_id = config.bos_token_id
tokenizer.pad_token_id = generation_config.pad_token_id
model.config.eos_token_id = tokenizer.eos_token_id
model.config.bos_token_id = tokenizer.bos_token_id
model.config.pad_token_id = tokenizer.pad_token_id
model = model.cuda()  # move weights to GPU — assumes CUDA is available
sep()
sep()
print(model)  # dump the module tree as a quick sanity check
sep()
sep()
print('模型已经加载完毕。')  # "Model loading finished."

