import torch
import numpy as np
import random

import os

seed = 7999  # global experiment seed, applied to every RNG source below


def _seed_all_rngs(value):
    """Seed every source of randomness we rely on so runs are reproducible."""
    random.seed(value)                         # Python stdlib RNG
    os.environ['PYTHONHASHSEED'] = str(value)  # disable hash randomization for reproducibility
    np.random.seed(value)                      # NumPy RNG
    torch.manual_seed(value)                   # torch CPU RNG
    torch.cuda.manual_seed(value)              # torch RNG on the current GPU
    torch.cuda.manual_seed_all(value)          # torch RNGs on all GPUs (multi-GPU runs)


_seed_all_rngs(seed)


# Kunshan on-site deployment notes:
# llm path=os.path.join('/model','chatglm3-6b')
# Model identifier; overridable via the MODEL_NAME environment variable.
# NOTE(review): the default 'chatglm3-6b' does not equal 'chatglm3', so with no
# env override the dispatch below falls through to the vLLM branch and prints
# "unnamed model" — confirm whether the default should be 'chatglm3'.
model_name = os.getenv('MODEL_NAME', 'chatglm3-6b')
# model_path = os.getenv('MODEL_PATH', os.path.join('/', 'usr', 'src', 'ZhipuAI', 'chatglm3-6b'))
# Hard-coded absolute path to the model weights on this host.
model_path = os.path.join('/', 'hy-tmp', 'models', 'chatglm3-6b')

# from .vicuna import Vicuna as LLMClass
# llm_path = os.path.join('/', 'usr', 'pigcha', 'vicuna-7b-v1.5')
# MODEL_NAME = 'vicuna-7b'


# Select the LLM wrapper class by model name. Each branch imports its backend
# lazily so only the chosen implementation's dependencies are loaded.
if model_name == 'chatglm3':
    from .chatglm3 import ChatGLM3 as LLMClass
elif model_name == 'vicuna':
    from .vicuna import Vicuna as LLMClass
elif model_name == 'qwen2':
    from .qwen2 import Qwen2 as LLMClass
else:
    # Fallback: serve any unrecognized model name via vLLM.
    # NOTE(review): the default MODEL_NAME 'chatglm3-6b' lands here (it is not
    # equal to 'chatglm3') — confirm this fallback is intended, not a typo.
    from .vllm_model import VllmModel as LLMClass
    print(f'unnamed model: {model_name}', flush=True)

# Module-level singleton used by the rest of the package.
# history=False presumably disables chat-history tracking — confirm against
# the wrapper classes' constructors.
llm_model = LLMClass(model_path, history=False)