"""
运行多个daemon.py的办法

比如用0-3这4个显卡运行4个daemon.py
CUDA_VISIBLE_DEVICES=0 python daemon.py
CUDA_VISIBLE_DEVICES=1 python daemon.py
CUDA_VISIBLE_DEVICES=2 python daemon.py
CUDA_VISIBLE_DEVICES=3 python daemon.py

"""

print('Started.')
import time
import redis
from util_mongo import get_history
import pymongo as pm
import os
from common import MONGODB_NAME, VALUE, KEY, IO_PREFIX
from PyCmpltrtok.common import sep
from PyCmpltrtok.common_gpgpu import print_gpu_utilization
from PyCmpltrtok.common_torch import check_dtype
from accelerate import Accelerator
from accelerate.state import AcceleratorState

# Index of the GPU this daemon runs on, taken from CUDA_VISIBLE_DEVICES.
# The variable may be a comma-separated list (e.g. "0,1") or empty, so take
# the first entry instead of passing the raw value to int() (which would
# raise ValueError). Defaults to 0 when the variable is unset or empty.
_cvd = os.environ.get('CUDA_VISIBLE_DEVICES', '0').split(',')[0].strip()
GPU_IDX = int(_cvd) if _cvd else 0
print(f'GPU_IDX={GPU_IDX}')

print('Importing transformers ...')
from transformers import AutoConfig, GenerationConfig, AutoTokenizer, AutoModelForCausalLM, AutoModel
print('Importing over.')

# Connect to database 0 of the Redis server on the local host, port 6379.
# NOTE(review): the password is hard-coded in source — consider moving it to
# an environment variable or config file.
print('Connecting to redis ...')
rdb = redis.Redis('127.0.0.1', 6379, 0, password='lgdz4qEdt/ezElyQnXFYXB80iM3OxEbAWRjMFPcIXH5ni6eQ8QOlfp7G7gvV1svPu2Bv7v')
rdb.get('test')  # smoke-test: raises immediately if Redis is unreachable
print('Connected to redis.')

# Connect to the local MongoDB instance.
# NOTE(review): credentials are hard-coded here as well — same concern as above.
print('Connecting to MongoDB ...')
mongo = pm.MongoClient(
    '127.0.0.1', 27017, serverSelectionTimeoutMS=3000,
    username='root', password='p1983mdA1-ei',
)
mdb = mongo[MONGODB_NAME]
get_history(mdb, 'dummy')  # smoke-test: forces server selection/auth to fail fast
print('Connected to MongoDB.')

print('-------------------------------------------------------')
print('正在加载模型……')
# Alternative model locations kept for reference:
# model_name = "THUDM/chatglm2-6b-int4"
# model_name = "/root/.cache/huggingface/hub/models--THUDM--chatglm2-6b-int4/snapshots/66ecaf1db3a5085714e133357ea4824b69698743"
# model_name = "/home/yunpeng/models/hf/chatglm2-6b-int4"
model_name = "/home/yunpeng/models/hf/chatglm2-6b"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# model = AutoModel.from_pretrained(model_name,trust_remote_code=True).cuda()

# When launched under DeepSpeed, `accelerator.prepare()` raises:
#   ValueError: When using DeepSpeed `accelerate.prepare()` requires you to pass
#   at least one of training or evaluation dataloaders or ... assign integer value
#   to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`.
# This daemon serves one request at a time, so a micro batch size of 1 is correct.
accelerator = Accelerator()
# BUGFIX: `deepspeed_plugin` is None when the script is run without a DeepSpeed
# accelerate config (e.g. plain `python daemon.py` as in the module docstring);
# unconditionally dereferencing it raised AttributeError. Guard it.
_ds_plugin = AcceleratorState().deepspeed_plugin
if _ds_plugin is not None:
    _ds_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = 1
model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
model = accelerator.prepare(model)

# Inference only: disable dropout etc.
model = model.eval()
sep()
sep()
print(model)
sep()
sep()
print('模型已经加载完毕。')
print_gpu_utilization(GPU_IDX)
dtypes = check_dtype(model)
print(f'dtypes: {dtypes}')


def model_infer(xinput, username):
    """
    Run one streaming inference turn of the chat LLM.

    :param xinput: the user's input text for this turn
    :param username: user whose stored chat history is used as context
    :return: a generator from the model's streaming chat interface; each item
             is a (partial_output, history) pair
    """
    # Fetch this user's prior (input, output) turns from MongoDB.
    history = get_history(mdb, username, more_info=False)

    # Log the conversation context before inference.
    print('-------------history-----------------')
    for turn, (prev_in, prev_out) in enumerate(history):
        print(turn, '>>>>', prev_in)
        print(turn, '<<<<', prev_out)
    print('-------------this turn---------------')
    print('>>>>', '>>>>', xinput)

    # Return the stream so the caller can forward partial outputs as they arrive.
    return model.stream_chat(tokenizer, xinput, history=history)


if '__main__' == __name__:

    print('------------------READY---------------------------')

    # Serve requests forever: poll the Redis queue, run inference, publish results.
    while True:

        # Pop the next request uuid from the queue.
        xuuid = rdb.rpop('queue')
        if xuuid is None:
            # Queue empty: back off 1 ms, then retry.
            time.sleep(0.001)
            continue
        xuuid = xuuid.decode('utf8')
        print('UUID:', xuuid)

        # Fetch the input text for this uuid.
        xinput = rdb.hget('uuid2input', xuuid)
        if xinput is None:
            # Robustness: the producer may not have written it yet.
            time.sleep(0.001)
            continue
        xinput = xinput.decode('utf8')
        print('input:', xinput)

        # Fetch the username for this uuid.
        username = rdb.hget('uuid2username', xuuid)
        if username is None:
            # Robustness: the producer may not have written it yet.
            time.sleep(0.001)
            continue
        username = username.decode('utf8')
        print('username:', username)

        # Model inference, streaming partial results to Redis as they arrive.
        # BUGFIX: initialize xout so an empty stream cannot cause a NameError
        # at `xoutput = xout` below.
        xout = ''
        xgenerator = model_infer(xinput, username)
        for xout, xhis in xgenerator:
            # Publish the partial output so the frontend can show progress.
            rdb.hset('username2dynamic', username, xout)
            print('.', end='', flush=True)
        print(flush=True)
        # The final answer is the last streamed partial output.
        xoutput = xout
        rdb.hdel('username2dynamic', username)
        print('<<<<', '<<<<', xoutput)

        # Publish the final output for this uuid.
        rdb.hset('uuid2output', xuuid, xoutput.encode('utf8'))