"""
按下面方式调用
R_HOST=101.200.228.56 R_PORT=6379 M_HOST=101.200.228.56 M_PORT=27017 python daemon.py


运行多个daemon.py的办法（在上面的基础上继续添加环境变量CUDA_VISIBLE_DEVICES）

比如用0-3这4个显卡运行4个daemon.py
CUDA_VISIBLE_DEVICES=0 python daemon.py
CUDA_VISIBLE_DEVICES=1 python daemon.py
CUDA_VISIBLE_DEVICES=2 python daemon.py
CUDA_VISIBLE_DEVICES=3 python daemon.py

"""

import time
import os
import redis
import pymongo as pm
from PyCmpltrtok.util_mongo import get_history, enqueue, VALUE
import signal

MONGODB_NAME = 'wechat_llm'
IS_FAKE = int(os.environ.get('IS_FAKE', 0))  # whether to use the fake model (non-zero = fake)
if IS_FAKE:
    print('将使用假模型。')

# Canned replies for the fake model, selected by input length mod 10.
fake_chat_dict = {
    0: '十全十美！',
    1: '一把钢枪交给我！',
    2: '二话不说为祖国！',
    3: '三山五岳任我走。',
    4: '四海为家。',
    5: '五福同寿。',
    6: '六六大顺！',
    7: '97香港回归。',
    8: '零八奥运。',
    9: '九九归一。',
}

def _log_turn(mdb, username, xinput):
    """Fetch the user's chat history and print it plus the current input.

    Shared by both the fake and the real ``model_infer`` so the
    history-fetch / debug-print logic exists in exactly one place.

    :param mdb: MongoDB database handle passed through to ``get_history``.
    :param username: user whose history is fetched.
    :param xinput: the current turn's input text (printed for debugging).
    :return: the history list of (input, output) pairs, so callers don't
        have to query Mongo a second time.
    """
    xlog = get_history(mdb, username)
    print('-------------history-----------------')
    for i, (xin, xout) in enumerate(xlog):
        print(i, '>>>>', xin)
        print(i, '<<<<', xout)
    print('-------------this turn---------------')
    print('>>>>', '>>>>', xinput)
    return xlog


if IS_FAKE:

    def model_infer(xinput, username, model, tokenizer, mdb):
        """Fake NLP LLM model: stream a canned reply derived from the input.

        :param xinput: input text.
        :param username: user id used to fetch chat history.
        :param model: unused (kept for signature parity with the real model).
        :param tokenizer: unused (kept for signature parity).
        :param mdb: MongoDB database handle for ``get_history``.
        :return: generator yielding (partial_text, history) pairs, like the
            real model's streaming API.
        """
        xlog = _log_turn(mdb, username, xinput)

        xlen = len(xinput)
        xoutput = f'您说了{xlen}个字符。（{xinput[:5]}……）' + fake_chat_dict[xlen % 10] + f'(多轮对话之前轮数{len(xlog)})'
        # Simulate token-by-token streaming: yield ever-longer prefixes.
        for i in range(len(xoutput) + 1):
            time.sleep(0.075)
            yield xoutput[:i], []

else:

    def model_infer(xinput, username, model, tokenizer, mdb):
        """Real NLP LLM model: delegate to ChatGLM's streaming chat API.

        :param xinput: input text.
        :param username: user id used to fetch chat history.
        :param model: loaded ChatGLM model (provides ``stream_chat``).
        :param tokenizer: matching tokenizer for the model.
        :param mdb: MongoDB database handle for ``get_history``.
        :return: generator yielding (partial_text, history) pairs.
        """
        xlog = _log_turn(mdb, username, xinput)
        return model.stream_chat(tokenizer, xinput, history=xlog)

if '__main__' == __name__:

    def _main():
        """Daemon entry point.

        Connects to Redis and MongoDB, loads the model (real or fake),
        then loops forever: pop a request UUID from the Redis 'queue'
        list, run inference, stream partial output into Redis, and store
        the final answer. SIGINT requests a graceful stop after the
        in-flight request finishes.
        """
        # Connect to Redis (positional args: host, port, db=0).
        rdb = redis.Redis(os.environ['R_HOST'], int(os.environ['R_PORT']), 0)
        rdb.get('try_it')  # smoke-test the connection; raises if unreachable

        # Connect to MongoDB; fail fast (3s timeout) if unreachable.
        mongo = pm.MongoClient(os.environ['M_HOST'], int(os.environ['M_PORT']), serverSelectionTimeoutMS=3000)
        mdb = mongo[MONGODB_NAME]
        get_history(mdb, 'user_xxxx', limit=1)  # try it

        print('-------------------------------------------------------')
        print('正在加载模型……')

        if IS_FAKE:
            print('使用假模型')
            model, tokenizer = None, None
        else:
            from transformers import AutoTokenizer, AutoModel

            # model_name = "THUDM/chatglm2-6b-int4"
            # model_name = "/root/.cache/huggingface/hub/models--THUDM--chatglm2-6b-int4/snapshots/66ecaf1db3a5085714e133357ea4824b69698743"
            model_name = '/root/autodl-tmp/chatglm2-6b-int4'
            tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
            model = AutoModel.from_pretrained(model_name,trust_remote_code=True).cuda()
            model = model.eval()
        print('模型已经加载完毕。')

        print('------------------READY---------------------------')

        is_go_on = True  # flag: keep the main loop running

        def signal_handler(sig, frame):
            # Flip the loop flag so we exit only after the current inference.
            nonlocal is_go_on
            is_go_on = False
            print('已经接收到SIGINT信号，将在当前推理结束后终止。')
        
        # Register the Ctrl+C (SIGINT) handler for graceful shutdown.
        signal.signal(signal.SIGINT, signal_handler)

        # Run forever in the background.
        while True:

            # Stop if a shutdown was requested.
            if not is_go_on:
                print('Daemon被终止。')
                break

            # Pop the next request UUID from the queue.
            xuuid = rdb.rpop('queue')

            if xuuid is None:
                # Queue empty: wait 1ms and retry.
                time.sleep(0.001)
                continue
            xuuid = xuuid.decode('utf8')
            print('UUID:', xuuid)

            # Fetch the input text for this request.
            xinput = rdb.hget('uuid2input', xuuid)
            if xinput is None:
                # For robustness: the producer may not have written it yet.
                time.sleep(0.001)
                continue
            xinput = xinput.decode('utf8')
            print('input:', xinput)
            
            # Fetch the username for this request.
            username = rdb.hget('uuid2username', xuuid)
            if username is None:
                # For robustness: the producer may not have written it yet.
                time.sleep(0.001)
                continue
            username = username.decode('utf8')
            print('username:', username)

            # Model inference.
            # Clear stale dynamic/final outputs before streaming new ones.
            rdb.hdel('username2dynamic', username)
            rdb.hdel('username2output', username)
            rdb.hdel('uuid2output', xuuid)
            xgenerator = model_infer(xinput, username, model, tokenizer, mdb)
            xoutput = ''
            for xout, xhis in xgenerator:
                # Publish each partial answer so the frontend can stream it.
                rdb.hset('username2dynamic', username, xout)
                print('.', end='', flush=True)
                xoutput = xout
            print(flush=True)
            # Final result.
            print('<<<<', '<<<<', xoutput)

            # Clean up the per-request input data.
            rdb.hdel('uuid2input', xuuid)
            rdb.hdel('uuid2username', xuuid)

            # Store the final output, keyed by both UUID and username.
            xencoded = xoutput.encode('utf8')
            rdb.hset('uuid2output', xuuid, xencoded)
            rdb.hset('username2output', username, xencoded)

    _main()
    