"""
运行多个daemon.py的办法

比如用0-3这4个显卡运行4个daemon.py
CUDA_VISIBLE_DEVICES=0 python daemon.py
CUDA_VISIBLE_DEVICES=1 python daemon.py
CUDA_VISIBLE_DEVICES=2 python daemon.py
CUDA_VISIBLE_DEVICES=3 python daemon.py

"""

import time
import redis
import pymongo as pm
import json
from PyCmpltrtok.common import sep, dl2ld, ld2dl

BATCH_SIZE = 4  # max number of queued requests pulled into one inference batch

# Connect to database 0 of the Redis server on local port 6379
rdb = redis.Redis('127.0.0.1', 6379, 0)

print('-------------------------------------------------------')
print('正在加载模型……')
# Heavy imports are deferred until after the Redis connection is set up.
import torch
from transformers import AutoModel, AutoTokenizer, AutoConfig
from transformers import PreTrainedModel, PretrainedConfig
from transformers.modeling_outputs import SequenceClassifierOutput

# Hub id alternative kept for reference:
# bert_model_dir = 'bert-base-chinese'
# Local snapshot of the bert-base-chinese checkpoint used as the backbone.
bert_model_dir = '/home/peiyp2004/.cache/huggingface/hub/models--bert-base-chinese/snapshots/8d2a91f91cc38c96bb8b4556ba70c392f8d5ee55'


class MyTextClfBertBaseZhConf(PretrainedConfig):
    """HF config for the Chinese binary text classifier built on bert-base-chinese."""

    model_type = 'MyTextClfBertBaseZh'

    def __init__(self, bert_model_dir=bert_model_dir, dev=0, n_hidden=128, **kwargs):
        """
        Args:
            bert_model_dir: path (or hub id) of the BERT backbone checkpoint.
            dev: device spec accepted by torch.device(); stored as a plain value
                because a torch.device object is not JSON serializable.
            n_hidden: width of the hidden layer of the classification head.
        """
        super().__init__(**kwargs)
        self.bert_model_dir = bert_model_dir
        self.dev = dev
        self.n_hidden = n_hidden
        

class MyTextClfBertBaseZh(PreTrainedModel):
    """bert-base-chinese backbone followed by a small 2-way MLP classification head."""

    config_class = MyTextClfBertBaseZhConf

    def __init__(self, conf, **kwargs):
        super().__init__(conf, **kwargs)

        self.config = conf
        self._dev = torch.device(self.config.dev)
        self._tokenizer = AutoTokenizer.from_pretrained(self.config.bert_model_dir)
        self._base_model = AutoModel.from_pretrained(self.config.bert_model_dir)
        # Head: 768 (BERT pooled output) -> n_hidden -> 2 classes.
        # NOTE(review): the trailing ReLU clips negative logits; kept as-is so the
        # structure stays compatible with the trained checkpoint.
        self._clf = torch.nn.Sequential(
            torch.nn.Linear(768, self.config.n_hidden),
            torch.nn.ReLU(),
            torch.nn.Dropout(0.5),
            torch.nn.Linear(self.config.n_hidden, 2),
            torch.nn.ReLU(),
        )
        self.to(self._dev)

    def forward(self, input_ids, token_type_ids, attention_mask, labels=None):
        """Encode the batch and classify; add cross-entropy loss when labels are given."""
        encoded = self._base_model(
            input_ids=input_ids.to(self._dev),
            token_type_ids=token_type_ids.to(self._dev),
            attention_mask=attention_mask.to(self._dev),
        )
        # last_hidden_state vs pooler_output
        logits = self._clf(encoded['pooler_output'])
        if labels is None:
            loss = None
        else:
            loss = torch.nn.CrossEntropyLoss()(logits, labels.to(self._dev))
        return SequenceClassifierOutput(loss=loss, logits=logits)

    def get_tokenizer(self):
        """Return the tokenizer that matches the backbone."""
        return self._tokenizer
    
    
# Fine-tuned checkpoint directory produced by a previous training run.
OUTPUT_DIR = '/home/peiyp2004/jupyter_notebook.large.d/bert-base-chinese.on.waimai2023_12_12_22_05_33_625826_temp1_len2000_freeze0/checkpoint-900'
# Register the custom config/model so Auto* classes can resolve them by model_type.
AutoConfig.register("MyTextClfBertBaseZh", MyTextClfBertBaseZhConf)
AutoModel.register(MyTextClfBertBaseZhConf, MyTextClfBertBaseZh)

config = AutoConfig.from_pretrained(OUTPUT_DIR)
model = AutoModel.from_pretrained(OUTPUT_DIR)
tokenizer = AutoTokenizer.from_pretrained(OUTPUT_DIR)

# Inference only: disable dropout etc.
model = model.eval()
print('模型已经加载完毕。')


if '__main__' == __name__:

    print('------------------READY---------------------------')

    # Serve forever: pop request UUIDs off the Redis queue, run batched
    # inference, and publish one JSON result per request back into Redis.
    while True:

        # Drain up to BATCH_SIZE pending UUIDs from the request queue.
        batch_uuids = []
        for _ in range(BATCH_SIZE):
            time.sleep(0.001)  # wait 1ms between pops
            raw_uuid = rdb.rpop('queue')
            if raw_uuid is None:
                # queue is empty for now
                break
            decoded_uuid = raw_uuid.decode('utf8')
            print('UUID:', decoded_uuid)
            batch_uuids.append(decoded_uuid)

        if not batch_uuids:
            # nothing pending: back off 1ms, then poll again
            time.sleep(0.001)
            continue

        sep(f'x{len(batch_uuids)}')

        # Look up the input text for every UUID (empty string when missing).
        texts = []
        for one_uuid in batch_uuids:
            raw_text = rdb.hget('uuid2input', one_uuid)
            texts.append(raw_text.decode('utf8') if raw_text is not None else '')
        print('Input:', texts)

        # Preprocess: tokenize the whole batch at once.
        tensor = tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors='pt')
        # Model inference
        with torch.no_grad():
            logits = model(**tensor)['logits']
        # Postprocess
        pred = logits.argmax(dim=-1).tolist()
        logits = logits.tolist()

        for one_uuid, text, p, l in zip(batch_uuids, texts, pred, logits):
            xoutput = {
                'pred': p,
                'logits': l,
                'uuid': one_uuid,
                'input': text
            }
            print('output:', xoutput)

            # Publish the serialized result for the client to pick up.
            rdb.hset('uuid2output', one_uuid, json.dumps(xoutput).encode('utf8'))