'''
https://hf-mirror.com/TinyLlama/TinyLlama-1.1B-Chat-v1.0
测试tinyLlama 1.1B效果不错，比Qwen1.8B经过量化的都好很多
'''

# Install transformers from source - only needed for versions <= v4.34
# pip install git+https://github.com/huggingface/transformers.git
# pip install accelerate

import os
from datetime import datetime
import queue
import time
import threading
import torch
from my_util import Logger, end_sentence, decode_base64

os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
from transformers import pipeline
from drifting_bottle_bot_sql import insert_table_opt, update_table_opt, select_table_opt

MAX_QUEUE_SIZE = 10    # 问题消息队列当中最大问题数量
MAX_ANSWER_LEN = 1024    # 机器人回答问题的最大长度

loger = Logger()

def get_substring_after_split(main_string, delimiter):
    """Return the text between the first and second occurrence of *delimiter*
    (or everything after the first, if it occurs only once), with leading
    newlines stripped. Returns None when *delimiter* is not present.
    """
    pieces = main_string.split(delimiter)
    return pieces[1].lstrip('\n') if len(pieces) > 1 else None

def load_pipeline():
    """Build the TinyLlama-1.1B-Chat text-generation pipeline from the local
    model directory, in bfloat16 with automatic device placement.
    """
    model_dir = os.path.join(
        os.getcwd(), "static", "model", "TinyLlama", "TinyLlama-1.1B-Chat-v1.0"
    )
    return pipeline(
        "text-generation",
        model=model_dir,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )

def generate_text(content, max_len=MAX_ANSWER_LEN):
    """
    Generate a chat answer for *content* with the global TinyLlama pipeline.

    :param content: user question text
    :param max_len: cap on newly generated tokens (default MAX_ANSWER_LEN)
    :return: pipeline output list; outputs[0]["generated_text"] is the full
             prompt-plus-completion text
    """
    messages = [
        {
            # Fix: chat templates format roles literally, so the previous
            # non-standard role "提示" produced an untrained "<|提示|>" tag;
            # the conventional role name is "system".
            "role": "system",
            "content": "这是个友好的聊天机器人...",
        },
        {"role": "user", "content": content},
    ]
    prompt = Q_pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    started = datetime.now()
    outputs = Q_pipe(prompt, max_new_tokens=max_len, do_sample=True,
                     temperature=0.7, top_k=50, top_p=0.95)
    elapsed = datetime.now() - started
    # Fix: elapsed is a timedelta (prints as H:MM:SS.us), so the old trailing
    # "s" unit suffix was misleading.
    loger.info(f"耗时 {elapsed}")

    # Measured findings:
    # - changing top_p does not noticeably reduce inference time, and the same
    #   question in Chinese takes about twice as long as in English
    # - do_sample=False barely reduces inference time
    # - only max_new_tokens significantly reduces inference time, and not
    #   linearly: max_new_tokens=256 took ~2 min, max_new_tokens=32 ~1 min,
    #   so it is better to keep it large enough for complete answers

    return outputs

# 消费者
def loop_process_bot():
    """
    Consumer loop: poll the database for an unanswered question, generate an
    answer with the model, and write it back. Runs forever (daemon thread).
    """
    while True:
        pending = select_table_opt(datetime.now(), 1, 0)
        if not pending:
            time.sleep(1)   # nothing pending; back off briefly
            continue
        question = decode_base64(pending[0][3])  # column 3 holds the base64 question
        loger.info(f'[Consuming]: {question}')
        loger.info(f"Reading process questions: {question}")
        try:
            gen_text = generate_text(question)
            res = gen_text[0]["generated_text"]
            answer = get_substring_after_split(res, "<|assistant|>")
            # Fix: answer is None when the marker is absent; len(None) raised
            # TypeError, so the fallback answer was never stored for that row.
            if answer:
                loger.debug(f"Answer: {answer}")
            else:
                answer = "I don't know"
                loger.error(f"No answer: {answer}")
            update_table_opt(question, answer)
            res = end_sentence(res, len(res))
            loger.debug(f"生成的文本：{res}")
        except Exception as e:
            loger.error(f"Exception error: {e}")

# 生产者
def question_to_bot(question):
    """Producer: enqueue *question* for the DB-writer thread to persist."""
    loger.info(f'[Producing]: {question}')
    bot_queue.put(question)  # hand off to question_to_db via the shared queue

# 消费者
def question_to_db():
    """
    Consumer: move questions from the in-memory queue into the database.

    Runs until a ``None`` sentinel is received.
    """
    while True:
        question = bot_queue.get()  # blocks until an item is available
        if question is None:  # None is the stop signal
            # Fix: the sentinel previously just slept and looped forever and
            # never called task_done(), which would make bot_queue.join()
            # hang; acknowledge it and exit the thread instead.
            bot_queue.task_done()
            break
        loger.debug(f"get question from queue: {question}")
        insert_table_opt(question)
        bot_queue.task_done()  # mark this queue task as completed

def bot_init():
    '''
    Initialize the chat bot: load the model pipeline, create the shared
    question queue, and start the two worker daemon threads.
    :return:
    '''
    global Q_pipe, bot_queue, daemon_db_thread, daemon_thread

    Q_pipe = load_pipeline()
    loger.info('load pipe ok')

    # Shared queue between the producer and the DB-writer consumer
    bot_queue = queue.Queue()
    loger.info('create bot queue ok')

    # Daemon thread that drains the queue into the database
    # (used to throttle insert rate; optional by design)
    daemon_db_thread = threading.Thread(target=question_to_db, daemon=True)
    daemon_db_thread.start()

    # Daemon thread that answers questions pulled from the database
    daemon_thread = threading.Thread(target=loop_process_bot, daemon=True)
    daemon_thread.start()

def data_to_bot(question):
    '''
    Forward a question to the chat bot, dropping it when the queue is full.
    :param question: question text; empty/None input is silently ignored
    :return:
    '''
    # Guard clauses: ignore empty input and apply back-pressure when full.
    if not question:
        return
    if bot_queue.qsize() >= MAX_QUEUE_SIZE:
        loger.warning(f"having {MAX_QUEUE_SIZE} questions in queue, waiting ...")
        return
    loger.info(f"get question {question}")  # fixed log typo "quesiont"

    try:
        question_to_bot(question)
    except Exception as e:
        # Fix: the exception was passed as a dropped second positional arg;
        # format it into the message like every other call site.
        loger.error(f"发生错误：{e}")

def bot_quit():
    '''
    Shut down the chat bot.

    NOTE(review): bot_queue.join() only returns once every enqueued item has
    been acknowledged via task_done() in question_to_db; that is fine for
    normal items, but no stop sentinel is ever sent and both worker loops
    run `while True` with no exit path, so the two thread join() calls below
    will likely block forever — confirm the intended shutdown semantics.
    :return:
    '''
    # Wait for the DB-writer thread to drain all queued questions
    loger.info("ready to quit bot ...")
    bot_queue.join()  # blocks until every queued task is marked done
    loger.info("queue join ok!")
    daemon_thread.join()  # NOTE(review): loop_process_bot never returns — may hang
    loger.info("quit bot ok!")
    daemon_db_thread.join()  # NOTE(review): question_to_db never returns — may hang
    loger.info("quit queue ok!")