import os
from ipex_llm.transformers import AutoModel
from transformers import AutoTokenizer
import subprocess

import time

from socketio import Client


# Socket.IO connection state: 0 = not yet connected to the progress
# server, 1 = connected (set on first call to GetExtract).
socket_flag2 = 0  # Socket.IO connection flag

# Shared Socket.IO client used to push progress updates to the server.
sio2 = Client()
# Send processing progress to the server.
def send_message_to_server2(message):
    """Emit a progress update to the server on the '/cpu_api' namespace.

    `message` is typically an integer percentage (e.g. 60, 80, 98).
    """
    payload = {'message': message}
    sio2.emit('cpu_api_message', payload, namespace='/cpu_api')

    
# ---- Model warm-up (runs once at import time) ----

# Paths are overridable via environment variables; the defaults point at a
# local ChatGLM3 INT4 checkpoint and its matching tokenizer.
MODEL_PATH = os.environ.get('MODEL_PATH', r'D:\AtomGit\model\chatglm3-ipex-int4')
TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", r'D:\AtomGit\model\chatglm3-6b')

print('----------- ipex-gpu预热中... -----------')

# Load the low-bit (INT4) model and place it on the Intel XPU, then load
# the tokenizer from the original (full) model directory.
model = AutoModel.load_low_bit(MODEL_PATH, trust_remote_code=True).to('xpu')
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True)

# Throwaway prompt used solely to warm up the XPU kernels.
CHATGLM_V3_INIT_PROMPT_TEMPLATE = "这是Warm-up过程，无需做出回答\n"
prompt = CHATGLM_V3_INIT_PROMPT_TEMPLATE

# Encode the prompt and move it onto the XPU alongside the model.
input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')

# Greedy-decode a handful of tokens so later generate() calls run at
# full speed (first call pays the kernel-compilation cost).
output = model.generate(input_ids,
                        do_sample=False,
                        max_new_tokens=32)

print('----------- ipex-gpu预热成功 -----------')
print("----------- chatglm3模型导入成功 -----------")

    
def GetExtract(
            text_path,
            time_path,
            extract_path,
            json_path,
            video_name,
            n_predict=5000):
    """Generate a hierarchical outline ("extract") of a transcript with ChatGLM3.

    Reads the UTF-8 transcript at ``text_path``, prompts the model for a
    numbered outline, strips everything up to the answer marker, writes the
    result to ``extract_path``, and finally invokes ``generate_data.py`` to
    build the tree JSON for the video.  Progress (60/80/98) is reported to
    the local Socket.IO server as the work proceeds.

    Args:
        text_path: Path of the UTF-8 transcript file to summarize.
        time_path: Path of the timing file, forwarded to ``generate_data.py``.
        extract_path: Output path for the generated outline text.
        json_path: Output path forwarded to ``generate_data.py``.
        video_name: Video identifier forwarded to ``generate_data.py``.
        n_predict: Maximum number of new tokens to generate (default 5000).
    """
    global socket_flag2
    # Lazily connect to the progress server the first time we are called.
    if socket_flag2 == 0:
        sio2.connect('http://127.0.0.1:8090', namespaces=['/cpu_api'])
    socket_flag2 = 1

    send_message_to_server2(60)

    print("----------- 模型生成摘要中...... -----------")

    CHATGLM_V3_PROMPT_TEMPLATE = "总结下面的文字的大纲并，用数字编号来表示层级结构，例如1.1是1的子结构，请尽量给出详细的结构，不要带有任何无关的回复\n{prompt}\n答："

    # Read the transcript and splice it into the prompt template.
    with open(text_path, "r", encoding="utf-8") as f:
        prompt = CHATGLM_V3_PROMPT_TEMPLATE.format(prompt=f.read())

    # Encode the prompt and move it onto the XPU where the model lives.
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')

    send_message_to_server2(80)

    st = time.time()
    output = model.generate(input_ids, max_new_tokens=n_predict)
    end = time.time()

    send_message_to_server2(98)

    output_str = tokenizer.decode(output[0], skip_special_tokens=True)

    # Keep only the text after the FIRST "答：" marker.  partition() is used
    # instead of split("答：")[1]: split would truncate the answer to the
    # segment between the first and second marker whenever the generated
    # answer itself happens to contain "答：" again.
    _, sep, answer = output_str.partition("答：")
    if sep:
        output_str = answer

    print(f'----------- 模型生成知识点摘要用时： {end-st} s -----------')

    # Persist the generated outline.
    with open(extract_path, "w", encoding="utf-8") as f:
        f.write(output_str)

    # Hand off to the tree-JSON generator in a child process.  The list
    # form (shell=False) keeps the path arguments safe from shell parsing.
    subprocess.run(["python", "generate_data.py",
                    "--param1", extract_path,
                    "--param2", time_path,
                    "--param3", json_path,
                    "--param4", video_name])
