import os
from ipex_llm.transformers import AutoModel
from transformers import AutoTokenizer
import subprocess

import time
import torch
import sys
from socketio import Client

import multiprocessing

sio2 = Client()


def send_message_to_server2(message):
    """Push a processing-progress update to the server on '/cpu_api'."""
    payload = {'message': message}
    sio2.emit('cpu_api_message', payload, namespace='/cpu_api')

def run_client2():
    """Run the socket.io client event loop until the connection ends.

    This is used as a ``multiprocessing.Process`` target. On Windows the
    child is spawned with a fresh interpreter, so the module-level ``sio2``
    in the child has never connected — ``sio2.wait()`` on an unconnected
    client would simply idle and no progress events would be serviced.
    Connect here first, but skip it if the caller already connected.
    """
    if not sio2.connected:
        # Same server and namespace the parent process uses.
        sio2.connect('http://127.0.0.1:8090', namespaces=['/cpu_api'])
    # Block until the connection is closed, processing incoming events.
    sio2.wait()
    
def GetExtract(
            text_path,
            time_path,
            extract_path,
            json_path,
            n_predict=5000):
    """Summarize a transcript into a numbered outline with low-bit ChatGLM3.

    Loads an INT4 ChatGLM3 checkpoint, prompts it to produce a hierarchical
    outline (1.1 nested under 1, etc.) of the text at ``text_path``, writes
    the outline to ``extract_path``, then launches ``generate_data.py`` to
    build the JSON tree. Progress percentages (45/50/80/85) are pushed to
    the server over the '/cpu_api' socket.io namespace.

    Args:
        text_path: UTF-8 text file containing the transcript to summarize.
        time_path: Timing file forwarded to generate_data.py as --param2.
        extract_path: Output path for the generated outline; also forwarded
            to generate_data.py as --param1.
        json_path: Output path forwarded to generate_data.py as --param3.
        n_predict: Maximum number of new tokens the model may generate.
    """
    print("模型导入中......")

    # Low-bit (INT4) checkpoint; override via the MODEL_PATH env var.
    MODEL_PATH = os.environ.get('MODEL_PATH', r'E:\Model\chatglm\ChatGLM3\chatglm3-ipex-int4')
    # Tokenizer comes from the original full-precision checkpoint.
    TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", r'E:\Model\chatglm\ChatGLM3\chatglm3-6b')

    model = AutoModel.load_low_bit(MODEL_PATH, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True)

    print("模型导入成功")
    send_message_to_server2(45)

    print("模型推理中......")
    print(text_path)

    # Prompt template: ask for a numbered, hierarchical outline and nothing
    # else; the model's answer follows the trailing "答：" marker.
    CHATGLM_V3_PROMPT_TEMPLATE = "总结下面的文字的大纲并，用数字编号来表示层级结构，例如1.1是1的子结构，请尽量给出详细的结构，不要带有任何无关的回复\n{prompt}\n答："

    # Read the transcript and fill in the template.
    with open(text_path, "r", encoding="utf-8") as f:
        prompt = f.read()
    prompt = CHATGLM_V3_PROMPT_TEMPLATE.format(prompt=prompt)

    # Tokenize the prompt (CPU inference; no device transfer needed).
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    send_message_to_server2(50)

    # Time the generation step.
    st = time.time()
    output = model.generate(input_ids, max_new_tokens=n_predict)
    end = time.time()

    send_message_to_server2(80)

    output_str = tokenizer.decode(output[0], skip_special_tokens=True)

    # Keep only the text after the first "答：" marker (the model's answer);
    # the part before it is the echoed prompt.
    parts = output_str.split("答：")
    if len(parts) > 1:
        output_str = parts[1]

    print(f'模型生成知识点摘要用时： {end-st} s')

    # Persist the generated outline.
    with open(extract_path, "w", encoding="utf-8") as f:
        f.write(output_str)

    send_message_to_server2(85)
    # Fix: use the current interpreter (sys.executable) rather than whatever
    # "python" resolves to on PATH, so the child runs in the same environment.
    subprocess.run([sys.executable, "generate_data.py",
                    "--param1", extract_path,
                    "--param2", time_path,
                    "--param3", json_path])
    
if __name__=="__main__":
    
    # 创建并启动新进程
    sio2.connect('http://127.0.0.1:8090', namespaces=['/cpu_api'])
  
    client_process = multiprocessing.Process(target=run_client2)
    client_process.start()
    
    # 解析参数
    params = {}
    for i in range(1, len(sys.argv), 2):
        params[sys.argv[i]] = sys.argv[i+1]
  
    
    time_1=time.time()
    
    GetExtract(params['--param1'], params['--param2'], params['--param3'], params['--param4'])
    time_2=time.time()
    print(f"音频转录总耗时：{time_2-time_1}秒")
