import json
import os
import platform
from transformers import AutoTokenizer, AutoModel
from redis import Redis
import torch
from typing import Dict, Tuple, Union, Optional
from torch.nn import Module

# Redis is used as a lightweight status/cancellation channel keyed by message id
# (see run_glm / run_glm_all below). Connection settings are env-overridable;
# the defaults reproduce the original deployment exactly.
# SECURITY NOTE(review): the fallback password is hard-coded in source — move it
# to a required environment variable / secret store before shipping.
redis_client = Redis(
    host=os.environ.get('REDIS_HOST', '127.0.0.1'),
    port=int(os.environ.get('REDIS_PORT', '6379')),
    db=int(os.environ.get('REDIS_DB', '0')),
    decode_responses=True,  # return str (not bytes) so values compare with "end"
    password=os.environ.get('REDIS_PASSWORD', 'vsoft@123&'),
)

# ChatGLM3-6B checkpoint locations; TOKENIZER_PATH defaults to the model path.
MODEL_PATH = os.environ.get('MODEL_PATH', '/data/minio01/model_file/chatglm3-6b')
TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH)

# trust_remote_code is required because ChatGLM ships custom modeling code.
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, trust_remote_code=True)
model_glm = AutoModel.from_pretrained(MODEL_PATH, trust_remote_code=True).eval()
# Pin inference to the second GPU when CUDA is available; otherwise use CPU.
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
model_glm.to(device)

# Console helpers (clear_command appears intended for an interactive CLI mode;
# neither it nor the module-level stop_stream flag is used in this view).
os_name = platform.system()
clear_command = 'cls' if os_name == 'Windows' else 'clear'
stop_stream = False


def run_glm(prompt, assistant_message_id):
    """Stream a ChatGLM reply to *prompt*, yielding JSON-encoded text deltas.

    A "start" marker is written to Redis under *assistant_message_id* with a
    600-second TTL. The key is polled on every generation step; an external
    writer can set it to "end" to cancel the stream mid-generation.

    Args:
        prompt: User prompt passed to ``model_glm.stream_chat``.
        assistant_message_id: Redis key used as the cancellation channel.

    Yields:
        str: ``json.dumps({'text': <new chunk>})`` followed by the literal
        delimiter ``"&&##&&"``, once per incremental model output.
    """
    redis_client.set(assistant_message_id, "start")
    # Expire the key after 600 s so abandoned sessions clean themselves up.
    redis_client.expire(assistant_message_id, 600)

    current_length = 0
    for response in model_glm.stream_chat(tokenizer, prompt, history=[], top_p=1,
                                          temperature=0.01,
                                          past_key_values=None,
                                          return_past_key_values=True):
        # Cooperative cancellation: another process flips the key to "end".
        if redis_client.get(assistant_message_id) == "end":
            break
        response = response[0]  # first tuple element is the accumulated reply text
        partial_response = response[current_length:]  # only the newly generated suffix
        print(partial_response, end="", flush=True)  # echo the delta immediately
        current_length = len(response)
        yield json.dumps({'text': partial_response}) + "&&##&&"


def run_glm_all(prompt, assistant_message_id):
    """Run a ChatGLM chat to completion and return the full reply as one string.

    Same generation loop as ``run_glm`` (a "start" marker with a 600-second TTL
    is written to Redis under *assistant_message_id*, and the key is polled so
    an external writer can set it to "end" to abort), but deltas are
    accumulated and returned instead of yielded to the caller.

    Args:
        prompt: User prompt passed to ``model_glm.stream_chat``.
        assistant_message_id: Redis key used as the cancellation channel.

    Returns:
        str: The complete generated reply (possibly truncated if cancelled).
    """
    redis_client.set(assistant_message_id, "start")
    # Expire the key after 600 s so abandoned sessions clean themselves up.
    redis_client.expire(assistant_message_id, 600)

    full_response = ""
    current_length = 0
    for response in model_glm.stream_chat(tokenizer, prompt, history=[], top_p=1,
                                          temperature=0.01,
                                          past_key_values=None,
                                          return_past_key_values=True):
        # Cooperative cancellation: another process flips the key to "end".
        if redis_client.get(assistant_message_id) == "end":
            break
        response = response[0]  # first tuple element is the accumulated reply text
        partial_response = response[current_length:]  # only the newly generated suffix
        print(partial_response, end="", flush=True)  # echo the delta immediately
        full_response += partial_response
        current_length = len(response)
    return full_response
