import torch
import re
import traceback
from model_utils import load_model



def GLM_respond(models_dir, model_name, _question, _chat_bot, _app_cfg, params_form, num_beams, repetition_penalty,
               repetition_penalty_2, top_p, top_k, temperature):
    """Answer a user question about the uploaded image with a GLM vision-language model.

    Parameters
    ----------
    models_dir, model_name : location/name of the model loaded via ``load_model``.
    _question : str — the user's new question.
    _chat_bot : list of ``(question, answer)`` pairs shown in the UI; mutated in place.
    _app_cfg : dict — session state; reads ``'ctx'`` (conversation history, ``None``
        until an image is uploaded) and ``'img'`` (the uploaded image or ``None``).
    params_form : ``"Beam Search"`` selects deterministic beam decoding; any other
        value selects sampling.
    num_beams, repetition_penalty : beam-search decoding parameters.
    repetition_penalty_2, top_p, top_k, temperature : sampling decoding parameters.

    Returns
    -------
    tuple
        ``('', _chat_bot, _app_cfg)`` — the empty string clears the input box;
        chat log and session state are returned updated.  Errors (OOM or any
        other exception) are reported as the assistant's answer rather than raised.
    """
    try:
        # Load inside the try-block so a failed load is reported in the chat
        # like every other error instead of crashing the UI callback.
        model, tokenizer, processor = load_model(models_dir, model_name)

        # A session context only exists after an image has been uploaded.
        if _app_cfg.get('ctx', None) is None:
            _chat_bot.append((_question, "请先上传图片"))
            return '', _chat_bot, _app_cfg

        print('<User>:', _question)

        # Single-turn chat template: one image placeholder plus the question text.
        messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": _question}]}]

        device = next(model.parameters()).device
        model_inputs = tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, tokenize=True, return_tensors="pt", return_dict=True
        ).to(device)

        # Base generation parameters.  Sampling knobs are NOT set here: both
        # branches below supply their own, and leaving temperature/top_p in the
        # dict with do_sample=False only produces transformers warnings.
        generate_params = {
            "input_ids": model_inputs["input_ids"],
            "attention_mask": model_inputs["attention_mask"],
            "max_new_tokens": 8192,
            # NOTE(review): hard-coded stop-token ids for this GLM variant —
            # confirm against the tokenizer's special tokens if the model changes.
            "eos_token_id": [59246, 59253, 59255],
        }

        # Decoding-mode selection.
        if params_form == "Beam Search":
            generate_params.update({
                "do_sample": False,
                "num_beams": num_beams,
                "repetition_penalty": repetition_penalty
            })
        else:
            generate_params.update({
                "do_sample": True,
                "top_p": top_p,
                "top_k": top_k,
                "temperature": temperature,
                "repetition_penalty": repetition_penalty_2
            })

        # Encode the uploaded image, if any, on the model's device.
        if _app_cfg['img'] is not None:
            pixel_values = torch.tensor(processor(_app_cfg['img']).pixel_values).to(device)
            generate_params["pixel_values"] = pixel_values

        output_ids = model.generate(**generate_params)

        # BUGFIX: model.generate returns token ids, not text.  The original code
        # stored the raw tensor in the history and chat log; strip the prompt
        # tokens and decode to get the assistant's answer string.
        prompt_len = model_inputs["input_ids"].shape[1]
        response = tokenizer.decode(output_ids[0][prompt_len:], skip_special_tokens=True).strip()

        # Persist the turn in the session history and the visible chat log.
        _app_cfg.setdefault('ctx', []).extend([
            {"role": "user", "content": _question},
            {"role": "assistant", "content": response}
        ])

        _chat_bot.append((_question, response))

    except torch.cuda.OutOfMemoryError:
        response = "显存不足，请尝试减小生成长度或更换较小模型"
        _chat_bot.append((_question, response))
    except Exception as e:
        traceback.print_exc()
        response = f"生成错误: {str(e)}"
        _chat_bot.append((_question, response))

    return '', _chat_bot, _app_cfg