import traceback

import torch
from torchvision.transforms.functional import InterpolationMode
from model_utils import load_model


def QwenVL_respond(models_dir, model_name, _question, _chat_bot, _app_cfg, params_form, num_beams,
                     repetition_penalty, repetition_penalty_2, top_p, top_k, temperature, history=None):
    """Answer a user question about the currently uploaded image with a Qwen-VL model.

    Args:
        models_dir: Directory that ``load_model`` resolves model weights from.
        model_name: Name of the model to load.
        _question: The user's text question for this turn.
        _chat_bot: List of (question, answer) tuples; mutated in place and returned.
        _app_cfg: Session dict holding 'ctx' (conversation history) and 'img' (PIL image).
        params_form: "Beam Search" selects deterministic beam decoding; anything else samples.
        num_beams / repetition_penalty: Beam-search decoding parameters.
        repetition_penalty_2 / top_p / top_k / temperature: Sampling decoding parameters.
        history: Unused; kept for caller compatibility.

    Returns:
        Tuple of ('' to clear the input box, updated _chat_bot, updated _app_cfg).
    """
    try:
        model, _, processor = load_model(models_dir, model_name)

        # The session must have been initialised by an image upload first.
        if _app_cfg.get('ctx', None) is None:
            _chat_bot.append((_question, "请先上传图片"))
            return '', _chat_bot, _app_cfg

        # BUG FIX: image_input was only bound when 'img' was not None, so a
        # missing image later raised UnboundLocalError (swallowed by the broad
        # except). Guard explicitly instead.
        image_input = _app_cfg.get('img')
        if image_input is None:
            _chat_bot.append((_question, "请先上传图片"))
            return '', _chat_bot, _app_cfg

        # Work on a copy so the stored context is only replaced on success.
        _context = _app_cfg['ctx'].copy()
        _context.append({"role": "user", "content": _question})
        print('<User>:', _question)

        # Build decoding parameters for the selected mode. (BUG FIX: these were
        # previously constructed but never passed to model.generate.)
        generate_params = {"max_new_tokens": 8192}
        if params_form == "Beam Search":
            generate_params.update({
                "do_sample": False,
                "num_beams": num_beams,
                "repetition_penalty": repetition_penalty,
            })
        else:
            generate_params.update({
                "do_sample": True,
                "top_p": top_p,
                "top_k": top_k,
                "temperature": temperature,
                "repetition_penalty": repetition_penalty_2,
            })

        # BUG FIX: the prompt previously hard-coded "Describe this image.",
        # ignoring the user's actual question.
        conversation = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": _question},
                ],
            }
        ]
        text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

        inputs = processor(
            text=[text_prompt], images=[image_input], padding=True, return_tensors="pt"
        ).to(model.device)

        output_ids = model.generate(**inputs, **generate_params)
        # Strip the prompt tokens so only the newly generated tokens are decoded.
        # (Renamed loop variables: the original shadowed output_ids inside the
        # comprehension.)
        generated_ids = [
            out_seq[len(in_seq):]
            for in_seq, out_seq in zip(inputs.input_ids, output_ids)
        ]
        # BUG FIX: batch_decode returns a list of strings; take the single
        # decoded answer instead of storing the list itself.
        response = processor.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )[0]
        print(f'User: {_question}\nAssistant: {response}')

        # Persist the assistant turn and surface it in the chat widget.
        _context.append({"role": "assistant", "content": response})
        _chat_bot.append((_question, response))
        _app_cfg['ctx'] = _context

    except torch.cuda.OutOfMemoryError:
        clean_response = "显存不足，请尝试减小生成长度或更换较小模型"
        _chat_bot.append((_question, clean_response))
    except Exception as e:
        traceback.print_exc()
        clean_response = f"生成错误: {str(e)}"
        _chat_bot.append((_question, clean_response))

    return '', _chat_bot, _app_cfg