# Model / dataset loading utilities
import torch
from transformers import AutoProcessor, LlavaForConditionalGeneration,LlavaProcessor

import os
import random


from refer import REFER
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
from tqdm import tqdm

def load_VLM_model(model_path):
    """Load a LLaVA model and its matching processor from *model_path*.

    The model is loaded in bfloat16 with low CPU memory usage and sharded
    across the visible GPUs via ``device_map="auto"``.

    Args:
        model_path: local path (or hub id) of the LLaVA checkpoint.

    Returns:
        (model, processor) tuple.
    """
    model = LlavaForConditionalGeneration.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        attn_implementation="eager",
        device_map="auto",  # shard automatically across available GPUs
    )
    processor = LlavaProcessor.from_pretrained(model_path, patch_size=14, use_fast=True)
    return model, processor

def load_refer(data_root, dataset, splitBy):
    """Load a REFER dataset and print basic statistics about it.

    Args:
        data_root: directory containing refclef/refcoco/refcoco+/refcocog data.
        dataset: one of 'refclef', 'refcoco', 'refcoco+', 'refcocog'.
        splitBy: split protocol (e.g. 'unc', 'umd', 'google').

    Returns:
        (refer, splits): the REFER instance and the list of split names.

    Raises:
        ValueError: if *dataset* is not a recognized dataset name.
    """
    refer = REFER(data_root, dataset, splitBy)
    # print stats about the given dataset
    print('dataset [%s_%s] contains: ' % (dataset, splitBy))
    ref_ids = refer.getRefIds()
    image_ids = refer.getImgIds()
    print('%s expressions for %s refs in %s images.' % (len(refer.Sents), len(ref_ids), len(image_ids)))

    print('\nAmong them:')
    if dataset == 'refclef':
        # Only the 'unc' protocol of refclef has the testA/B/C splits.
        if splitBy == 'unc':
            splits = ['train', 'val', 'testA', 'testB', 'testC']
        else:
            splits = ['train', 'val', 'test']
    elif dataset in ('refcoco', 'refcoco+'):
        splits = ['train', 'val', 'test']
    elif dataset == 'refcocog':
        splits = ['train', 'val']  # we don't have test split for refcocog right now.
    else:
        # Bug fix: an unknown dataset previously left `splits` undefined and
        # crashed with a NameError below; fail fast with a clear message.
        raise ValueError(f"unknown dataset: {dataset!r}")

    for split in splits:
        ref_ids = refer.getRefIds(split=split)
        print('%s refs are in split [%s].' % (len(ref_ids), split))

    return refer, splits


# Attention map generation
from llava_methods import *
from utils import *
from PIL import Image, ImageDraw

def min_max_scale(array):
    """Linearly rescale *array* into [0, 1].

    Returns None (after printing a warning) when the array is empty, and an
    all-zeros array when every element is identical (no range to scale over).
    """
    # Empty attention maps cannot be normalized.
    if array.size == 0:
        print(f"警告：生成的注意力图为空，数组内容: {array}")
        return None
    lo = np.min(array)
    hi = np.max(array)
    # Constant array (all zeros or all equal values): return zeros.
    if hi - lo == 0:
        return np.zeros_like(array)
    return (array - lo) / (hi - lo)
def doubao_prompt(image_path, object):
    """Ask the Doubao vision model to draft an English localization prompt.

    Sends the image (base64-encoded) together with the object keyword to the
    Ark chat API and returns a short English "locate the position"-style
    query generated by the model.

    Args:
        image_path: path to the image file on disk (sent as JPEG data URI).
        object: object keyword to embed in the request.

    Returns:
        The generated prompt text from the first response choice.
    """
    import base64
    import os
    # Ark SDK: pip install volcengine-python-sdk[ark]
    from volcenginesdkarkruntime import Ark

    def encode_image(path):
        """Read *path* and return its contents base64-encoded as a str."""
        with open(path, "rb") as fh:
            return base64.b64encode(fh.read()).decode('utf-8')

    # The API key is taken from the environment.
    client = Ark(api_key=os.getenv('ARK_API_KEY'))

    base64_image = encode_image(image_path)

    # NOTE: the data-URI prefix must match the actual image format, e.g.
    #   PNG:  data:image/png;base64,...
    #   JPEG: data:image/jpeg;base64,...
    #   WEBP: data:image/webp;base64,...
    image_part = {
        "type": "image_url",
        "image_url": {
            "url":  f"data:image/jpg;base64,{base64_image}"
        },
    }
    text_part = {
        "type": "text",
        "text": f"你帮我根据图片和物体关键词{object}，生成一个查询关键物体特征位置的英文prompt，输出陈述句，并用“locate the position”的类似句式查询目标位置，不要超过10个词",
    }

    response = client.chat.completions.create(
        model="doubao-1.5-vision-pro-250328",
        messages=[{"role": "user", "content": [image_part, text_part]}],
    )

    return response.choices[0].message.content

import cv2
import numpy as np

def keep_largest_convex_hull(binary_array):
    """Keep only the largest convex hull found in a binary 0/1 array.

    External contours of the foreground are extracted, the convex hull of
    each contour is computed, and only the hull with the greatest area is
    kept, drawn filled.

    Args:
        binary_array: 2-D numpy array (0 = background, 1 = foreground).

    Returns:
        Binary array of the same shape and dtype containing only the
        largest convex hull (all zeros when there is no foreground).
    """
    # Convert to an 8-bit image for OpenCV and extract external contours.
    mask_u8 = (binary_array * 255).astype(np.uint8)
    contours, _ = cv2.findContours(mask_u8, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # No foreground at all: nothing to keep.
    if not contours:
        return np.zeros_like(binary_array)

    # Pick the convex hull with the largest area and draw it filled.
    hulls = [cv2.convexHull(contour) for contour in contours]
    biggest = max(hulls, key=cv2.contourArea)

    canvas = np.zeros_like(binary_array, dtype=np.uint8)
    cv2.drawContours(canvas, [biggest], 0, 1, -1)

    return canvas.astype(binary_array.dtype)


def generate_attention_map(model, processor, image_path, object):
    """Generate a normalized attention map for *object* in the image.

    Args:
        model: loaded LLaVA model.
        processor: matching LLaVA processor.
        image_path: path to the input image file.
        object: referring expression / object phrase to localize.

    Returns:
        2-D numpy array scaled into [0, 1], cropped back to the image's
        aspect ratio, or None when the raw attention map is empty.
    """
    # The referring expression itself serves as the user turn; the generic
    # description prompt provides the baseline for the relative attention.
    general_question = 'Write a general description of the image.'

    prompt = f"<image>\nUSER: {object}\nASSISTANT:"
    general_prompt = f"<image>\nUSER: {general_question} Answer the question using a single word or phrase.\nASSISTANT:"

    image = Image.open(image_path).convert("RGB")

    att_map = high_res(enhanced_rel_attention_llava, image, prompt, general_prompt, model, processor)

    # high_res produces a square map; crop it back to the image's (w, h).
    att_map = square_array_to_orin(att_map, image.size)

    # Normalize values into [0, 1] (returns None for an empty map).
    return min_max_scale(att_map)

# IoU computation for the REC (referring expression comprehension) task
def computeIoU(box1, box2):
    """Compute IoU between two boxes given as [x1, y1, w, h].

    Coordinates are treated as inclusive pixel indices, hence the +1/-1
    adjustments when converting between widths and corner coordinates.

    Returns:
        IoU in [0, 1]; 0.0 when both boxes are degenerate (zero area).
    """
    inter_x1 = max(box1[0], box2[0])
    inter_y1 = max(box1[1], box2[1])
    inter_x2 = min(box1[0]+box1[2]-1, box2[0]+box2[2]-1)
    inter_y2 = min(box1[1]+box1[3]-1, box2[1]+box2[3]-1)

    # Bug fix: with inclusive coordinates a 1-pixel-wide overlap has
    # inter_x1 == inter_x2, so the test must be <=, not <. The old strict
    # comparison reported IoU 0 even for two identical 1x1 boxes.
    if inter_x1 <= inter_x2 and inter_y1 <= inter_y2:
        inter = (inter_x2-inter_x1+1)*(inter_y2-inter_y1+1)
    else:
        inter = 0
    union = box1[2]*box1[3] + box2[2]*box2[3] - inter
    # Guard against division by zero when both boxes have zero area.
    return float(inter)/union if union > 0 else 0.0

def calculate_REC(ref_box, ann_box ,IoU_threshold=0.5):
    """Decide whether a predicted box correctly locates the reference.

    A prediction counts as a correct comprehension when its IoU with the
    ground-truth box reaches *IoU_threshold* (standard REC convention: 0.5).

    Args:
        ref_box: ground-truth box, [x1, y1, w, h].
        ann_box: predicted box, [x1, y1, w, h].
        IoU_threshold: minimum IoU to count as correct.

    Returns:
        True for a correct comprehension, False otherwise.
    """
    return computeIoU(ref_box, ann_box) >= IoU_threshold

# cIoU computation for the RES (referring expression segmentation) task
import math
def computecIoU(mask1, mask2):
    """Binary-mask IoU: intersection count over union count (0 for empty union)."""
    overlap = np.logical_and(mask1, mask2).sum()
    combined = np.logical_or(mask1, mask2).sum()
    if combined > 0:
        return overlap / combined
    return 0
# Get bounding-box coordinates
def get_bbox(mask):
    """Return (x1, y1, x2, y2): the tight inclusive bounding box of the
    nonzero region of *mask*.

    Assumes the mask contains at least one nonzero element (an all-zero
    mask raises IndexError, as before).
    """
    row_hits = np.where(mask.any(axis=1))[0]
    col_hits = np.where(mask.any(axis=0))[0]
    y1, y2 = row_hits[0], row_hits[-1]
    x1, x2 = col_hits[0], col_hits[-1]
    return x1, y1, x2, y2
def calculate_RES(ref_mask, ann_mask):
    """Score a predicted segmentation mask against ground truth with CIoU.

    Complete-IoU = IoU - center-distance penalty - aspect-ratio penalty,
    where the penalties are computed on the tight bounding boxes of the
    two binary masks.

    Args:
        ref_mask: ground-truth binary mask (2-D array of 0/1).
        ann_mask: predicted binary mask (2-D array of 0/1).

    Returns:
        CIoU score (float); can be negative when boxes are far apart.

    NOTE(review): if both masks are single pixels at the same location,
    c_diag is 0 and the division below raises ZeroDivisionError — confirm
    inputs always have spatial extent. get_bbox also raises on an
    all-zero mask.
    """
    ref_mask = ref_mask.astype(np.uint8)
    ann_mask = ann_mask.astype(np.uint8)
    # Plain mask IoU
    iou = computecIoU(ref_mask, ann_mask)

    x1, y1, x2, y2 = get_bbox(ref_mask)
    x3, y3, x4, y4 = get_bbox(ann_mask)

    # Squared distance between the two bounding-box centers
    center1 = np.array([(x1 + x2) / 2, (y1 + y2) / 2])
    center2 = np.array([(x3 + x4) / 2, (y3 + y4) / 2])
    rho = np.linalg.norm(center1 - center2) ** 2

    # Squared diagonal of the smallest box enclosing both bounding boxes
    c_width = max(x2, x4) - min(x1, x3)
    c_height = max(y2, y4) - min(y1, y3)
    c_diag = c_width ** 2 + c_height ** 2

    # Aspect-ratio consistency term (CIoU's v); the 1e-5 avoids division
    # by zero for zero-height boxes
    v = (4 / math.pi ** 2) * (math.atan((x2 - x1) / (y2 - y1 + 1e-5)) - 
                              math.atan((x4 - x3) / (y4 - y3 + 1e-5))) ** 2
    alpha = v / (1 - iou + v + 1e-5)

    # Complete IoU
    ciou = iou - (rho / c_diag) - alpha * v
    return ciou


def composite_attn_map(atten_maps):
    """Average a list of attention maps and rescale the result to [0, 1].

    Args:
        atten_maps: non-empty list of equally-shaped 2-D numpy arrays.

    Returns:
        Averaged map normalized via min_max_scale (None if it is empty).
    """
    count = len(atten_maps)
    averaged = np.sum(atten_maps, axis=0) / count
    if averaged.size == 0:
        print("Input array is empty")
    return min_max_scale(averaged)


if __name__ == "__main__":

    random.seed(42)  # 设置随机种子为42
    print("随机数：")
    print(random.random())  # 每次运行输出相同的随机数
    import sys
    # 获取所有命令行参数（列表形式）
    args = sys.argv  

    # 提取 --threshold 的值
    threshold = None
    if '--threshold' in args:
        index = args.index('--threshold') + 1
        if index < len(args):
            threshold = float(args[index])  # 转换为数值类型
    print("threshold:",threshold)

    dataset = None
    if '--dataset' in args:
        index = args.index('--dataset') + 1
        if index < len(args):
            dataset = args[index]  
    print("dataset:",dataset)

    splitBy = None
    if '--splitBy' in args:
        index = args.index('--splitBy') + 1
        if index < len(args):
            splitBy = args[index]  
    print("splitBy:",splitBy)

    eval_type = None
    if '--eval_type' in args:
        index = args.index('--eval_type') + 1
        if index < len(args):
            eval_type = args[index]  
    print("eval_type:",eval_type)

    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"  # 指定使用第0和第1号GPU

    model_path="/media/dual-4090/sata1/GuoTianxing/model/VLM/llava-v1.5-7b-hf"
    #加载模型和processor
    llava_model, llava_processor = load_VLM_model(model_path)

    data_root = '/media/dual-4090/sata1/GuoTianxing/dataset/refer/data'  # contains refclef, refcoco, refcoco+, refcocog and images
    # refclef 采用unc分割方法      splits = ['train', 'val', 'testA', 'testB', 'testC']
    # refcoco 采用unc分割方法      splits = ['train', 'val', 'test']
    # refcoco+ 采用unc分割方法      splits = ['train', 'val', 'test']
    # refcocog 采用umd分割方法       splits = ['train', 'val'] 


    # 加载数据集
    refer, splits = load_refer(data_root, dataset, splitBy)

    with open('test.txt', 'a', encoding='utf-8') as f:

        for split in splits:
            split_result=[]
            ref_ids = refer.getRefIds(split=split)
            print ('%s refs are in split [%s].' % (len(ref_ids), split))
            NUM = 10000
            for i in tqdm(range(NUM), desc="Processing images"):
            # for i in tqdm(range(624,628), desc="Processing images"):    
                ref_id = ref_ids[i]
                ref = refer.Refs[ref_id]
                # print(ref)
                # print(len(ref['sentencefile_names']))
                name, ext = os.path.splitext(ref['file_name'])
                # print(name)
                # 去除最后一个下划线及其后的数字
                new_name = name.rsplit('_', 1)[0] + ext
                image_path =  os.path.join(refer.IMAGE_DIR, new_name)

             
                if len(ref['sentences']) == 1:
                    object = ref['sentences'][0]['sent']
                    atten_map = generate_attention_map(llava_model, llava_processor, image_path, object)
                else:
                    atten_maps = []
                    for sent in ref['sentences']:
                        # if len(sent['tokens']) ==1:
                        #     continue
                        object = sent['sent']
                        atten_map = generate_attention_map(llava_model, llava_processor, image_path, object)
                        if atten_map is not None:
                            atten_maps.append(atten_map)
                   
                    if len(atten_maps) == 0:
                        print("ATTEN_MAPS is EMPTY, please check the ref:")
                        print(ref)
                        exit()
                    
                    # 对于多个描述产生的atten_maps需要处理
                    atten_map = composite_attn_map(atten_maps)
             

                # 产生atten_map元素为0或1的np二维数组
                atten_map = np.where(atten_map >= threshold, 1, 0)

                # 在此选择最大凸包
                atten_map = keep_largest_convex_hull(atten_map)

                if eval_type == "REC":
                    # 获取正确区域对应的box
                    # ref_box = refer.refToAnn[ref_id]['bbox']
                    ref_box = refer.getRefBox(ref_id)
                    atten_box = get_bbox(atten_map)
                    temp = calculate_REC(ref_box,atten_box)

                elif eval_type == "RES":
                    # 获取正确区域的seg_map，返回的是只有0和1的np二维数组，
                    ref_map = refer.getMask(ref)["mask"]
                    temp = calculate_RES(ref_map,atten_map)
                    ref_map = ref_map.astype(np.uint8)

                output_picture = False   
                if output_picture:
                    atten_map = atten_map.astype(np.uint8)
                    import cv2
                    original_img = cv2.imread(image_path)
                    original_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)  # 转为 RGB
                    def create_overlay(array, color=(255, 0, 0), alpha=1):
                        overlay = np.zeros((*array.shape, 4), dtype=np.uint8)
                        overlay[array == 1] = [*color, int(255 * alpha)]
                        return overlay
                    def blend_with_original(original, overlay):
                        original_rgba = cv2.cvtColor(original, cv2.COLOR_RGB2RGBA)
                        overlay_alpha = overlay[:, :, 3] / 255.0
                        blended = original_rgba.copy()
                        for c in range(3):
                            blended[:, :, c] = (1 - overlay_alpha) * original_rgba[:, :, c] + overlay_alpha * overlay[:, :, c]
                        return blended
                    overlay1 = create_overlay(ref_map, color=(255, 0, 0), alpha=0.5)  # 红色
                    overlay2 = create_overlay(atten_map, color=(0, 255, 0), alpha=0.5)  # 绿色
                    blended1 = blend_with_original(original_img, overlay1)
                    blended2 = blend_with_original(original_img, overlay2)

                    combined = np.hstack([blended1, blended2])
                    cv2.imwrite(f'./picture/{name}.png', cv2.cvtColor(combined, cv2.COLOR_RGBA2BGRA))


                split_result.append(temp)
                
            # 在代码开始处添加（确保在条件判断之前文件被创建/打开）
            import datetime
            timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

          
            f.write(f"[{timestamp}]")
            if eval_type == "REC":
                accuracy = sum(split_result) / len(split_result)
                result_str = f"数据集：{dataset}，分割方法：{splitBy}，数据块：{split}，阈值：{threshold}，次数：{NUM}，评估类型：REC，正确率: {accuracy}"
                print(result_str)
                f.write(result_str + '\n')
                
            elif eval_type == "RES":
                accuracy = sum(split_result) / len(split_result)
                result_str = f"数据集：{dataset}，分割方法：{splitBy}，数据块：{split}，阈值：{threshold}，次数：{NUM}，评估类型：RES，正确率: {accuracy}"
                print(result_str)
                f.write(result_str + '\n')
