# Model and dataset loading helpers
import torch
from transformers import AutoProcessor, LlavaForConditionalGeneration,LlavaProcessor

import os
import random


from refer import REFER
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
from tqdm import tqdm

def load_VLM_model(model_path):
    """Load a LLaVA model and its processor from a local checkpoint.

    The model weights are sharded across all visible GPUs via
    ``device_map="auto"``.

    Args:
        model_path: Path to the pretrained LLaVA checkpoint directory.

    Returns:
        Tuple of (model, processor).
    """
    model = LlavaForConditionalGeneration.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        attn_implementation="eager",
        # Automatically distribute layers over the available GPUs.
        device_map="auto",
    )
    processor = LlavaProcessor.from_pretrained(model_path, patch_size=14)
    return model, processor

def load_refer(data_root, dataset, splitBy):
    """Load a REFER dataset and print its split statistics.

    Args:
        data_root: Root directory containing refclef/refcoco/refcoco+/refcocog.
        dataset: One of 'refclef', 'refcoco', 'refcoco+', 'refcocog'.
        splitBy: Split scheme name (e.g. 'unc', 'umd').

    Returns:
        Tuple of (refer, splits): the REFER instance and the list of split
        names available for this dataset.

    Raises:
        ValueError: If `dataset` is not a supported name (previously this
            fell through and crashed later with a NameError on `splits`).
    """
    refer = REFER(data_root, dataset, splitBy)
    # print stats about the given dataset
    print('dataset [%s_%s] contains: ' % (dataset, splitBy))
    ref_ids = refer.getRefIds()
    image_ids = refer.getImgIds()
    print('%s expressions for %s refs in %s images.' % (len(refer.Sents), len(ref_ids), len(image_ids)))

    print('\nAmong them:')
    if dataset == 'refclef':
        # Only the 'unc' split scheme of refclef has the three test subsets.
        if splitBy == 'unc':
            splits = ['train', 'val', 'testA', 'testB', 'testC']
        else:
            splits = ['train', 'val', 'test']
    elif dataset in ('refcoco', 'refcoco+'):
        splits = ['train', 'val', 'test']
    elif dataset == 'refcocog':
        splits = ['train', 'val']  # we don't have test split for refcocog right now.
    else:
        raise ValueError('unknown dataset: %s' % dataset)

    for split in splits:
        ref_ids = refer.getRefIds(split=split)
        print('%s refs are in split [%s].' % (len(ref_ids), split))

    return refer, splits


# Attention-map generation
from llava_methods import *
from utils import *
from PIL import Image, ImageDraw

def min_max_scale(array):
    """Linearly rescale array values into [0, 1].

    Args:
        array: A numpy array of attention values.

    Returns:
        The rescaled array; an all-zeros array when every value is identical;
        the input unchanged when it is empty (previously this printed a
        warning and then crashed inside np.min on the empty array).
    """
    if array.size == 0:
        # np.min / np.max raise ValueError on empty input, so bail out early.
        print(f"警告：生成的注意力图为空，数组内容: {array}")
        return array
    min_val = np.min(array)
    max_val = np.max(array)
    if max_val - min_val == 0:  # constant array (e.g. all zeros)
        return np.zeros_like(array)
    return (array - min_val) / (max_val - min_val)

def generate_attention_map(model, processor, image_path, object):
    """Produce a [0, 1]-normalized attention map for `object` in the image.

    Builds a target prompt about `object` plus a generic baseline prompt,
    runs the relative-attention pipeline, crops the square map back to the
    image's shape, and min-max scales the result.
    """
    question = f'Find all the {object} in the picture and locate their positions.'
    general_question = 'Write a general description of the image.'

    prompt = f"<image>\nUSER: {question} Answer the question using a single word or phrase.\nASSISTANT:"
    general_prompt = f"<image>\nUSER: {general_question} Answer the question using a single word or phrase.\nASSISTANT:"

    image = Image.open(image_path).convert("RGB")

    # Relative attention of the target prompt against the generic one.
    attention = high_res(rel_attention_llava, image, prompt, general_prompt, model, processor)

    # The raw map is square; crop it back to the original image proportions.
    attention = square_array_to_orin(attention, image.size)

    # Normalize values into [0, 1].
    return min_max_scale(attention)

# IoU computation for the REC (referring expression comprehension) task
# IoU function
def computeIoU(box1, box2):
    """Intersection-over-Union of two boxes in [x1, y1, w, h] format.

    Coordinates are treated as inclusive pixel indices, hence the -1/+1
    terms when converting widths to corner coordinates.

    Returns:
        IoU in [0, 1]; 0.0 when the union is empty (degenerate boxes).
    """
    inter_x1 = max(box1[0], box2[0])
    inter_y1 = max(box1[1], box2[1])
    inter_x2 = min(box1[0]+box1[2]-1, box2[0]+box2[2]-1)
    inter_y2 = min(box1[1]+box1[3]-1, box2[1]+box2[3]-1)

    # Use <= rather than <: equal corner coordinates still mean a
    # 1-pixel-wide overlap under the inclusive-pixel convention above.
    if inter_x1 <= inter_x2 and inter_y1 <= inter_y2:
        inter = (inter_x2-inter_x1+1)*(inter_y2-inter_y1+1)
    else:
        inter = 0
    union = box1[2]*box1[3] + box2[2]*box2[3] - inter
    # Guard against zero-area boxes producing a ZeroDivisionError.
    return float(inter)/union if union > 0 else 0.0

def calculate_REC(ref_box, ann_box ,IoU_threshold=0.5):
    """Return True when the predicted box matches the reference box.

    Both boxes are [x1, y1, w, h]; a prediction counts as a correct
    comprehension when its IoU with the reference reaches IoU_threshold.
    """
    return computeIoU(ref_box, ann_box) >= IoU_threshold

# cIoU computation for the RES (referring expression segmentation) task
import math
def computecIoU(mask1, mask2):
    """Binary-mask IoU: |mask1 AND mask2| / |mask1 OR mask2|, 0 if the union is empty."""
    overlap = np.count_nonzero(np.logical_and(mask1, mask2))
    total = np.count_nonzero(np.logical_or(mask1, mask2))
    if total == 0:
        return 0
    return overlap / total
# Get bounding-box corner coordinates of a mask's nonzero region
def get_bbox(mask):
    """Tight bounding box (x1, y1, x2, y2), inclusive corners, of a mask's nonzero region.

    Raises an IndexError when the mask has no nonzero elements at all.
    """
    row_idx = np.flatnonzero(mask.any(axis=1))
    col_idx = np.flatnonzero(mask.any(axis=0))
    y1, y2 = row_idx[0], row_idx[-1]
    x1, x2 = col_idx[0], col_idx[-1]
    return x1, y1, x2, y2
def calculate_RES(ref_mask, ann_mask):
    """Score a predicted segmentation mask against the reference with a CIoU-style metric.

    Combines the plain mask IoU with a bounding-box center-distance penalty
    and an aspect-ratio penalty (the Complete-IoU formulation applied to the
    two masks' bounding boxes). The result is <= IoU and can be negative.

    Args:
        ref_mask: Ground-truth binary mask (2-D array of 0/1).
        ann_mask: Predicted binary mask of the same shape.

    Returns:
        The CIoU-style score as a float.
    """
     # Plain mask IoU between prediction and ground truth.
    iou = computecIoU(ref_mask, ann_mask)
    
    x1, y1, x2, y2 = get_bbox(ref_mask)
    x3, y3, x4, y4 = get_bbox(ann_mask)
    
    # Squared Euclidean distance between the two bounding-box centers.
    center1 = np.array([(x1 + x2) / 2, (y1 + y2) / 2])
    center2 = np.array([(x3 + x4) / 2, (y3 + y4) / 2])
    rho = np.linalg.norm(center1 - center2) ** 2
    
    # Squared diagonal of the smallest box enclosing both bounding boxes.
    c_width = max(x2, x4) - min(x1, x3)
    c_height = max(y2, y4) - min(y1, y3)
    c_diag = c_width ** 2 + c_height ** 2
    
    # Aspect-ratio consistency term (CIoU's v); 1e-5 avoids division by
    # zero for boxes with zero height.
    v = (4 / math.pi ** 2) * (math.atan((x2 - x1) / (y2 - y1 + 1e-5)) - 
                              math.atan((x4 - x3) / (y4 - y3 + 1e-5))) ** 2
    alpha = v / (1 - iou + v + 1e-5)
    
    # CIoU = IoU - center-distance penalty - weighted aspect-ratio penalty.
    # NOTE(review): c_diag is 0 when both bboxes collapse to the same single
    # pixel, which would divide by zero here — confirm inputs rule this out.
    ciou = iou - (rho / c_diag) - alpha * v
    # print(ciou)
    return ciou


def composite_attn_map(atten_maps):
    """Merge several attention maps into one normalized map.

    Sums the maps elementwise, then min-max scales the combined map
    into [0, 1] via min_max_scale.
    """
    summed = np.sum(atten_maps, axis=0)
    if summed.size == 0:
        print("Input array is empty")
    return min_max_scale(summed)


if __name__ == "__main__":

    random.seed(42)  # fixed seed for reproducibility
    print("随机数：")
    print(random.random())  # prints the same value every run, confirming the seed

    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"  # restrict to GPUs 0-3

    model_path = "/media/dual-4090/sata1/GuoTianxing/model/VLM/llava-v1.5-7b-hf"
    # Load the LLaVA model and its processor.
    llava_model, llava_processor = load_VLM_model(model_path)

    data_root = '/media/dual-4090/sata1/GuoTianxing/dataset/refer/data'  # contains refclef, refcoco, refcoco+, refcocog and images
    # refclef  (splitBy 'unc'): splits = ['train', 'val', 'testA', 'testB', 'testC']
    # refcoco  (splitBy 'unc'): splits = ['train', 'val', 'test']
    # refcoco+ (splitBy 'unc'): splits = ['train', 'val', 'test']
    # refcocog (splitBy 'umd'): splits = ['train', 'val']

    dataset = 'refcoco+'
    splitBy = 'unc'
    eval_type = "RES"  # "REC" or "RES" ("ReasonSeg" is mentioned but not handled below)

    # Binarization threshold applied to the normalized attention map
    # (hoisted out of the loop — it is constant).
    threshold = 0.162

    # Load the dataset and the list of splits to iterate over.
    refer, splits = load_refer(data_root, dataset, splitBy)

    for split in splits:
        split_result = []
        ref_ids = refer.getRefIds(split=split)
        print('%s refs are in split [%s].' % (len(ref_ids), split))

        # Evaluate at most 100 refs per split. The original hard-coded
        # range(100) raised IndexError on splits with fewer than 100 refs.
        num_samples = min(100, len(ref_ids))
        for i in tqdm(range(num_samples), desc="Processing images"):
            ref_id = ref_ids[i]
            ref = refer.Refs[ref_id]
            # Strip the trailing "_<annId>" suffix to recover the image file name.
            name, ext = os.path.splitext(ref['file_name'])
            new_name = name.rsplit('_', 1)[0] + ext
            image_path = os.path.join(refer.IMAGE_DIR, new_name)

            if len(ref['sentences']) == 1:
                expression = ref['sentences'][0]['sent']
                atten_map = generate_attention_map(llava_model, llava_processor, image_path, expression)
            else:
                # One attention map per referring expression, then merge them.
                atten_maps = []
                for sent in ref['sentences']:
                    expression = sent['sent']
                    atten_maps.append(generate_attention_map(llava_model, llava_processor, image_path, expression))
                atten_map = composite_attn_map(atten_maps)

            # Binarize the attention map into a 0/1 mask.
            atten_map = np.where(atten_map >= threshold, 1, 0)

            if eval_type == "REC":
                # Ground-truth box is [x, y, w, h]; get_bbox returns inclusive
                # corners (x1, y1, x2, y2), so convert before comparing —
                # the original passed the corner tuple straight through,
                # which computeIoU misreads as [x, y, w, h].
                ref_box = refer.getRefBox(ref_id)
                x1, y1, x2, y2 = get_bbox(atten_map)
                atten_box = (x1, y1, x2 - x1 + 1, y2 - y1 + 1)
                result = calculate_REC(ref_box, atten_box)
            elif eval_type == "RES":
                # Ground-truth segmentation mask is a 0/1 2-D array.
                ref_map = refer.getMask(ref)["mask"]
                result = calculate_RES(ref_map, atten_map)
            else:
                # Previously an unexpected eval_type left `temp` undefined.
                raise ValueError("unsupported eval_type: %s" % eval_type)

            split_result.append(result)

        # Guard against empty splits to avoid division by zero.
        if split_result:
            if eval_type == "REC":
                print(f"REC正确率: {sum(split_result)/len(split_result)}")
            elif eval_type == "RES":
                print(f"RES正确率: {sum(split_result)/len(split_result)}")
