'''
1, unzip -> images
2, for img in images:
    if is_front(img):fronts.append(img)
    elif is_back(img):backs.append(img)
    else:pass
    if fronts and backs:
        # how to find front corresponding to back
        # concat[front back] => is_contrast()
        for front in fronts:
            for back in backs:
                concat = horizontal_concat([front,back])
                judge concat => (save,break) or continue
'''
import os,shutil,pdb,zipfile
from PIL import Image
# Pin this process to physical GPU 7; torch then sees it as cuda:0.
os.environ['CUDA_VISIBLE_DEVICES']='7'
def extract2tmp(zip_path, tmp_dir):
    """Extract a ZIP archive into *tmp_dir* and return absolute file paths.

    Args:
        zip_path (str): path of the ZIP archive to extract.
        tmp_dir (str): destination directory (created if missing).

    Returns:
        list: absolute paths of every extracted *file* (directory entries
        are skipped). An empty list is returned on any extraction error.
    """
    # Make sure the destination exists
    os.makedirs(tmp_dir, exist_ok=True)

    img_file_paths = []

    try:
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            for info in zip_ref.infolist():
                # Skip directory entries in the archive
                if info.is_dir():
                    continue
                # extract() returns the path actually written, which can
                # differ from a naive os.path.join when member names get
                # sanitized (absolute paths, drive letters, ...).
                extracted_path = zip_ref.extract(info, tmp_dir)
                img_file_paths.append(os.path.abspath(extracted_path))

        print(f"成功解压 {len(img_file_paths)} 个文件到 {tmp_dir}",flush=True)

    except zipfile.BadZipFile:
        print(f"错误：{zip_path} 不是有效的ZIP文件")
        return []
    except FileNotFoundError:
        print(f"错误：ZIP文件 {zip_path} 不存在")
        return []
    except Exception as e:
        # Best-effort: report and signal failure with an empty list so the
        # caller's loop can continue with the next archive.
        print(f"解压过程中发生错误：{e}")
        return []

    return img_file_paths
from pathlib import Path
import cv2
import numpy as np
from utils.util_for_os import osj,ose,osb
def get_data_dir(category, date):
    """Directory holding the raw ZIP archives for one category/date."""
    return f'/mnt/nas/datasets/diction/{category}{date}'  # zip


def get_save_dir(category, date):
    """Directory for saved front/back concatenated images."""
    return f'/mnt/nas/datasets/diction/{category}{date}_img_front_back'  # img


# Scratch area that temporarily receives unzipped images.
tmp_dir = 'tmp_save'
os.makedirs(tmp_dir, exist_ok=True)

categories = ['coat', 'sweater', 'leather']
dates = ["0808", "0811", "0812", "0813", "0814", "0815", "0818", "0819", "0820", "0821", "0822"]

# Screening questions; the comment after each marks the answer required for
# an image to count as a usable garment photo.
Q1 = 'Is there anyone in the picture? Just answer yes or no.'  # no
Q2 = '是否是一件完整的衣服'   # yes (is it one complete garment)
Q3 = 'Is the background white? Just answer yes or no.'   # yes
Q5 = '是否是衣服' # yes (is it clothing)
Q6 = '是否看到衣服的全貌' # yes (is the whole garment visible)
Q7 = '这是上身衣服吗' # is it an upper-body garment


def check_no(ans):
    """True when *ans* is negative: English 'no' or Chinese 否/不是.

    NOTE(review): substring match — 'none'/'unknown' also contain 'no';
    confirm this looseness is intended for free-form model answers.
    """
    return 'no' in ans.lower() or '否' in ans or '不是' in ans


def check_yes(ans):
    """True when *ans* is affirmative: English 'yes' or Chinese 是.

    NOTE(review): '是' is also a substring of the negative '不是', so an
    answer of '不是' satisfies both check_yes and check_no — confirm.
    """
    return 'yes' in ans.lower() or '是' in ans


# Lower-body garment words that disqualify an image for Q7.
bans = ['裙', '裤']


def check_Q7(ans):
    """True when the answer mentions none of the banned garment words."""
    return all(b not in ans for b in bans)

import torch
# Load model and processor
# model_path = "deepseek-ai/Janus-Pro-7B"
from transformers import AutoConfig, AutoModelForCausalLM
from janus.models import  VLChatProcessor
# Local checkpoint of the Janus-Pro-7B vision-language model.
model_path = "/mnt/nas/zhangshu/Janus-Pro-7B"
config = AutoConfig.from_pretrained(model_path)
language_config = config.language_config
# Force eager attention for the language tower (no flash/sdpa kernels).
language_config._attn_implementation = 'eager'
# trust_remote_code=True executes model code shipped with the checkpoint.
vl_gpt = AutoModelForCausalLM.from_pretrained(model_path,
                                             language_config=language_config,
                                             trust_remote_code=True)
# bfloat16 on GPU when available; float16 on CPU otherwise.
if torch.cuda.is_available():
    vl_gpt = vl_gpt.to(torch.bfloat16).cuda()
else:
    vl_gpt = vl_gpt.to(torch.float16)

vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
# Device tag reused by multimodal_understanding when moving inputs.
cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'

@torch.inference_mode()
def multimodal_understanding(image, question, seed=42, top_p=0.95, temperature=0.1):
    """Ask `question` about `image` and return the model's decoded answer.

    Args:
        image: HxWx3 uint8 numpy array. NOTE(review): Image.fromarray below
            interprets the array as RGB; verify callers pass RGB order
            (cv2.imread yields BGR).
        question: natural-language question appended after <image_placeholder>.
        seed: RNG seed applied to torch and numpy for reproducible sampling.
        top_p: nucleus-sampling cutoff forwarded to generate().
        temperature: 0 selects greedy decoding; otherwise sampling is enabled.

    Returns:
        str: answer text with special tokens stripped.
    """
    # Clear CUDA cache before generating
    torch.cuda.empty_cache()
    
    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    
    # Single-turn chat in the Janus conversation format.
    conversation = [
        {
            "role": "<|User|>",
            "content": f"<image_placeholder>\n{question}",
            "images": [image],
        },
        {"role": "<|Assistant|>", "content": ""},
    ]
    
    pil_images = [Image.fromarray(image)]
    prepare_inputs = vl_chat_processor(
        conversations=conversation, images=pil_images, force_batchify=True
    ).to(cuda_device, dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float16)
    
    
    # Fuse text tokens and image features into one embedding sequence.
    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
    
    outputs = vl_gpt.language_model.generate(
        inputs_embeds=inputs_embeds,
        attention_mask=prepare_inputs.attention_mask,
        pad_token_id=tokenizer.eos_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        max_new_tokens=512,
        do_sample=False if temperature == 0 else True,
        use_cache=True,
        temperature=temperature,
        top_p=top_p,
    )
    
    answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
    return answer
def get_answer(filepath, q, img_pil=None):
    """Ask question *q* about an image given as a file path or a PIL image.

    Args:
        filepath (str): image path read with cv2 when *img_pil* is None.
        q (str): the question forwarded to the model.
        img_pil (PIL.Image, optional): in-memory image; takes precedence
            over *filepath* when provided.

    Returns:
        str: the model's text answer.

    Bug fix: arrays are now handed to multimodal_understanding in RGB
    order.  Previously both branches produced BGR, but the downstream
    Image.fromarray interprets arrays as RGB, so the model saw
    channel-swapped images.
    """
    if img_pil is not None:
        # PIL images are already RGB; no channel swap needed.
        rgb_image = np.array(img_pil)  # shape: (H, W, 3)
    else:
        bgr_image = cv2.imread(filepath)
        if bgr_image is None:
            # cv2.imread returns None instead of raising on a bad path.
            raise FileNotFoundError(f'cannot read image: {filepath}')
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    return multimodal_understanding(rgb_image, q)


def is_clo(filepath):
    """Return True iff the image passes every clothing screening question.

    Questions are asked in order and evaluation stops at the first failed
    check, so later (more expensive) model calls are skipped.
    """
    screens = (
        (Q1, check_no),    # no person in the picture
        (Q2, check_yes),   # one complete garment
        (Q3, check_yes),   # white background
        (Q5, check_yes),   # it is clothing
        (Q6, check_yes),   # whole garment visible
        (Q7, check_Q7),    # upper-body garment (no skirt/trousers)
    )
    return all(judge(get_answer(filepath, question)) for question, judge in screens)
# View-classification questions; answers are judged by check_yes/check_no.
Q8 = 'Is a front view from cloth'
Q9 = 'Is a back view from cloth'                            
def is_clo_front(ans_front, ans_back):
    """A garment counts as a front view when Q8 is affirmed and Q9 denied."""
    return check_yes(ans_front) and check_no(ans_back)
def is_clo_back(ans_front, ans_back):
    """A garment counts as a back view when Q8 is denied and Q9 affirmed."""
    return check_no(ans_front) and check_yes(ans_back)

# Questions used to judge a horizontally concatenated [front | back] image.
Q10 = 'Is a front view and a back view from the same cloth'
# Q11 = 'Is the same color between the left and the right'
Q11 = "What's the color of the left clo? What's the color of the right clo? Is the same color between the left and the right ?"


def check_Q10(ans):
    """True when the model affirms the halves come from the same garment."""
    return check_yes(ans)


def check_Q11(ans):
    """True when the answer mentions 'same' (case-insensitive).

    NOTE(review): negations such as 'not the same' also match — confirm
    this is acceptable for the colour-consistency gate.
    """
    return 'same' in ans.lower()

# Fail fast: every (category, date) data directory must contain a names.txt
# manifest before any heavy model work starts.
for category in categories:
    for date in dates:
        data_dir = get_data_dir( category , date )
        name_path = osj( data_dir , 'names.txt' )
        assert ose( name_path ) , name_path
   
from tqdm import tqdm  
from utils.util_flux import process_img_1024,horizontal_concat_images
def clear_unzip_files(img_file_paths):
    """Delete files previously extracted under the tmp_save scratch area.

    Args:
        img_file_paths (list[str]): paths returned by extract2tmp.

    Missing paths are skipped instead of raising, so a partially cleaned
    run (or a second call on the same list) no longer crashes.
    """
    for file_path in img_file_paths:
        try:
            os.remove(file_path)
        except FileNotFoundError:
            pass  # already gone — nothing to do
    print('清除 unzip 文件',flush=True)
    
data_dir = '/mnt/nas/datasets/diction'
# concat_img_path = input('filepath >>')
# Hard-coded sample: a '<front>.jpg__<back>.jpg' concat produced by an
# earlier run of the (now commented-out) main loop below.
concat_img_path = "coat0808_img_front_back/2024_25秋冬_日本_风衣_72024103_32489533.jpg__2024_25秋冬_日本_风衣_72024103_32489538.jpg"
concat_img_path = osj(data_dir , concat_img_path  ) 
assert ose( concat_img_path ),concat_img_path

concat_img = Image.open( concat_img_path )

# for category in categories:
#     for date in dates:
#         data_dir = get_data_dir( category , date )
#         name_path = osj( data_dir , 'names.txt' )
#         assert ose( name_path ) , name_path
#         with open( name_path , encoding='utf-8' ) as f:
#             zip_names = f.readlines()
        
#         save_dir = get_save_dir( category , date )
#         if ose(save_dir):shutil.rmtree( save_dir )
#         os.makedirs( save_dir )
        
#         for zip_name in tqdm(zip_names):
            
#             # pdb.set_trace()
            
#             zip_name = zip_name.strip()
#             if not zip_name.endswith('.zip'):continue
            
#             zip_path = osj( data_dir , zip_name )
#             if not ose( zip_path ):
#                 print( zip_path,' not exists' )
                
#             # pdb.set_trace()
#             # 解压 到 tmp_dir 文件夹下
#             img_file_paths = []
#             try:
#                 img_file_paths = extract2tmp(zip_path,tmp_dir) # 获得 绝对路径
#             except:
#                 clear_unzip_files(img_file_paths)
#                 continue
#             clos = []
#             fronts = []
#             backs = []
#             for path in img_file_paths:
#                 if is_clo(path):
#                     clos.append( path )
#             if len(clos) < 2:
#                 clear_unzip_files(img_file_paths)
#                 continue
            
#             # len clos > 2
#             for path in clos:
#                 ans_front = get_answer( path , Q8 )
#                 ans_back = get_answer( path , Q9 )
#                 if is_clo_front(ans_front,ans_back):
#                     fronts.append( ( osb(path) , process_img_1024(path) ) )
#                 elif is_clo_back( ans_front,ans_back ):
#                     backs.append(  ( osb(path) , process_img_1024(path)) )
#             # 确保 前后 clo 都存在
#             if fronts and backs:
#                 for back_name,back in backs:
#                     for front_name,front in fronts:
#                         concat_img = horizontal_concat_images([front , back])
                        
# check whether contrast in concat_img
# Two-stage gate: Q10 asks whether the concatenated halves are front/back of
# the same garment; Q11 then asks whether both halves share a colour.
ans = get_answer( '', Q10 , img_pil=concat_img )
if check_Q10( ans ):
    # NOTE(review): Q11 is answered from the file on disk rather than the
    # in-memory image — same picture here, but confirm before refactoring.
    ans = get_answer( concat_img_path, Q11 , img_pil=None )
    if check_Q11( ans ):
        # concat_save_name = f'{front_name}__{back_name}'
        # concat_save_path = osj( save_dir , concat_save_name )
        # concat_img.save( concat_save_path )
        concat_img.save( 'tmp2.jpg' )
        # print( 'concat img find !!!!!' , flush=True)
        print( ans , flush=True )
            
# # 完成以后 删除 之前解压到 tmp_save 下 文件
# for file_path in img_file_paths:
#     os.remove( file_path )