import os
os.environ['CUDA_VISIBLE_DEVICES'] = '4'

from transformers import AutoProcessor, Gemma3nForConditionalGeneration, AutoModelForImageTextToText
from PIL import Image
from transformers.image_utils import load_image
import requests
import torch

from threading import Thread
from transformers.generation.streamers import TextIteratorStreamer


# Cap on prompt length in tokens, overridable via environment.
# NOTE(review): appears unused in this file — presumably consumed elsewhere
# or left over from an earlier revision; confirm before relying on it.
MAX_INPUT_TOKENS = int(os.getenv("MAX_INPUT_TOKENS", "10_000"))

# Local checkout of the Gemma-3n E4B instruction-tuned checkpoint.
model_id = "/data/models/gemma-3n-E4B-it"

# Load weights in bfloat16 and move to the single visible GPU
# (restricted to physical device 4 via CUDA_VISIBLE_DEVICES above).
model = AutoModelForImageTextToText.from_pretrained(model_id, 
                                                    # device_map="auto", 
                                                    torch_dtype=torch.bfloat16).cuda()

processor = AutoProcessor.from_pretrained(model_id)



def fn_one_(image, prompt='Is there a human body in the image? Please answer only “yes” or “no”'):
    """Stream a vision-language answer for *image*, yielding the growing text.

    Builds a chat-formatted request, launches ``model.generate`` on a
    background thread with a token streamer, and yields the accumulated
    answer string each time a new fragment arrives.

    Args:
        image: Image input accepted by the processor's chat template.
        prompt: Text question to ask about the image.

    Yields:
        str: The answer generated so far (prefix-growing).
    """
    conversation = [
        {
            "role": "system",
            "content": [{"type": "text", "text": "You are a helpful assistant."}],
        },
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompt},
            ],
        },
    ]

    batch = processor.apply_chat_template(
        conversation,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    )
    # Move tensors to the model's device; the dtype argument casts the
    # floating-point tensors to match the bfloat16 weights.
    batch = batch.to(device=model.device, dtype=torch.bfloat16)

    token_streamer = TextIteratorStreamer(
        processor, timeout=30.0, skip_prompt=True, skip_special_tokens=True
    )
    generation_kwargs = {
        **batch,
        "streamer": token_streamer,
        "max_new_tokens": 512,
        "do_sample": False,
        "disable_compile": True,
    }
    # generate() blocks, so run it on a worker thread and consume the
    # streamer on this one.
    worker = Thread(target=model.generate, kwargs=generation_kwargs)
    worker.start()

    accumulated = ""
    for fragment in token_streamer:
        accumulated += fragment
        yield accumulated

def fn_one(image, prompt='Is there a human body in the image? Please answer only “yes” or “no”'):
    """Ask the model a single question about *image* and return the answer.

    Args:
        image: Image input accepted by the processor's chat template.
        prompt: Text question to ask about the image.

    Returns:
        str: The decoded model answer (special tokens stripped).
    """
    messages = [
        {
            "role": "system",
            "content": [{"type": "text", "text": "You are a helpful assistant."}]
        },
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompt}
            ]
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    # Length of the prompt so we can slice it off the generated sequence,
    # keeping only the newly generated answer tokens.
    input_len = inputs["input_ids"].shape[-1]

    with torch.inference_mode():
        generation = model.generate(**inputs, max_new_tokens=512, do_sample=False, disable_compile=True)
        generation = generation[0][input_len:]

    return processor.decode(generation, skip_special_tokens=True)


def fn(image):
    """Return True when the garment's main fabric pattern is regular/repeating.

    Asks the model (prompt in Chinese) to describe only the dominant fabric
    pattern and answer "yes"/"no" on whether it is regular or repeating.
    Any answer containing "no" (case-insensitive) is treated as
    non-repeating.

    Returns:
        bool: False if the model's answer contains "no", True otherwise.
    """
    ques = '描述这件衣服的图案，不要描述袖子、口袋、丝巾、带子等其他部分的图案，不要描述衣服边缘的图案，只关注衣服大面积面料采用的图案，并用“yes”或“no”回答这种图案是否是规律的或者重复的'
    res = fn_one(image, ques)
    # Lower-case so answers like "No" / "NO" are also recognised; note this
    # is still a substring test, so words containing "no" would also match.
    if 'no' in res.lower():
        return False
    return True


def start_gradio():
    """Launch an interactive Gradio demo: upload an image, type a prompt,
    and show the model's answer in a textbox (serves on 0.0.0.0:20027)."""
    import gradio as gr

    # NOTE: component creation order inside the context managers determines
    # the page layout — keep the construction sequence as-is.
    with gr.Blocks( title='gama3 Img 2 Text' ) as demo:
        height = 512
        with gr.Row():
            # Left column: image input.
            img_prompt = gr.Image( label='Image Prompt',
                    type='pil',
                    height=height )
            # Right column: prompt box, answer box, submit button.
            with gr.Column():
                txt_prompt = gr.Text( label='Text prompt' )
                
                out = gr.Text( label='img answer' )
                
                sub_btn = gr.Button( "get answer" )
                
            # Wire the button to the non-streaming inference helper.
            sub_btn.click( fn_one , 
                        inputs=[img_prompt,txt_prompt],
                        outputs=out)
    demo.launch( server_port=20027,
                server_name='0.0.0.0' )

def start_process():
    """Batch-classify the images listed in ``names.txt``.

    For every ``*.jpg`` name in the list, loads the image and runs ``fn``:
    images judged to have a regular/repeating fabric pattern are copied to
    ``<src>_clo_gama3``; the rest to ``<src>_human_gama3``. Both output
    directories are wiped and recreated on every run.
    """
    import os
    import shutil
    from os.path import join as osj

    from tqdm import tqdm

    src_dir = '/mnt/nas/datasets/diction/ZipArchive0729'
    save_clothing_dir = f'{src_dir}_clo_gama3'
    save_human_dir = f'{src_dir}_human_gama3'

    # Start from empty output directories so stale results never survive.
    for out_dir in (save_clothing_dir, save_human_dir):
        if os.path.exists(out_dir):
            shutil.rmtree(out_dir)
        os.makedirs(out_dir)

    with open(osj(src_dir, 'names.txt'), encoding='utf-8') as f:
        names = f.readlines()

    for filename in tqdm(names):
        filename = filename.strip()
        if not filename.endswith('.jpg'):
            continue

        filepath = osj(src_dir, filename)
        img = load_image(filepath)

        # Regular pattern -> clothing dir, otherwise human dir.
        dest_dir = save_clothing_dir if fn(img) else save_human_dir
        shutil.copy2(filepath, osj(dest_dir, filename))



# Entry point: launch the interactive Gradio demo.
# (Call start_process() instead for the offline batch-filtering job.)
if __name__=='__main__':
    start_gradio()