import gradio as gr
if gr.NO_RELOAD:
    import torch
    import time
    import os
    import json
    import datetime
    from PIL import Image
    import numpy as np
    import random
import csv
from utils import list_models,get_pipe,get_scheduler,create_logger,get_mask,resize_to_nearest_multiple_of_8,clean_gpu_cache
from utils import GENERATE_PATH
from plugins.wd_tagger import Args,Predictor 

#===== Global state (shared across Gradio callbacks) ======
if gr.NO_RELOAD:
    pipe = None  # currently loaded diffusers pipeline (replaced on every run)
    pipe_info = []  # [ckpt, vae, scheduler] describing the loaded pipe
    seed = 0  # seed actually used for the most recent generation
    #img_gen_info = {} # image generation parameters (unused)

    logger,_ = create_logger("webui",level=20)    # level 20 == logging.INFO
    use_t2i_adapter = False  # NOTE(review): appears unused; the UI checkbox is use_t2i_adapter_gr

    is_select_img = False  # whether a gallery image is currently selected (preview open)
    selected_imgname = ""  # caption/name of the selected gallery image


    


#================

def predict(im):
    """Return the composited (edited) image from a Gradio ImageEditor value dict."""
    composite = im["composite"]
    return composite

def save_image_and_info(args):
    """
    Save the generated gallery images and their generation parameters.

    args: Gradio call dict keyed by the module-level components
    (img_gr, ckpts, vaes, ...). Writes one PNG per gallery image plus a
    single JSON sidecar, all named by the current timestamp, into
    GENERATE_PATH.
    """
    imgs = args[img_gr]
    imgname = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    # Create the output directory once, tolerating concurrent creation
    # (was an exists-check + mkdir inside the loop).
    os.makedirs(GENERATE_PATH, exist_ok=True)
    for i in range(len(imgs)):
        Image.fromarray(imgs[i][0]).save(f"{GENERATE_PATH}/{imgname}_{i}.png")
        # Log the filename actually written (was missing the _{i} suffix).
        logger.info(f"图片保存到{imgname}_{i}.png")
        gr.Info(f"图片保存到{imgname}_{i}.png")

    params = {
        'ckpts': args[ckpts],
        'vaes': args[vaes],
        'scheduler': args[scheduler],
        'prompt': args[prompt],
        'negative_prompt': args[neg_prompt],
        'height': args[height],
        'width': args[width],
        'num_inference_steps': args[num_inference_steps_gr],
        'guidance_scale': args[guidance_scale],
        'clip_skip': args[clip_skip],
        'generator': args[random_device],
        'seed': seed,
    }
    # BUG FIX: path was the hardcoded "generate_images"; use GENERATE_PATH so
    # the JSON sidecar always lands next to the images it describes.
    with open(f"{GENERATE_PATH}/{imgname}.json", "w", encoding="utf-8") as f:
        json.dump(params, f, indent=4, ensure_ascii=False)
    logger.info(f"生图参数保存到{imgname}.json")
    gr.Info(f"生图参数保存到{imgname}.json")

def i2i_save_image_and_info(args):
    """
    Save the img2img result image to GENERATE_PATH.

    NOTE: the click wiring (i2i_save_bnt.click) passes only i2i_res into
    this callback, so the generation parameters are not available here.
    The old parameter-dump code sat after a bare `return` (unreachable, and
    it would have raised KeyError on args[ckpts] anyway); it has been
    removed. To save parameters too, wire the remaining components into
    the click inputs first.
    """
    npimg = args[i2i_res]
    imgname = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    # Create the output directory, tolerating concurrent creation.
    os.makedirs(GENERATE_PATH, exist_ok=True)
    npimg.save(f"{GENERATE_PATH}/{imgname}.png")
    logger.info(f"图片保存到{imgname}.png")
    gr.Info(f"图片保存到{imgname}.png")

def i2i_upload_img(im):
    """After an upload to the editor, return (height, width) snapped to a multiple of 8.

    The returned pair feeds the i2i_height / i2i_width sliders.
    """
    uploaded = im["background"].convert('RGB')
    uploaded = resize_to_nearest_multiple_of_8(uploaded)
    width_px, height_px = uploaded.size  # PIL .size is (width, height)
    return height_px, width_px

def t2i(args):
    """
    Text-to-image generation callback.

    args: Gradio call dict keyed by the module-level components. Builds the
    appropriate pipeline depending on the adapter checkboxes and returns a
    list of (image, seed-string) tuples for the gallery.
    """
    global pipe
    global pipe_info
    global seed

    # Create the torch generator on the requested device; -1 means pick a
    # random 32-bit seed (stored in the global so it can be saved later).
    device = "cpu" if args[random_device] == "CPU" else "cuda"
    if args[random_seed]==-1:
        seed = random.randint(0, 2**32 - 1)
    else:
        seed = args[random_seed]
    generator = torch.Generator(device).manual_seed(seed)

    if not args[use_t2i_adapter_gr] and not args[use_ip_adapter_gr]:
        # Plain SDXL path. BUG FIX: this branch previously ran whenever the
        # t2i-adapter box was unchecked, so checking only "ip-adapter"
        # triggered a full extra generation here whose result was discarded
        # by the adapter branch below.
        logger.info("加载pipe")
        gr.Info('加载pipe')
        print(args[ckpts],args[vaes],args[scheduler])
        pipe = get_pipe(args[ckpts],args[vaes],args[scheduler])
        pipe.to('cuda')
        pipe_info = [args[ckpts],args[vaes],args[scheduler]]
        logger.info("开始推理")
        gr.Info('开始推理')
        # VRAM savers.
        pipe.enable_vae_slicing()
        #pipe.enable_vae_tiling()
        pipe.enable_model_cpu_offload()
        #pipe.enable_sequential_cpu_offload()
        clean_gpu_cache()
        with torch.no_grad():
            images = pipe(
                prompt=args[prompt],
                negative_prompt=args[neg_prompt],
                height = args[height],
                width= args[width],
                num_inference_steps=args[num_inference_steps_gr],
                guidance_scale=args[guidance_scale],
                clip_skip=args[clip_skip],
                num_images_per_prompt=args[num_images_gr],
                generator = generator,
            ).images
            clean_gpu_cache()
    else:
        # t2i-adapter and/or ip-adapter path.
        logger.info("加载pipe")
        gr.Info('加载pipe')
        #========== load pipeline ================
        pipe = get_pipe(args[ckpts],args[vaes],args[scheduler],control = 't2i-adapter')
        pipe_info = [args[ckpts],args[vaes],args[scheduler]]
        pipe.to('cuda')
        #========== load IP-Adapter ==============
        if args[use_ip_adapter_gr]==True:
            gr.Info("使用ip-adapter")
            pipe.load_ip_adapter("./ip-adapter",subfolder='sdxl_models',weight_name='ip-adapter_sdxl.safetensors',local_files_only=True)

        #========== VRAM control ==============
        pipe.enable_vae_slicing()
        pipe.enable_vae_tiling()
        pipe.enable_model_cpu_offload()
        # pipe.enable_sequential_cpu_offload()
        clean_gpu_cache()
        #========== pre-weight the prompt embeddings =================
        from sd_embed.embedding_funcs import get_weighted_text_embeddings_sdxl
        prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = get_weighted_text_embeddings_sdxl(pipe,args[prompt],args[neg_prompt],pad_last_block=False)

        #========== run inference ==============
        logger.info("开始推理_t2i_adapter")
        gr.Info('开始推理_t2i_adapter')
        with torch.no_grad():
            images = pipe(
                prompt_embeds=prompt_embeds,
                negative_prompt_embeds=negative_prompt_embeds,
                pooled_prompt_embeds=pooled_prompt_embeds,
                negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
                height = args[height],
                width= args[width],
                num_inference_steps=args[num_inference_steps_gr],
                guidance_scale=args[guidance_scale],
                clip_skip=args[clip_skip],
                num_images_per_prompt=args[num_images_gr],
                generator = generator,
                #==================
                # BUG FIX: ip_adapter_image was gated on use_t2i_adapter_gr,
                # so the ip-adapter reference image was never passed when
                # only "使用ip-adapter" was checked.
                ip_adapter_image=args[ip_img_gr] if args[use_ip_adapter_gr]==True else None,
                image=args[preprocessed_img_gr] if args[use_t2i_adapter_gr]==True else None,
                adapter_conditioning_scale=args[adapter_scale_gr],
                adapter_conditioning_factor=args[adapter_factor_gr],
            ).images
            clean_gpu_cache()

    return [(im,str(seed)) for im in images]

def i2i_send_to_inpaint(res_img):
    """Forward the img2img result unchanged into the inpaint ImageEditor."""
    return res_img

def i2i(args):
    """
    Image-to-image / inpaint generation callback.

    Reads the ImageEditor value (background image plus user strokes),
    snaps the source to a multiple of 8, builds an inpaint mask from the
    editor layers and runs the pipeline once, returning the single result
    image for i2i_res.
    """
    global pipe
    global pipe_info
    global seed

    # Seed handling: -1 means pick a random 32-bit seed.
    device = "cpu" if args[i2i_random_device] == "CPU" else "cuda"
    if args[i2i_random_seed]==-1:
        seed = random.randint(0, 2**32 - 1)
    else:
        seed = args[i2i_random_seed]
    generator = torch.Generator(device).manual_seed(seed)

    logger.info("加载pipe")
    gr.Info('加载pipe')
    pipe = get_pipe(args[ckpts],args[vaes],args[i2i_scheduler])
    pipe_info = [args[ckpts],args[vaes],args[i2i_scheduler]]
    
    logger.info("开始推理i2i")
    gr.Info('开始推理i2i')
    # VRAM savers; model_cpu_offload moves submodules onto the GPU on demand.
    pipe.enable_vae_slicing()
    pipe.enable_vae_tiling()
    pipe.enable_model_cpu_offload()
    #pipe.enable_sequential_cpu_offload() # errors out with meta tensors
    img_dict = args[img_editor]
    i2i_img = img_dict["background"].copy()

    i2i_img = i2i_img.convert('RGB')

    i2i_img = resize_to_nearest_multiple_of_8(i2i_img)

    # Mask comes from comparing the untouched background against the
    # composite (background + strokes) — presumably the painted area
    # becomes the inpaint region; see utils.get_mask to confirm.
    mask_tensor = get_mask(np.array(img_dict["background"]),np.array(img_dict["composite"]))
    clean_gpu_cache()
    #==
    with torch.no_grad():
        images = pipe(
            prompt=args[i2i_prompt],
            negative_prompt=args[i2i_neg_prompt],
            height = i2i_img.size[1],
            width= i2i_img.size[0],
            image = i2i_img,
            mask_image = mask_tensor,
            strength = args[i2i_inpaint_strength_gr],
            num_inference_steps=args[i2i_num_inference_steps],
            guidance_scale=args[i2i_guidance_scale],
            clip_skip=args[clip_skip],
            num_images_per_prompt=1,
            generator = generator,
        ).images
        clean_gpu_cache()
    return images[0]

def show_info(evt: gr.SelectData):
    """Show the generation parameters for the gallery image just selected.

    Returns the JSON sidecar content as a string, or "None" when no sidecar
    exists for the image.
    """
    # Remember the selection so delete / send-to-i2i can act on it.
    global selected_imgname
    selected_imgname = evt.value["caption"]

    # Multi-image t2i saves are named "<timestamp>_<i>.png" but share one
    # "<timestamp>.json": strip a trailing numeric suffix to find it.
    imgname = evt.value["caption"]
    parts = imgname.rsplit('_', 1)
    if len(parts) > 1 and parts[1].isdigit():
        jsonname = parts[0]
    else:
        # BUG FIX: was `jsonname = parts` (the list itself), which built a
        # path like ".../['name'].json" that could never exist.
        jsonname = imgname

    info_path = f'{GENERATE_PATH}/{jsonname}.json'
    if os.path.exists(info_path):
        with open(info_path,'r',encoding="utf-8") as f:
            gen_info = json.load(f)
    else:
        gen_info = "None"

    return f"{gen_info}"

def get_tags():
    """Read tag descriptions from tags.csv and return all data rows.

    The header row is skipped. (The old version appended the header and
    immediately sliced it off again, and printed it as debug noise.)
    """
    with open('tags.csv', mode='r', encoding='utf-8') as file:
        reader = csv.reader(file)
        next(reader)  # skip the header row
        return list(reader)

def save_tags(tags_list):
    """Overwrite tags.csv with the given rows and return the reloaded table."""
    header = ['tag', '简要描述', '额外描述']
    with open('tags.csv', mode='w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow(header)   # header first
        writer.writerows(tags_list)  # then the data rows
    # Re-read so the Dataframe shows exactly what was persisted.
    return get_tags()

def delete_selected_img():
    """Delete the currently selected gallery image, then return the refreshed gallery."""
    if not is_select_img:
        gr.Info("未选择图片！")
    else:
        imgpath = f"{GENERATE_PATH}/{selected_imgname}.png"
        os.remove(imgpath)
        print(f"文件 {imgpath} 已成功删除")
    return get_images()

def test_open_preview():
    """Gallery preview opened: mark that an image is now selected."""
    global is_select_img
    print("open_preview")
    is_select_img = True

def test_close_preview():
    """Gallery preview closed: mark that no image is selected anymore."""
    global is_select_img
    print("close_preview")
    is_select_img = False
   
def get_images():
    """List all generated PNG/JPG files as (path, caption) pairs for the gallery.

    The caption is the filename without its extension; show_info relies on
    it to locate the matching JSON sidecar.
    """
    # os is already imported at module level; the old local `import os`
    # was redundant. exist_ok avoids the check-then-mkdir race.
    os.makedirs(GENERATE_PATH, exist_ok=True)
    return [
        # splitext only strips the final extension, so names containing
        # dots keep their full stem (split('.')[0] truncated them).
        (f"{GENERATE_PATH}/{name}", os.path.splitext(name)[0])
        for name in os.listdir(GENERATE_PATH)
        if name.endswith((".png", ".jpg"))
    ]

def get_condition_img(im:Image.Image,aux):
    """Run the selected preprocessor on the image and return the condition image.

    Only "DWPose" is implemented; any other choice yields None.
    """
    from DWPose.inference import get_pose
    logger.info(aux)
    if aux != "DWPose":
        return None
    return get_pose(im)

def gallery_send_to_t2i(gen_info):
    """Send the selected image's saved parameters back into the t2i components.

    gen_info is the text shown in info_gr — the Python repr of the params
    dict produced by show_info.
    """
    import ast
    try:
        # BUG FIX: the old replace("'", '"') + json.loads corrupted any
        # prompt containing an apostrophe; literal_eval parses the dict
        # repr safely (it evaluates literals only, never code).
        gen_info = ast.literal_eval(gen_info)
    except (ValueError, SyntaxError):
        # Fall back to the legacy behaviour for inputs literal_eval rejects.
        gen_info = json.loads(gen_info.replace("'",'"'))
    return {
        ckpts: gen_info['ckpts'],
        vaes: gen_info['vaes'],
        prompt: gen_info['prompt'],
        neg_prompt: gen_info['negative_prompt'],
        num_inference_steps_gr: gen_info['num_inference_steps'],
        height: gen_info['height'],
        width: gen_info['width'],
        scheduler: gen_info.get('scheduler', 'None'),
        guidance_scale: gen_info['guidance_scale'],
        random_device: gen_info['generator'],
        random_seed: gen_info['seed'],
    }

def gallery_send_to_i2i(gen_info):
    """Send the selected image and its saved parameters into the i2i components.

    gen_info is the text shown in info_gr — the Python repr of the params
    dict produced by show_info. The selected image itself is loaded into
    the editor via its file path.
    """
    import ast
    try:
        # BUG FIX: the old replace("'", '"') + json.loads corrupted any
        # prompt containing an apostrophe; literal_eval parses the dict
        # repr safely (it evaluates literals only, never code).
        gen_info = ast.literal_eval(gen_info)
    except (ValueError, SyntaxError):
        # Fall back to the legacy behaviour for inputs literal_eval rejects.
        gen_info = json.loads(gen_info.replace("'",'"'))

    imgpath = f"{GENERATE_PATH}/{selected_imgname}.png"
    return {
        ckpts: gen_info['ckpts'],
        vaes: gen_info['vaes'],
        i2i_prompt: gen_info['prompt'],
        i2i_neg_prompt: gen_info['negative_prompt'],
        i2i_num_inference_steps: gen_info['num_inference_steps'],
        i2i_height: gen_info['height'],
        i2i_width: gen_info['width'],
        i2i_scheduler: gen_info.get('scheduler', 'None'),
        i2i_guidance_scale: gen_info['guidance_scale'],
        i2i_random_device: gen_info['generator'],
        i2i_random_seed: gen_info['seed'],
        img_editor: imgpath
    }

tagger_predictor = Predictor()  # WD-tagger predictor (plugins.wd_tagger), constructed once at import
#=======================================================
#========================   UI  ========================
#=======================================================
with gr.Blocks() as demo:
    gr.Markdown("Start typing below and then click **Run** to see the output.")
    # Components shared by every tab: checkpoint, VAE and clip-skip selection.
    with gr.Row():
        # NOTE(review): "checkpioint" label typo — runtime string, left as-is here.
        ckpts = gr.Dropdown(list_models(),label="checkpioint")

        vaes = gr.Dropdown(['sd','sd++','自动加载'],value="自动加载",label="VAE")
        clip_skip = gr.Slider(-2,4,value=0,label="clip_skip",info="sdxl 默认为2，此时clip_skip应该置为0",interactive=True)
    #=========================== Text-to-image tab
    with gr.Tab("文生图"):
       
        prompt = gr.Textbox(placeholder="prompt",label='正面提示词',lines=3,max_lines=5,interactive=True)
        neg_prompt = gr.Textbox(placeholder="neg_prompt",lines=3,max_lines=5,interactive=True)
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    scheduler = gr.Dropdown(['Euler a','DPM++ 2M Karras'],value='Euler a',label="采样器")
                    num_images_gr = gr.Slider(1,4,value=1,step=1,label="图片数量",interactive=True)
                    num_inference_steps_gr = gr.Slider(1,50,value=28,step=1,label="采样步数",interactive=True)
                    
                with gr.Row():    
                    width = gr.Slider(256,2048,value=1024,step=16,label="宽度",interactive=True)
                    height = gr.Slider(256,2048,value=1024,step=16,label="高度",interactive=True)            
                with gr.Row():
                        guidance_scale= gr.Slider(1,10,step=0.5,value=7,label="CFG引导系数",interactive=True,scale = 4) 
                        random_device = gr.Dropdown(['CPU','GPU'],value="CPU",label="随机数种子",scale = 1,min_width=120,interactive=True)
                        random_seed = gr.Number(minimum=-1,precision=0,step=1,value=-1, label="random_seed",interactive=True,scale = 3)
                with gr.Row():
                    use_t2i_adapter_gr = gr.Checkbox(label="使用t2i-adapter",min_width=80)
                    use_ip_adapter_gr = gr.Checkbox(label="使用ip-adapter")
                    use_hr_gr = gr.Checkbox(label="使用高清修复")
                with gr.Accordion("T2i-Adapter"):
                    adapter_scale_gr = gr.Slider(0,2,step=0.1,value=1, label="adapter缩放系数",interactive=True)
                    adapter_factor_gr = gr.Slider(0,1,step=0.1,value=1, label="adapter缩放因子",interactive=True)
                    select_aux_gr = gr.Dropdown(['DWPose','Lineart'],value='DWPose',label="预处理器",interactive=True)
                    run_preprocess_gr = gr.Button("获取预处理图像")
                    with gr.Row():
                        control_img_gr = gr.Image(type='pil',image_mode="RGB",interactive=True)
                        preprocessed_img_gr = gr.Image(type='pil',image_mode="RGB",interactive=False)
                with gr.Accordion("IP-Adapter"):
                    ip_img_gr = gr.Image(type='pil',image_mode="RGB",interactive=True)
                   
                
            with gr.Column():
                run_t2i_gr = gr.Button("Run")
                img_gr = gr.Gallery(type='numpy',format='png',interactive=False)
                with gr.Row():
                    save_bnt = gr.Button("保存",min_width=40)
                    _bnt1 = gr.Button("送至图生图",min_width=40)
                    _bnt2 = gr.Button("待定",min_width=40)
                    _bnt3 = gr.Button("待定",min_width=40)
        #=== event wiring (inputs passed as a set => callbacks receive a dict keyed by component)
        run_preprocess_gr.click(
            fn = get_condition_img,
            inputs = [control_img_gr,select_aux_gr],
            outputs=[preprocessed_img_gr]
        )
        run_t2i_gr.click(
            fn=t2i, 
            inputs={
                ckpts,vaes,clip_skip,scheduler,
                prompt,neg_prompt,num_inference_steps_gr,num_images_gr,guidance_scale,width,height,random_device,random_seed,
                use_t2i_adapter_gr,use_hr_gr, use_ip_adapter_gr,# adapter toggles
                preprocessed_img_gr, # control condition image
                ip_img_gr, # ip-adapter reference image
                adapter_scale_gr,adapter_factor_gr # adapter coefficients
                },
            outputs=img_gr
        )
        # NOTE(review): clip_skip appears twice below; harmless since inputs is a set.
        save_bnt.click(
            fn=save_image_and_info,
            inputs={img_gr,prompt,neg_prompt,ckpts,vaes,clip_skip,scheduler,num_inference_steps_gr,guidance_scale,clip_skip,width,height,random_device,random_seed},
        )    
    #=========================== Image-to-image tab
    with gr.Tab("图生图"):
  
        i2i_prompt = gr.Textbox(placeholder="prompt",lines=3,max_lines=5,interactive=True)
        i2i_neg_prompt = gr.Textbox(placeholder="neg_prompt",lines=3,max_lines=5,interactive=True)
        with gr.Row():
            with gr.Column():
                img_editor = gr.ImageEditor(
                    #canvas_size=(800,800),
                    #container=False,
                    #fixed_canvas=True,
                    image_mode="RGB",
                    type="pil",
                    layers=False,
                )
                with gr.Row():
                    i2i_scheduler = gr.Dropdown(['Euler a','DPM++ 2M Karras'],value='Euler a',label="采样器",interactive=True)
                    i2i_num_inference_steps = gr.Slider(1,50,value=28,step=1,label="采样步数",interactive=True)
                with gr.Row():    
                    i2i_width = gr.Slider(256,2048,value=1024,step=16,label="宽度",interactive=True)
                    i2i_height = gr.Slider(256,2048,value=1024,step=16,label="高度",interactive=True)            
                i2i_inpaint_strength_gr =gr.Slider(0,1,step=0.1,value=0.8,label="重绘强度")
                with gr.Row():
                        i2i_guidance_scale= gr.Slider(1,10,step=0.5,value=7,label="CFG引导系数",interactive=True,scale = 4) 
                        i2i_random_device = gr.Dropdown(['CPU','GPU'],value="CPU",label="随机数种子",scale = 1,min_width=120,interactive=True)
                        i2i_random_seed = gr.Number(minimum=-1,precision=0,step=1,value=-1, label="random_seed",interactive=True,scale = 3)
                i2i_bnt = gr.Button(value='Run!')
            with gr.Column():
                i2i_res = gr.Image(image_mode="RGB",type="pil",format='png',interactive=False)
    
                with gr.Row():
                    i2i_save_bnt = gr.Button("保存",min_width=40)
                    i2i_to_inpaint_bnt = gr.Button("送至图生图",min_width=40)
                    i2i_bnt2 = gr.Button("待定",min_width=40)
                    i2i_bnt3 = gr.Button("待定",min_width=40)
        #==== event wiring   
        i2i_to_inpaint_bnt.click(
            fn=i2i_send_to_inpaint,
            inputs=[i2i_res],
            outputs=[img_editor]
        )
        # Sync the height/width sliders whenever an image is uploaded to the editor.
        img_editor.upload(
            fn = i2i_upload_img,
            inputs=[img_editor],
            outputs=[i2i_height,i2i_width]
        )     

        i2i_bnt.click(
            fn = i2i,
            inputs={i2i_prompt,i2i_neg_prompt,ckpts,vaes,clip_skip,i2i_scheduler,i2i_num_inference_steps,i2i_guidance_scale,i2i_width,i2i_height,i2i_inpaint_strength_gr,i2i_random_device,i2i_random_seed,img_editor},
            outputs= {i2i_res}
        )
        # NOTE(review): only i2i_res is wired in, so the save callback cannot
        # see the generation parameters — see i2i_save_image_and_info.
        i2i_save_bnt.click(
            fn = i2i_save_image_and_info,
            inputs={i2i_res}
        )
    #=========================== Gallery tab
    with gr.Tab("画廊"):
        #gr.Dataframe(value=get_tags,headers=['tag','简要描述','额外描述'],type="array",interactive=True)
        #gr.Dataset(components=[gr.Textbox(visible=False)], label="Text Dataset", samples=[ ["The quick brown fox jumps over the lazy dog"], ["Build & share delightful machine learning apps"], ["She sells seashells by the seashore"], ["Supercalifragilisticexpialidocious"], ["Lorem ipsum"], ["That's all folks!"] ], )
        with gr.Row():
            with gr.Column():
                refresh_gallery_btn = gr.Button("刷新画廊")
                showcase_gr = gr.Gallery(
                                label='aaaaaaaaaaa',
                                value=get_images,
                                format='png',
                                #rows=4,
                                columns=4,
                                container=True,
                                object_fit='scale-down',
                                #height=512,
                            )
                
            with gr.Column():
                info_gr = gr.TextArea(label="生成信息",value="prompt:aaaaaaaaaaaaaaaaaaaa\n neg_prompt:aaaaaaaaaccccccccccccccc",interactive=False)
                with gr.Row():
                    send_to_t2i_gr = gr.Button("送至文生图",min_width=40)
                    send_to_i2i_gr = gr.Button("送至图生图",min_width=40)
                    delete_img_gr = gr.Button("删除",min_width=40)

        # Editable tag-description table backed by tags.csv.
        tags_info_gr = gr.Dataframe(value=get_tags,headers=['tag','简要描述','额外描述'],type="array",show_search='search',interactive=True)
        save_tags_info_btn = gr.Button("保存更改")
        #==== event wiring
        save_tags_info_btn.click(fn=save_tags,inputs=[tags_info_gr],outputs=[tags_info_gr])
        showcase_gr.select(fn=show_info,outputs=[info_gr])
        # Preview open/close toggles the is_select_img flag used by delete.
        showcase_gr.preview_close(fn=test_close_preview)
        showcase_gr.preview_open(fn=test_open_preview)
        refresh_gallery_btn.click(fn=get_images,
                                    outputs=[showcase_gr]
                                    )
        delete_img_gr.click(fn=delete_selected_img,outputs=[showcase_gr])
        send_to_i2i_gr.click(fn=gallery_send_to_i2i,
                                inputs=[info_gr],
                                outputs=[i2i_prompt,i2i_neg_prompt,ckpts,vaes,clip_skip,i2i_scheduler,i2i_num_inference_steps,i2i_guidance_scale,i2i_width,i2i_height,i2i_random_device,i2i_random_seed,img_editor]
                            )

        send_to_t2i_gr.click(fn=gallery_send_to_t2i,
                            inputs=[info_gr],
                            outputs=[ckpts,vaes,clip_skip,scheduler,
                            prompt,neg_prompt,num_inference_steps_gr,guidance_scale,width,height,random_device,random_seed,
                            use_t2i_adapter_gr,use_hr_gr, # adapter toggles
                            preprocessed_img_gr, # control condition image
                            adapter_scale_gr, # adapter scale coefficient
                            ])

    # WD-tagger tab: predict booru tags / rating / characters for an image.
    with gr.Tab("标签"): 
        with gr.Row():
            with gr.Column(variant="panel"):
                image = gr.Image(type="pil", image_mode="RGBA", label="Input")
                with gr.Row():
                    general_thresh = gr.Slider(
                        0,
                        1,
                        step=0.1,
                        value=Args.general_thresh,
                        label="General Tags Threshold",
                        scale=3,
                    )
                    general_mcut_enabled = gr.Checkbox(
                        value=False,
                        label="Use MCut threshold",
                        scale=1,
                    )
                with gr.Row():
                    character_thresh = gr.Slider(
                        0,
                        1,
                        step=0.1,
                        value=Args.character_thresh,
                        label="Character Tags Threshold",
                        scale=3,
                    )
                    character_mcut_enabled = gr.Checkbox(
                        value=False,
                        label="Use MCut threshold",
                        scale=1,
                    )
                with gr.Row():
                    tagger_submit = gr.Button(value="Submit", variant="primary", size="lg")
            with gr.Column(variant="panel"):
                sorted_general_strings = gr.Textbox(label="Output (string)",interactive=True)
                rating = gr.Label(label="Rating")
                character_res = gr.Label(label="Output (characters)")
                general_res = gr.Label(label="Output (tags)")
        tagger_submit.click(
            tagger_predictor.predict,
            inputs=[
                image,
                general_thresh,
                general_mcut_enabled,
                character_thresh,
                character_mcut_enabled,
            ],
            outputs=[sorted_general_strings, rating, character_res, general_res],
        )
                
    
        
   

if __name__ == "__main__":
    # share=True exposes a public Gradio link; debug=True blocks and surfaces errors.
    demo.launch(debug=True,share=True)