from contextlib import closing

import os
from modules import paths
import modules.scripts
from modules import processing,sd_models
from modules.generation_parameters_copypaste import create_override_settings_dict
from modules.shared import opts, cmd_opts
import modules.shared as shared
from modules.ui import plaintext_to_html
import gradio as gr


def _reload_checkpoint_if_needed(sd_model_user):
    """Reload model weights if *sd_model_user* differs from the loaded checkpoint.

    ``sd_model_user`` is a checkpoint title like ``"model.safetensors [hash]"``;
    the filename part before ``[`` is resolved under ``models/Stable-diffusion``.
    """
    if sd_model_user == shared.sd_model.sd_checkpoint_info.title:
        return
    # Strip the trailing "[hash]" suffix if present. Guarding against a
    # missing "[" avoids silently chopping the last character of the title
    # (str.find returns -1, and [0:-1] would drop one char).
    index = sd_model_user.find("[")
    sd_model_filename = (sd_model_user[:index] if index != -1 else sd_model_user).strip()
    print("sd_model_filename is : " + sd_model_filename)
    checkpoint_info = sd_models.CheckpointInfo(os.path.join(paths.models_path, 'Stable-diffusion', sd_model_filename))
    shared.sd_model = sd_models.reload_model_weights(info=checkpoint_info)


def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
    """Run a text-to-image generation job and return its results for the UI.

    ``id_task`` is expected in the form ``"<task>:<userId>"``; when a user id
    is present, outputs are written into a per-user subdirectory and the
    user's preferred checkpoint (tracked in ``shared.userModels``) is loaded
    before processing.

    Returns a tuple of (images, generation-info JSON string, info HTML,
    comments HTML) for the gradio frontend.
    """
    override_settings = create_override_settings_dict(override_settings_texts)

    # Default to the shared output directories so the paths are always
    # defined even when id_task carries no user id (the previous code left
    # these names unbound and raised NameError in that case).
    userId = None
    outpath_samples_tmp = opts.outdir_samples or opts.outdir_txt2img_samples
    outpath_grids_tmp = opts.outdir_grids or opts.outdir_txt2img_grids
    if isinstance(id_task, str):
        id_task_array = id_task.split(":")
        if len(id_task_array) > 1:
            userId = id_task_array[1]
            outpath_samples_tmp = outpath_samples_tmp + "/" + userId
            outpath_grids_tmp = outpath_grids_tmp + "/" + userId

    # Resolve which checkpoint this user should generate with.
    sd_model_user = shared.sd_model.sd_checkpoint_info.title
    if userId is not None and userId in shared.userModels:
        sd_model_user = shared.userModels[userId]
    elif "0" in shared.userModels:
        # Key "0" holds the default model: it covers the case where this
        # user never switched models but another user changed the globally
        # loaded checkpoint.
        sd_model_user = shared.userModels["0"]
        if userId is not None:
            shared.userModels[userId] = sd_model_user
    # Only reload weights when the user's checkpoint differs from the one
    # currently loaded (reloading is expensive).
    _reload_checkpoint_if_needed(sd_model_user)

    p = processing.StableDiffusionProcessingTxt2Img(
        sd_model=shared.sd_model,
        outpath_samples=outpath_samples_tmp,
        outpath_grids=outpath_grids_tmp,
        prompt=prompt,
        styles=prompt_styles,
        negative_prompt=negative_prompt,
        sampler_name=sampler_name,
        batch_size=batch_size,
        n_iter=n_iter,
        steps=steps,
        cfg_scale=cfg_scale,
        width=width,
        height=height,
        enable_hr=enable_hr,
        denoising_strength=denoising_strength if enable_hr else None,
        hr_scale=hr_scale,
        hr_upscaler=hr_upscaler,
        hr_second_pass_steps=hr_second_pass_steps,
        hr_resize_x=hr_resize_x,
        hr_resize_y=hr_resize_y,
        hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name,
        hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name,
        hr_prompt=hr_prompt,
        hr_negative_prompt=hr_negative_prompt,
        override_settings=override_settings,
    )

    p.scripts = modules.scripts.scripts_txt2img
    p.script_args = args

    p.user = request.username

    if cmd_opts.enable_console_prompts:
        print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)

    # closing() guarantees p's resources are released even if a script or
    # process_images raises.
    with closing(p):
        # Give custom scripts first shot at handling the job; fall back to
        # the standard pipeline when none of them produced a result.
        processed = modules.scripts.scripts_txt2img.run(p, *args)

        if processed is None:
            processed = processing.process_images(p)

    shared.total_tqdm.clear()

    generation_info_js = processed.js()
    if opts.samples_log_stdout:
        print(generation_info_js)

    if opts.do_not_show_images:
        processed.images = []

    return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
