# Copyright (c) Alibaba, Inc. and its affiliates.
import os
import json
import numpy as np
import argparse
import torch
# from controlnet_aux import OpenposeDetector
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from face_modules.face_adapter import FaceAdapter, Face_Extractor
from face_modules.merge_lora import merge_lora
from face_modules.constants import neg_prompt, pos_prompt_with_cloth, pos_prompt_with_style, base_models


from modelscope import snapshot_download
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from PIL import Image

from modelscope.hub.api import HubApi

# NOTE(review): an access token was hard-coded here and committed to source
# control; it should be considered leaked and rotated. The environment
# variable takes precedence; the literal fallback preserves existing behavior
# until the secret is revoked.
YOUR_ACCESS_TOKEN = os.environ.get('MODELSCOPE_ACCESS_TOKEN', '039232d8-a99f-4988-ba3d-5546021d9054')

api = HubApi()
api.login(YOUR_ACCESS_TOKEN)

# Command-line options for this portrait-generation script.
parser = argparse.ArgumentParser()
parser.add_argument("--pose_img", type=str, default='0.png', required=False,
                    help="Path to the pose image.")
parser.add_argument("--face_img", type=str, default='woman_face1.png', required=False,
                    help="Path to the human face image.")
parser.add_argument("--style", type=int, default=1, required=False,
                    help="Number of style.")
parser.add_argument("--num_gen", type=int, default=10, required=False,
                    help="Number of generated images.")
args = parser.parse_args()

def img_pad(pil_file, fixed_height=512, fixed_width=512):
    """Fit *pil_file* inside a fixed_width x fixed_height canvas, padding with black.

    Images that are relatively tall are scaled to the full height and centred
    horizontally; relatively wide images are scaled to the full width and
    padded at the top only. The input image is resized in place via
    ``thumbnail``. NOTE(review): ``thumbnail`` only ever shrinks, so inputs
    smaller than the canvas are padded without being enlarged — confirm this
    is intended.
    """
    width, height = pil_file.size

    if height / float(fixed_height) >= width / float(fixed_width):
        # Height is the binding dimension: scale to fixed_height, pad width.
        scale = height / float(fixed_height)
        resized_w = int(width / scale)
        pil_file.thumbnail(size=(resized_w, fixed_height))
        left = (fixed_width - resized_w) // 2
        right = (fixed_width - resized_w) - left
        padding = ((0, 0), (left, right), (0, 0))
    else:
        # Width is the binding dimension: scale to fixed_width, pad height on top.
        scale = width / float(fixed_width)
        resized_h = int(height / scale)
        pil_file.thumbnail(size=(fixed_width, resized_h))
        padding = ((fixed_height - resized_h, 0), (0, 0), (0, 0))

    padded = np.pad(np.array(pil_file), padding, 'constant')
    return Image.fromarray(padded)

def gen_portrait_pose(base_model_path, style_model_path, pose_model_path, pose_image, model_dir, output_dir, multiplier_style, pos_prompt, neg_prompt, input_img, face_adapter_scale, cfg_scale, num_images):
    """Generate pose-controlled portraits whose face identity matches ``input_img``.

    Builds a Stable Diffusion + ControlNet pipeline, merges a style LoRA into
    it, wraps it in a FaceAdapter, then runs ``num_images`` single-image
    generations and keeps only outputs where exactly one face is detected
    with score > 0.5.

    Args:
        base_model_path: local path of the Stable Diffusion base model.
        style_model_path: path to a style LoRA .safetensors file, or None to
            download and use the default hub style checkpoint.
        pose_model_path: path/id of the ControlNet pose model.
        pose_image: path of the image supplying the target pose.
        model_dir: control-model snapshot path (only used by the commented-out
            OpenPose detector; kept for interface stability).
        output_dir: directory where the padded pose image is saved.
        multiplier_style: LoRA merge strength.
        pos_prompt: positive prompt text.
        neg_prompt: negative prompt text.
        input_img: PIL image of the reference face.
        face_adapter_scale: strength of the face-identity adapter.
        cfg_scale: classifier-free guidance scale.
        num_images: number of generation attempts (batch size is fixed at 1).

    Returns:
        List of HxWx3 uint8 numpy arrays; may be shorter than ``num_images``
        because generations failing the single-face check are dropped.
    """
    # Pre/post-processing models: human parsing segmentation plus a RetinaFace
    # detector used both inside the adapter and to filter the outputs.
    segmentation_pipeline = pipeline(
        Tasks.image_segmentation,
        'damo/cv_resnet101_image-multiple-human-parsing',
        model_revision='v1.0.1')

    face_detection = pipeline(task=Tasks.face_detection, model='damo/cv_resnet50_face-detection_retinaface')
    fact_model_dir = snapshot_download('iic/face_chain_fact_model', revision='v1.0.0')
    face_adapter_path = os.path.join(fact_model_dir, 'face_adapter/adapter_maj_25.ckpt')
    # face_adapter_path = './model/adapter_maj_25.ckpt'  # change this path to point at your own trained FaCT weights
    face_extracter = Face_Extractor(fr_weight_path=os.path.join(fact_model_dir, 'face_adapter/ms1mv2_model_TransFace_S.pt'),
                                    fc_weight_path=os.path.join(fact_model_dir, 'face_adapter/adapter_maj_25.ckpt'))

    controlnet = ControlNetModel.from_pretrained(pose_model_path, torch_dtype=torch.float16)
    # openpose = OpenposeDetector.from_pretrained(os.path.join(model_dir, 'model_controlnet/ControlNet'))
    pipe = StableDiffusionControlNetPipeline.from_pretrained(base_model_path, safety_checker=None, controlnet=controlnet, torch_dtype=torch.float16)
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    if style_model_path is None:
        # No explicit style supplied: fall back to the default hub style LoRA.
        model_dir = snapshot_download('Cherrytest/zjz_mj_jiyi_small_addtxt_fromleo', revision='v1.0.0')
        style_model_path = os.path.join(model_dir, 'zjz_mj_jiyi_small_addtxt_fromleo.safetensors')
    pipe = merge_lora(
            pipe,
            style_model_path,
            multiplier_style,
            device='cuda',
            from_safetensor=True)

    face_adapter = FaceAdapter(pipe, face_detection, segmentation_pipeline, face_extracter, face_adapter_path, 'cuda')
    face_adapter.set_scale(face_adapter_scale)
    outputs = []
    batch_size = 1
    pose_im = Image.open(pose_image)
    pose_im = img_pad(pose_im)
    # pose_im = openpose(pose_im, include_hand=False)
    pose_im.save(os.path.join(output_dir, 'pose_im.png'))  # was output_dir + '/pose_im.png'
    for _ in range(num_images // batch_size):  # floor division instead of int(a / b)
        images_style = face_adapter.generate(prompt=pos_prompt, face_image=input_img, pose_image=pose_im, height=512, width=512,
            guidance_scale=cfg_scale, negative_prompt=neg_prompt, num_inference_steps=50, num_images_per_prompt=batch_size)
        # Keep the image only when exactly one face is detected confidently.
        if (np.array(face_detection(np.array(images_style[0]))['scores']) > 0.5).sum() == 1:
            outputs.append(np.array(images_style[0]))
    return outputs

def generate_pos_prompt(style_model, prompt_cloth):
    """Build the positive prompt for the given style name.

    Looks up *style_model* in the module-level ``styles`` list. A style that
    has its own ``model_id`` contributes its ``add_prompt_style`` text via the
    style template; otherwise the cloth template is filled with
    *prompt_cloth*.

    Raises:
        ValueError: if *style_model* is not None and not found in ``styles``.
    """
    if style_model is None:
        return pos_prompt_with_cloth.format(prompt_cloth)
    found = next((s for s in styles if s['name'] == style_model), None)
    if found is None:
        raise ValueError(f'styles not found: {style_model}')
    if found['model_id'] is None:
        return pos_prompt_with_cloth.format(prompt_cloth)
    return pos_prompt_with_style.format(found['add_prompt_style'])

# Load every style JSON under ./styles/<base model name>/ into the flat
# `styles` list and record per-base-model style names.
styles = []
for base_model in base_models:
    style_in_base = []
    folder_path = f"./styles/{base_model['name']}"
    # Deterministic order matters: args.style selects a style by position.
    for file_name in sorted(os.listdir(folder_path)):
        file_path = os.path.join(folder_path, file_name)
        # Explicit encoding: JSON style files should not depend on the
        # platform's default locale encoding.
        with open(file_path, "r", encoding="utf-8") as f:
            data = json.load(f)
        style_in_base.append(data['name'])
        styles.append(data)
    base_model['style_list'] = style_in_base

# --- Resolve inputs, style, and model paths ---------------------------------
pose_image = os.path.join('./input_pose', args.pose_img)
input_img_name = os.path.join('./input_face', args.face_img)
num_generate = args.num_gen
face_adapter_scale = 0.6  # identity-adapter strength
multiplier_style = 0.25   # LoRA style merge strength
cfg_scale = 5.0           # classifier-free guidance scale
output_dir = './generated'
base_model = base_models[1]  # NOTE(review): base model hard-coded to index 1 — confirm intended
style = styles[args.style]
model_id = style['model_id']
base_model_path = base_model['model_id']
sub_path = base_model['sub_path']
revision = base_model['revision']

if model_id is None:  # was `== None`; identity comparison is the idiom for None
    style_model_path = None
else:
    # The style may reference either a local checkout or a hub model id.
    if os.path.exists(model_id):
        model_dir = model_id
    else:
        model_dir = snapshot_download(model_id, revision=style['revision'])
    style_model_path = os.path.join(model_dir, style['bin_file'])
# Both branches built the prompt the same way, so do it once.
pos_prompt = generate_pos_prompt(style['name'], style['add_prompt_style'])


model_dir = snapshot_download('damo/face_chain_control_model', revision='v1.0.1')
pose_model_path = snapshot_download('changqi/my_controlnet')

base_model_path = snapshot_download(base_model_path, revision=revision)
base_model_path = os.path.join(base_model_path, sub_path)

input_img = Image.open(input_img_name).convert('RGB')
w, h = input_img.size
if max(w, h) > 1000:
    # Downscale large face images so the longer side is at most 1000 px.
    scale = 1000 / max(w, h)
    input_img = input_img.resize((int(w * scale), int(h * scale)))

os.makedirs(output_dir, exist_ok=True)
outputs = gen_portrait_pose(base_model_path, style_model_path, pose_model_path, pose_image, model_dir, output_dir, multiplier_style, pos_prompt, neg_prompt, input_img, face_adapter_scale, cfg_scale, num_generate)

# Write each kept generation to ./generated/<index>.png.
for i, out_tmp in enumerate(outputs):
    Image.fromarray(out_tmp).save(os.path.join(output_dir, f'{i}.png'))