
import torch
import os
import argparse
import random
import numpy as np
from pathlib import Path 
import time
import torch.distributed as dist
import json 
import uvicorn, json, datetime
# from asgiref.sync import sync_to_async
import base64 
import hashlib 
from PIL import Image, ImageOps
import os
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from autocrop import Cropper

from PIL import Image, ImageOps
from pipeline.instructface_pipeline import StableDiffusionInstructFacePipeline
import torch
import numpy as np
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import random
import json
from accelerate import Accelerator
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import UNet2DConditionModel, AutoencoderKL, DDPMScheduler

# parser = argparse.ArgumentParser(
#                     prog='instruct_face_server',
#                     description='What the program does',
#                     epilog='Text at the bottom of help')
# parser.add_argument('--server_port', default=7860)           # positional argument
# parser.add_argument('--device_id', default="0")      # option that takes a value
# parser.add_argument('-v', '--verbose',
#                     action='store_true')  # on/off flag

# args = parser.parse_args()

# --- Server configuration -------------------------------------------------
# Hard-coded; the argparse block above is kept commented out.
server_port = 7860
gpu_id = 0 

# All models are placed on this single CUDA device.
device = f"cuda:{gpu_id}"

# Directories for saved input/output images, super-resolved images, and logs.
save_images_dir = "./images_save/"
save_images_dir_sr = "./images_save_sr/"
log_dir = "./logs/"

os.makedirs(log_dir, exist_ok=True)
os.makedirs(save_images_dir, exist_ok=True)
os.makedirs(save_images_dir_sr, exist_ok=True)

# Standalone VAE checkpoint swapped into the pipeline by create_pipeline().
vae_path = "./sd-vae-ft-mse"

# Super-resolution model (Real-ESRGAN x4): RRDBNet backbone + upsampler.
SR_model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
netscale = 4
model_path = "./weights/RealESRGAN_x4plus.pth"
upsampler = RealESRGANer(
    scale=netscale,
    model_path=model_path,
    model=SR_model,
    gpu_id=gpu_id)
        
def genearteMD5(input_str):
    """Return the hex md5 digest of a string (UTF-8 encoded).

    Note: the typo'd name is kept because callers use it.
    """
    return hashlib.md5(input_str.encode(encoding='utf-8')).hexdigest()

def img_to_md5(_path):
    """Return the hex md5 digest of the file at ``_path``.

    Uses a context manager so the file handle is closed even if read()
    raises (the original open/close pair leaked the descriptor on error).
    """
    with open(_path, 'rb') as fd:
        return hashlib.md5(fd.read()).hexdigest()

def img_to_base64(_path):
    """Read the file at ``_path`` and return its base64 encoding (bytes)."""
    with open(_path, 'rb') as handle:
        raw = handle.read()
    return base64.b64encode(raw)

def create_model(pretrained_model_name_or_path):
    """Load the Stable Diffusion components from a pretrained checkpoint dir.

    Returns a dict with keys ``noise_scheduler``, ``tokenizer``,
    ``text_encoder``, ``vae`` and ``unet``. The VAE and text encoder are
    frozen (``requires_grad_(False)``); only the UNet stays trainable.
    """
    components = {
        "noise_scheduler": DDPMScheduler.from_pretrained(
            pretrained_model_name_or_path, subfolder="scheduler"),
        "tokenizer": CLIPTokenizer.from_pretrained(
            pretrained_model_name_or_path, subfolder="tokenizer"),
        "text_encoder": CLIPTextModel.from_pretrained(
            pretrained_model_name_or_path, subfolder="text_encoder"),
        "vae": AutoencoderKL.from_pretrained(
            pretrained_model_name_or_path, subfolder="vae"),
        "unet": UNet2DConditionModel.from_pretrained(
            pretrained_model_name_or_path, subfolder="unet"),
    }

    # Inference-only components: freeze their weights.
    components["vae"].requires_grad_(False)
    components["text_encoder"].requires_grad_(False)

    return components

def create_pipeline(pretrained_model_name_or_path, resume_path, device):
    """Assemble the InstructFace pipeline on ``device``.

    Restores fine-tuned weights from the accelerate checkpoint at
    ``resume_path``, then builds the pipeline from the base checkpoint with
    the restored text encoder / UNet.
    """
    accelerator = Accelerator()
    accelerator.load_state(resume_path)

    components = create_model(pretrained_model_name_or_path=pretrained_model_name_or_path)

    # NOTE(review): the VAE loaded by create_model is discarded in favour of
    # the standalone checkpoint at the module-level `vae_path`.
    replacement_vae = AutoencoderKL.from_pretrained(vae_path)

    pipe = StableDiffusionInstructFacePipeline.from_pretrained(
        pretrained_model_name_or_path,
        text_encoder=components["text_encoder"],
        vae=replacement_vae,
        unet=accelerator.unwrap_model(components["unet"]),
    )
    return pipe.to(torch.device(device))

def edit(prompt, input_image, num_inference_steps, image_guidance_scale, guidance_scale, seed):
    """Run the module-level InstructFace pipeline on one image.

    A fresh torch.Generator seeded with ``seed`` makes the edit
    reproducible for identical inputs.
    """
    rng = torch.Generator(device=torch.device(device)).manual_seed(seed)
    return pipeline(
        prompt=prompt,
        image=input_image,
        num_inference_steps=num_inference_steps,
        image_guidance_scale=image_guidance_scale,
        guidance_scale=guidance_scale,
        generator=rng,
    )


def base64_decode(origin_str):
    """Decode a base64 payload to raw bytes (no data-URI prefix handling)."""
    return base64.b64decode(origin_str)

def save_image(output_image):
    """Upscale ``output_image`` 4x with Real-ESRGAN and save it as JPEG.

    The filename is the md5 of ``str()`` of the *pre-upscale* pixel array.
    Returns ``(save_path, md5_name)``.
    """
    pixels = np.array(output_image)

    # 4x super-resolution on the raw pixel array.
    upscaled, _ = upsampler.enhance(pixels, outscale=netscale)
    upscaled_img = Image.fromarray(upscaled.astype(np.uint8))

    # NOTE(review): str() of a large ndarray is an abbreviated repr, so two
    # distinct images could in principle collide — confirm this is acceptable.
    name_md5 = genearteMD5(str(pixels))
    save_path = os.path.join(save_images_dir, f"{name_md5}.jpg")

    upscaled_img.save(save_path)
    return save_path, name_md5

def log_one_line(log_file, content):
    """Append one log line to ``log_file``: str() of each item, space-joined."""
    line = " ".join(str(item) for item in content)
    with open(log_file, "a+") as f:
        f.write(line + "\n")
import requests
def download_image(url, output_path):
    """Download the resource at ``url`` and write its bytes to ``output_path``.

    Raises requests.HTTPError on a non-2xx response (raise_for_status).
    The original called f.close() redundantly inside the with-block; the
    context manager already closes the file.
    """
    r = requests.get(url)
    r.raise_for_status()

    with open(output_path, 'wb') as f:
        f.write(r.content)

from datetime import datetime

# Face detector/cropper (autocrop.Cropper) used to normalize every input
# image before it is fed to the editing pipeline.
cropper = Cropper()

def init_flask():
    """Create the FastAPI app exposing the POST /instruct_image endpoint.

    The request body is JSON with fields: ``prompt`` (editing instruction),
    ``image`` (one of: 32-char md5 of a previously saved image, an http(s)
    URL, or a base64-encoded image), plus optional sampler parameters.
    The response carries the md5 and base64 of the edited, upscaled image.
    """
    from fastapi import FastAPI, Request

    app = FastAPI()

    def _materialize_input(image):
        """Resolve the request's ``image`` field to a local file path.

        Returns (path, error_msg); ``error_msg`` is None on success.
        """
        if len(image) == 32:
            # md5 of an image saved by a previous request
            path = os.path.join(save_images_dir, f"{image}.jpg")
            if not os.path.isfile(path):
                return None, "md5 error, image is not exist"
            return path, None

        if image.startswith("http"):
            # URL: download it next to the other saved images
            input_image_md5 = genearteMD5(str(image))
            path = os.path.join(save_images_dir, f"{input_image_md5}.jpg")
            download_image(image, path)
            return path, None

        # Otherwise treat the payload as base64-encoded image bytes.
        imgdata = base64_decode(image)
        input_image_md5 = genearteMD5(str(imgdata))
        path = os.path.join(save_images_dir, f"{input_image_md5}.jpg")
        with open(path, 'wb') as f:
            f.write(imgdata)
        return path, None

    @app.post("/instruct_image")
    async def get_generate_h(request: Request):
        # One log file per calendar day.
        currentDateAndTime = datetime.now()
        cur_log_file = (f"{log_dir}/{currentDateAndTime.year}-"
                        f"{currentDateAndTime.month}-{currentDateAndTime.day}.txt")

        json_post_raw = await request.json()
        # BUGFIX: the original always did json.loads(json_post_raw), which
        # raises TypeError when the client posts a normal JSON object (since
        # request.json() already parses it). Accept both a plain object and a
        # double-encoded JSON string.
        if isinstance(json_post_raw, str):
            config = json.loads(json_post_raw)
        else:
            config = json_post_raw

        result = {
            "code": 0,
            "msg": "success",
            "data": {},
        }
        print("request come in")

        prompt = config.get("prompt", "none")
        image: str = config.get("image", "none")
        num_inference_steps = config.get("num_inference_steps", 20)
        image_guidance_scale = config.get("image_guidance_scale", 1.5)
        guidance_scale = config.get("guidance_scale", 1.0)
        seed = config.get("seed", 4207)
        step_cnt = config.get("step_cnt", 0)
        step_number = config.get("step_number", 0)

        if prompt == "none" or image == "none":
            result["code"] = -1
            result["msg"] = "prompt or image is not None!"
            return result

        input_image_path, err = _materialize_input(image)
        if err is not None:
            result["code"] = -1
            result["msg"] = err
            return result

        # Face-crop, resize to model resolution, run the edit, then
        # super-resolve and persist the output.
        cropped_array = cropper.crop(input_image_path)
        input_image = Image.fromarray(cropped_array).convert("RGB").resize([256, 256])

        output_image = edit(prompt, input_image=input_image,
                            num_inference_steps=num_inference_steps,
                            image_guidance_scale=image_guidance_scale,
                            guidance_scale=guidance_scale,
                            seed=seed)

        output_image_save_path, save_md5 = save_image(output_image)

        log_one_line(cur_log_file, [currentDateAndTime, prompt, input_image_path,
                                    output_image_save_path, step_cnt, step_number])

        result["code"] = 0
        result["msg"] = "handle successfully"
        result["data"] = {
            "md5": save_md5,
            "image_base64": img_to_base64(output_image_save_path)
        }
        return result

    return app 

# def set_random_seed(seed):
#     """Set random seed for reproducability."""
#     if seed is not None and seed > 0:
#         random.seed(seed)
#         np.random.seed(seed)
#         torch.manual_seed(seed)

def img_to_base64(_path):
    """Read the file at ``_path`` and return its base64 encoding (bytes).

    NOTE(review): this is a byte-for-byte duplicate of the img_to_base64
    defined earlier in the file; this later definition is the one that
    shadows it at runtime. Consider removing one copy.
    """
    with open(_path, 'rb') as source:
        return base64.b64encode(source.read())


# base64_image = img_to_base64("../temp.jpg")

# base_64_image_str = str(base64_image, encoding = "utf-8")

# print(base64_image[:100])
# print(str(base64_image)[:100])
# decode_image = base64_decode(base_64_image_str)

# with open("./1.jpg",'wb') as f:
#     f.write(decode_image)


# image = Image.open("./1.jpg").convert("RGB")

# exit(0)

# out = img_to_base64("./temp.jpg")
# print(out[:100])
# out = genearteMD5(str(np.array([[21, 2], [3, 1]])))
# print(out)
# exit()
# data_root = "/data2/zzd/dataset/instruct_dataset"
# save_dir = "./gradio_result/"
# pretrained_model_name_or_path = "/data2/zzd/pretrained_ckpt/instruct-pix2pix"
# pretrained_model_name_or_path = "/data2/zzd/code/InstructFace/checkpoints/InstructFace_1e-4_256/huggingface"
# --- Model load and server start ------------------------------------------
# Base pipeline config/weights live under ./huggingface; fine-tuned
# accelerate checkpoint under ./checkpoint-1000.
pretrained_model_name_or_path = "./huggingface"
resume_steps = 1000
# resume_path = f"/data2/zzd/code/InstructFace/checkpoints/InstructFace_1e-4_256/checkpoint-{resume_steps}"
resume_path = f"./checkpoint-1000"

# NOTE(review): img_size appears unused — the request handler hard-codes
# a 256x256 resize.
img_size = 256

pipeline = create_pipeline(pretrained_model_name_or_path = pretrained_model_name_or_path,
                                        resume_path=resume_path,
                                        device=device)

flag = "Successfully Load Model"

app = init_flask()

# Serve on all interfaces; a single worker keeps one model copy on the GPU.
uvicorn.run(app, host='0.0.0.0', port=server_port, workers=1)
