import os
from optimization.run_optimization import main, LazyRunOptimization
from argparse import Namespace
import torch
from torchvision.utils import make_grid
from torchvision.transforms import ToPILImage
import base64
from io import BytesIO
from googletrans import Translator




def getArgs(description="a person with purple hair", stylegan_size=1024, lr_rampup=0.05, lr=0.1,
            optimization_steps=300, experiment_type="edit", l2_lambda=0.008,
            id_lambda=0.000, stylespace=False,latent_save_path='Parameter/latent_code669.pt', truncation=0.7, create_video=False):
    """Assemble the argument dict consumed by the StyleCLIP optimization run.

    All parameters are optional and carry sensible defaults; the returned
    dict keys mirror what `run_optimization` expects in its Namespace.
    """
    # Save every intermediate frame when building a video; otherwise sample sparsely.
    snapshot_interval = 1 if create_video else 20
    return dict(
        description=description,                  # text prompt driving the edit
        ckpt="stylegan2-ffhq-config-f.pt",        # pretrained StyleGAN2 weights
        stylegan_size=stylegan_size,              # generator output resolution
        lr_rampup=lr_rampup,                      # warm-up fraction: LR ramps from 0 to lr
        lr=lr,                                    # learning rate
        step=optimization_steps,                  # number of optimization steps
        mode=experiment_type,                     # "edit" or "free_generation"
        l2_lambda=l2_lambda,                      # L2 regularization weight
        id_lambda=id_lambda,                      # identity-loss weight
        work_in_stylespace=stylespace,            # False -> optimize in latent (W) space
        latent_path=latent_save_path,             # where the latent code is stored
        truncation=truncation,                    # truncation trick: diversity vs. quality
        save_intermediate_image_every=snapshot_interval,
        results_dir="results",                    # output directory
        ir_se50_weights="C:\\Users\\Dell\\PycharmProjects\\ThinkStation\\service\\styleCLIP\\model_ir_se50.pth"  # identity-loss model weights
    )

# Run the optimization pipeline and return the resulting image.
def runStyleCLIP(image_name,**args):
    """Run StyleCLIP latent optimization and return the result as a PIL image.

    Parameters
    ----------
    image_name : unused here; kept for backward compatibility with callers.
    **args : keyword arguments wrapped into a Namespace for LazyRunOptimization
             (see getArgs for the expected keys).

    Returns
    -------
    PIL.Image.Image : the optimized image, downscaled to half size.
    """
    # Lazy-load the model (the first load is slow).
    lazy_optimization = LazyRunOptimization(Namespace(**args))
    result = lazy_optimization.run_latent_optimization()
    # Visualize: take the second tensor from the optimization result.
    result_half = result[1]
    result_image = ToPILImage()(make_grid(result_half.detach().cpu(), normalize=True, scale_each=True, padding=200))
    # PIL's Image.size is (width, height).
    w, h = result_image.size
    # Bug fix: Image.resize returns a NEW image and is not in-place; the
    # original discarded the return value, so no downscale ever happened.
    result_image = result_image.resize((w // 2, h // 2))
    # Display the image (console/debug inspection).
    result_image.show()
    return result_image

#
# def runStyleCLIP(**args):
#     result=main((Namespace(**args)))
#     result_half = result[1]
#     result_image = ToPILImage()(make_grid(result_half.detach().cpu(), normalize=True, scale_each=True, padding=200))
#     h, w = result_image.size
#     result_image.resize((h // 2, w // 2))
#     result_image.show()
#     result_image.save("outpute/result.png")
#
#     # Convert image to base64 string
#     buffered = BytesIO()
#     result_image.save(buffered, format="PNG")
#     img_str = base64.b64encode(buffered.getvalue()).decode()
#
#     # Return image as JSON
#     return {"image": img_str}



# Convert a Chinese description into an English description.
def translate_description(description):
    """Translate *description* into English using Google Translate."""
    return Translator().translate(description, dest='en').text


# Test