from http import HTTPStatus
from urllib.parse import urlparse, unquote
from pathlib import PurePosixPath
import requests
from dashscope import ImageSynthesis
from datetime import datetime
import logging
import os
import tempfile
import time

import gradio as gr
import numpy as np
import rembg
import torch
from PIL import Image
from functools import partial

from tsr.system import TSR
from tsr.utils import remove_background, resize_foreground, to_gradio_3d_orientation

from ModelToVideo import render_video


import argparse

# Select the compute device: prefer the first CUDA GPU, fall back to CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Load the pretrained TripoSR reconstruction model.
model = TSR.from_pretrained(
    "stabilityai/TripoSR",
    config_name="config.yaml",
    weight_name="model.ckpt",
)

# Renderer chunk size trades speed against peak memory usage.
model.renderer.set_chunk_size(8192)
model.to(device)

# Shared rembg session reused by every background-removal call.
rembg_session = rembg.new_session()

def check_input_image(input_image):
    """Raise a user-facing Gradio error when no image was uploaded."""
    if input_image is not None:
        return
    raise gr.Error("图像未上传!")

# 调用通义万象
# Call Tongyi Wanxiang (DashScope) sketch-to-image.
def TongYi(prompt, sketch_image_url):
    """Run the Tongyi Wanxiang sketch-to-image model and save the first result.

    Args:
        prompt: Text prompt; when empty (or None), a default "3D with depth"
            prompt is substituted.
        sketch_image_url: Location of the sketch image passed to the API.

    Returns:
        Local path of the downloaded result image, or None when the API
        call fails.
    """
    # Timestamp used to build a unique output file name.
    timestamp = datetime.now().strftime('%m%d-%H%M')

    # Make sure the output directory exists (no-op when already present).
    output_directory = r'D:\Output'
    os.makedirs(output_directory, exist_ok=True)

    # Also treat None as "no prompt", not only the empty string.
    if not prompt:
        prompt = "请根据图片输出带深度的3D图"

    print('----sync call, please wait a moment----')
    # SECURITY: never hard-code API keys in source. Prefer the
    # DASHSCOPE_API_KEY environment variable; the literal fallback keeps
    # backward compatibility but the exposed key should be rotated.
    api_key = os.environ.get("DASHSCOPE_API_KEY",
                             "sk-ec65775d12b84078adb05443392f1a95")
    rsp = ImageSynthesis.call(api_key=api_key,
                              model="wanx-sketch-to-image-lite",
                              prompt=prompt,
                              n=1,
                              style='<3d cartoon>',
                              size='768*768',
                              sketch_image_url=sketch_image_url,
                              task="image2image")

    print('response: %s' % rsp)
    if rsp.status_code == HTTPStatus.OK:
        # Save results to the output directory.
        for result in rsp.output.results:
            # Parse the URL to recover the original file extension.
            url_file_name = PurePosixPath(unquote(urlparse(result.url).path)).parts[-1]
            _, ext = os.path.splitext(url_file_name)

            # Build the destination file name from timestamp + extension.
            file_name = f"{timestamp}{ext}"
            file_path = os.path.join(output_directory, file_name)
            # Download with a timeout and fail loudly on HTTP errors
            # instead of silently writing an error page to disk.
            download = requests.get(result.url, timeout=60)
            download.raise_for_status()
            with open(file_path, 'wb') as f:
                f.write(download.content)
            print("文件已保存至:", file_path)
            return file_path  # Only the first result is used (n=1).
    else:
        print('sync_call Failed, status_code: %s, code: %s, message: %s' %
              (rsp.status_code, rsp.code, rsp.message))
        return None  # API call failed.

def preprocess(input_image, do_remove_background, foreground_ratio, tongyi_prompt):
    """Send the sketch through Tongyi Wanxiang, then prepare the result for TripoSR.

    Args:
        input_image: Uploaded PIL image (sketch).
        do_remove_background: Whether to strip the background with rembg.
        foreground_ratio: Target foreground-to-frame ratio after cropping.
        tongyi_prompt: Prompt forwarded to the Tongyi Wanxiang model.

    Returns:
        A PIL image ready for 3D reconstruction.

    Raises:
        ValueError: If the Tongyi Wanxiang call produced no image.
    """
    print('已进入preprocess()函数')

    def fill_background(image):
        """Composite an RGBA image over a 50% gray background, return RGB."""
        rgba = np.array(image).astype(np.float32) / 255.0
        alpha = rgba[:, :, 3:4]
        rgb = rgba[:, :, :3] * alpha + (1 - alpha) * 0.5
        return Image.fromarray((rgb * 255.0).astype(np.uint8))

    # Persist the uploaded sketch and feed it to Tongyi Wanxiang.
    # NOTE(review): a local temp-file path is passed as sketch_image_url;
    # the API presumably expects a reachable URL — confirm this works.
    sketch_image_path = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name
    input_image.save(sketch_image_path)
    generated_image_path = TongYi(tongyi_prompt, sketch_image_path)
    if not generated_image_path:
        raise ValueError("Failed to generate image using Tongyi Wanxiang.")
    tongyi_image = Image.open(generated_image_path)

    # Optionally remove the background from the generated image.
    if do_remove_background:
        image = remove_background(tongyi_image.convert("RGB"), rembg_session)
        image = resize_foreground(image, foreground_ratio)
        image = fill_background(image)
    else:
        image = tongyi_image
        if image.mode == "RGBA":
            # Transparent input: flatten it onto gray so the model sees RGB.
            image = fill_background(image)
    return image


def generate(image, mc_resolution, formats=("obj", "glb")):
    """Reconstruct a 3D mesh from the processed image and render a preview video.

    Args:
        image: Preprocessed PIL image fed to the TripoSR model.
        mc_resolution: Marching-cubes resolution for mesh extraction.
        formats: Kept for interface compatibility; both OBJ and GLB are
            always exported regardless of this value. (Changed from a
            mutable-list default to a tuple.)

    Returns:
        Tuple of (obj_path, glb_path, video_path); video_path is None when
        video rendering fails.
    """
    scene_codes = model(image, device=device)
    mesh = model.extract_mesh(scene_codes, True, resolution=mc_resolution)[0]
    mesh = to_gradio_3d_orientation(mesh)

    # Make sure the output directory exists (no-op when already present).
    output_directory = r"D:\Python_Pro\TripoSR\output\1"
    os.makedirs(output_directory, exist_ok=True)

    # Fixed output paths for the exported models and the rendered video.
    saved_model_obj_path = os.path.join(output_directory, "mesh.obj")
    saved_model_glb_path = os.path.join(output_directory, "mesh.glb")
    output_video_path = os.path.join(output_directory, "output.mp4")

    # Export the mesh in both formats (the file extension selects the exporter).
    mesh.export(saved_model_obj_path)
    mesh.export(saved_model_glb_path)

    # Render the preview video; failures degrade to a missing video instead
    # of aborting the whole generation.
    try:
        render_video(model_path=saved_model_obj_path, output_file=output_video_path)
    except ImportError as e:
        print(f"导入 3DToVideo 模块失败: {e}")
        output_video_path = None
    except Exception as e:
        print(f"调用 render_video 函数时发生错误: {e}")
        output_video_path = None

    return saved_model_obj_path, saved_model_glb_path, output_video_path


# Gradio App
with gr.Blocks(title="简绘三维") as interface:
    gr.Markdown(
    """"
    # 简绘三维
    本项目三维建模主要使用 [TripoSR](https://github.com/VAST-AI-Research/TripoSR) 模型与[通义万相](https://dundunlu.com/web/tongyi/)结合生成输入。
     
     **温馨提示:**
    1. 如果结果不满意，可尝试更改前景比例进行调整。
    2. 提供的示例（最后一个示例除外）均已经过预处理，以示例为输入时请禁用 “剔除背景” 。
    3. 当输入图像是具有透明背景的 RGBA，图像内容居中并占据图像宽度或高度的 70% 以上时，请禁用“剔除背景”选项。
    """
    )
    with gr.Row(variant="panel"):
        with gr.Column():
            with gr.Row():
                input_image = gr.Image(
                    label="输入图像",
                    image_mode="RGBA",
                    sources="upload",
                    type="pil",
                    elem_id="content_image",
                )
                processed_image = gr.Image(label="处理后的图像", interactive=False)
            with gr.Row():
                with gr.Group():
                    tongyi_prompt = gr.Textbox(label="输入提示词", value="")
                    do_remove_background = gr.Checkbox(label="剔除背景", value=True)
                    foreground_ratio = gr.Slider(
                        label="前景比例",
                        minimum=0.5,
                        maximum=1.0,
                        value=0.85,
                        step=0.05,
                    )
                    mc_resolution = gr.Slider(
                        label="MC分辨率",
                        minimum=32,
                        maximum=320,
                        value=256,
                        step=32,
                    )
            with gr.Row():
                submit = gr.Button("生成",elem_id="generate", variant="primary")
        with gr.Column():
            with gr.Tab("OBJ"):
                output_model_obj = gr.Model3D(label="输出模型(OBJ格式)", interactive=False)
            with gr.Tab("GLB"):
                output_model_glb = gr.Model3D(label="输出模型 (GLB 格式)", interactive=False)
                gr.Markdown("注意：此处显示的型号外观较暗。下载以获得正确的结果。")
            video_output = gr.Video(label="生成的视频")

    submit.click(
        fn=preprocess,
        inputs=[
            input_image,
            do_remove_background,
            foreground_ratio,
            tongyi_prompt,
        ],
        outputs=[processed_image],
    ).success(
        fn=generate,
        inputs=[processed_image, mc_resolution],
        outputs=[output_model_obj, output_model_glb,video_output],
    )



if __name__ == '__main__':
    # Command-line options controlling authentication and how the Gradio
    # server is exposed.
    parser = argparse.ArgumentParser()
    parser.add_argument('--username', type=str, default=None, help='Username for authentication')
    parser.add_argument('--password', type=str, default=None, help='Password for authentication')
    parser.add_argument('--port', type=int, default=7860, help='Port to run the server listener on')
    parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
    parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
    parser.add_argument("--queuesize", type=int, default=1, help="launch gradio queue max_size")
    args = parser.parse_args()

    # Enable basic auth only when both credentials were supplied.
    credentials = (args.username, args.password) if (args.username and args.password) else None
    # Bind to all interfaces only when --listen was requested.
    host = "0.0.0.0" if args.listen else None

    interface.queue(max_size=args.queuesize)
    interface.launch(
        auth=credentials,
        share=args.share,
        server_name=host,
        server_port=args.port,
    )