import os.path
import shutil
import threading

import cv2
import numpy as np
import torch
from PIL import Image
from ffmpy import FFmpeg
from moviepy.editor import *

from lamacleaner.clear_helper import clear_water
from synth_helper import synth_video

# Directory where all per-video outputs (frames, audio, cleaned video) are written.
output_path = r"./outputs"

lock = threading.Lock()  # lock shared by worker threads to serialize filesystem/GPU work

fps_freq = 1  # frame save frequency (divides the source clip's fps when re-synthesizing)


# Get the file's base name without its extension.
def getName(output_path):
    """Return the stem of *output_path* (basename minus the final extension).

    Uses os.path.splitext so a name containing dots (e.g. "clip.final.mp4")
    keeps everything before the LAST dot; the previous split('.')[0]
    truncated at the first dot and returned "clip".
    """
    return os.path.splitext(os.path.basename(output_path))[0]


# Extract a stream from the input and save it under a new name/format.
def run_ffmpeg(input_audio: str, output_audio: str, format: str):
    """Run ffmpeg to convert *input_audio* into *output_audio*.

    ``-f`` forces the container *format* and ``-vn`` drops any video
    stream, so only audio is written.  Returns the output path so the
    call can be chained.
    """
    output_options = f'-f {format} -vn'
    runner = FFmpeg(
        inputs={input_audio: None},
        outputs={output_audio: output_options},
    )
    runner.run()
    return output_audio


# Extract the audio track of a video into the shared output directory.
def extractWav(input_path: str, ext: str):
    """Extract audio from *input_path* as <outputs>/<stem>.<ext>.

    Any stale output file is removed first; the actual extraction is
    delegated to run_ffmpeg.  Returns the path of the extracted file.
    """
    output_file = os.path.join(output_path, '{}.{}'.format(getName(input_path), ext))
    if os.path.exists(output_file):
        os.remove(output_file)
    # BUG FIX: the *source video* must be the ffmpeg input; the original
    # passed the module-level outputs directory (output_path), which can
    # never be a valid media input.
    return run_ffmpeg(input_path, output_file, ext)


def video2pics(input_file, paltform):
    """Decode *input_file* frame by frame, remove platform watermarks from
    each frame, then re-synthesize the cleaned video with its audio track.

    Pipeline: extract audio (mp3) -> dump JPEG frames -> run clear_water
    with every mask under ./masks/<paltform> -> synth_video at the source
    clip's fps.

    paltform: (sic — typo kept so existing keyword callers don't break)
        name of the mask sub-directory under ./masks.
    """
    _, fullname = os.path.split(input_file)
    name, _ = os.path.splitext(fullname)
    output_dir = os.path.join(output_path, name)
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    frame_dir = os.path.join(output_dir, "frames")
    audio_path = os.path.join(output_dir, "audio.mp3")
    run_ffmpeg(input_file, audio_path, "mp3")
    with lock:  # serialize directory (re)creation across worker threads
        if os.path.exists(frame_dir):
            shutil.rmtree(frame_dir)
        os.mkdir(frame_dir)
    # Collect every mask image available for the requested platform.
    mask_dir = os.path.join("./masks", paltform)
    masks = [os.path.join(mask_dir, mask_file) for mask_file in os.listdir(mask_dir)]
    cap = cv2.VideoCapture(input_file)
    cnt = 0
    while True:
        ret, image = cap.read()
        if not ret or image is None:  # end of stream or decode failure
            break
        with lock:  # serialize frame writes + GPU cleaning across threads
            frame_file = os.path.join(frame_dir, str(cnt) + '.jpg')
            # Check the encode result instead of the previous
            # `type(img_data) != bytes` guard, which could never trigger
            # (ndarray.tobytes() always returns bytes).
            ok, img_encode = cv2.imencode('.jpg', image)
            if not ok:
                break
            with open(frame_file, 'wb') as f:
                f.write(img_encode.tobytes())
            for mask in masks:
                clear_water(frame_file, mask, frame_file, "cuda")
        cnt = cnt + 1
        # NOTE: the original cv2.waitKey(...) == ord('q') poll was removed —
        # no HighGUI window exists here, so it could never receive a key.
    cap.release()
    if cnt == 0:  # nothing decoded — no video to synthesize
        return
    output_file = os.path.join(output_dir, fullname)
    if os.path.exists(output_file):
        os.remove(output_file)
    # Reuse the source clip's frame rate, scaled by the save frequency.
    video_clip = VideoFileClip(input_file)
    fps = int(video_clip.fps / fps_freq)
    video_clip.close()
    synth_video(frame_dir, audio_path, output_file=output_file, fps=fps)


if __name__ == '__main__':

    # Report CUDA availability before fanning out the worker threads.
    if not torch.cuda.is_available():
        print("CUDA is not available")
    else:
        print("CUDA is available")
        gpu_count = torch.cuda.device_count()
        print(f"Found {gpu_count} GPUs")

        # GPU device index 0 is the first GPU; fall back to CPU otherwise.
        device = torch.device("cuda:0" if gpu_count > 0 else "cpu")

        # Smoke-test the device by moving a small random tensor onto it.
        tensor = torch.randn(2, 3).to(device)

        # A model and its forward pass could run on the GPU like this:
        # model = TheModelClass(*args, **kwargs).to(device)
        # output = model(input_gpu).to('cpu')  # move results back to CPU afterwards

    inputs = [
        {"platform": "baidu", "video_file": "videos/1.mp4"},
        {"platform": "baidu", "video_file": "videos/2.mp4"},
        {"platform": "baidu", "video_file": "videos/3.mp4"},
        {"platform": "baidu", "video_file": "videos/4.mp4"},
    ]
    for item in inputs:
        worker = threading.Thread(
            target=video2pics,
            args=(item["video_file"], item["platform"]),
        )
        worker.start()
