# Copyright(C) 2023. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time

import cv2
import numpy as np
import yaml
from ais_bench.infer.interface import InferSession

from wav2lip_utils.audio_utils import wav_resample, melspectrogram
from wav2lip_utils.constant import *


class Wav2lip:
    """Streaming Wav2Lip lip-sync pipeline.

    Pulls wav chunks from an input queue, converts them to mel spectrograms
    (carrying state across chunks so spectrogram windows stay continuous),
    runs an OM inference model over pre-detected face crops of a looping
    background video, and pushes the synthesized frames to an output queue.
    """

    def __init__(self, config_path):
        """Read paths/device id from the YAML config and reset stream state.

        Args:
            config_path: path to a YAML config file; see parse_config for keys.
        """
        self.video_path = None
        self.wav2lip_model_path = None
        self.device_id = None
        self.parse_config(config_path)
        # Streaming state: mels are computed over (previous chunk + current
        # chunk), so the fields below track what was already consumed.
        self.last_wav = None         # previous wav chunk, prepended on the next call
        self.acc_mel_index = 0       # mel columns already emitted in earlier rounds
        self.current_base_idx = 0
        self.last_mel_chunk = []     # mel tail too short for a full window, kept for reference
        self.last_mel_shift = 0      # fractional mel-frame position carried across rounds
        self.mel_step_size = 16      # mel columns fed to the model per video frame
        # NOTE(review): batch size 8 vs default model file name 'wav2lip_bs16.om'
        # looks inconsistent — confirm against the actual OM model.
        self.wav2lip_batch_size = 8
        self.cur_frame_idx = 0       # index into the looping background video

    def parse_config(self, config_path):
        """Load settings from YAML, falling back to the bundled asset paths."""
        with open(config_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)
        self.device_id = config.get('device_id', 0)
        self.wav2lip_model_path = config.get('w2l_model_path', './assets/wav2lip_bs16.om')
        self.video_path = config.get('video_path', './assets/video.mp4')
        self.face_path = config.get('face_path', './assets/face.npy')
        self.umask_path = config.get('umask_path', './assets/umask.npy')

    def load_video_frames(self):
        """Decode the background video into self.full_frames.

        Frames are optionally resized/rotated/cropped per the constants from
        wav2lip_utils.constant, then truncated to the number of face
        detections — so load_face_mask() must run first (init() guarantees it).
        """
        video_stream = cv2.VideoCapture(self.video_path)
        fps = video_stream.get(cv2.CAP_PROP_FPS)

        print(f'video fps: {fps}')
        print(f'start loading video {os.path.basename(self.video_path)} frames')
        self.full_frames = []
        while True:
            still_reading, frame = video_stream.read()
            if not still_reading:
                video_stream.release()
                break
            if RESIZE_FACTOR > 1:
                frame = cv2.resize(frame, (frame.shape[1] // RESIZE_FACTOR, frame.shape[0] // RESIZE_FACTOR))

            if ROTATE:
                frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

            if CROP:
                y1, y2, x1, x2 = CROP_COORDINATE
                frame = frame[y1:y2, x1:x2]
            self.full_frames.append(frame)

        # Keep frames and per-frame face detections aligned one-to-one.
        if len(self.full_frames) > len(self.face_detection):
            self.full_frames = self.full_frames[:len(self.face_detection)]

        print(f'loading all video {len(self.full_frames)} frames')

    def load_face_mask(self):
        """Load per-frame face detections and the mouth mask from .npy files.

        face_detection entries unpack as (face_image, coords) — see datagen.
        """
        self.face_detection = np.load(self.face_path, allow_pickle=True)

        self.mask = np.load(self.umask_path)
        print("face load success")

    def init(self):
        """Create the inference session and preload faces, masks and frames."""
        self.model = InferSession(self.device_id, self.wav2lip_model_path)
        self.load_face_mask()
        self.load_video_frames()

    def get_acc_mel(self, wav, is_end=False):
        """Return the new mel columns produced by appending *wav* to the stream.

        The previous chunk is prepended so spectrogram windows at the chunk
        boundary are computed correctly; only the columns not yet emitted are
        returned.
        """
        # Concatenate with the wav kept from the previous call.
        concat_wav = wav if self.last_wav is None else np.hstack(
            (self.last_wav, wav))
        concat_mel = melspectrogram(concat_wav)
        # Emit everything after the previous round's output; for non-final
        # chunks hold back the last 2 mel frames (they may still change once
        # more audio arrives).
        acc_mel = concat_mel[:, self.acc_mel_index:] if is_end \
            else concat_mel[:, self.acc_mel_index:-2]
        self.last_wav = wav
        index_shift = 0 if self.acc_mel_index == 0 else -1
        self.acc_mel_index = acc_mel.shape[1] + index_shift
        return acc_mel

    def datagen(self, mels):
        """Yield model-ready batches (img, mel, frame, coords) for *mels*.

        Cycles through the background video frames; the image input is the
        masked face concatenated with the reference face on the channel axis,
        scaled to [0, 1].
        """
        img_batch, mel_batch, frame_batch, coords_batch, img_ref = [], [], [], [], []
        u_masked_img_batch = []

        for m in mels:
            frame_to_save = self.full_frames[self.cur_frame_idx].copy()
            face, coords = self.face_detection[self.cur_frame_idx].copy()
            face = cv2.resize(face, (IMG_SIZE, IMG_SIZE))
            masked_face = self.mask[self.cur_frame_idx].copy()

            img_batch.append(face)
            mel_batch.append(m)
            frame_batch.append(frame_to_save)
            coords_batch.append(coords)
            u_masked_img_batch.append(masked_face)

            # Loop the background video endlessly.
            self.cur_frame_idx += 1
            self.cur_frame_idx = self.cur_frame_idx % len(self.full_frames)

            if len(img_batch) >= self.wav2lip_batch_size:
                img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
                u_masked_img_batch = np.array(u_masked_img_batch)
                img_batch = np.concatenate((u_masked_img_batch, img_batch), axis=3) / 255.
                mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
                yield img_batch, mel_batch, frame_batch, coords_batch
                img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
                u_masked_img_batch = []

        # Flush the final, possibly short, batch.
        if len(img_batch) > 0:
            img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
            u_masked_img_batch = np.array(u_masked_img_batch)
            img_batch = np.concatenate((u_masked_img_batch, img_batch), axis=3) / 255.
            mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])

            yield img_batch, mel_batch, frame_batch, coords_batch

    def get_res_image(self, gen):
        """Run inference over the batches from *gen* and paste the predicted
        mouth region back into each full frame; returns the composited frames.
        """
        res_imgs = []
        for img_batch, mel_batch, frames, coords in gen:
            valid = img_batch.shape[0]
            if valid != self.wav2lip_batch_size:
                # The OM model has a static batch size: zero-pad the short
                # final batch up to it, and drop the padded outputs below.
                pad_size = self.wav2lip_batch_size - valid
                _, h, w, c = img_batch.shape
                img_batch = np.concatenate((img_batch, np.zeros((pad_size, h, w, c), dtype=np.float32)))
                _, h, w, c = mel_batch.shape
                mel_batch = np.concatenate((mel_batch, np.zeros((pad_size, h, w, c), dtype=np.float32)))
            img_batch = img_batch.astype(np.float32)
            mel_batch = mel_batch.astype(np.float32)
            pred = self.model.infer([mel_batch, img_batch])[0]
            # No-op for full batches; trims padded predictions otherwise.
            pred = pred[:valid, ...]

            for p, f, c in zip(pred, frames, coords):
                y1, y2, x1, x2 = c
                p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))
                f[y1:y2, x1:x2] = p
                res_imgs.append(f)

        return res_imgs

    def run(self, in_q, out_q, init_done_event):
        """Main worker loop: consume (wav_data, is_end) tuples from *in_q*,
        produce (frames, resampled_wav, is_end) tuples on *out_q*.

        Sets *init_done_event* once the model and assets are loaded. Runs
        forever; per-round timing counters are reset when is_end is seen.
        """
        self.init()
        init_done_event.set()
        print("[WAV2LIP] Wav2Lip module init done")

        one_round_cost = 0
        resample_time = 0
        prep_time = 0
        post_time = 0
        send_time = 0

        while True:

            wav_data, is_end = in_q.get()
            print(f"[WAV2LIP] in_q.get(): {wav_data[0]}, isEnd: {is_end}")
            ts = time.time()

            wav_data = wav_data.reshape(-1, )
            try:
                batch_data = wav_resample(wav_data, TTS_SAMPLE_RATE, W2L_SAMPLE_RATE)
            except Exception:  # best-effort: resampling fails on chunks that are too short
                print(f'wav_data shape:{wav_data.shape}')
                print('too short continue')
                continue
            resample_time += time.time() - ts
            pre_start = time.time()
            mel = self.get_acc_mel(batch_data)

            mel_chunks = []

            mel_idx_multiplier = 80. / FPS  # 80 mel frames per second of audio; FPS is the video frame rate
            idx = 0

            # Slice one mel window per video frame, carrying the fractional
            # position (shift) and the leftover tail into the next round.
            while 1:
                mel_pos = idx * mel_idx_multiplier + self.last_mel_shift
                start_idx = int(mel_pos)
                shift = mel_pos - start_idx
                if start_idx + self.mel_step_size > len(mel[0]):
                    self.last_mel_chunk = mel[:, start_idx:]
                    self.last_mel_shift = shift
                    break
                mel_chunks.append(
                    mel[:, start_idx: start_idx + self.mel_step_size])
                idx += 1

            gen = self.datagen(mel_chunks)
            prep_time += time.time() - pre_start
            post_start = time.time()
            res_img = self.get_res_image(gen)
            post_time += time.time() - post_start
            # img list, wav np array with 24000 sr
            put_start = time.time()
            out_q.put((res_img, batch_data, is_end))

            send_time += time.time() - put_start
            one_sentence_cost = time.time() - ts
            one_round_cost += one_sentence_cost

            print("[WAV2LIP] Wav2Lip cost for 1 sentence: ", one_sentence_cost)

            if is_end:
                print("[WAV2LIP] Wav2Lip cost for 1 round conversation: ", one_round_cost)
                print("[WAV2LIP] Wav2Lip pre process cost for 1 round conversation: ", prep_time)
                print("[WAV2LIP] Wav2Lip post process cost for 1 round conversation: ", post_time)
                print("[WAV2LIP] Wav2Lip send cost for 1 round conversation: ", send_time)
                print("[WAV2LIP] Wav2Lip resamplewav cost for 1 round conversation: ", resample_time)
                one_round_cost = 0
                send_time = 0
                prep_time = 0
                resample_time = 0
                post_time = 0
                print("[WAV2LIP put data !!!!!!]", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
            else:
                print("[WAV2LIP] Wav2Lip is_end = False")
