#!/usr/bin/python3
# coding: utf-8

import os, sys
import gc, cv2
import math
import json, copy
import random, time
import shlex
import torch
import subprocess
import shutil

import numpy as np
import pandas as pd
import torch.optim as optim
import torch.nn.functional as F
import torchvision.utils

from tqdm import tqdm
from glob import glob
from itertools import chain
from torchvision import transforms

from concurrent.futures import ThreadPoolExecutor

from rivagan.adversary import Adversary, Critic
from rivagan.attention import AttentiveEncoder, AttentiveDecoder
from rivagan.dense import DenseEncoder, DenseDecoder
from rivagan.dataloader import load_train_val
from rivagan.noise import Crop, Scale, Compression
from rivagan.utils import mjpeg, ssim, psnr, encoder_h264

def get_acc(y_true, y_pred):
    """Bitwise accuracy: fraction of positions where the sign of the
    prediction (>= 0.0) agrees with the binarized target (>= 0.5)."""
    assert y_true.size() == y_pred.size()
    matches = (y_pred >= 0.0).eq(y_true >= 0.5)
    return matches.sum().float().item() / y_pred.numel()

def quantize(frames):
    """Simulate 8-bit quantization: map [-1.0, 1.0] floats to integer
    pixel values (truncated toward zero, matching the original behavior)
    and back to [-1.0, 1.0]."""
    pixels = (frames + 1.0) * 127.5
    return pixels.int().float() / 127.5 - 1.0

def make_pair(frames, data_dim, use_bit_inverse=True, multiplicity=1):
    """Replicate `frames` `multiplicity` times and draw one random bit
    vector of length `data_dim` per frame; optionally append each frame a
    second time paired with the bit-inverse of its message (both tricks
    stabilize training).  All tensors are moved to the GPU."""
    frames = torch.cat(multiplicity * [frames], dim=0).cuda()
    data = torch.zeros((frames.size(0), data_dim)).random_(0, 2).cuda()
    if use_bit_inverse:
        # Duplicate frames so each appears with the bits and their complement.
        frames = torch.cat((frames, frames), dim=0).cuda()
        data = torch.cat((data, 1.0 - data), dim=0).cuda()

    return frames, data

class RivaGAN(object):
    """Robust video watermarking (RivaGAN): an encoder network embeds a
    `data_dim`-bit message into every frame of a video and a decoder
    recovers it; an adversary and critic are used during training.

    The encode/decode pipelines shell out to `ffmpeg`/`ffprobe`, which must
    be on PATH, and all networks are placed on CUDA.
    """

    def __init__(self, model="attention", data_dim=32):
        """Build the encoder/decoder pair plus adversary and critic on GPU.

        Args:
            model: "attention" (AttentiveEncoder/AttentiveDecoder) or
                "dense" (DenseEncoder/DenseDecoder).
            data_dim: number of watermark bits carried per frame.

        Raises:
            ValueError: if `model` is not one of the known architectures.
        """
        self.model = model
        self.data_dim = data_dim
        self.adversary = Adversary().cuda()
        self.critic = Critic().cuda()
        if model == "attention":
            self.encoder = AttentiveEncoder(data_dim=data_dim).cuda()
            self.decoder = AttentiveDecoder(self.encoder).cuda()
        elif model == "dense":
            self.encoder = DenseEncoder(data_dim=data_dim).cuda()
            self.decoder = DenseDecoder(data_dim=data_dim).cuda()
        else:
            raise ValueError("Unknown model: %s" % model)

    def save(self, path_to_model):
        """Pickle the whole model object to `path_to_model` via torch.save."""
        torch.save(self, path_to_model)

    @staticmethod
    def load(path_to_model):
        """Load a model previously written by `save`.

        Declared as a @staticmethod (fix): the original `def load(path_to_model)`
        had no `self`, so calling it on an instance would have passed the
        instance itself as the path.  Now both `RivaGAN.load(p)` and
        `instance.load(p)` work.

        NOTE(review): torch.load unpickles arbitrary objects — only load
        trusted checkpoint files.
        """
        return torch.load(path_to_model)

    def get_mark_flag(self, water_marking, decoded_messages):
        """Return True when `decoded_messages` matches some row of
        `water_marking` with a mean bit error below 0.15.

        Intended for videos that are randomly cropped segments: the decoded
        bits are compared against every candidate watermark row and the best
        match decides.  Assumes numpy arrays of shapes (rows, bits) and
        (1, bits) respectively — inferred from the `.repeat`/`.shape` usage;
        TODO confirm against callers.
        """
        # Tile the decoded message so it lines up row-for-row with every
        # reference watermark, then take the per-row mean absolute bit error.
        expand_value = decoded_messages.repeat(water_marking.shape[0], axis=0)
        bitwise_avg_err = np.sum(np.abs(expand_value - water_marking), axis=1) / water_marking.shape[1]
        bitwise_err = np.min(bitwise_avg_err)

        mark_flag = False
        if bitwise_err < 0.15:  # empirical acceptance threshold
            mark_flag = True

        return mark_flag

    def __add_watermark(self, img_list):
        """Embed `self.data` into every image in `img_list`, in place.

        Reads each PNG, runs the encoder on GPU, and overwrites the file
        with the watermarked frame.  `self.data` must be set by `encode`
        before this runs.  Work is capped at ~300 frames per task, and
        per-stage timings are printed at the end.
        """
        time_list = []    # GPU encode time per frame
        time_list2 = []   # cv2.imwrite time per frame
        time_list3 = []   # cv2.imread time per frame
        for frame_idx, img_path in tqdm(enumerate(img_list)):
            if frame_idx > 300:  # cap the work per task
                break
            if frame_idx % 1 == 0:  # placeholder for frame subsampling
                # Disk read (measured ~0.5 s/frame by the original author).
                t1 = time.time()
                frame = cv2.imread(img_path)
                dt = time.time() - t1
                time_list3.append(dt)

                # GPU encode (~0.3 s/frame): normalize to [-1, 1], reshape to
                # (1, 3, L=1, H, W), embed, then map back to uint8 pixels.
                t1 = time.time()
                frame = torch.FloatTensor(np.expand_dims(frame, axis=0)) / 127.5 - 1.0
                frame = frame.permute(3, 0, 1, 2).unsqueeze(0).cuda()
                wm_frame = self.encoder(frame, self.data)
                wm_frame = torch.clamp(wm_frame, min=-1.0, max=1.0)
                wm_frame = ((wm_frame[0, :, 0, :, :].permute(1, 2, 0) + 1.0) * 127.5)
                wm_frame = wm_frame.detach().cpu().numpy().astype("uint8")
                dt = time.time() - t1
                time_list.append(dt)

                # Disk write (~1.7 s/frame, the slowest stage).
                t1 = time.time()
                cv2.imwrite(img_path, wm_frame)
                dt = time.time() - t1
                time_list2.append(dt)

                del frame
                del wm_frame

        avg_func = lambda x: sum(x) / len(x)
        print("gpu time:{:.4f}".format(sum(time_list)), "  avg:", avg_func(time_list))
        print("cv2.imwrite time:{:.4f}".format(sum(time_list2)), "  avg:", avg_func(time_list2))
        print("cv2.read time:{:.4f}".format(sum(time_list3)), "  avg:", avg_func(time_list3))

    def encode(self, video_in, data, video_out, video_idx, fps=-1, tmp='pic'):
        """Watermark a whole video file.

        Probes `video_in` with ffprobe, explodes it into PNG frames (and the
        audio track, if any), embeds `data` into the frames across a small
        thread pool, then re-muxes frames (and audio) into `video_out` with
        h264.

        Args:
            video_in: path to the source video.
            data: torch tensor of watermark bits, shape (1, data_dim).
            video_out: path for the watermarked output video.
            video_idx: caller-side index (bookkeeping only).
            fps: desired frame rate; -1 or anything above the source rate
                falls back to the source rate.
            tmp: scratch directory for extracted frames/audio (emptied first).

        Returns:
            An error string if ffprobe metadata cannot be parsed, else None.

        NOTE(review): the ffmpeg commands are built with f-strings and
        shell=True — paths containing spaces or shell metacharacters will
        break (or allow injection).  Prefer subprocess.run([...], shell=False).
        """
        assert data.shape[1] == self.data_dim
        has_audio = False
        args = shlex.split(f"ffprobe -v quiet -print_format json -show_streams {video_in}")
        video_meta = subprocess.check_output(args).decode('utf-8')

        try:
            video_meta = json.loads(video_meta)

            # Ensure stream 0 is the video stream: if ffprobe reports the
            # audio stream first, rotate it to the back of the list.
            if video_meta['streams'][0]['codec_type'] == "audio":
                print(f"=> audio in stream 0, exchange...")
                tmp_data = video_meta['streams'][0]
                video_meta['streams'].pop(0)
                video_meta['streams'].append(tmp_data)

            fps_split = video_meta['streams'][0]['r_frame_rate'].split('/')
            video_fps = round(int(fps_split[0]) / int(fps_split[1]), 2)
            bit_rate = int(video_meta['streams'][0]['bit_rate'])

            # Audio parameters, needed to re-encode the track at the end.
            if len(video_meta['streams']) > 1:
                has_audio = True
                if 'sample_rate' in video_meta['streams'][1]:
                    audio_sample_rate = int(video_meta['streams'][1]['sample_rate'])
                    audio_channels    = int(video_meta['streams'][1]['channels'])
                    audio_bit_rate    = int(video_meta['streams'][1]['bit_rate'])
                elif 'sample_rate' in video_meta['streams'][2]:
                    audio_sample_rate = int(video_meta['streams'][2]['sample_rate'])
                    audio_channels    = int(video_meta['streams'][2]['channels'])
                    audio_bit_rate    = int(video_meta['streams'][2]['bit_rate'])
                else:
                    # audio_* stay unbound here; the print below then raises
                    # NameError, which the enclosing except reports.
                    print("ERROR:get audio error")

                print(f"=> audio: sample_rate:{audio_sample_rate},channels:{audio_channels},bit_rate:{audio_bit_rate}")

            print(f"=> pix: {video_meta['streams'][0]['height']}, {video_meta['streams'][0]['width']}")
            print(f"=> fps: {video_fps}")
            print(f"=> time: {video_meta['streams'][0]['duration']} ")
            print(f"=> code: {video_meta['streams'][0]['codec_name']}")
            print("-" * 60)
        except Exception as e:
            ret = f"ERROR: get video meta data failed: {e}"
            print(ret)
            return ret

        # Reuse the scratch directory, but always start it empty.
        if not os.path.exists(tmp):
            os.makedirs(tmp)
        else:
            for filename in os.listdir(tmp):
                filepath = os.path.join(tmp, filename)
                os.remove(filepath)

        if fps == -1 or fps > video_fps:
            fps = video_fps

        # Extract the audio track so it can be muxed back in at the end.
        if has_audio:
            print(f"=> get audio")
            audio_path = os.path.join(tmp, 'tmp_audio.wav')
            if os.path.exists(audio_path):
                os.remove(audio_path)
            subprocess.call(f"ffmpeg -i {video_in} -vn -ar {audio_sample_rate} -ac {audio_channels} -ab {audio_bit_rate} -f wav -v quiet {audio_path}", shell=True)

        # Explode the video into per-frame PNGs.
        print(f"=> get images ")
        start_time = time.time()
        subprocess.call(f"ffmpeg -i {video_in} -q 1 -v quiet {tmp}/%04d.png", shell=True)
        print(f"=> time: {time.time() - start_time} s")

        print(f"=> add watermark ")
        start_time = time.time()

        # Watermark the frames in parallel across a small thread pool.
        img_list = sorted(glob(f"{tmp}/*.png"))
        self.data = data
        # Chunk step is clamped to >= 1 (fix): with fewer frames than
        # workers, int(len(x)/n) was 0 and range() raised ValueError.
        split_func = lambda x, n: [x[i:i + max(1, int(len(x) / n))] for i in range(0, len(x), max(1, int(len(x) / n)))]

        self.num_workers = 2
        self.executor = ThreadPoolExecutor(max_workers=self.num_workers)

        tasks = split_func(img_list, self.num_workers)
        i = 1
        for result in self.executor.map(self.__add_watermark, tasks):
            print("task{}:{}".format(i, result))
            i += 1
        # Release the worker threads (the original pool was never shut down).
        self.executor.shutdown()

        print(f"\n=> time: {time.time() - start_time} s")
        print("-" * 60)

        print(f"=> to video ")
        start_time = time.time()
        # Stitch the watermarked frames back into a video, with a small
        # bitrate headroom over the source.
        target_bit_rate = bit_rate * 1.05
        if has_audio:
            subprocess.call(f"ffmpeg -y -i {audio_path} -r {video_fps} -i {tmp}/%04d.png -vcodec h264 -profile main -b:v {target_bit_rate} -pix_fmt yuv420p -v quiet {video_out}", shell=True)
        else:
            subprocess.call(f"ffmpeg -y -r {video_fps} -i {tmp}/%04d.png -vcodec h264 -profile main -b:v {target_bit_rate} -pix_fmt yuv420p -v quiet {video_out}", shell=True)

        print(f"=> time: {time.time() - start_time} s")
        print(f"=>  {video_out}")
        print("=" * 80)

    def extract_from_img(self, img_path):
        """Decode the watermark bits from one image file.

        Returns:
            numpy array of shape (1, data_dim) with values in {0, 1}.
        """
        frame = cv2.imread(img_path)
        frame = torch.FloatTensor(np.expand_dims(frame, axis=0)) / 127.5 - 1.0  # (L, H, W, 3)
        frame = frame.permute(3, 0, 1, 2).unsqueeze(0).cuda()                   # (1, 3, L, H, W)
        decoded_messages = self.decoder(frame)[0].unsqueeze(0).detach().cpu().numpy()
        decoded_rounded = decoded_messages.round().clip(0, 1)

        return decoded_rounded

    def decode(self, video_in, video_idx, water_marking, fps=-1, tmp='.de_tmp'):
        """Extract the watermark from (up to) the first ~300 frames of a video.

        Splits `video_in` into PNG frames under `tmp` and runs the decoder
        on each frame in temporal order.

        Args:
            video_in: path to the (possibly attacked) video.
            video_idx: caller-side index (bookkeeping only).
            water_marking: reference watermark(s); not used here — matching
                is done by callers via `get_mark_flag`.
            fps: unused; kept for interface compatibility.
            tmp: scratch directory for extracted frames (emptied first).

        Returns:
            List of per-frame decoded bit arrays, each of shape (1, data_dim).
        """
        print(f"=>  {video_in}")

        # Reuse the scratch directory, but always start it empty.
        if not os.path.exists(tmp):
            os.makedirs(tmp)
        else:
            for filename in os.listdir(tmp):
                filepath = os.path.join(tmp, filename)
                os.remove(filepath)

        print("DEBUG: decode: get images...")
        subprocess.call(f"ffmpeg -y -i {video_in} -q 1 -v quiet {tmp}/%04d.png", shell=True)

        start_time = time.time()
        extract_frame_n = 0
        decoded_watermark_list = []

        print("DEBUG: decode: decoding...")
        # sorted() (fix): the unsorted glob() visited frames in arbitrary
        # order, making the 300-frame cap nondeterministic; the encode side
        # already sorts.
        for frame_idx, img_path in tqdm(enumerate(sorted(glob(f"{tmp}/*.png")))):
            if frame_idx > 300:  # cap the decoding work
                break

            if frame_idx % 1 == 0:  # placeholder for frame subsampling
                decoded_rounded = self.extract_from_img(img_path)
                extract_frame_n += 1
                decoded_watermark_list.append(decoded_rounded)

        print(f"=> {time.time() - start_time} s")
        print("=" * 80)

        return decoded_watermark_list
