import os
import json
import time
import importlib
import argparse
import numpy as np
from collections import OrderedDict
import torch
import torch.nn as nn
import skimage.measure as measure  # tcw201904101622tcw
# from torch.autograd import Variable
# from dataset import TestDataset
import torchvision.transforms as transforms

from PIL import Image
import cv2  # 20190411175cd1tcwi

# from torchsummary import summary #tcw20190623
# from torchsummaryX import summary #tcw20190625
os.environ['CUDA_VISIBLE_DEVICES'] = '0'


def parse_args():
    """Parse command-line options for LESRCNN inference (model, weights, scale)."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("--model", type=str, default="lesrcnn")
    add("--ckpt_path", type=str,
        default="F:/project_old/LESRCNN/x4/lesrcnn_x4.pth")
    add("--group", type=int, default=1)
    add("--sample_dir", type=str, default="./")
    add("--test_data_dir", type=str,
        default="F:/project_old/LESRCNN/dataset/test_images/")
    add("--cuda", action="store_true")
    add("--scale", type=int, default=4)
    add("--shave", type=int, default=20)
    return parser.parse_args()


def save_image(tensor, filename):
    """Write a CHW float tensor (values in [0, 1]) to `filename` as an image.

    The tensor is scaled to [0, 255], clamped, converted to uint8 HWC and
    saved via PIL.
    """
    arr = tensor.cpu().mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()
    Image.fromarray(arr).save(filename)
def get_image(tensor, filename):
    """Convert a CHW float tensor (values in [0, 1]) to an HWC uint8 array.

    Args:
        tensor: torch tensor of shape (C, H, W) with values in [0, 1].
        filename: unused; kept only for interface compatibility with
            save_image (callers pass a dummy string).

    Returns:
        numpy uint8 array of shape (H, W, C) scaled to [0, 255].
    """
    tensor = tensor.cpu()
    return tensor.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()


def psnr(im1, im2):  # tcw201904101621
    """Peak signal-to-noise ratio between two images in [0, 255].

    Images are normalized to [0, 1] and PSNR is computed with data_range=1,
    i.e. 10*log10(1 / MSE) — the same value skimage.measure.compare_psnr
    returned. compare_psnr was deprecated and removed in scikit-image 0.18,
    so the formula is computed directly here with numpy.

    Returns:
        float PSNR in dB; inf when the images are identical.
    """
    def im2double(im):
        min_val, max_val = 0, 255
        return (im.astype(np.float64) - min_val) / (max_val - min_val)

    a = im2double(im1)
    b = im2double(im2)
    mse = np.mean((a - b) ** 2)
    if mse == 0:
        return float('inf')  # identical images: infinite PSNR (matches skimage)
    return 10.0 * np.log10(1.0 / mse)


# tcw20190413043
def calculate_ssim(img1, img2, border=0):
    '''Calculate SSIM, matching MATLAB's implementation.

    img1, img2: images with values in [0, 255], shape (H, W) or (H, W, C).
    border: number of boundary pixels cropped from every side before scoring.

    Returns the SSIM score; for 3-channel images, the mean over per-channel
    SSIM values.

    Raises ValueError on shape mismatch or unsupported dimensionality.
    '''
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h - border, border:w - border]
    img2 = img2[border:h - border, border:w - border]

    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            # Fix: score each channel separately. The original passed the
            # full 3-channel images into ssim() three times, averaging three
            # identical values instead of per-channel scores.
            ssims = [ssim(img1[..., i], img2[..., i]) for i in range(3)]
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')


def ssim(img1, img2):
    """Single-channel SSIM with an 11x11 Gaussian window (sigma=1.5).

    Inputs are expected in the [0, 255] range; the stabilizing constants
    C1/C2 use the standard K1=0.01, K2=0.03 with L=255. Returns the mean of
    the SSIM map over the 'valid' region (5-pixel margin trimmed).
    """
    L = 255.0
    C1 = (0.01 * L) ** 2
    C2 = (0.03 * L) ** 2

    a = img1.astype(np.float64)
    b = img2.astype(np.float64)

    g = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(g, g.transpose())

    def blur(x):
        # Gaussian-filter and trim to the 'valid' region.
        return cv2.filter2D(x, -1, window)[5:-5, 5:-5]

    mu_a = blur(a)
    mu_b = blur(b)
    mu_a2 = mu_a ** 2
    mu_b2 = mu_b ** 2
    mu_ab = mu_a * mu_b

    var_a = blur(a ** 2) - mu_a2
    var_b = blur(b ** 2) - mu_b2
    cov_ab = blur(a * b) - mu_ab

    numerator = (2 * mu_ab + C1) * (2 * cov_ab + C2)
    denominator = (mu_a2 + mu_b2 + C1) * (var_a + var_b + C2)
    return (numerator / denominator).mean()


def rgb2ycbcr(img, only_y=True):
    '''Convert RGB to YCbCr, same coefficients as MATLAB's rgb2ycbcr.

    only_y: only return the Y (luma) channel.
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output has the same dtype and value range convention as the input.
    '''
    in_img_type = img.dtype
    # Fix: the original discarded the astype() result (no assignment) and,
    # for float inputs, `img *= 255.` then mutated the caller's array in
    # place. astype() returns a new array, so working on it is side-effect
    # free and the arithmetic is done in float.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)


def sample(net, device, lr, cfg):
    """Run one forward pass of the SR network on a single LR image.

    Args:
        net: the network; called as net(batch, cfg.scale).
        device: torch device (or device string) to run on.
        lr: low-resolution input tensor of shape (C, H, W).
        cfg: parsed options; only cfg.scale is read here.

    Returns:
        The super-resolved frame as an HWC uint8 numpy array in [0, 255].
    """
    batch = lr.unsqueeze(0).to(device)
    # detach() cuts the graph so no gradients are tracked for inference.
    sr = net(batch, cfg.scale).detach().squeeze(0)
    return sr.cpu().mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()



def main(cfg, image):
    """Super-resolve one frame with the network named by cfg.model.

    Args:
        cfg: parsed options (model, ckpt_path, scale, group are read).
        image: HWC uint8 numpy array — assumed RGB channel order, since it is
            wrapped with PIL's Image.fromarray; confirm at the caller.

    Returns:
        The super-resolved frame as an HWC uint8 numpy array.

    The built network is cached on the function object keyed by
    (model, ckpt_path, scale, group), so per-frame callers (see the video
    loop in __main__) do not reload the checkpoint for every frame.
    """
    key = (cfg.model, cfg.ckpt_path, cfg.scale, cfg.group)
    cache = getattr(main, "_net_cache", None)
    if cache is None or cache[0] != key:
        module = importlib.import_module("model.{}".format(cfg.model))
        net = module.Net(scale=cfg.scale, group=cfg.group)
        # Log the run configuration once per (re)build, keys sorted.
        print(json.dumps(vars(cfg), indent=4, sort_keys=True))
        state_dict = torch.load(cfg.ckpt_path)
        # Copy key-by-key; if the checkpoint was saved from nn.DataParallel,
        # this is the place to strip the "module." prefix (name = k[7:]).
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            new_state_dict[k] = v
        net.load_state_dict(new_state_dict)
        main._net_cache = (key, net)
    net = main._net_cache[1]

    device = "cpu"  # CPU inference; switch to a cuda device string if desired
    net = net.to(device)
    transform = transforms.Compose([transforms.ToTensor()])
    image = Image.fromarray(image)
    return sample(net, device, transform(image), cfg)


if __name__ == "__main__":
    cfg = parse_args()
    from moviepy.editor import *

    video = VideoFileClip('test.mp4')

    audio = video.audio
    # audio.write_audiofile('test.mp3')



    cap=cv2.VideoCapture(video.filename)
    # fps=cap.get(cv2.CAP_PROP_FPS)
    width=cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height=cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    fps=30
    flag=True
    frame_list=[]
    while True:
        sep,frame=cap.read()
        if sep:
            print(len(frame_list))
            image = main(cfg, frame)
            frame_list.append(image)



        else:

            cap.release()
            break



    out = cv2.VideoWriter('result.avi', cv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_list[0].shape[1],frame_list[0].shape[0]))
    for one_frame in frame_list:
        out.write(one_frame)

    out.release()

    #合并音频和视频
    video2 = VideoFileClip('result.avi')
    video2.audio=audio
    video2.write_videofile("result.mp4")





