# USAGE
# python video.py --model_state_path 1_epoch.pth --style_path new_style

# import the necessary packages
from imutils.video import VideoStream
from imutils import paths
import itertools
import argparse
import imutils
import time
import cv2
from model import VGGEncoder, Decoder
import torch
from style_swap import style_swap
from loop import transf_image, denorm, trans
import numpy as np
# from loop import train

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model_state_path", type=str, default='10_epoch.pth',
	            help="path to directory containing neural style transfer models")
ap.add_argument("-s", "--style_path", type=str, default='new_style')
args = ap.parse_args()

# initialize the video stream, then allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# print("[INFO] {}. {}".format(modelID + 1, modelPath))


def train(e, d, frame, s_tensor, patch_size=3):
    """Stylize a single BGR video frame and return the displayable result.

    Despite the name, this performs inference only: encode content and
    style, patch-swap their features, then decode.

    Args:
        e: encoder module (already moved to the module-level `device`).
        d: decoder module (already moved to `device`).
        frame: BGR image array as read from OpenCV / VideoStream.
        s_tensor: pre-computed style tensor on `device` (batched via
            unsqueeze(0) by the caller).
        patch_size: side length of feature patches swapped between
            content and style features. Defaults to 3.

    Returns:
        The stylized image as produced by `transf_image` — presumably an
        RGB array suitable for display; confirm against `loop.transf_image`.
    """
    c = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    c_tensor = trans(c).unsqueeze(0).to(device)
    # Inference only: disable autograd so no computation graph is built
    # (and retained) for every single video frame.
    with torch.no_grad():
        cf = e(c_tensor)
        sf = e(s_tensor)
        style_swap_res = style_swap(cf, sf, patch_size, 1)
        output = d(style_swap_res)
    out_denorm = denorm(output, device)
    return transf_image(out_denorm, nrow=1)


# collect all style images; fail fast with a clear message if none exist,
# since itertools.cycle over an empty list raises StopIteration on first next()
stylePaths = list(paths.list_files(args.style_path))
if not stylePaths:
    raise SystemExit("[ERROR] no style images found in '{}'".format(args.style_path))

# cycle (index, path) pairs forever; "n" key advances to the next style
styleIter = itertools.cycle(enumerate(stylePaths))
(styleID, stylePath) = next(styleIter)

# fall back to CPU when no GPU is present instead of crashing at startup
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
e = VGGEncoder().to(device)
d = Decoder()
# map_location follows `device` so the checkpoint loads on CPU-only machines
d.load_state_dict(torch.load(args.model_state_path, map_location=device))
d = d.to(device)

# init first style: OpenCV reads BGR, so convert before the transform
s = cv2.imread(stylePath)
s = cv2.cvtColor(s, cv2.COLOR_BGR2RGB)
s_tensor = trans(s).unsqueeze(0).to(device)
# loop over frames from the video file stream
# loop over frames from the video stream
while True:
    # grab the frame from the threaded video stream
    frame = vs.read()
    if frame is None:
        # camera disconnected or stream ended — exit cleanly instead of
        # crashing inside imutils.resize on a None frame
        break
    frame = imutils.resize(frame, width=400)

    # stylize the frame; train() returns an RGB image, so swap channels
    # back for OpenCV display (RGB2BGR is the same channel swap as the
    # original BGR2RGB constant, named for what it actually does here)
    output = train(e, d, frame, s_tensor)
    output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
    cv2.imshow('output', output)

    key = cv2.waitKey(1) & 0xFF
    if key == ord("n"):
        # advance to the next style image and rebuild its tensor
        (styleID, stylePath) = next(styleIter)
        s = cv2.imread(stylePath)
        s = cv2.cvtColor(s, cv2.COLOR_BGR2RGB)
        s_tensor = trans(s).unsqueeze(0).to(device)
    elif key == ord("q"):
        break

# release GUI windows and stop the camera thread
cv2.destroyAllWindows()
vs.stop()