import argparse
import collections
import os
import shutil
from pathlib import Path

import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
import yaml
from PIL import Image
from scipy import special
from torch.backends import cudnn

from data.constant import culane_row_anchor
from model.model import parsingNet
from utils import wingo


def _load_model(weights, backbone, griding_num, cls_num_per_lane):
    """Build the lane-detection network on GPU and load checkpoint weights.

    Strips the ``module.`` prefix that ``DataParallel`` checkpoints carry so
    the state dict matches a single-GPU model, then switches to eval mode.
    """
    net = parsingNet(pretrained=False, backbone=backbone,
                     cls_dim=(griding_num + 1, cls_num_per_lane, 4),
                     use_aux=False).cuda()
    state_dict = torch.load(weights, map_location='cpu')['model']
    # startswith (not `'module.' in k`) so only a true prefix is stripped.
    compatible_state_dict = collections.OrderedDict(
        (k[len('module.'):] if k.startswith('module.') else k, v)
        for k, v in state_dict.items()
    )
    net.load_state_dict(compatible_state_dict, strict=False)
    net.eval()  # inference mode (was `print(net.eval())`, dumping the full repr)
    return net


def detect():
    """Run lane detection on a webcam/stream or video file.

    Reads ``opt`` (CLI args) and ``cfg`` (YAML config) from module globals.
    For each frame: preprocess to 288x800, run the net, decode per-lane
    column positions via soft-argmax over the griding axis, draw the lanes,
    and optionally display (webcam) or write to an .avi (video).

    Raises:
        NotImplementedError: if the source is neither a webcam/stream nor
            an .mp4/.avi file.
    """
    source, weights, wgview = opt.source, opt.weights, opt.wgview
    webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    video = not webcam and source.endswith(('.mp4', '.avi'))
    if not webcam and not video:
        raise NotImplementedError('Not webcam or video')

    gn, backbone, cnpl = cfg['griding_num'], cfg['backbone'], cfg['cls_num_per_lane']
    row_anchor = culane_row_anchor

    cudnn.benchmark = True
    # Use the configured backbone (was hard-coded '18' while cfg['backbone']
    # was read and ignored).
    net = _load_model(weights, backbone, gn, cnpl)

    # A local device index must be passed as int; URLs/paths stay strings.
    cap = cv2.VideoCapture(int(source) if source.isnumeric() else source)
    # Request a capture size (drivers may ignore it; actual size is read back).
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 480)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 640)

    assert cap.isOpened(), f'Fail to open {source}'
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # default: w=1280, h=720
    fps = cap.get(cv2.CAP_PROP_FPS) % 100
    _, img = cap.read()  # warm-up read; first frame is discarded
    print(' success (%gx%g at %.2f FPS).' % (w, h, fps))

    # Column grid of the 800-px-wide model input; used to map grid cells back
    # to pixel x-coordinates in the original frame.
    col_sample = np.linspace(0, 800 - 1, gn)
    col_sample_w = col_sample[1] - col_sample[0]
    idx = (np.arange(gn) + 1).reshape(-1, 1, 1)  # 1-based cell indices for soft-argmax
    img_transforms = transforms.Compose([
        transforms.Resize((288, 800)),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    # Initialize video writer, ignore this in webcam mode
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    basename = os.path.splitext(os.path.basename(source))[0] + '.avi' if video else 'output.avi'
    p = Path('/mnt/sda1/Videos/output') / basename
    print(basename)
    writer = cv2.VideoWriter(str(p), fourcc, 20.0, (w, h))
    if video:
        nframe = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
        frame = 1
    else:
        nframe = None
        frame = None

    try:
        while cap.isOpened():
            ret, img = cap.read()
            if not ret:
                break  # stream ended or read failed (original spun forever here)

            img1 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            x = img_transforms(Image.fromarray(img1)).unsqueeze(0).cuda()
            # NOTE: the original added `+ 1` to the normalized tensor here,
            # shifting the input off the distribution the net was trained on.

            out = net(x)  # torch.Size([1, gn + 1, cnpl, 4])

            out_j = out[0].data.cpu().numpy()
            out_j = out_j[:, ::-1, :]  # row anchors are stored bottom-up
            prob = special.softmax(out_j[:-1, :, :], axis=0)
            loc = np.sum(prob * idx, axis=0)  # soft-argmax column per anchor/lane
            out_j = np.argmax(out_j, axis=0)
            loc[out_j == gn] = 0  # class gn means "no lane at this anchor"
            out_j = loc  # out_j.shape (cnpl, 4)

            lines = wingo.MessageDict()
            for i in range(out_j.shape[1]):  # one column per lane
                line = []
                if np.sum(out_j[:, i] != 0) > 2:  # need >2 anchors to be a lane
                    for k in range(out_j.shape[0]):
                        if out_j[k, i] > 0:
                            # Scale model coordinates (800x288) back to frame (w x h).
                            ppp = (int(out_j[k, i] * col_sample_w * w / 800) - 1,
                                   int(h * (row_anchor[cnpl - 1 - k] / 288)) - 1)
                            line.append(ppp)
                lines.add_data(key=i, value=line)
                if line:
                    cv2.polylines(img, np.int32([line]), isClosed=False, color=(0, 255, 0), thickness=10)

            if webcam and wgview:
                cv2.imshow(source, img)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    break  # was `raise StopIteration`, which skipped cleanup

            if video:
                writer.write(img)
                print(f'{frame}/{nframe}')
                if frame == nframe:
                    break
                frame += 1

            # Send inference result (pickled) to host
            communicate = False
            if communicate:
                lines.send(cfg['host'], cfg['port'])
    finally:
        # Always release capture/writer handles, even on an exception.
        cap.release()
        writer.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    cfg = yaml.load(open('configs/config.yaml'), Loader=yaml.FullLoader)
    print('cfg\n', cfg)

    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='weights/ep092.pth', help='model.pth path(s)')
    parser.add_argument('--source', type=str, default='0', help='source')
    parser.add_argument('--wgview', action='store_true', help='set this true to view live stream')
    opt = parser.parse_args()
    print('opt\n', opt)

    with torch.no_grad():
        detect()
