#!/usr/bin/env python3
# -*- coding:utf-8 -*-

import threading
import os
import torch
import numpy as np
import cv2
import time
import argparse
import platform
from typing import Union
from queue import Queue
from PIL import Image, ImageOps
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, ToTensor
from model_utils.UDnet import mynet

from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, sms2pcl, def_msg, QoS, BaseNode, get_extra_args, Rate
from spirems.mod_helper import download_model


class UnderwaterUDnetNode_Cuda(threading.Thread, BaseNode):
    """SpireMS worker node that enhances underwater images with UDnet.

    Subscribes to a raw-image topic, runs each frame through the UDnet
    enhancement model (CUDA when available, otherwise CPU), and republishes
    the enhanced frame on an output topic — via shared memory on Linux or
    as a compressed image elsewhere.
    """

    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        **kwargs
    ):
        """Load the model, wire up the subscriber/publisher and start the worker thread.

        Args:
            job_name: Job namespace used to build the default topic names.
            ip: SpireMS core IP address.
            port: SpireMS core port.
            param_dict_or_file: Parameter dict, or path to a JSON parameter file.
            **kwargs: Extra arguments forwarded to ``BaseNode.__init__``.
        """
        threading.Thread.__init__(self)
        BaseNode.__init__(
            self,
            self.__class__.__name__,
            job_name,
            ip=ip,
            port=port,
            param_dict_or_file=param_dict_or_file,
            sms_shutdown=False,
            **kwargs
        )

        # realtime_det: drop stale queued frames so only the newest is processed.
        self.realtime_det = self.get_param("realtime_det", True)
        # use_shm: 1 = shared-memory transport, 0 = compressed images,
        # -1 = auto (shared memory on Linux only).
        self.use_shm = self.get_param("use_shm", -1)
        self.model = self.get_param("model", "sms::UDnet.pth")
        self.specified_input_topic = self.get_param("specified_input_topic", "")
        self.specified_output_topic = self.get_param("specified_output_topic", "")
        self.params_help()

        self.b_use_shm = bool(
            self.use_shm == 1 or (self.use_shm == -1 and platform.system() == 'Linux')
        )

        # "sms::"-prefixed model names are fetched via the model downloader;
        # anything else is treated as a local checkpoint path.
        if self.model.startswith("sms::"):
            self.local_model = download_model(self.__class__.__name__, self.model)
            assert self.local_model is not None
        else:
            self.local_model = self.model

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.udnet = mynet()
        # map_location keeps GPU-saved checkpoints loadable on CPU-only hosts;
        # strict=False tolerates missing/extra keys in the state dict.
        self.udnet.load_state_dict(
            torch.load(self.local_model, map_location=lambda storage, loc: storage),
            strict=False
        )
        self.udnet.to(self.device)
        self.udnet.eval()
        print('Pre-trained model is loaded.')

        self.job_queue = Queue()
        # Registering in queue_pool lets BaseNode unblock the worker with a
        # None sentinel on shutdown.
        self.queue_pool.append(self.job_queue)

        input_url = '/' + job_name + '/sensor/image_raw'
        if len(self.specified_input_topic) > 0:
            input_url = self.specified_input_topic
        output_url = '/' + job_name + '/sensor/image_enhanced'
        if len(self.specified_output_topic) > 0:
            output_url = self.specified_output_topic

        self._image_reader = Subscriber(
            input_url, 'std_msgs::Null', self.image_callback, ip=ip, port=port
        )
        self._image_writer = Publisher(
            output_url, 'memory_msgs::RawImage' if self.b_use_shm else 'sensor_msgs::CompressedImage',
            ip=ip, port=port
        )

        self.transform = Compose([
            ToTensor(),
        ])
        self.start()

    def release(self):
        """Shut down the node and kill its pub/sub endpoints."""
        BaseNode.release(self)
        self._image_reader.kill()
        self._image_writer.kill()

    def image_callback(self, msg):
        """Subscriber callback: decode the incoming frame and enqueue it.

        In realtime mode any frames still waiting in the queue are dropped
        first, so the worker always processes the most recent image.
        """
        if self.realtime_det and not self.job_queue.empty():
            # NOTE(review): clearing the underlying deque is not atomic with
            # the worker's get(); acceptable for this single-producer,
            # single-consumer setup, but worth confirming.
            self.job_queue.queue.clear()
        img = sms2cvimg(msg)
        self.job_queue.put({'msg': msg, 'img': img})

    def run(self):
        """Worker loop: pull frames off the queue, enhance them, republish."""
        while self.is_running():
            msg_dict = self.job_queue.get(block=True)
            if msg_dict is None:
                # Shutdown sentinel injected through queue_pool by BaseNode.
                break

            rgb = cv2.cvtColor(msg_dict['img'], cv2.COLOR_BGR2RGB)
            pil_img = Image.fromarray(rgb)

            # PIL's .size is (width, height). Crop both dimensions down to a
            # multiple of 8, which the UDnet encoder/decoder stack requires.
            # (The original code unpacked this as (ih, iw), silently swapping
            # the names; the arithmetic was consistent, so behavior is kept.)
            iw, ih = pil_img.size
            new_w, new_h = iw - iw % 8, ih - ih % 8
            pil_img = pil_img.resize((new_w, new_h))

            tensor = self.transform(pil_img)

            with torch.no_grad():
                tensor = tensor.unsqueeze(0).to(self.device)
                # forward() conditions the model on the input; sample() then
                # draws the enhanced prediction.
                self.udnet.forward(tensor, tensor, training=False)
                prediction = self.udnet.sample(testing=True)

            enhanced_img = prediction.cpu().data
            enhanced_img = enhanced_img.squeeze().clamp(0, 1).numpy().transpose(1, 2, 0)
            # NOTE(review): the published frame is float32 in [0, 1]; confirm
            # cvimg2sms_mem accepts float images, or convert to uint8 here.
            enhanced_img = cv2.cvtColor(enhanced_img, cv2.COLOR_RGB2BGR)

            sms_msg = self._image_writer.cvimg2sms_mem(enhanced_img)
            self._image_writer.publish(sms_msg)

        self.release()
        print('{} quit!'.format(self.__class__.__name__))


if __name__ == '__main__':
    # Command-line entry point: parse known args and forward any extra
    # "--key value" pairs to the node as parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config',
        type=str,
        default='default_params.json',
        help='SpireCV2 Config (.json)')
    parser.add_argument(
        '--job-name', '-j',
        type=str,
        default='live',
        help='SpireCV Job Name')
    parser.add_argument(
        '--ip',
        type=str,
        default='127.0.0.1',
        help='SpireMS Core IP')
    parser.add_argument(
        '--port',
        type=int,
        default=9094,
        help='SpireMS Core Port')
    args, unknown_args = parser.parse_known_args()
    if not os.path.isabs(args.config):
        # Resolve a relative config name against <...>/spirecv-pro/params/spirecv2.
        current_path = os.path.abspath(__file__)
        root_idx = current_path.find('spirecv-pro')
        if root_idx < 0:
            # Previously an unguarded find() returning -1 silently built a
            # bogus path; fail loudly with a usable hint instead.
            parser.error(
                "cannot locate 'spirecv-pro' in '{}'; "
                "pass --config as an absolute path".format(current_path))
        params_dir = os.path.join(
            current_path[:root_idx + len('spirecv-pro')], 'params', 'spirecv2')
        args.config = os.path.join(params_dir, args.config)
    print("--config:", args.config)
    print("--job-name:", args.job_name)
    extra = get_extra_args(unknown_args)

    node = UnderwaterUDnetNode_Cuda(args.job_name, param_dict_or_file=args.config, ip=args.ip, port=args.port, **extra)
    node.join()
