import argparse
import itertools
import os
import platform
import sys
import threading
from queue import Empty, Queue
from typing import Union

import cv2
import numpy as np
from rknn.api import RKNN
from transformers import AutoTokenizer

from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, def_msg, QoS, get_extra_args, BaseNode
from spirems.mod_helper import download_model


def text_tokenizer(text, model_name):
    """Tokenize label prompts with a locally cached HuggingFace tokenizer.

    Args:
        text: list of prompt strings (one nesting level of lists is flattened).
        model_name: HuggingFace model id whose tokenizer files must already be
            downloaded (loaded with local_files_only=True — no network access).

    Returns:
        np.ndarray of token ids with shape (num_texts, max_seq_len).

    Exits the process with status 1 if the tokenizer cache is missing.
    """
    try:
        # Only the cache load can legitimately fail here; keep the try narrow
        # so tokenization errors are not masked by the "download first" hint.
        tokenizer = AutoTokenizer.from_pretrained(model_name, local_files_only=True)
    except Exception:
        print("[ERROR] Please Run download_clip-vit-base-patch32.sh First!")
        sys.exit(1)

    # [text] + chain(*...) flattens exactly one level, so both a flat list of
    # prompts and a list of lists are accepted.
    flat_text = list(itertools.chain(*[text]))
    tokens = tokenizer(text=flat_text, return_tensors='pt', padding=True)
    return np.array(tokens['input_ids'])


def img_preprocess(img, crop_size, image_size):
    """Convert BGR->RGB, center pad/crop to crop_size, resize to image_size.

    Args:
        img: HxWx3 BGR image (OpenCV convention).
        crop_size: side length of the square pad/crop window.
        image_size: (width, height) fed to cv2.resize.

    Returns:
        np.ndarray with a leading batch axis: (1, image_size[1], image_size[0], 3).
    """
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    height, width = img.shape[:2]

    # Symmetric zero-padding on any dimension smaller than the window.
    if height < crop_size:
        top = (crop_size - height) // 2
        bottom = crop_size - height - top
        img = np.pad(img, ((top, bottom), (0, 0), (0, 0)), mode='constant').astype(np.uint8)
    if width < crop_size:
        left = (crop_size - width) // 2
        right = crop_size - width - left
        img = np.pad(img, ((0, 0), (left, right), (0, 0)), mode='constant').astype(np.uint8)

    # Center crop only when BOTH original dimensions exceed the window;
    # NOTE(review): a mixed case (one side larger, one smaller) skips the crop
    # and relies on the resize below — confirm this is intentional.
    if height > crop_size and width > crop_size:
        x0 = (width - crop_size) // 2
        y0 = (height - crop_size) // 2
        img = img[y0:y0 + crop_size, x0:x0 + crop_size]

    img = cv2.resize(img, (image_size[0], image_size[1]))
    return np.expand_dims(img, 0)


class ClipNode_Rknn(threading.Thread, BaseNode):
    """Zero-shot image classifier node running CLIP encoders on a Rockchip NPU.

    The text encoder is run once at startup over the configured label prompts;
    the worker thread then encodes each incoming frame with the image encoder,
    matches it against the cached text features with a scaled softmax, and
    publishes both a classification result and an annotated preview image.
    """

    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        **kwargs
    ):
        """Read parameters, resolve models, wire up pub/sub and start the thread.

        Args:
            job_name: SpireCV job name; topic URLs are derived from it.
            ip: SpireMS core IP address.
            port: SpireMS core port.
            param_dict_or_file: parameter dict or path to a JSON parameter file.
            **kwargs: forwarded to BaseNode.
        """
        threading.Thread.__init__(self)
        BaseNode.__init__(
            self,
            self.__class__.__name__,
            job_name,
            ip=ip,
            port=port,
            param_dict_or_file=param_dict_or_file,
            sms_shutdown=False,
            **kwargs
        )

        # Model names prefixed with "sms::" are resolved via download_model below.
        self.text_model = self.get_param("text_model", "sms::clip_text.rknn")
        self.img_model = self.get_param("img_model", "sms::clip_images.rknn")
        self.target = self.get_param("target", "rk3588")
        # Default label set: the 80 COCO class names.
        self.text = self.get_param("text", ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat",
            "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", 
            "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", 
            "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", 
            "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", 
            "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", 
            "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", 
            "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
            "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
            "teddy bear", "hair drier", "toothbrush"])
        self.imgsz = self.get_param("imgsz", [224, 224])    # image-model input size (w, h)
        self.seq_len = self.get_param("seq_len", 20)        # fixed token length of the text model
        self.pad_val = self.get_param("pad_val", 49407)     # padding token id (presumably CLIP's end-of-text id — TODO confirm)
        self.cropsz = self.get_param("cropsz", 224)         # center pad/crop window before resize
        self.realtime_det = self.get_param("realtime_det", True)  # when True, drop stale queued frames
        self.use_shm = self.get_param("use_shm", -1)        # -1: auto (shm on Linux), 1: force shm, else compressed
        self.params_help()

        self.b_use_shm = False
        if self.use_shm == 1 or (self.use_shm == -1 and platform.system() == 'Linux'):
            self.b_use_shm = True

        if self.text_model.startswith("sms::"):
            self.local_text_model = download_model(self.__class__.__name__, self.text_model)
            assert self.local_text_model is not None
        else:
            self.local_text_model = self.text_model

        if self.img_model.startswith("sms::"):
            self.local_img_model = download_model(self.__class__.__name__, self.img_model)
            assert self.local_img_model is not None
        else:
            self.local_img_model = self.img_model

        image_url = '/' + job_name + '/sensor/image_raw'
        result_url = '/' + job_name + '/classifier/results'

        self.job_queue = Queue()
        self.queue_pool.append(self.job_queue)

        self._image_reader = Subscriber(
            image_url, 'std_msgs::Null', self.image_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._result_writer = Publisher(
            result_url, 'std_msgs::Null',
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._show_writer = Publisher(
            '/' + job_name + '/classifier/image_results', 'memory_msgs::RawImage' if self.b_use_shm else 'sensor_msgs::CompressedImage',
            ip=ip, port=port
        )

        # Text features are computed once; only the image model stays resident.
        self.text_outp = self.clip_text_run()

        self.rknn = RKNN()
        self.rknn.load_rknn(self.local_img_model)
        self.rknn.init_runtime(target=self.target)

        self.start()

    def release(self):
        """Shut down the node and free pub/sub endpoints and the NPU runtime."""
        BaseNode.release(self)
        self._image_reader.kill()
        self._result_writer.kill()
        self._show_writer.kill()  # fix: previously leaked (never killed)
        self.rknn.release()

    def image_callback(self, msg):
        """Subscriber callback: enqueue a decoded frame for the worker thread."""
        if self.realtime_det:
            # Drop stale frames so the worker always sees the latest one.
            # get_nowait() avoids the empty()/get() race with the consumer.
            while True:
                try:
                    self.job_queue.get_nowait()
                except Empty:
                    break
        img = sms2cvimg(msg)
        self.job_queue.put({'msg': msg, 'img': img})

    def run(self):
        """Worker loop: classify queued frames and publish results until shutdown."""
        while self.is_running():
            img_msg = self.job_queue.get(block=True)
            if img_msg is None:  # shutdown sentinel
                break
            img_src, msg = img_msg['img'], img_msg['msg']

            img = img_preprocess(img_src, self.cropsz, self.imgsz)
            img_outp = self.rknn.inference(inputs=[img])[0]
            # One similarity logit per label: (num_texts, 512) x (512, 1).
            logits = np.matmul(self.text_outp, img_outp.reshape(512, 1))
            # exp(4.6051702...) == 100.0 — presumably CLIP's learned logit
            # scale, applied here because it is not baked into the exported
            # models; TODO confirm against the export script.
            logits = np.multiply(logits, np.exp(4.605170249938965))
            # Numerically stable softmax: subtracting the max leaves the
            # probabilities unchanged but prevents exp() overflow to inf/nan.
            logits = logits - logits.max()
            outputs = np.exp(logits) / np.sum(np.exp(logits))

            text_index = np.argmax(outputs) % len(self.text)
            score = outputs.max()

            res_msg = def_msg("std_msgs::Null") # !! spirecv_msgs::ClassResult
            res_msg['index'] = int(text_index)
            res_msg['class'] = self.text[res_msg['index']]
            res_msg['score'] = float(score)
            if 'img_id' in msg:
                res_msg['img_id'] = msg['img_id']
            if 'img_total' in msg:
                res_msg['img_total'] = msg['img_total']
            self._result_writer.publish(res_msg)

            # Annotated preview: "<score> <class>" drawn on the source frame.
            cv2.putText(img_src, "{:.2f} {}".format(score, res_msg['class']), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)
            if self.b_use_shm:
                show_msg = self._show_writer.cvimg2sms_mem(img_src)
            else:
                show_msg = cvimg2sms(img_src)
            show_msg['spirecv_msgs::ClassResult'] = res_msg
            self._show_writer.publish(show_msg)
            # END

        self.release()
        print('{} quit!'.format(self.__class__.__name__))

    def clip_text_run(self):
        """Encode all label prompts with the RKNN text model.

        The text model is loaded, run once per prompt, and released; only its
        output features are kept for the lifetime of the node.

        Returns:
            np.ndarray of shape (num_texts, feature_dim) — one row per label.
        """
        input_ids = text_tokenizer(self.text, "openai/clip-vit-base-patch32")
        text_num, seq_len = input_ids.shape
        if seq_len >= self.seq_len:
            # Truncate to the fixed length the model was exported with.
            input_data = input_ids[:, :self.seq_len]
        else:
            # NOTE(review): this branch feeds float32 while the truncation
            # branch keeps the tokenizer's integer dtype — confirm which dtype
            # the exported RKNN text model actually expects.
            input_data = np.zeros((text_num, self.seq_len)).astype(np.float32)
            input_data[:, :seq_len] = input_ids
            input_data[:, seq_len:] = self.pad_val

        rknn = RKNN()
        rknn.load_rknn(self.local_text_model)
        rknn.init_runtime(target=self.target)
        outputs = []
        for i in range(text_num):
            # One prompt per inference call; keep the leading batch axis.
            outputs.append(rknn.inference(inputs=[input_data[i:i+1, :]])[0])

        rknn.release()
        return np.concatenate(outputs, axis=0)


if __name__ == '__main__':
    # CLI entry point: parse known flags, forward anything else to the node.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='default_params.json',
                        help='SpireCV2 Config (.json)')
    parser.add_argument('--job-name', type=str, default='live',
                        help='SpireCV Job Name')
    parser.add_argument('--ip', type=str, default='127.0.0.1',
                        help='SpireMS Core IP')
    parser.add_argument('--port', type=int, default=9094,
                        help='SpireMS Core Port')
    args, unknown_args = parser.parse_known_args()

    # A relative config path is resolved against <repo>/params/spirecv2,
    # where <repo> is located from this file's own absolute path.
    if not os.path.isabs(args.config):
        here = os.path.abspath(__file__)
        repo_root = here[:here.find('spirecv-pro') + len('spirecv-pro')]
        args.config = os.path.join(repo_root, 'params', 'spirecv2', args.config)
    print("--config:", args.config)
    print("--job-name:", args.job_name)
    extra = get_extra_args(unknown_args)

    node = ClipNode_Rknn(args.job_name, param_dict_or_file=args.config, ip=args.ip, port=args.port, **extra)
    node.join()
