import rospy
import cv2
import torch
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from vision_msgs.msg import Detection2DArray
import os
import shutil
from CNNImageClassifyTrain import CNN
import pickle
import socket


class FeatureExtractor:
    """ROS node that selects distinctive human-detection frames.

    Subscribes to a camera image stream and YOLOv4 detections, classifies
    each sufficiently large, solitary human bounding box with a small CNN,
    keeps one representative image per previously-unseen CNN class, saves it
    to disk, and streams all selected JPEG images to a remote host over UDP.
    """

    def __init__(self):
        rospy.init_node('feature_extractor')
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber('/oakcam_ffc_4p_ros/front/image', Image, self.image_callback)
        self.detection_sub = rospy.Subscriber('/oakcam_ffc_4p_ros/yolov4_detections', Detection2DArray, self.detection_callback)
        self.feature_list = []          # CNN class ids already selected
        self.image_list = []            # JPEG-encoded selected frames
        self.current_image = None       # latest camera frame (BGR ndarray)
        self.current_detections = None  # latest human detections (or None)
        self.last_select_time = None    # rospy.Time of last successful selection
        self.timer = rospy.Timer(rospy.Duration(0.05), self.process)
        self.BBOX_S_THRESHOLD = 1200    # minimum bbox area (px^2) to consider
        self.save_dir = "./image"
        self.bbox_save_dir = "./bbox_image"
        # Start from empty output directories: wipe stale contents if present,
        # then (re)create.  BUGFIX: the original only called makedirs when the
        # directory already existed (inside the isdir branch), so on a fresh
        # checkout neither directory was ever created.
        for directory in (self.save_dir, self.bbox_save_dir):
            if os.path.isdir(directory):
                shutil.rmtree(directory)
            os.makedirs(directory)
        self.encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 75]
        self.bbox_count = 0
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.host = "192.168.10.60"
        self.port = 9816
        # CNN classifier (architecture defined in CNNImageClassifyTrain);
        # weights are loaded from the working directory.
        self.cnn_model = CNN()
        self.cnn_model.load_state_dict(torch.load('cnn_model.pth'))
        self.cnn_model.eval()

    def image_callback(self, img_msg):
        """Cache the most recent camera frame as a BGR OpenCV image."""
        self.current_image = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")

    def detection_callback(self, detection_msg):
        """Cache only the human detections from the latest detection message."""
        self.current_detections = self.get_human_detection(detection_msg.detections)

    def process(self, event):
        """Timer callback (20 Hz): try to select the current frame, then publish.

        A frame is selected only when it contains exactly one human bbox,
        that bbox is large enough, and the CNN assigns it a class not seen
        before (see extract_and_compare_cnn_feature).  Consumed frame and
        detections are cleared so each pair is processed at most once.
        """
        # Select one image only if there is exactly one bbox in the image.
        if self.current_image is not None \
                and self.current_detections is not None and len(self.current_detections) == 1:
            for detection in self.current_detections:
                x, y, width, height = (detection.bbox.center.x, detection.bbox.center.y,
                                       detection.bbox.size_x, detection.bbox.size_y)
                # Skip bboxes that are too small to classify reliably.
                if width * height < self.BBOX_S_THRESHOLD:
                    continue
                roi = self.current_image[int(y - height / 2):int(y + height / 2),
                                         int(x - width / 2):int(x + width / 2)]

                feature, is_new_feature = self.extract_and_compare_cnn_feature(roi)

                if is_new_feature:
                    # Draw the detection bbox on a copy of the full frame.
                    left = int(detection.bbox.center.x - detection.bbox.size_x / 2)
                    top = int(detection.bbox.center.y - detection.bbox.size_y / 2)
                    w = int(detection.bbox.size_x)
                    h = int(detection.bbox.size_y)
                    img = self.current_image.copy()
                    cv2.rectangle(img, (left, top), (left + w, top + h), (0, 255, 0), 2)
                    _, img_encoded = cv2.imencode('.jpg', img, self.encode_param)
                    # Make sure the first selected image is class 0; otherwise
                    # abandon this frame entirely (early return, as before).
                    if len(self.feature_list) == 0 and feature != 0:
                        return
                    self.image_list.append(img_encoded)
                    self.feature_list.append(feature)
                    # Persist the selected ROI to disk for offline inspection.
                    save_path = os.path.join(self.save_dir, f"image_{len(self.image_list)}.jpg")
                    if not os.path.exists(self.save_dir):
                        os.makedirs(self.save_dir)
                    cv2.imwrite(save_path, roi)  # save roi or img
                    self.last_select_time = rospy.Time.now()
                    # BUGFIX: the original passed f"new feature:" (an f-string
                    # with no placeholder) and {feature} (a one-element set) to
                    # print; use a single well-formed f-string instead.
                    print(f"new feature: ({width}, {height}) class: {feature}")
                    print("--------------------------------------")
            self.current_image = None
            self.current_detections = None
        self.UDP_publish()

    def extract_and_compare_cnn_feature(self, img):
        """Classify a bbox crop and decide whether it is a new selection.

        Classes: 0 = the gray one, 1 = the blue one, 2 = pedestrian.

        Returns (class_id, is_new) where is_new is False when the crop is a
        pedestrian, when the 5-second cooldown since the last selection has
        not elapsed, or when the class was already selected.
        """
        image = cv2.resize(img, (30, 40))
        # NOTE(review): the tensor is fed unbatched as (C, H, W) with raw
        # 0-255 pixel values — assumes CNN accepts that; confirm against
        # CNNImageClassifyTrain.
        img_tensor = torch.from_numpy(image).permute(2, 0, 1).float()
        with torch.no_grad():
            output = self.cnn_model(img_tensor)
            _, predicted = torch.max(output, 1)
            new_feature = predicted.item()
        # Exclude pedestrians, and enforce a 5 s cooldown after each
        # successful selection to avoid repetition.
        if new_feature == 2 or (self.last_select_time is not None
                                and (rospy.Time.now() - self.last_select_time).to_sec() < 5.0):
            return new_feature, False
        # Already selected this class before?
        if new_feature in self.feature_list:
            return new_feature, False
        return new_feature, True

    def get_human_detection(self, detections):
        """Return only the detections whose primary result id is 0 (human)."""
        return [d for d in detections if d.results[0].id == 0]

    def UDP_publish(self):
        """Send every selected JPEG to the remote host, one UDP datagram each.

        Payload is a pickled ("image_<index>", jpeg_bytes) tuple; a short
        sleep between datagrams throttles the send rate.
        """
        if len(self.image_list) >= 1:
            for i, image in enumerate(self.image_list):
                serialized_data = pickle.dumps(("image_" + str(i), image.tobytes()))
                self.socket.sendto(serialized_data, (self.host, self.port))
                rospy.sleep(0.1)

if __name__ == '__main__':
    # Instantiate the node, then hand control to the ROS event loop.
    node = FeatureExtractor()
    rospy.spin()