import sys
import os
sys.path.append("/home/HwHiAiUser/acllite/")  #"/usr/local/Ascend/thirdpart/aarch64/acllite/"
import videocapture as video
import numpy as np
import cv2
import time 

from acllite_resource import AclLiteResource
from acllite_model import AclLiteModel
from acllite_imageproc import AclLiteImageProc
from acllite_image import AclLiteImage
from label import labels
from acllite_logger import log_error, log_info
from imagezmq import ImageSender
import socket
import time




class sampleYOLOV7(object):
    """Wrap an Ascend offline (.om) detection model.

    Owns the ACL runtime resources, DVPP image processor and model, and
    provides preprocess / infer / postprocess steps for both still images
    and decoded video frames.
    """

    def __init__(self, model_path, model_width, model_height):
        # Path to the .om model and the input resolution it expects.
        self.model_path = model_path
        self.model_width = model_width
        self.model_height = model_height
        # Thresholds kept for reference; postprocess() currently applies a
        # hard-coded 0.5 confidence cut and does no NMS.
        self.conf_thres = 0.25
        self.nms_thres = 0.45
        # RGB copy of the last video frame (set by preprocess_vis);
        # stays None in still-image mode so postprocess falls back to disk.
        self.src_image = None

    def init_resource(self):
        """Acquire the ACL runtime, DVPP processor and load the model."""
        self._resource = AclLiteResource()
        self._resource.init()

        self._dvpp = AclLiteImageProc(self._resource)
        self._model = AclLiteModel(self.model_path)

    def preprocess_image(self, frame):
        """Hardware-decode a JPEG AclLiteImage and resize to model input size."""
        print(frame.size, frame.width, frame.height)  # debug
        yuv_image = self._dvpp.jpegd(frame)
        print(yuv_image.size)  # debug
        self.resized_image = self._dvpp.resize(yuv_image, self.model_width, self.model_height)

    def preprocess_vis(self, frame):
        """Resize a decoded video frame and keep an RGB copy for drawing.

        NOTE(review): assumes the frame buffer is NV21 YUV420SP
        (height*3/2 rows) — confirm against the capture source.
        """
        raw = frame.byte_data_to_np_array().astype(np.uint8)
        self.src_image = cv2.cvtColor(
            raw.reshape((frame.height * 3 // 2, frame.width)),
            cv2.COLOR_YUV2RGB_NV21)

        self.resized_image = self._dvpp.resize(frame, self.model_width, self.model_height)

    def infer(self):
        """Run the model on the last preprocessed image; store raw outputs."""
        self.result = self._model.execute([self.resized_image])

    def postprocess(self, path, frame_index, output_folder):
        """Draw detections with score > 0.5 and return the annotated frame.

        Args:
            path: image path used only when no video frame is cached.
            frame_index: frame counter (currently unused, kept for callers).
            output_folder: destination folder (currently unused; the
                imwrite path is commented out).

        Returns:
            The annotated frame re-encoded/decoded as quality-70 JPEG.
        """
        outputs = self.result[0][0]
        if self.src_image is None:
            src_image = cv2.imread(path)
        else:
            src_image = self.src_image
        # Use distinct names for the image dimensions so the per-box
        # width/height below cannot shadow them.
        img_height, img_width, _ = src_image.shape
        scale_x = img_width / self.model_width
        scale_y = img_height / self.model_height
        for i in range(outputs.shape[1]):
            x, y, box_w, box_h, score = outputs[:, i]
            if score > 0.5:
                top_left_x = int((x - box_w / 2) * scale_x)
                top_left_y = int((y - box_h / 2) * scale_y)
                bottom_right_x = int((x + box_w / 2) * scale_x)
                bottom_right_y = int((y + box_h / 2) * scale_y)
                # Fix: draw on the local src_image — the original drew on
                # self.src_image, which is None in still-image mode and
                # crashed cv2.rectangle/putText there.
                cv2.rectangle(src_image, (top_left_x, top_left_y),
                              (bottom_right_x, bottom_right_y), (0, 255, 0), 2)
                label = f"hjch: {score:.2f}"
                cv2.putText(src_image, label, (top_left_x, top_left_y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        print("Sending frame")

        # Deliberate lossy round-trip: compress to quality-70 JPEG and decode
        # back, degrading the frame before it is sent over the network.
        _, buffer = cv2.imencode('.jpg', src_image, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
        frame = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
        return frame

    def release_resource(self):
        """Release resources in reverse order of acquisition."""
        del self.resized_image
        del self._model
        del self._dvpp
        del self._resource

def video_infer(video_path, model, output_folder):
    """Stream annotated detections from a video/RTSP source.

    Every 20th frame is preprocessed, run through the model and pushed via
    the module-level imagezmq `sender` under the global `rpi_name`.
    """
    cap = video.VideoCapture(video_path)
    frame_index = 0
    while True:
        ret, frame = cap.read()
        # NOTE(review): acllite's read() appears to return a nonzero status
        # on end-of-stream/failure (opposite of cv2's boolean) — confirm.
        if ret:
            print('cap read end! close subprocess cap read')
            break
        # Sample one frame out of every 20 to keep up with the stream.
        if frame_index % 20 == 0:
            print('start preprocess')
            model.preprocess_vis(frame)
            model.infer()
            annotated = model.postprocess(video_path, frame_index, output_folder)
            sender.send_image(rpi_name, annotated)
            print("Success")
        frame_index += 1
    del cap

def image_infer(image_path, model, output_folder):
    """Run single-image inference, logging per-stage latency in ms."""
    frame = AclLiteImage(image_path)

    start_time = time.time()
    model.preprocess_image(frame)
    preprocess_time = (time.time() - start_time) * 1000
    log_info(f"Preprocess time: {preprocess_time:.2f} ms")

    start_time = time.time()
    model.infer()
    infer_time = (time.time() - start_time) * 1000
    log_info(f"Inference time: {infer_time:.2f} ms")

    start_time = time.time()
    # Fix: postprocess() takes (path, frame_index, output_folder); the
    # original call omitted frame_index and raised TypeError at runtime.
    model.postprocess(image_path, 0, output_folder)
    postprocess_time = (time.time() - start_time) * 1000
    log_info(f"Postprocess time: {postprocess_time:.2f} ms")


if __name__ == '__main__':
    # Model file and fixed network input resolution.
    model_path = '/home/HwHiAiUser/samples/EdgeAndRobotics-master/Samples/YOLOV5Video/model/hjch_rtsp.om'
    RTSP = 'rtsp://192.168.0.101:8554/chn1_h264'
    output_folder = "output"
    model_width = 640
    model_height = 640

    # Module-level globals consumed by video_infer() when streaming frames.
    rpi_name = socket.gethostname()
    sender = ImageSender(connect_to='tcp://192.168.0.103:5555')

    model = sampleYOLOV7(model_path, model_width, model_height)
    model.init_resource()

    mode = "video"
    if mode == "image":
        path = "/home/HwHiAiUser/samples/EdgeAndRobotics-master/Samples/YOLOV5Video/data/0001.jpg"
        image_infer(path, model, output_folder)
    elif mode == "video":
        path = 'rtsp://192.168.0.100:8554/chn1_h264'
        video_infer(path, model, output_folder)
    else:
        print('input mode is incorrect.')

    model.release_resource()