#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import os
import numpy as np
import cv2
from utils import draw_box, letterbox
from tqdm import tqdm
import time
import datetime

from acllite_resource import AclLiteResource
from acllite_model import AclLiteModel
from acllite_imageproc import AclLiteImageProc
from acllite_image import AclLiteImage
from acllite_logger import log_error, log_info


class SampleYOLOV10(object):
    """Wrap a YOLOv10 offline (.om) model: resource setup, preprocess,
    inference, and postprocess back to original-image coordinates."""

    def __init__(self, model_path, model_width, model_height):
        """
        Args:
            model_path: path to the offline (.om) model file.
            model_width: input width expected by the model.
            model_height: input height expected by the model.
        """
        self.model_path = model_path
        self.model_width = model_width
        self.model_height = model_height
        self.resource = None
        self.dvpp = None
        self.model = None

    def init_resource(self):
        """Initialize the ACL resource, image processor, and model.

        NOTE: on failure this only logs the error and returns; self.model
        stays None and later calls will fail — callers should verify.
        """
        try:
            self.resource = AclLiteResource()
            self.resource.init()
            self.dvpp = AclLiteImageProc(self.resource)
            self.model = AclLiteModel(self.model_path)
        except Exception as e:
            log_error(f"Failed to initialize resources: {e}")

    def preprocess(self, image):
        """Letterbox-resize, convert BGR->RGB, HWC->CHW, scale to [0,1],
        and add a batch dimension.

        Stores ratio/dw/dh on self so postprocess() can map boxes back to
        the original image.

        Returns:
            (input_tensor, ratio, dw, dh)
        """
        image, self.ratio, self.dw, self.dh = letterbox(
            image, new_shape=(self.model_width, self.model_height))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = np.transpose(image, (2, 0, 1))      # HWC -> CHW
        image = image.astype(np.float32) / 255.0    # normalize to [0, 1]
        self.input_tensor = np.expand_dims(image, axis=0)
        return self.input_tensor, self.ratio, self.dw, self.dh

    def infer(self):
        """Run the model on the tensor prepared by preprocess().

        Returns:
            The squeezed first model output (one row per detection).
        """
        self.output = self.model.execute([self.input_tensor])
        self.output = np.squeeze(self.output[0])
        return self.output

    def postprocess(self, conf_threshold=0.5):
        """Filter detections by confidence and map boxes back to the
        original image coordinates.

        Each output row is [xmin, ymin, xmax, ymax, confidence, class_id]
        in letterboxed coordinates.

        Args:
            conf_threshold: confidence cut-off; generalizes the previously
                hard-coded 0.5 (default keeps old behavior).

        Returns:
            list of [xmin, ymin, xmax, ymax, confidence, label].
        """
        result = []
        for row in self.output:
            confidence = row[4]
            if confidence > conf_threshold:
                label = int(row[5])
                # Undo the letterbox padding (dw/dh) and scaling (ratio).
                # The -0.1/+0.1 nudges bias round() slightly inward,
                # matching the original implementation.
                xmin = int((row[0] - int(round(self.dw - 0.1))) / self.ratio[0])
                ymin = int((row[1] - int(round(self.dh - 0.1))) / self.ratio[1])
                xmax = int((row[2] - int(round(self.dw + 0.1))) / self.ratio[0])
                ymax = int((row[3] - int(round(self.dh + 0.1))) / self.ratio[1])
                result.append([xmin, ymin, xmax, ymax, confidence, label])
        return result

    def release_resource(self):
        """Release the ACL resource, image processor, and model
        (their cleanup runs via __del__)."""
        if self.resource:
            del self.resource
        if self.dvpp:
            del self.dvpp
        if self.model:
            del self.model

def image_infer(input_path, model, output_path):
    """Detect objects in an image using YOLOv10 and save the annotated image.

    Args:
        input_path: path of the image to read.
        model: an initialized SampleYOLOV10 instance.
        output_path: where the annotated image is written.

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    frame = cv2.imread(input_path)
    if frame is None:
        # cv2.imread returns None instead of raising on a bad/missing path;
        # fail fast with a clear message rather than deep in preprocess().
        raise FileNotFoundError(f"Cannot read image: {input_path}")

    time0 = time.perf_counter()
    model.preprocess(frame)
    time1 = time.perf_counter()
    model.infer()
    time2 = time.perf_counter()
    result = model.postprocess()
    time3 = time.perf_counter()

    log_info(f"preprocess time: {(time1 - time0) * 1000} ms")
    log_info(f"infer time: {(time2 - time1) * 1000} ms")
    log_info(f"postprocess time: {(time3 - time2) * 1000} ms")

    # Draw every detection on the frame and save it.
    for xmin, ymin, xmax, ymax, confidence, label in result:
        draw_box(frame, [xmin, ymin, xmax, ymax], confidence, label)
    cv2.imwrite(output_path, frame)

def infer_image(model, img):
    """Run the full detection pipeline on a single frame.

    Args:
        model: an initialized SampleYOLOV10 instance.
        img: input image in OpenCV BGR format.

    Returns:
        Tuple (class_ids, boxes, confidences) where each box is a
        [xmin, ymin, xmax, ymax] list in original-image coordinates.
    """
    model.preprocess(img)
    model.infer()
    detections = model.postprocess()

    class_ids = [det[5] for det in detections]
    boxes = [det[:4] for det in detections]
    confidences = [det[4] for det in detections]
    return (class_ids, boxes, confidences)

def generate_mask(img, class_ids, boxes, target_id):
    """Build a binary mask for the FIRST detected box of class `target_id`.

    The mask is white (255) inside that box and black (0) elsewhere. Note
    that only the first matching detection is used — the loop breaks after
    one fill (the original comment implied all boxes were filled, but the
    code masks just one).

    Args:
        img: source image in (h, w, c) layout; only its shape is used.
        class_ids: detected class id per box.
        boxes: [xmin, ymin, xmax, ymax] per detection, absolute pixels.
        target_id: class id to mask.

    Returns:
        uint8 array of shape (h, w): 255 inside the box, 0 outside.
    """
    h, w = img.shape[:2]
    mask = np.zeros((h, w), dtype=np.uint8)

    for class_id, box in zip(class_ids, boxes):
        if class_id != target_id:
            continue

        # Round to integer pixel coordinates and clamp to the image bounds.
        xmin, ymin, xmax, ymax = (int(round(v)) for v in box)
        xmin, ymin = max(0, xmin), max(0, ymin)
        xmax, ymax = min(w - 1, xmax), min(h - 1, ymax)

        # Fill the (inclusive) box region in one vectorized write. This
        # replaces cv2.rectangle(..., thickness=-1) — same filled area,
        # no OpenCV dependency. Degenerate boxes (max < min) fill nothing.
        mask[ymin:ymax + 1, xmin:xmax + 1] = 255
        break  # only the first matching detection is masked

    return mask

def video_infer(video_path, model, output_folder):
    """Detect objects in a video using YOLOv10 and save the annotated video.

    Writes the result as output-<timestamp>.mp4 inside `output_folder` and
    logs the per-stage times accumulated over all frames.

    Args:
        video_path: path of the input video.
        model: an initialized SampleYOLOV10 instance.
        output_folder: directory for the annotated output video.

    Raises:
        IOError: if the input video cannot be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise IOError("Error opening video file")

    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Output container mirrors the source geometry and frame rate.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    output_path = os.path.join(output_folder, f'output-{timestamp}.mp4')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    preprocess_time = 0.0
    infer_time = 0.0
    postprocess_time = 0.0
    try:
        with tqdm(total=total_frames, desc="Processing frames", unit="frame") as pbar:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # time.perf_counter() is monotonic and matches the timing
                # style used in image_infer() (the original used wall-clock
                # datetime.now(), which can jump on clock adjustments).
                time0 = time.perf_counter()
                model.preprocess(frame)
                time1 = time.perf_counter()
                model.infer()
                time2 = time.perf_counter()
                result = model.postprocess()
                time3 = time.perf_counter()

                preprocess_time += time1 - time0
                infer_time += time2 - time1
                postprocess_time += time3 - time2

                for xmin, ymin, xmax, ymax, confidence, label in result:
                    draw_box(frame, [xmin, ymin, xmax, ymax], confidence, label)

                out.write(frame)  # write the annotated frame
                pbar.update(1)
    finally:
        # Release capture/writer even if inference raises mid-stream.
        cap.release()
        out.release()

    log_info(f"preprocess time: {preprocess_time * 1000} ms")
    log_info(f"infer time: {infer_time * 1000} ms")
    log_info(f"postprocess time: {postprocess_time * 1000} ms")

def main():
    """Script entry point: run image detection and produce a banana mask."""
    # Set up the model and its ACL resources.
    model_path = 'models/yolov10s_310P1.om'
    detector = SampleYOLOV10(model_path, 640, 640)
    detector.init_resource()

    # Annotated-image inference: read, detect, draw, save.
    input_path = 'inputs/color.png'
    output_path = 'outputs/yolo_color.png'
    image_infer(input_path, detector, output_path)

    # Second pass: collect raw detections, then build a mask for the
    # banana class (COCO class id 46) and save it.
    image = cv2.imread(input_path)
    class_ids, boxes, _ = infer_image(detector, image)
    banana_id = 46
    mask = generate_mask(image, class_ids, boxes, banana_id)
    cv2.imwrite("outputs/mask_banana.png", mask)

    # Free ACL resources.
    detector.release_resource()


# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
