"""
OCR Processor Module - Handles text detection and recognition
"""
import cv2
import numpy as np
import paddle
import math
import copy
import logging
from pathlib import Path
from PIL import Image

import openvino as ov
import sys
from pathlib import Path

# Add the project root directory to sys.path
project_root = Path(__file__).parent.parent
if str(project_root) not in sys.path:
    sys.path.insert(0, str(project_root))

from notebooks import pre_post_processing as processing
from .config import (
    DET_MODEL_PATH, REC_MODEL_PATH, INFERENCE_DEVICE,
    DET_INPUT_SIZE, DET_THRESHOLD, REC_IMAGE_SHAPE, REC_BATCH_SIZE,
    REC_DROP_SCORE, FONT_PATH, CHARACTER_DICT_PATH
)

# Configure logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)


class OCRProcessor:
    """Main OCR processor class for text detection and recognition.

    Wraps an OpenVINO text-detection model and a CTC text-recognition model
    (PaddleOCR-style) plus the pre-/post-processing needed to go from a raw
    BGR frame to detected box points, recognized strings, and confidence
    scores.
    """

    def __init__(self):
        """Load and compile the detection and recognition models."""
        self.core = ov.Core()
        self.device = INFERENCE_DEVICE

        # Text detection: compiled on the configured inference device.
        self.det_model = self.core.read_model(model=str(DET_MODEL_PATH))
        self.det_compiled_model = self.core.compile_model(
            model=self.det_model, device_name=self.device
        )
        self.det_input_layer = self.det_compiled_model.input(0)
        self.det_output_layer = self.det_compiled_model.output(0)

        # Text recognition: make the width dimension dynamic so batches of
        # variable-width text crops can be fed in.
        # NOTE(review): assumes a 4-D NCHW input (index 3 == width) —
        # confirm against the exported recognition model.
        self.rec_model = self.core.read_model(model=str(REC_MODEL_PATH))
        for input_layer in self.rec_model.inputs:
            input_shape = input_layer.partial_shape
            input_shape[3] = -1
            self.rec_model.reshape({input_layer: input_shape})

        # NOTE(review): recognition is compiled on "AUTO" rather than
        # self.device like detection — presumably so OpenVINO picks a device
        # that handles the dynamic shape; confirm this is intentional.
        self.rec_compiled_model = self.core.compile_model(
            model=self.rec_model, device_name="AUTO"
        )
        self.rec_input_layer = self.rec_compiled_model.input(0)
        self.rec_output_layer = self.rec_compiled_model.output(0)

        # CTC decoder that turns raw recognition output into (text, score).
        postprocess_params = {
            "name": "CTCLabelDecode",
            "character_type": "ch",
            "character_dict_path": str(CHARACTER_DICT_PATH),
            "use_space_char": True,
        }
        self.postprocess_op = processing.build_post_process(postprocess_params)

    def preprocess_detection(self, image, size=DET_INPUT_SIZE):
        """Resize and normalize an image for the detection model.

        Args:
            image: HxWx3 image array.
            size: square side length expected by the detector.

        Returns:
            float32 array of shape (1, 3, size, size), normalized with
            ImageNet per-channel mean/std.
        """
        img = cv2.resize(image, (size, size))
        img = np.transpose(img, [2, 0, 1]) / 255
        img = np.expand_dims(img, 0)

        # Standard ImageNet per-channel normalization.
        img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
        img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
        img -= img_mean
        img /= img_std
        return img.astype(np.float32)

    def postprocess_detection(self, frame, det_results):
        """Convert raw detection output into filtered text-box points.

        Args:
            frame: the original (unpadded) frame the detector ran on.
            det_results: raw detector output; batch of probability maps.

        Returns:
            Array of quadrilateral box points, filtered/clipped to the
            original image bounds.
        """
        ori_im = frame.copy()

        # Recompute the resize ratios the detector preprocessing implies,
        # so box coordinates can be mapped back to the original frame.
        data = {"image": frame}
        data_resize = processing.DetResizeForTest(data)
        data_list = []
        keep_keys = ["image", "shape"]
        for key in keep_keys:
            data_list.append(data_resize[key])
        img, shape_list = data_list

        shape_list = np.expand_dims(shape_list, axis=0)
        pred = det_results[0]
        if isinstance(pred, paddle.Tensor):
            pred = pred.numpy()
        # Binarize the probability map into a text/no-text mask.
        segmentation = pred > DET_THRESHOLD

        boxes_batch = []
        for batch_index in range(pred.shape[0]):
            src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
            mask = segmentation[batch_index]
            boxes, scores = processing.boxes_from_bitmap(pred[batch_index], mask, src_w, src_h)
            boxes_batch.append({"points": boxes})

        post_result = boxes_batch
        dt_boxes = post_result[0]["points"]
        # Drop degenerate boxes and clip the rest to the image bounds.
        dt_boxes = processing.filter_tag_det_res(dt_boxes, ori_im.shape)
        return dt_boxes

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize a text crop to the recognition input height and pad width.

        Args:
            img: HxWxC crop; C must match the channels in REC_IMAGE_SHAPE.
            max_wh_ratio: maximum width/height ratio across the current
                batch; fixes the padded width so crops can be stacked.

        Returns:
            float32 array of shape (imgC, imgH, imgW), normalized to
            [-1, 1] and right-padded with zeros.
        """
        imgC, imgH, imgW = REC_IMAGE_SHAPE
        assert imgC == img.shape[2]

        character_type = "ch"
        if character_type == "ch":
            # Target width scales with the batch's widest aspect ratio.
            # Use the model input height rather than a hard-coded 32 so this
            # stays correct for recognition models of any input height
            # (identical result for the standard 32-pixel-high models).
            imgW = int(imgH * max_wh_ratio)

        # Resize preserving aspect ratio, capped at the padded width.
        h, w = img.shape[:2]
        ratio = w / float(h)
        if math.ceil(imgH * ratio) > imgW:
            resized_w = imgW
        else:
            resized_w = int(math.ceil(imgH * ratio))

        resized_image = cv2.resize(img, (resized_w, imgH))
        resized_image = resized_image.astype("float32")
        # Scale to [0, 1] then shift/scale to [-1, 1].
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5

        # Right-pad with zeros so every crop in the batch has width imgW.
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
        padding_im[:, :, 0:resized_w] = resized_image
        return padding_im

    def prep_for_rec(self, dt_boxes, frame):
        """Crop each detected box and sort crops by aspect ratio.

        Sorting by width/height ratio keeps similarly-shaped crops in the
        same recognition batch, minimizing padding waste.

        Args:
            dt_boxes: quadrilateral box points from detection.
            frame: the frame the boxes refer to.

        Returns:
            (img_crop_list, img_num, indices): rotated/cropped text images,
            their count, and the aspect-ratio sort order.
        """
        ori_im = frame.copy()
        img_crop_list = []
        for bno in range(len(dt_boxes)):
            # Deep-copy the box: get_rotate_crop_image may reorder points.
            tmp_box = copy.deepcopy(dt_boxes[bno])
            img_crop = processing.get_rotate_crop_image(ori_im, tmp_box)
            img_crop_list.append(img_crop)

        img_num = len(img_crop_list)
        # width/height ratio per crop, used only for sort order.
        width_list = [img.shape[1] / float(img.shape[0]) for img in img_crop_list]
        indices = np.argsort(np.array(width_list))
        return img_crop_list, img_num, indices

    def batch_text_box(self, img_crop_list, img_num, indices, beg_img_no, batch_num):
        """Normalize one batch of crops into a single stacked array.

        Args:
            img_crop_list: all text crops for the frame.
            img_num: total number of crops.
            indices: aspect-ratio sort order from prep_for_rec.
            beg_img_no: index (into the sorted order) of the batch start.
            batch_num: maximum batch size.

        Returns:
            float32 array of shape (batch, C, H, W) ready for inference.
        """
        norm_img_batch = []
        max_wh_ratio = 0
        end_img_no = min(img_num, beg_img_no + batch_num)

        # First pass: the widest aspect ratio fixes the batch's padded width.
        for ino in range(beg_img_no, end_img_no):
            h, w = img_crop_list[indices[ino]].shape[0:2]
            wh_ratio = w * 1.0 / h
            max_wh_ratio = max(max_wh_ratio, wh_ratio)

        # Second pass: resize/normalize/pad every crop to that width.
        for ino in range(beg_img_no, end_img_no):
            norm_img = self.resize_norm_img(img_crop_list[indices[ino]], max_wh_ratio)
            norm_img = norm_img[np.newaxis, :]
            norm_img_batch.append(norm_img)

        norm_img_batch = np.concatenate(norm_img_batch)
        return norm_img_batch.copy()

    def process_frame(self, frame):
        """Run the full OCR pipeline (detect + recognize) on one frame.

        Args:
            frame: BGR image array.

        Returns:
            (dt_boxes, txts, scores): detected box points, recognized
            strings, and confidence scores — all empty when no text is
            found.

        Raises:
            Re-raises any exception from detection/recognition after
            logging it.
        """
        try:
            # Downscale large frames so the longest side is at most 1280.
            scale = 1280 / max(frame.shape)
            if scale < 1:
                frame = cv2.resize(src=frame, dsize=None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)

            logger.debug(f"Frame shape: {frame.shape}")

            # Text detection
            test_image = self.preprocess_detection(frame, DET_INPUT_SIZE)
            det_results = self.det_compiled_model([test_image])[self.det_output_layer]
            dt_boxes = self.postprocess_detection(frame, det_results)

            logger.debug(f"Detected {len(dt_boxes)} text boxes")

            # Handle case when no text is detected
            if len(dt_boxes) == 0:
                logger.info("No text detected in frame")
                return np.array([]), [], []

            # Order boxes for reading and crop each text region.
            dt_boxes = processing.sorted_boxes(dt_boxes)
            img_crop_list, img_num, indices = self.prep_for_rec(dt_boxes, frame)

            logger.debug(f"Prepared {img_num} text regions for recognition")

            # Pre-fill results with independent placeholder lists. (A
            # `[["", 0.0]] * n` literal would alias one shared inner list.)
            rec_res = [["", 0.0] for _ in range(img_num)]

            # Recognize crops in aspect-ratio-sorted batches.
            for beg_img_no in range(0, img_num, REC_BATCH_SIZE):
                try:
                    norm_img_batch = self.batch_text_box(img_crop_list, img_num, indices, beg_img_no, REC_BATCH_SIZE)
                    rec_results = self.rec_compiled_model([norm_img_batch])[self.rec_output_layer]
                    rec_result = self.postprocess_op(rec_results)

                    # Scatter batch results back into original box order.
                    for rno in range(len(rec_result)):
                        rec_res[indices[beg_img_no + rno]] = rec_result[rno]
                except Exception as e:
                    logger.error(f"Error in text recognition batch: {str(e)}", exc_info=True)
                    raise

            # Extract texts/scores once after all batches are decoded
            # (previously recomputed over all of rec_res on every batch).
            txts = [res[0] for res in rec_res]
            scores = [res[1] for res in rec_res]

            logger.debug(f"Recognized {len(txts)} texts")
            return dt_boxes, txts, scores

        except Exception as e:
            logger.error(f"Error processing frame: {str(e)}", exc_info=True)
            raise

    def draw_results(self, frame, dt_boxes, txts, scores):
        """Render boxes and recognized text onto a copy of the frame.

        Args:
            frame: BGR image the results belong to.
            dt_boxes: detected box points.
            txts: recognized strings.
            scores: per-text confidence scores; texts below REC_DROP_SCORE
                are dropped by the drawing helper.

        Returns:
            The annotated image produced by draw_ocr_box_txt.
        """
        image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        draw_img = processing.draw_ocr_box_txt(
            image, dt_boxes, txts, scores,
            drop_score=REC_DROP_SCORE,
            font_path=str(FONT_PATH)
        )
        return draw_img

