import base64
import numpy as np
import cv2
import time
import math
from io import BytesIO
from PIL import Image, ImageFilter, ImageEnhance
from shapely.geometry import Polygon
# Maps the orientation label predicted by the classifier (a string of
# degrees, e.g. '90') to the cv2 rotation constant used to turn the image
# back upright.
# NOTE(review): '180' is commented out, so images detected as upside-down
# are left unrotated — confirm this is intentional.
cv_rotate_code = {
    '90': cv2.ROTATE_90_COUNTERCLOCKWISE,
    # '180': cv2.ROTATE_180,
    '270': cv2.ROTATE_90_CLOCKWISE
}

def read_images_angle(base64_image, model, direction):
    """Decode a base64 image, optionally detect its orientation, and
    rotate it back upright.

    Returns (rotated_image, angle, rotated_height, rotated_width,
    original_height, original_width, elapsed_seconds).  `angle` is the
    predicted label when a known rotation was applied, else 0.
    """
    started = time.time()

    # Decode the base64 payload into a BGR image for OpenCV.
    decoded = base64.b64decode(base64_image)
    buffer = np.frombuffer(decoded, np.uint8)
    image = cv2.imdecode(buffer, cv2.IMREAD_COLOR)

    angle = 0
    if direction == 'yes':
        # The predictor yields classification results; take the top label.
        prediction = model.predict(input_data=image)
        angle = next(prediction)[0]['label_names'][0]

    # Rotate only for angles with a known cv2 rotation code; otherwise
    # report angle 0 and keep the image as-is.
    # NOTE: compare against None, not truthiness — cv2.ROTATE_90_CLOCKWISE is 0.
    rotate_code = cv_rotate_code.get(angle)
    if rotate_code is None:
        angle = 0
        rotated = image
    else:
        rotated = cv2.rotate(image, rotate_code)

    original_height, original_width = image.shape[:2]
    rotated_height, rotated_width = rotated.shape[:2]

    return (rotated, angle, rotated_height, rotated_width,
            original_height, original_width, time.time() - started)

def sharpen_image_base64(input_base64):
    """Adaptively sharpen a base64-encoded image.

    The sharpening amount is chosen from the variance of the image's
    Laplacian (a standard blur estimate: low variance == blurry image ==
    stronger sharpening).

    Returns (sharpened_png_base64, elapsed_seconds).
    """
    start_time = time.time()

    # Decode base64 string to an image.  Force RGB so images with an
    # alpha channel, palette, or grayscale mode don't break the
    # RGB<->BGR conversions below.
    input_bytes = base64.b64decode(input_base64)
    input_image = Image.open(BytesIO(input_bytes)).convert('RGB')
    np_array = np.array(input_image)

    # Convert RGB to BGR for OpenCV
    np_array = cv2.cvtColor(np_array, cv2.COLOR_RGB2BGR)

    # Estimate blurriness via the Laplacian variance.
    img_gray = cv2.cvtColor(np_array, cv2.COLOR_BGR2GRAY)
    var_laplacian = cv2.Laplacian(img_gray, cv2.CV_64F).var()

    # Blurrier images (lower variance) get a stronger sharpening amount.
    if var_laplacian < 50:
        sharpening_strength = 1.5
    elif var_laplacian < 100:
        sharpening_strength = 1.0
    else:
        sharpening_strength = 0.5

    # Sharpening kernel: identity + strength * Laplacian.
    # BUGFIX: the previous kernel (center = strength*9 - 1, neighbours -1)
    # did not sum to 1 — at strength 1.0 it summed to 0, turning the
    # output into an edge map and destroying overall brightness.  This
    # kernel always sums to 1, so brightness is preserved at any strength.
    s = sharpening_strength
    sharpening_kernel = np.array([[-s, -s, -s],
                                  [-s, 8 * s + 1, -s],
                                  [-s, -s, -s]], dtype=np.float32)

    # Apply the sharpening kernel
    sharpened_img = cv2.filter2D(np_array, -1, sharpening_kernel)

    # Convert BGR back to RGB for PIL
    sharpened_img = cv2.cvtColor(sharpened_img, cv2.COLOR_BGR2RGB)
    sharpened_image_pil = Image.fromarray(sharpened_img)

    # Save the sharpened image to a bytes object and encode to base64
    output_buffer = BytesIO()
    sharpened_image_pil.save(output_buffer, format='PNG')  # Save as PNG format
    output_bytes = output_buffer.getvalue()
    output_base64 = base64.b64encode(output_bytes).decode('utf-8')

    cost = time.time() - start_time
    return output_base64, cost

def pre_process_images(images, options):
    """Image pre-processing hook (currently a no-op).

    Sharpening via sharpen_image_base64 used to run here but is disabled;
    the input images are returned unchanged with a cost of 0 seconds.
    `options` is kept for interface compatibility with the other
    *_images pipeline steps.

    Returns (images, total_preprocess_cost_seconds).
    """
    # NOTE(review): the old commented-out sharpening loop was removed (it
    # also contained a syntax error, a bare `elif:`).  To re-enable, run
    # sharpen_image_base64 over `images` when options.get('preprocess',
    # 'yes') == 'yes' and accumulate the returned costs.
    return images, 0

def angle_images(images, options, image_orientation_predictor):
    """Rotate each base64-encoded image upright and collect per-image
    size/angle metadata.

    Returns (rotated_images, per_image_info_dicts, total_cost_seconds).
    """
    # Orientation detection defaults to on.
    run_detection = options.get('direction', 'yes')

    total_cost = 0
    rotated_images = []
    infos = []

    for encoded in images:
        (rotated, angle, rot_h, rot_w,
         org_h, org_w, cost) = read_images_angle(
            encoded, image_orientation_predictor, run_detection)
        total_cost += cost
        rotated_images.append(rotated)
        infos.append({
            'angle': angle,
            'width': rot_w,
            'height': rot_h,
            'org_width': org_w,
            'org_height': org_h,
        })

    return rotated_images, infos, total_cost

def read_images_ocr_words(images, options, paddle_ocr, ocr_detection, ocr_recognition):
    """Dispatch OCR word extraction to the selected backend.

    options['model'] chooses the backend; anything other than 'duguang'
    falls back to the default paddle pipeline.
    """
    if options.get('model', 'paddle') == 'duguang':
        return read_images_ocr_words_duguang(images, options, paddle_ocr, ocr_detection, ocr_recognition)
    return read_images_ocr_words_paddle(images, options, paddle_ocr)

def read_images_ocr_words_duguang(images, options, paddle_ocr_detection, ocr_detection, ocr_recognition):
    """OCR each image by merging paddle and duguang detection boxes, then
    running duguang recognition on every merged region.

    Returns (per_image_word_lists, elapsed_seconds); each word dict has
    'text_region' (four int [x, y] corners), 'text' and 'confidence'.
    """
    started = time.time()
    images_words = []

    for image in images:
        # Run both detectors; any failure falls back to an empty box list.
        paddle_boxes = execute_ignore_exception(read_image_det_paddle, [], image, paddle_ocr_detection)
        duguang_boxes = execute_ignore_exception(read_image_det_duguang, [], image, ocr_detection)
        boxes = merge_overlapping_boxes(paddle_boxes, duguang_boxes)

        words = []
        for box in boxes:
            # Recognise the text inside each detected quadrilateral.
            crop = crop_image(image, box)
            rec = execute_ignore_exception(ocr_recognition, { 'text': [] }, crop)
            text = ' '.join(rec['text'])
            if not text:
                continue
            words.append({
                'text_region': [[int(pt[0]), int(pt[1])] for pt in box],
                'text': text,
                # Recognition confidence is not exposed here; report 1.
                'confidence': 1,
            })

        images_words.append(words)

    return images_words, time.time() - started

def read_image_det_paddle(image, paddle_ocr_detection):
    """Run paddle OCR detection on one image and return all detected
    region boxes (each word entry's first element), skipping empty lines."""
    detection = paddle_ocr_detection.ocr(image, cls=False)
    return [
        word[0]
        for line in detection
        if line is not None
        for word in line
    ]

def read_image_det_duguang(image, ocr_detection):
    """Run duguang detection on one image and reshape each flat 8-value
    polygon [x1, y1, ..., x4, y4] into four [x, y] corner points."""
    detection = ocr_detection(image)
    return [
        [[poly[i], poly[i + 1]] for i in range(0, 8, 2)]
        for poly in detection['polygons']
    ]

def execute_ignore_exception(func, default_value, *args, **kwargs):
    """Best-effort call wrapper: invoke func(*args, **kwargs) and return
    its result; on any exception, log it and return default_value.

    The broad catch is deliberate — callers use this to keep the OCR
    pipeline running when one model call fails.
    """
    result = default_value
    try:
        result = func(*args, **kwargs)
    except Exception as exc:
        print(f"execute exception occurred: {exc}")
    return result

def merge_overlapping_boxes(primary_boxes, second_boxes):
    """Merge two detectors' box lists: keep every primary box, and add a
    secondary box only when it does not significantly overlap (more than
    20% of its own area) any primary box.

    Returns a new list; neither input list is mutated.
    """
    merged_boxes = primary_boxes.copy()
    primary_polygons = [Polygon(box) for box in primary_boxes]

    for box in second_boxes:
        candidate = Polygon(box)
        # BUGFIX: a degenerate (zero-area) detection used to raise
        # ZeroDivisionError in the overlap ratio below; such boxes carry
        # no usable region, so skip them instead of crashing.
        if candidate.area == 0:
            continue
        has_overlap = any(
            candidate.intersection(existing).area / candidate.area > 0.2
            for existing in primary_polygons
        )
        if not has_overlap:
            merged_boxes.append(box)

    return merged_boxes
    
    
def read_images_ocr_words_paddle(images, options, model):
    """Run paddle OCR on each image.

    Returns (per_image_word_lists, elapsed_seconds); each word is a dict
    with 'text_region', 'text' and 'confidence' taken from the paddle
    result tuples.
    """
    started = time.time()
    images_words = []

    for image in images:
        lines = model.ocr(image, cls=False)
        # Flatten the per-line word tuples, skipping empty (None) lines.
        words = [
            {
                'text_region': word[0],
                'text': word[1][0],
                'confidence': word[1][1],
            }
            for line in lines
            if line is not None
            for word in line
        ]
        images_words.append(words)

    return images_words, time.time() - started

def read_images_tables(images, options, model):
    """Detect table layout polygons in each image.

    Table scanning defaults to on; when disabled via
    options['scan_tables'] != 'yes' or when no model is supplied, every
    image gets an empty polygon list.

    Returns (per_image_polygon_lists, elapsed_seconds).
    """
    started = time.time()
    # The enable condition is loop-invariant, so evaluate it once.
    scanning_enabled = model is not None and options.get('scan_tables', 'yes') == 'yes'

    images_table_list = []
    for image in images:
        if scanning_enabled:
            detection = model(image)
            images_table_list.append(detection['polygons'].tolist())
        else:
            images_table_list.append([])

    return images_table_list, time.time() - started

def order_point(coor):
    """Order four corner points by polar angle around their centroid.

    If the first point of the angular ordering lies to the right of the
    centroid, the sequence is rotated by one so the starting corner is
    on the left half.  Returns a (4, 2) float32 array.
    """
    pts = np.asarray(coor).reshape(4, 2)
    cx, cy = pts.mean(axis=0)

    # Sort corners by angle from the centroid.
    angles = np.arctan2(pts[:, 1] - cy, pts[:, 0] - cx)
    ordered = pts[np.argsort(angles)]

    # Rotate by one (last corner moves to the front) when the starting
    # corner sits right of the centroid.
    if ordered[0, 0] > cx:
        ordered = np.roll(ordered, 1, axis=0)

    return ordered.astype('float32')

def crop_image(img, position):
    """Perspective-crop a quadrilateral region out of img.

    The four corners in `position` are mapped so corner 0 goes to the
    top-left, corner 1 to the top-right, corner 2 to the bottom-right
    and corner 3 to the bottom-left of the output; width and height are
    estimated from the distances between midpoints of opposite edges.
    """
    def _dist(ax, ay, bx, by):
        return math.sqrt(pow(ax - bx, 2) + pow(ay - by, 2))

    (x1, y1) = position[0][0], position[0][1]
    (x2, y2) = position[1][0], position[1][1]
    (x3, y3) = position[2][0], position[2][1]
    (x4, y4) = position[3][0], position[3][1]

    # Source corners in [TL, TR, BL, BR] order to match the target below.
    src = np.array([[x1, y1], [x2, y2], [x4, y4], [x3, y3]], np.float32)

    out_width = _dist((x1 + x4) / 2, (y1 + y4) / 2, (x2 + x3) / 2, (y2 + y3) / 2)
    out_height = _dist((x1 + x2) / 2, (y1 + y2) / 2, (x4 + x3) / 2, (y4 + y3) / 2)

    dst = np.array([[0, 0],
                    [out_width - 1, 0],
                    [0, out_height - 1],
                    [out_width - 1, out_height - 1]], np.float32)

    transform = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, transform, (int(out_width), int(out_height)))

def select_first_non_empty(str1, str2):
    """Return str1 if it is non-empty (truthy), else str2 if non-empty,
    else str1 unchanged (so two empty inputs yield str1 as-is)."""
    # `a or b or a` picks the first truthy operand and falls back to str1.
    return str1 or str2 or str1