import base64
import glob
import json
import urllib
import urllib.parse
from io import BytesIO

import cv2
import numpy as np
import pandas as pd
import requests
from PIL import Image

def imread_with_pillow(file_path):
    """
    Read an image (path may contain Chinese/non-ASCII characters) with Pillow
    and convert it to an OpenCV BGR image.

    Parameters:
    file_path (str): path to the image file

    Returns:
    image: OpenCV image object (BGR, 3-channel)
    """
    pil_image = Image.open(file_path)
    # BUG FIX: the original applied COLOR_RGB2BGR unconditionally, which
    # crashes on grayscale ("L") images and mangles palette ("P") / RGBA
    # images. Normalize to RGB first so the conversion is always valid.
    if pil_image.mode != "RGB":
        pil_image = pil_image.convert("RGB")
    return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)

def imread_unicode_path(file_path):
    """
    Read an image whose path may contain non-ASCII (e.g. Chinese) characters.

    Parameters:
    file_path (str): path to the image file

    Returns:
    image: OpenCV image object (BGR), or None if decoding fails
    """
    # cv2.imread cannot handle non-ASCII paths on Windows; read the raw
    # bytes ourselves and let cv2.imdecode do the decoding.
    raw_bytes = np.fromfile(file_path, dtype=np.uint8)
    return cv2.imdecode(raw_bytes, cv2.IMREAD_COLOR)

def save_image_with_unicode_path(image, path):
    """
    Save a cv2 image as PNG to a path that may contain non-ASCII characters.

    Parameters:
    - image: numpy.ndarray, the image to save.
    - path: str, destination file path.

    Raises:
    - ValueError: if the image cannot be encoded as PNG.
    """
    # Encode in memory first: cv2.imwrite cannot handle non-ASCII paths.
    ok, encoded_image = cv2.imencode('.png', image)
    # BUG FIX: the original ignored the success flag (could silently write a
    # bad file) and called f.close() redundantly inside the `with` block.
    if not ok:
        raise ValueError("Failed to encode image as PNG.")
    with open(path, 'wb') as f:
        f.write(encoded_image)

def resize_image_to_width(image, width):
    """
    Scale an image to the given width while keeping its aspect ratio.

    Parameters:
    - image: numpy.ndarray, the image to resize.
    - width: int, the desired width in pixels.

    Returns:
    - resized_image: numpy.ndarray, the resized image.
    """
    height, orig_width = image.shape[:2]
    # Derive the new height from the original aspect ratio.
    ratio = height / orig_width
    target_height = int(width * ratio)
    return cv2.resize(image, (width, target_height))

def crop_roi_from_image(image, box):
    """
    Crop a ROI (region of interest) from an image, clamping the box to the
    image bounds.

    Parameters:
    image (numpy.ndarray): input image
    box (tuple): rectangle (x, y, width, height); may extend past the image

    Returns:
    roi: the cropped ROI (a view into `image`); an empty array when the box
         lies entirely outside the image
    """
    x, y, width, height = box
    img_height, img_width = image.shape[:2]

    # BUG FIX: the original clamped a negative x/y to 0 but did not shrink
    # width/height accordingly, so the ROI included pixels the box never
    # covered. Clamp both corners of the rectangle instead.
    x0 = max(x, 0)
    y0 = max(y, 0)
    x1 = min(x + width, img_width)
    y1 = min(y + height, img_height)

    # Fully out-of-bounds / degenerate box -> empty ROI with matching dtype.
    if x1 <= x0 or y1 <= y0:
        return image[0:0, 0:0]

    return image[y0:y1, x0:x1]

def _to_gray(img):
    # Collapse a 1-, 3-, or 4-channel image to a 2-D grayscale array.
    channels = img.shape[2]
    if channels == 1:
        return img[:, :, 0]
    if channels == 4:
        return cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)


def concatenate_images_vertically(img1, img2, output_path=None):
    """
    Concatenate two images vertically (img1 on top) and optionally save the
    result. The narrower image is resized to the wider one's width first.

    Parameters:
    - img1: first cv2 image (numpy.ndarray) or None.
    - img2: second cv2 image (numpy.ndarray) or None.
    - output_path: str or None, path to save the concatenated image.

    Returns:
    - concatenated_image: numpy.ndarray, or None if either input is None.

    Raises:
    - ValueError: if an input is not a valid 2-D/3-D image or the two images
      have a different number of dimensions.
    """
    if img1 is None or img2 is None:
        return None

    if img1.ndim < 2 or img2.ndim < 2:
        raise ValueError("One of the images is not valid. Check the file paths.")

    if img1.ndim != img2.ndim:
        raise ValueError("Images must have the same number of channels.")

    # BUG FIX: the original channel harmonization could leave one image 3-D
    # and the other 2-D (crashing np.vstack), crashed on 4-channel inputs
    # (COLOR_RGB2GRAY expects 3 channels), and used the RGB code on BGR
    # data. When channel counts differ, reduce BOTH images to 2-D grayscale
    # with the correct conversion code per channel count.
    if img1.ndim == 3 and img1.shape[2] != img2.shape[2]:
        img1 = _to_gray(img1)
        img2 = _to_gray(img2)

    # Bring both images to the same (maximum) width before stacking.
    width1 = img1.shape[1]
    width2 = img2.shape[1]
    max_width = max(width1, width2)
    if width1 < max_width:
        img1 = resize_image_to_width(img1, max_width)
    if width2 < max_width:
        img2 = resize_image_to_width(img2, max_width)

    concatenated_image = np.vstack((img1, img2))

    if output_path is not None:
        cv2.imwrite(output_path, concatenated_image)

    return concatenated_image


# Build a single image by cropping boxes out of the source and stacking them.
def concatenate_image_by_roi_coordinates(src, boxs):
    """Crop each box from `src` and stack the resulting ROIs vertically."""
    if src is None or len(boxs) < 1:
        return src

    rois = [crop_roi_from_image(src, box) for box in boxs]

    if len(rois) == 1:
        return rois[0]

    print('Concatenating first & second image...')
    result = concatenate_images_vertically(rois[0], rois[1])
    print('Concatenating first & second image... Done.')
    for extra_roi in rois[2:]:
        result = concatenate_images_vertically(result, extra_roi)

    return result




def concatenate_iamge_files_vertically(image_files, output_path):
    """
    Concatenate a list of image files vertically and optionally save the
    result. (Name kept as-is, typo included, for caller compatibility.)

    Parameters:
    - image_files: list, paths to the images to concatenate.
    - output_path: str or None, path to save the concatenated image.

    Returns:
    - concatenated_image: numpy.ndarray, or None when fewer than two paths
      are given or any image fails to load.
    """
    if len(image_files) < 2:
        return None

    img1 = cv2.imread(image_files[0])
    img2 = cv2.imread(image_files[1])

    concatenated_image = concatenate_images_vertically(img1, img2)
    for img_path in image_files[2:]:
        # An earlier file failed to load; stop instead of cascading None.
        if concatenated_image is None:
            return None
        img = cv2.imread(img_path)
        concatenated_image = concatenate_images_vertically(concatenated_image, img)

    # BUG FIX: the original called cv2.imwrite even when the result was
    # None (e.g. an unreadable file), which raises inside OpenCV.
    if output_path is not None and concatenated_image is not None:
        cv2.imwrite(output_path, concatenated_image)

    return concatenated_image


def json_to_excel(json_file_path, excel_file_path):
    """
    Read a JSON file and save its content as an Excel file.

    Parameters:
    - json_file_path: str, path to the JSON file.
    - excel_file_path: str, path of the Excel file to write.

    Returns:
    - None
    """
    # DOC FIX: the original docstring's Returns section was copy-pasted from
    # the image-concatenation helpers; this function returns nothing.
    with open(json_file_path, 'r', encoding='utf-8') as json_file:
        data = json.load(json_file)

    # A JSON list becomes one row per element; a single dict becomes one row.
    if isinstance(data, list):
        df = pd.DataFrame(data)
    else:
        df = pd.DataFrame([data])

    df.to_excel(excel_file_path, index=False)


def get_image_from_url(url):
    """
    Download an image from a URL and return it as a Pillow Image.

    Parameters:
    - url: str, the URL of the image.

    Returns:
    - img: PIL.Image.Image on success; None on any request or decode error.
    """
    try:
        response = requests.get(url)
        # Raise for non-2xx status codes so they are handled below.
        response.raise_for_status()
        return Image.open(BytesIO(response.content))
    except requests.RequestException as err:
        print(f"Request error: {err}")
    except IOError as err:
        print(f"Image processing error: {err}")
    return None

def get_file_content_as_base64(path, urlencoded=False):
    """
    Return the base64 encoding of a file's content.

    Parameters:
    - path: file path
    - urlencoded: whether to URL-encode (quote_plus) the result

    Returns:
    - base64-encoded string
    """
    # NOTE: requires `import urllib.parse` at module level — plain
    # `import urllib` only worked by accident because `requests` imports
    # urllib.parse as a side effect.
    with open(path, "rb") as f:
        content = base64.b64encode(f.read()).decode("utf8")

    if urlencoded:
        content = urllib.parse.quote_plus(content)

    return content


def img2base64(image, img_format='PNG'):
    """
    Encode a cv2 (BGR) image as a base64 string.

    Parameters:
    - image: numpy.ndarray, BGR image.
    - img_format: str, target image format for encoding (default 'PNG').

    Returns:
    - base64 string on success; None on failure.
    """
    try:
        # cv2 stores BGR; Pillow expects RGB.
        pil_image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        img_buffer = BytesIO()
        pil_image.save(img_buffer, format=img_format)
        byte_data = img_buffer.getvalue()
        return base64.b64encode(byte_data).decode("utf-8")
    except Exception as e:
        # FIX: the original swallowed the exception detail, making failures
        # impossible to diagnose; include it in the message.
        print(f'img2base64 failed: {e}')
        return None

def pillow_to_cv2(pillow_image):
    """
    Convert a Pillow image to an OpenCV (numpy, BGR) image.

    Supports "P" (palette, via RGB), "RGB", "RGBA" and "L" modes.

    Parameters:
    - pillow_image: PIL.Image.Image to convert.

    Returns:
    - cv2_image: numpy.ndarray in BGR order (or 2-D for grayscale input).

    Raises:
    - ValueError: for unsupported image modes.
    """
    # Palette images must be expanded to RGB before conversion.
    if pillow_image.mode == "P":
        pillow_image = pillow_image.convert("RGB")

    numpy_image = np.array(pillow_image)

    if pillow_image.mode == "RGB":
        # Pillow uses RGB order; OpenCV uses BGR.
        cv2_image = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
    elif pillow_image.mode == "RGBA":
        # GENERALIZATION: the original raised on RGBA; drop alpha -> BGR.
        cv2_image = cv2.cvtColor(numpy_image, cv2.COLOR_RGBA2BGR)
    elif pillow_image.mode == "L":
        # Grayscale needs no color-space conversion.
        cv2_image = numpy_image
    else:
        raise ValueError("不支持的图像模式: {}".format(pillow_image.mode))

    return cv2_image

def save_image_with_chinese_path(image, output_path):
    """
    Save a cv2 image as JPEG to a path that may contain non-ASCII characters.

    Parameters:
    - image: numpy.ndarray, the image to save.
    - output_path: str, destination file path.

    Raises:
    - ValueError: if the image cannot be encoded as JPEG.
    """
    # Encode in memory first: cv2.imwrite cannot handle non-ASCII paths.
    ok, img_encoded = cv2.imencode('.jpg', image)
    # BUG FIX: the original ignored the success flag and could silently
    # write a corrupt/empty file.
    if not ok:
        raise ValueError("Failed to encode image as JPEG.")
    with open(output_path, 'wb') as f:
        f.write(img_encoded)


def preprocess_image(image_path):
    """Load the file at `image_path` as grayscale and binarize it with a
    fixed threshold of 127 (values above become 255)."""
    gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    _, binarized = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    return binarized


def morphological_processing(binary_image):
    """Dilate a binary image twice with a 5x5 kernel, then apply a closing
    (dilate followed by erode) to seal small gaps."""
    struct_elem = np.ones((5, 5), np.uint8)
    dilated = cv2.dilate(binary_image, struct_elem, iterations=2)
    return cv2.morphologyEx(dilated, cv2.MORPH_CLOSE, struct_elem)


def connected_component_analysis(processed_image):
    """
    Label connected components in a binary image and render a visualization.

    Parameters:
    - processed_image: single-channel binary image.

    Returns:
    - output_image: BGR visualization with each component filled in a random
      color, its bounding box drawn in blue and its centroid in red.
    - num_labels: total number of labels (including background label 0).
    - stats: per-label statistics array (x, y, w, h, area).
    - centroids: per-label centroid coordinates.
    """
    num_labels, labels_im, stats, centroids = cv2.connectedComponentsWithStats(processed_image)

    output_image = cv2.cvtColor(processed_image, cv2.COLOR_GRAY2BGR)

    # One random color per component (label 0 is the background).
    # BUG FIX: randint's upper bound is exclusive, so the original (0, 255)
    # could never produce 255; use 256 to cover the full 8-bit range.
    colors = [
        (np.random.randint(0, 256), np.random.randint(0, 256), np.random.randint(0, 256))
        for _ in range(1, num_labels)
    ]

    for i in range(1, num_labels):  # start at 1: label 0 is the background
        output_image[labels_im == i] = colors[i - 1]

        # Draw the component's bounding box (blue) and centroid (red).
        x, y, w, h, area = stats[i]
        cx, cy = centroids[i]
        cv2.rectangle(output_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.circle(output_image, (int(cx), int(cy)), 5, (0, 0, 255), -1)

    return output_image, num_labels, stats, centroids


# 去除孤立点并合并靠得近的点
def process_features(features, distance_threshold=50):
    ## 将特征点转换为中心点
    #centers = [(x, y) for x, y, w, h in features]
    centers = features
    processed = []
    
    while centers:
        cluster = []
        x1, y1, w1, h1 = centers.pop(0)
        cluster.append((x1, y1, w1, h1))
        
        # 查找靠近的点
        close_points = []
        for (x2, y2, w2, h2) in centers:
            distance = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
            if distance < distance_threshold:
                close_points.append((x2, y2, w2, h2))
        
        # 将找到的点移除并加入cluster
        for point in close_points:
            centers.remove(point)
            cluster.append(point)
        
        # 合并cluster中的点
        if len(cluster) > 1:  # 只有当点数量大于1时，才合并
            x_left = [x for x, y, w, h in cluster]
            xs = x_left + [x + w for x, y, w, h in cluster]
            y_top = [y for x, y, w, h in cluster]
            ys = y_top + [y + h for x, y, w, h in cluster]
            x_min, x_max = min(xs), max(xs)
            y_min, y_max = min(ys), max(ys)
            w = x_max - x_min
            h = y_max - y_min
            processed.append((x_min, y_min, w, h))
    
    return processed


def detect_handwritten_text(image, save_path=None):
    """
    Detect handwriting-like regions in an image, draw green boxes around
    them (in place), optionally save, and display the annotated image.

    Parameters:
    - image: BGR or grayscale numpy image; modified in place with rectangles.
    - save_path: optional str, path to save the annotated image.

    Returns:
    - None (also returns None immediately when `image` is None).
    """
    if image is None:
        return None

    # BUG FIX: the original tested `len(image) >= 3`, which is the number of
    # ROWS, not the number of channels — a 2-D grayscale image with >= 3 rows
    # would crash cvtColor. Test the array rank instead.
    if image.ndim == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image

    # Otsu threshold with inversion: ink becomes white on black background.
    _, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    img_h, img_w = binary.shape

    # Dilate to connect nearby strokes before contour extraction.
    kernel = np.ones((5, 5), np.uint8)
    opening = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel)
    # (removed: stray debug cv2.imwrite("./opening.png", ...) that polluted CWD)

    contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    print('contours size : ', len(contours))

    # Keep contours whose geometry looks like handwriting. All thresholds
    # are empirical — tune per dataset.
    text_contours = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        area = cv2.contourArea(contour)

        # Skip very short boxes and boxes hugging the top/bottom margins.
        if h < 10 or y < 50 or (y + 50) > img_h:
            continue

        # Handwriting tends to have a small area...
        if 30 < area < 500:
            aspect_ratio = float(w) / h

            # ...a moderate aspect ratio...
            if 0.5 < aspect_ratio < 3:
                # ...and low compactness (perimeter relative to bbox area).
                # CLEANUP: arcLength was computed twice and an unused
                # approxPolyDP result was built; compute the perimeter once.
                perimeter = cv2.arcLength(contour, True)
                compactness = perimeter / (w * h)

                if compactness < 0.3:
                    text_contours.append((x, y, w, h))
    print('text_contours size : ', len(text_contours))

    # Merge nearby boxes and drop isolated ones.
    processed = process_features(text_contours)

    # Draw the final boxes on the input image (in place).
    for x, y, w, h in processed:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    if save_path is not None:
        save_image_with_unicode_path(image, save_path)

    # Show a scaled-down preview; blocks until a key is pressed.
    cv2.imshow('Handwritten Text Detection', cv2.resize(image, None, fx=0.45, fy=0.45))
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def connected_components(image_path):
    """Binarize the image at `image_path`, clean it morphologically, run
    connected-component analysis and display the colorized result
    (blocks until a key is pressed)."""
    binary = preprocess_image(image_path)
    cleaned = morphological_processing(binary)
    visualization, _num_labels, _stats, _centroids = connected_component_analysis(cleaned)

    cv2.imshow('Connected Components', visualization)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == "__main__":
    # Ad-hoc test drivers; earlier experiments are kept commented out below.

    # # Testing the function
    # img1_path = "D:\\temp\\essay\\1.webp"
    # img2_path = "D:\\temp\\essay\\2.webp"
    # img3_path = "D:\\temp\\essay\\3.webp"
    # img_list = [img1_path, img2_path, img3_path]
    # output_path = "D:\\temp\\essay\\output.png"

    # #concatenated_image = concatenate_images_vertically(img1_path, img2_path, output_path)
    # concatenate_iamge_files_vertically(img_list, output_path)


    # # Crop by a list of ROI coordinates
    # img_path = "D:\\work\\code\\fx-business\\data\\8831211_1132209374_谭复律_46.5_o.png"
    # output_path = "D:\\work\\code\\fx-business\\data\\8831211_1132209374_谭复律_46.5.png"

    # boxs = [(140, 220, 1000, 1990), (1190, 154, 1000, 2055), (2230, 154, 1006, 2055)]
    # roi_imgs = []

    # img = imread_unicode_path(img_path)
    # if img is None:
    #     print("Error: cannot read image.")

    # concatenate_image = concatenate_image_by_roi_coordinates(img, boxs)
    # if concatenate_image is not None:
    #     cv2.imwrite(output_path, concatenate_image)

    # for box in boxs:
    #     # Extract the box's origin and size
    #     x, y, width, height = box
        
    #     # Crop the ROI
    #     roi_img = img[y:y+height, x:x+width]

    #     roi_imgs.append(roi_img)

    # concatenate_images_vertically(roi_imgs[0], roi_imgs[1], output_path)

    # NOTE(review): hard-coded local Windows path — adjust before running.
    # Will raise AttributeError on img.shape if the file does not exist
    # (imread_unicode_path would fail on open, or imdecode returns None).
    img_path = 'D:\\work\\code\\fx-business\\future\\test\\yj_api\\outputs\\8992296_1\\not_blank\\16941_502110180081_秦林_19865544_0.png'
    img = imread_unicode_path(img_path)
    print('img : ', img.shape)
    # Opens a cv2 window and blocks until a key is pressed.
    detect_handwritten_text(img)

    # Batch variant: annotate every PNG under img_flod, saving *_handwritten.png copies.
    # img_flod = 'D:\\work\\code\\fx-business\\future\\test\\yj_api\\outputs\\8831189\\not_blank'
    # png_img_list = glob.glob(img_flod + '/*.png')

    # for i, img_path in enumerate(png_img_list):
    #     print(f"Processing image {i+1} of {len(png_img_list)}")
    #     img = imread_unicode_path(img_path)
    #     print('img : ', img.shape)
    #     # Build the save path from the source path
    #     save_path = img_path.replace('.png', '_handwritten.png')
    #     detect_handwritten_text(img, save_path)