from PIL import Image, ImageEnhance, ImageOps
import pytesseract
import cv2
import numpy as np


def enhance_image(img):
    """Boost contrast, convert to grayscale, and binarize the image.

    :param img: source PIL image
    :return: bilevel (mode '1') PIL image ready for OCR
    """
    # Double the contrast so glyph edges stand out from the background.
    boosted = ImageEnhance.Contrast(img).enhance(2.0)

    # Grayscale conversion is required before thresholding.
    grayscale = boosted.convert("L")

    # Fixed-threshold binarization: pixels brighter than 150 become white.
    cutoff = 150
    return grayscale.point(lambda value: 255 if value > cutoff else 0, '1')


def order_points(pts):
    """Order four corner points as top-left, top-right, bottom-right, bottom-left.

    :param pts: array of shape (4, 2) holding the corners in any order
    :return: float32 ndarray of shape (4, 2) in TL, TR, BR, BL order
    """
    ordered = np.zeros((4, 2), dtype="float32")

    # x + y is minimal at the top-left corner and maximal at the bottom-right.
    coord_sums = pts.sum(axis=1)
    ordered[0] = pts[np.argmin(coord_sums)]
    ordered[2] = pts[np.argmax(coord_sums)]

    # y - x is minimal at the top-right corner and maximal at the bottom-left.
    coord_diffs = np.diff(pts, axis=1)
    ordered[1] = pts[np.argmin(coord_diffs)]
    ordered[3] = pts[np.argmax(coord_diffs)]

    return ordered


def four_point_transform(image, pts):
    """Warp *image* so the quadrilateral *pts* becomes an upright rectangle.

    :param image: source image (BGR ndarray)
    :param pts: four corner points of the region, any order, shape (4, 2)
    :return: perspective-corrected crop as an ndarray
    """
    corners = order_points(pts)
    (top_left, top_right, bottom_right, bottom_left) = corners

    # Output width: the longer of the top and bottom edges.
    bottom_width = np.sqrt(((bottom_right[0] - bottom_left[0]) ** 2) + ((bottom_right[1] - bottom_left[1]) ** 2))
    top_width = np.sqrt(((top_right[0] - top_left[0]) ** 2) + ((top_right[1] - top_left[1]) ** 2))
    out_width = max(int(bottom_width), int(top_width))

    # Output height: the longer of the right and left edges.
    right_height = np.sqrt(((top_right[0] - bottom_right[0]) ** 2) + ((top_right[1] - bottom_right[1]) ** 2))
    left_height = np.sqrt(((top_left[0] - bottom_left[0]) ** 2) + ((top_left[1] - bottom_left[1]) ** 2))
    out_height = max(int(right_height), int(left_height))

    # Destination rectangle in TL, TR, BR, BL order to match order_points.
    target = np.array([
        [0, 0],
        [out_width - 1, 0],
        [out_width - 1, out_height - 1],
        [0, out_height - 1]], dtype="float32")

    transform = cv2.getPerspectiveTransform(corners, target)
    return cv2.warpPerspective(image, transform, (out_width, out_height))


def rotate_image(image_path):
    """Locate the waybill's rectangular outline and perspective-correct it.

    Falls back to the untouched image when no quadrilateral contour is
    found, or when OpenCV cannot read the file at all.

    :param image_path: path to the photo to correct
    :return: corrected (or original) image as a PIL Image in RGB
    """
    img = cv2.imread(image_path)
    if img is None:
        # Bug fix: cv2.imread returns None for unreadable paths, which
        # would make cvtColor below crash. Fall back to PIL, which either
        # succeeds (formats OpenCV lacks) or raises a clearer error.
        return Image.open(image_path)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)

    cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    # Pick the largest contour that approximates a quadrilateral.
    # Explicit None sentinel replaces the fragile `'screenCnt' in locals()`
    # check from the original.
    screen_cnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        if len(approx) == 4:
            screen_cnt = approx
            break

    if screen_cnt is not None:
        warped = four_point_transform(img, screen_cnt.reshape(4, 2))
        return Image.fromarray(cv2.cvtColor(warped, cv2.COLOR_BGR2RGB))
    return Image.open(image_path)


def recognize_text_from_image(image_path, tessdata_dir=None):
    """Extract text from the image at *image_path* using Tesseract OCR.

    :param image_path: path to the source photo
    :param tessdata_dir: optional path to a Tesseract ``tessdata``
        directory (language data files); forwarded to Tesseract
    :return: recognized text as a string
    """
    # Deskew the waybill based on its rectangular outline.
    img = rotate_image(image_path)
    # Bug fix: rotate_image returns a PIL Image, so save it with PIL.
    # The original called cv2.imwrite, which expects a numpy array and
    # raises on a PIL Image.
    img.save("rotate_image.png")

    # Contrast boost + binarization improves OCR accuracy.
    img = enhance_image(img)

    # PSM 6 assumes the input is a single uniform block of text.
    config = '--psm 6 -c preserve_interword_spaces=1'
    if tessdata_dir:
        # Bug fix: actually forward the supplied tessdata directory.
        # The original overwrote tesseract_cmd with a literal
        # placeholder string, breaking every call that passed this arg.
        config += f' --tessdata-dir "{tessdata_dir}"'

    # 'chi_sim' selects simplified Chinese.
    text = pytesseract.image_to_string(img, lang='chi_sim', config=config)
    return text


def clean_and_join_lines(text):
    """Collapse OCR line breaks into readable paragraphs.

    Consecutive non-blank lines are joined with single spaces; blank
    lines act as paragraph separators.

    :param text: raw extracted text
    :return: cleaned text with one line per paragraph
    """
    paragraphs = []
    buffer = []

    for raw_line in text.split('\n'):
        piece = raw_line.strip()
        if piece:
            # Accumulate pieces of the current paragraph.
            buffer.append(piece)
        elif buffer:
            # Blank line: flush the paragraph built so far.
            paragraphs.append(" ".join(buffer))
            buffer = []

    # Flush a trailing paragraph that wasn't followed by a blank line.
    if buffer:
        paragraphs.append(" ".join(buffer))

    return "\n".join(paragraphs)


def extract_address_info(text):
    """Pull the recipient name and address out of OCR text.

    Lines labelled 收件人/收货人/地址 are used first; otherwise the
    function guesses from honorifics (先生/女士/小姐) and address
    keywords (市/区/路/号).

    :param text: cleaned OCR text
    :return: dict with keys '收件人' and '地址' ('未找到' when missing)
    """
    NOT_FOUND = '未找到'
    lines = text.split('\n')
    address_info = {'收件人': NOT_FOUND, '地址': NOT_FOUND}

    for line in lines:
        line = line.strip()
        if "收件人" in line or "收货人" in line:
            address_info['收件人'] = line.replace("收件人:", "").replace("收货人:", "").strip()
        elif "地址" in line:
            address_info['地址'] = line.replace("地址:", "").strip()
        else:
            # Bug fix: the original tested `not address_info[...]`, which
            # is always False for the truthy sentinel '未找到', so these
            # contextual guesses were dead code. Compare to the sentinel.
            if address_info['收件人'] == NOT_FOUND and any(k in line for k in ('先生', '女士', '小姐')):
                address_info['收件人'] = line
            if address_info['地址'] == NOT_FOUND and any(k in line for k in ('市', '区', '路', '号')):
                address_info['地址'] = line

    # Fallback pass: scan every line (including labelled ones) for any
    # field that is still missing.
    if NOT_FOUND in (address_info['收件人'], address_info['地址']):
        for line in lines:
            if address_info['收件人'] == NOT_FOUND and any(k in line for k in ('先生', '女士', '小姐')):
                address_info['收件人'] = line.strip()
            if address_info['地址'] == NOT_FOUND and any(k in line for k in ('市', '区', '路', '号')):
                address_info['地址'] = line.strip()

    return address_info


def main():
    """OCR a sample waybill image and print the raw, cleaned, and parsed results."""
    # Image path — replace with the actual file you want to process.
    image_path = '/home/weiqiangren/Python/project/py-basic-exercises/图片文字识别/企业微信截图_17388262872342.png'

    raw_text = recognize_text_from_image(image_path)
    print("原始识别文字：")
    print(raw_text)

    joined_text = clean_and_join_lines(raw_text)
    print("\n清理并拼接后的文字：")
    print(joined_text)

    parsed = extract_address_info(joined_text)
    print("\n提取的收件人和地址信息：")
    print(parsed)


# Script entry point: run the full OCR pipeline when executed directly.
if __name__ == "__main__":
    main()