#######################################################################################
#
# MIT License
#
# Copyright (c) [2025] [leonelhs@gmail.com]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#######################################################################################
#
#
# Source code is based on or inspired by several projects.
# For more details and proper attribution, please refer to the following resources:
#
# - [stackoverflow] [https://stackoverflow.com/questions/22656698/perspective-correction-in-opencv-using-python]
# - [rembg] [https://huggingface.co/spaces/leonelhs/rembg]
# - [rembg] [https://github.com/danielgatis/rembg]
# - [Chatgpt] [https://chatgpt.com/]
#
# The image is first processed by an AI background-removal service.
# This step produces a cleaner, well-bounded version of the image,
# because OpenCV’s edge detection is not always reliable on raw inputs.
# With the improved intermediate image, OpenCV can detect borders more consistently
# and the perspective unwrap produces better results.


import cv2
import numpy as np
import gradio as gr
from gradio_client import Client, handle_file

client = Client("leonelhs/rembg")
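# The rembg Space's /predict endpoint returns file paths for the background-removed
# cutout and its segmentation mask; predict() below loads both and hands them to unwrap().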

def unwrap(image, mask):
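    """
    Find the largest 4-point contour in `mask` and warp `image` so that
    quadrilateral becomes an axis-aligned rectangle.

    Returns (warped image, mask, corner coordinates) on success, or the
    original image, the mask, and the raw contour points when no 4-point
    contour is found.
    """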
    img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
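    # Keep only external contours of the binarized mask and sort them by area,
    # largest first, so contours[0] is the most likely object outline.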
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    if len(contours) > 0:
        cnt = contours[0]
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
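        # approxPolyDP simplifies the contour; a 4-vertex result (within 2% of the
        # perimeter) is treated as the rectangle to be unwrapped.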

        if len(approx) == 4:
            corners = approx.reshape(4, 2).astype(np.float32)

            # Order points: top-left, top-right, bottom-right, bottom-left
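            # (x + y) is smallest at the top-left and largest at the bottom-right;
            # (y - x) is smallest at the top-right and largest at the bottom-left.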
            rect = np.zeros((4, 2), dtype="float32")
            s = corners.sum(axis=1)
            rect[0] = corners[np.argmin(s)]
            rect[2] = corners[np.argmax(s)]

            diff = np.diff(corners, axis=1)
            rect[1] = corners[np.argmin(diff)]
            rect[3] = corners[np.argmax(diff)]

            (tl, tr, br, bl) = rect

            # Compute width & height
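            # Use the longer of each pair of opposite sides so the warped output
            # does not crop any of the detected region.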
            widthA = np.linalg.norm(br - bl)
            widthB = np.linalg.norm(tr - tl)
            maxWidth = int(max(widthA, widthB))

            heightA = np.linalg.norm(tr - br)
            heightB = np.linalg.norm(tl - bl)
            maxHeight = int(max(heightA, heightB))

            dst = np.array([
                [0, 0],
                [maxWidth - 1, 0],
                [maxWidth - 1, maxHeight - 1],
                [0, maxHeight - 1]
            ], dtype="float32")

            # Perspective transform
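            # getPerspectiveTransform solves the 3x3 homography that maps the ordered
            # source corners onto the destination rectangle; warpPerspective applies it.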
            M = cv2.getPerspectiveTransform(rect, dst)
            warped = cv2.warpPerspective(img, M, (maxWidth, maxHeight))

            # .tolist() keeps the corner coordinates JSON-serializable for the gr.JSON output.
            return cv2.cvtColor(warped, cv2.COLOR_BGR2RGB), mask, corners.tolist()

    # Fallback: no 4-point contour was found; return the original image, the mask,
    # and the contour points as plain lists so the gr.JSON output can serialize them.
    return image, mask, [c.reshape(-1, 2).tolist() for c in contours]

def predict(img):
    """
        Unwrap an image using AI-assisted preprocessing and OpenCV.

        The algorithm first leverages an AI service to generate a cleaner,
        well-bounded intermediate image. This helps OpenCV detect borders
        more reliably before performing the perspective unwrap.

        Parameters:
            img (string): File path to the input image to be unwrapped.

        Returns:
            path (string): File path to the generated, unwrapped image.
    """

    # Step 1: Use an AI service to preprocess the image.
    #   - OpenCV can detect edges, but results are inconsistent depending on noise/lighting.
    #   - The AI model generates a cleaner, well-bounded intermediate image.
    crop, mask = client.predict(image=handle_file(img), session="U2NET", smoot=True, api_name="/predict")

    # Step 2: Apply OpenCV on this intermediate image for more accurate border detection
    #         before performing the perspective unwrap.
    # cv2.imread returns BGR; convert the crop to RGB so unwrap() and the Gradio
    # image outputs (which expect RGB arrays) show correct colors. The mask is only
    # thresholded, so its channel order does not matter.
    crop = cv2.cvtColor(cv2.imread(crop), cv2.COLOR_BGR2RGB)
    mask = cv2.imread(mask)
    return unwrap(crop, mask)
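
# Hypothetical local test (assumes a "sample.jpg" next to this script and a
# reachable leonelhs/rembg Space); uncomment to run without the Gradio UI:
#
#   warped, mask, corners = predict("sample.jpg")
#   cv2.imwrite("unwrapped.png", cv2.cvtColor(warped, cv2.COLOR_RGB2BGR))
#   print("Corners:", corners)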


with gr.Blocks() as app:
    gr.Markdown("## 🖼️ Rectangle Detection & Perspective Unwrap")
    with gr.Row():
        with gr.Column(scale=1):
            inp = gr.Image(type="filepath", label="Upload Image")
            btn_unwrap = gr.Button("📐 Perspective Unwrap")
        with gr.Column(scale=2):
            with gr.Row():
                with gr.Column(scale=1):
                    out_unwrap = gr.Image(type="numpy", label="Unwrapped Rectangle")
                    with gr.Accordion("See intermediates", open=False):
                        out_mask = gr.Image(type="numpy", label="Segmentation Mask")
                        out_corners = gr.JSON(label="Corners (x,y)")

    btn_unwrap.click(predict, inputs=inp, outputs=[out_unwrap, out_mask, out_corners])

# queue() must be configured before launch(); launch(debug=True) blocks,
# so anything placed after it would never run.
app.queue()
app.launch(share=False, debug=True, show_error=True, mcp_server=True)