import gradio as gr
from ultralytics import YOLO
import cv2
import os
import pymysql
import boto3
from io import BytesIO
from PIL import Image
from transformers import AutoTokenizer, AutoModel
import torch
import numpy as np

# AWS credentials are read from environment variables so that no secrets are
# hardcoded in the source (set these in the deployment environment)
aws_access_key = os.environ.get("AWS_ACCESS_KEY_ID")
aws_secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
aws_region = os.environ.get("AWS_REGION", "eu-west-3")

# Initialize the S3 client using the environment-provided credentials
s3 = boto3.client(
    's3',
    aws_access_key_id=aws_access_key,
    aws_secret_access_key=aws_secret_key,
    region_name=aws_region
)

S3_BUCKET_NAME = 'savingbuckett5'
S3_FOLDER = 'Video-Processing/'

# Load YOLO model from the local directory (ensure the model is uploaded to your Hugging Face space)
model = YOLO("./YOLO_Model_v5.pt")

RDS_HOST = "database-2.cnqamusmkwon.eu-north-1.rds.amazonaws.com"
RDS_PORT = 3306 
DB_USER = "root"
DB_PASSWORD = "mkmk162345"
DB_NAME = "traffic"

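# Open a new MySQL connection to the RDS instance; DictCursor makes each
# fetched row a dict keyed by column name.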
def get_connection():
    return pymysql.connect(
        host=RDS_HOST,
        port=RDS_PORT,
        user=DB_USER,
        password=DB_PASSWORD,
        database=DB_NAME,
        cursorclass=pymysql.cursors.DictCursor
    )

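# Read the current counters for the given road row, bump road_in or road_out
# by increment_value, and recompute road_current (road_in - road_out).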
def increment_road(id_value, increment_value, is_in=True):
    connection = None  # so the finally block is safe even if connecting fails
    try:
        connection = get_connection()
        with connection.cursor() as cursor:
            select_sql = "SELECT id, road_in, road_out, road_current FROM traffic_counter_road WHERE id = %s"
            cursor.execute(select_sql, (id_value,))
            result = cursor.fetchone()
        
        if result:
            with connection.cursor() as cursor:
                if is_in:
                    new_road_in = result['road_in'] + increment_value
                    new_road_current = new_road_in - result['road_out']
                    update_sql = """
                    UPDATE traffic_counter_road
                    SET road_in = %s, road_current = %s
                    WHERE id = %s
                    """
                    cursor.execute(update_sql, (new_road_in, new_road_current, id_value))
                else:
                    new_road_out = result['road_out'] + increment_value
                    new_road_current = result['road_in'] - new_road_out
                    update_sql = """
                    UPDATE traffic_counter_road
                    SET road_out = %s, road_current = %s
                    WHERE id = %s
                    """
                    cursor.execute(update_sql, (new_road_out, new_road_current, id_value))
            
            connection.commit()
    except pymysql.MySQLError as e:
        print(f"Error: {e}")
    finally:
        if connection:
            connection.close()

def upload_frame_to_s3(frame, frame_number):
    # Convert the OpenCV frame (BGR) to a PIL image (RGB)
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    pil_image = Image.fromarray(image)

    # Save the PIL image to an in-memory file
    buffer = BytesIO()
    pil_image.save(buffer, format="JPEG")
    buffer.seek(0)

    # Define the S3 object key (file name)
    s3_key = f"{S3_FOLDER}frame_{frame_number}.jpg"

    # Upload the image to S3
    s3.upload_fileobj(buffer, S3_BUCKET_NAME, s3_key)

    print(f"Uploaded frame {frame_number} to S3 at {S3_BUCKET_NAME}/{s3_key}")

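# Run YOLO tracking over every frame of the uploaded video, count each tracked
# ID the first time it appears inside the fixed region of interest, update the
# road counters in RDS, and upload the cropped detection to S3.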
def process_video(video_path, count_type):
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError("Error opening video file.")
    
    box = (1650, 900, 2816, 1500)  # Define the area for license plates
    counter = 0
    License_plate = set()
    class_names = ['License Plate', 'Car', 'Motorcycle', 'Truck']

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        results = model.track(frame, persist=True)
        for result in results:
            for boxes in result.boxes:
                bbox = boxes.xyxy[0].cpu().numpy()
                class_id = int(boxes.cls[0].cpu().numpy())  
                conf = boxes.conf[0].cpu().numpy()

                id = int(boxes.id[0].cpu().numpy()) if boxes.id is not None else -1

                x1, y1, x2, y2 = map(int, bbox)
                cropped_object = frame[y1:y2, x1:x2]
                # cv2.rectangle(frame, (x1, y1), (x2, y2), (208, 38, 7), 3)
                # label = f'ID: {id}, class: {class_names[class_id]} Conf: {conf:.2f}'
                # cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (208, 38, 7), 2, cv2.LINE_AA)

                # Check if the object is in the defined box for the license plate
                if x1 >= box[0] and y1 >= box[1] and x2 <= box[2] and y2 <= box[3]:
                    if id not in License_plate:
                        License_plate.add(id)
                        if count_type == "in":
                            print("It's counting IN")
                            increment_road(1, 1, is_in=True)  # Update the road traffic database (count in)
                        elif count_type == "out":
                            print("It's counting OUT")
                            increment_road(1, 1, is_in=False)  # Update the road traffic database (count out)

                        print("It's now uploading")
                        upload_frame_to_s3(cropped_object, counter)  # Save cropped license plate to S3
                        counter += 1


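# Insert a recognized license plate string into the license_plates table.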
def insert_data(license_value):
    connection = None  # so the finally block is safe even if connecting fails
    try:
        connection = get_connection()
        with connection.cursor() as cursor:
            
            insert_sql = """
            INSERT INTO license_plates (license_plate)
            VALUES (%s)
            """
            cursor.execute(insert_sql, (license_value,))  # trailing comma: parameters must be a tuple
        
        connection.commit()
        
    except pymysql.MySQLError as e:
        print(f"Error: {e}")
    
    finally:
        if connection:
            connection.close()
            
# Gradio function for counting vehicles in
def count_in(video):
    process_video(video, count_type="in")
    return "Processed vehicles counting 'in' successfully."

# Gradio function for counting vehicles out
def count_out(video):
    process_video(video, count_type="out")
    return "Processed vehicles counting 'out' successfully."

# tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
# model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True)

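# Run GOT-OCR2 on a single image. The tokenizer and model are loaded on every
# call, which is simple but slow; loading them once at module level (as in the
# commented-out lines above) would avoid repeating that cost per request.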
def ocr(image):
    # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
    # model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, use_safetensors=True, pad_token_id=tokenizer.eos_token_id).to(device)
    # # if isinstance(image, np.ndarray):
    # #     image = Image.fromarray(image)
    # # Save the image to a temporary file in /tmp directory
    # temp_image_path = "/tmp/temp_image.jpg"
    # image.save(temp_image_path, format='JPEG')
    # # Perform OCR on the image
    # res = model.chat(tokenizer, image, ocr_type='ocr')
    
    # # Return the extracted text
    # return res
    try:
        # Convert image to PIL Image if it's a NumPy array
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
        model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, use_safetensors=True, pad_token_id=tokenizer.eos_token_id).to(device)

        # Ensure the /tmp directory exists
        temp_dir = "/tmp"
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)

        # Save the image to a temporary file in /tmp directory
        temp_image_path = os.path.join(temp_dir, "temp_image.jpg")
        image.save(temp_image_path, format='JPEG')

        # Perform OCR on the image using the file path; model.chat returns the
        # recognized text directly, so no further decoding step is needed
        res = model.chat(tokenizer, temp_image_path, ocr_type='ocr')
        return res
    except Exception as e:
        return str(e)

# Create Gradio interfaces for two endpoints: count_in and count_out
iface_in = gr.Interface(
    fn=count_in,
    inputs="video",
    outputs="text",
    api_name="count_in",  # This explicitly sets the api_name
    title="YOLO Video Object Detection (Count In)",
    description="Upload a video to count vehicles 'in' and save frames to S3."
)

iface_out = gr.Interface(
    fn=count_out,
    inputs="video",
    outputs="text",
    api_name="count_out",  # This explicitly sets the api_name
    title="YOLO Video Object Detection (Count Out)",
    description="Upload a video to count vehicles 'out' and save frames to S3."
)

iface_ocr = gr.Interface(
    fn=ocr,
    inputs="image",
    # inputs=gr.Image(type="pil"),
    outputs="text",
    api_name="ocr",  # This explicitly sets the api_name
    title="OCR Image Text Extraction",
)

# Create a tabbed interface for both endpoints
iface = gr.TabbedInterface([iface_in, iface_out, iface_ocr], ["Count In", "Count Out", "OCR"])

# Launch the Gradio app
iface.launch()
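
# A minimal sketch of calling these endpoints remotely with gradio_client
# (illustrative only: the Space URL below is a placeholder and the exact
# file-upload helper differs between gradio_client versions, so it is left
# commented out):
#
#   from gradio_client import Client
#   client = Client("<your-space-url>")  # hypothetical Space URL
#   print(client.predict("traffic.mp4", api_name="/count_in"))
#   print(client.predict("plate.jpg", api_name="/ocr"))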