import gradio as gr
import uuid
from qdrant_client import QdrantClient
from qdrant_client.models import (
    PointStruct,
    VectorParams,
    Distance,
    Filter,
    FieldCondition,
    MatchValue,
    FilterSelector,
)
from sentence_transformers import SentenceTransformer
from PIL import Image
import numpy as np

# Connect to Qdrant
COLLECTION = "lost_and_found"
qclient = QdrantClient(":memory:")  # use in-memory for demo, replace with host/port for persistence
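# Persistence sketch (assumes a Qdrant server reachable at localhost:6333):
# qclient = QdrantClient(host="localhost", port=6333)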

# Load CLIP (ViT-B/32): one model that embeds both images and text into the
# same 512-dim space. Note: the multilingual variant
# "clip-ViT-B-32-multilingual-v1" is text-only and would fail on the image
# branch of encode_data() below.
model = SentenceTransformer("sentence-transformers/clip-ViT-B-32")

# Ensure the collection exists (recreate_collection drops any previous data)
qclient.recreate_collection(
    collection_name=COLLECTION,
    vectors_config=VectorParams(size=512, distance=Distance.COSINE),
)
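
# For persistent deployments, a sketch that avoids wiping data on every
# restart: create the collection only if it is missing.
#   if not qclient.collection_exists(COLLECTION):
#       qclient.create_collection(
#           collection_name=COLLECTION,
#           vectors_config=VectorParams(size=512, distance=Distance.COSINE),
#       )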

# Encode helper: embed an image (takes priority) or a text string with CLIP.
# normalize_embeddings=True L2-normalizes vectors, matching cosine distance.
def encode_data(text=None, image=None):
    if image is not None:
        img = Image.open(image).convert("RGB")
        emb = model.encode(img, convert_to_numpy=True, normalize_embeddings=True)
    elif text:
        emb = model.encode(text, convert_to_numpy=True, normalize_embeddings=True)
    else:
        raise ValueError("Need text or image")
    return emb.astype(np.float32)
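
# Example (illustrative): both calls return a float32 vector of length 512,
# so text queries and image uploads share one search space:
#   encode_data(text="black leather wallet")
#   encode_data(image="wallet.jpg")  # hypothetical file path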

# Add item
def add_item(mode, text, image, name, phone):
    try:
        vector = encode_data(text=text if text else None, image=image if image else None)
        payload = {
            "mode": mode,
            "text": text,
            "has_image": image is not None,
        }
        if mode == "found":
            payload["finder_name"] = name
            payload["finder_phone"] = phone
        qclient.upsert(
            collection_name=COLLECTION,
            points=[PointStruct(id=str(uuid.uuid4()), vector=vector.tolist(), payload=payload)]
        )
        return "βœ… Item added successfully!"
    except Exception as e:
        return f"❌ Error: {e}"

# Search items
def search_items(query_image, query_text, limit, min_score):
    try:
        query_vector = encode_data(
            text=query_text if query_text else None,
            image=query_image if query_image else None
        )
        results = qclient.search(
            collection_name=COLLECTION,
            query_vector=query_vector.tolist(),
            limit=int(limit),  # slider values may arrive as floats
        )
        out_texts, out_imgs = [], []
        for r in results:
            if r.score < min_score:
                continue
            pl = r.payload
            info = f"id:{r.id} | score:{r.score:.4f} | mode:{pl.get('mode','')}"
            if pl.get("text"):
                info += f" | text:{pl['text']}"
            if pl.get("mode") == "found":
                info += f" | found by: {pl.get('finder_name','?')} ({pl.get('finder_phone','?')})"
            out_texts.append(info)
            if pl.get("has_image") and query_image is not None:
                # NOTE: only the *query* image is echoed here; the stored
                # item's photo isn't persisted, so it can't be shown back.
                out_imgs.append(query_image)
        return "\n".join(out_texts) if out_texts else "No matches.", out_imgs
    except Exception as e:
        return f"❌ Error: {e}", []

# Clear all image items (every point whose payload has has_image == True)
def clear_all_images():
    try:
        qclient.delete(
            collection_name=COLLECTION,
            points_selector=FilterSelector(
                filter=Filter(
                    must=[FieldCondition(key="has_image", match=MatchValue(value=True))]
                )
            ),
        )
        return "πŸ—‘οΈ All image items cleared!"
    except Exception as e:
        return f"❌ Error clearing images: {e}"

# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# πŸ”Ž Lost & Found System")

    with gr.Tab("βž• Add Item"):
        mode = gr.Radio(["lost", "found"], label="Mode")
        text = gr.Textbox(label="Describe the item (optional)")
        img = gr.Image(type="filepath", label="Upload image (optional)")
        name = gr.Textbox(label="Finder's Name (only if found)", placeholder="John Doe")
        phone = gr.Textbox(label="Finder's Phone (only if found)", placeholder="+1234567890")
        add_btn = gr.Button("Add Item")
        add_out = gr.Textbox(label="Add result")
        add_btn.click(add_item, inputs=[mode, text, img, name, phone], outputs=add_out)

    with gr.Tab("πŸ” Search"):
        query_text = gr.Textbox(label="Search by text (optional)")
        query_img = gr.Image(type="filepath", label="Search by image (optional)")
        max_results = gr.Slider(1, 10, value=5, step=1, label="Max results")
        score_slider = gr.Slider(0.5, 1.0, value=0.9, step=0.01, label="Min similarity threshold")
        search_btn = gr.Button("Search")
        search_out = gr.Textbox(label="Search results (text)")
        gallery = gr.Gallery(label="Search Results", show_label=True, elem_id="gallery", columns=2, height="auto")
        search_btn.click(search_items, inputs=[query_img, query_text, max_results, score_slider], outputs=[search_out, gallery])

    with gr.Tab("πŸ—‘οΈ Admin"):
        clear_btn = gr.Button("Clear All Images")
        clear_out = gr.Textbox(label="Clear Result")
        clear_btn.click(clear_all_images, outputs=clear_out)

if __name__ == "__main__":
    demo.launch()