Max Rudko committed
Commit 6579266 · Parent: 1699c04

Added ability to log pictures;


Introduced max size of 1024*1024 for images

Files changed (2):
  1. analytics.py +21 -1
  2. app.py +11 -9
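
Taken together, the two diffs below let log_interaction accept either a plain user string or an (image_path, user_text) tuple; in the tuple case the image is uploaded to the "Images" storage bucket and its handle is written to the new user_image_path column. A minimal usage sketch (not part of the commit; values are illustrative and start_session() is assumed to take no arguments, which this diff does not show):

# Usage sketch for the new logging API introduced by this commit (illustrative values).
from analytics import AnalyticsLogger

logger = AnalyticsLogger()
logger.start_session()  # assumed call signature; log_interaction requires a started session

# Text-only interaction: behaviour unchanged by this commit.
logger.log_interaction(user="What is in this picture?", answer="A bar chart.")

# New in this commit: pass (image_path, user_text) so the image is uploaded
# and its storage handle is stored alongside the chat row.
logger.log_interaction(user=("/tmp/example.jpg", "What is in this picture?"),
                       answer="A bar chart.")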
analytics.py CHANGED
@@ -2,6 +2,7 @@ import os
 import uuid
 import datetime as dt
 import sys
+from pathlib import Path
 from typing import Optional
 
 from supabase import create_client, Client
@@ -40,7 +41,18 @@ class AnalyticsLogger:
             print(f"[AnalyticsLogger] Failed to start session: {e}", file=sys.stderr)
             raise e
 
-    def log_interaction(self, user: str, answer: str, ts_iso: Optional[str] = None) -> None:
+    def _upload_image(self, image_path: str) -> Optional[str]:
+        try:
+            with open(image_path, "rb") as img_file:
+                image_name = f'{uuid.uuid4()}{Path(image_path).suffix}'
+                response = self.client.storage.from_("Images").upload(image_name, img_file, {"cacheControl": "3600", "upsert": "true"})
+
+            return response.full_path
+        except:
+            print(f"[AnalyticsLogger] Failed to upload image: {response['error']}", file=sys.stderr)
+            return None
+
+    def log_interaction(self, user: str | tuple[str, str], answer: str, ts_iso: Optional[str] = None) -> None:
         """
         Inserts a single chat interaction.
         """
@@ -48,12 +60,20 @@ class AnalyticsLogger:
             raise ValueError("Session not started. Call start_session() first.")
         session_id = self.session_id
 
+        image_handle: str | None = None
+
+        if isinstance(user, tuple):  # (image_path, user_name)
+            image, user = user
+
+            image_handle = self._upload_image(image)
+
         chat_payload = {
             "id": str(uuid.uuid4()),
             "session_id": session_id,
             "timestamp": ts_iso or _utc_now_iso(),
             "user": user,
             "answer": answer,
+            "user_image_path": image_handle,
         }
         try:
             self.client.table("Chats").insert(chat_payload).execute()
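
One caveat in _upload_image as committed: the bare except prints response['error'], but response is unbound if open() or upload() itself raises, and the upload response is not a dict, so the error path can itself fail. A more defensive variant, sketched with only the calls already used above (not part of the commit):

    def _upload_image(self, image_path: str) -> Optional[str]:
        try:
            with open(image_path, "rb") as img_file:
                image_name = f"{uuid.uuid4()}{Path(image_path).suffix}"
                response = self.client.storage.from_("Images").upload(
                    image_name, img_file, {"cacheControl": "3600", "upsert": "true"}
                )
            return response.full_path
        except Exception as e:
            # Report the exception itself instead of indexing into a possibly
            # unbound response object.
            print(f"[AnalyticsLogger] Failed to upload image: {e}", file=sys.stderr)
            return None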
app.py CHANGED
@@ -11,6 +11,7 @@ import threading
 import spaces
 import gradio as gr
 import torch
+from PIL.Image import Image
 from transformers import AutoModelForCausalLM, AutoProcessor, AutoTokenizer, TextIteratorStreamer
 from analytics import AnalyticsLogger
 from kernels import get_kernel
@@ -36,6 +37,8 @@ MAX_TOKENS = 4096
 TEMPERATURE = 0.7
 TOP_P = 0.95
 
+IMAGE_MAX_SIZE = 1024
+
 logger = AnalyticsLogger()
 
 def _begin_analytics_session():
@@ -66,7 +69,7 @@ def load_model():
 model, tokenizer, processor, device = load_model()
 
 
-def user(user_message, image_data, history: list):
+def user(user_message, image_data: Image, history: list):
     """Format user message with optional image."""
     import io
 
@@ -78,6 +81,8 @@ def user(user_message, image_data, history: list):
 
     # If we have an image, save it to temp file for Gradio display
     if image_data is not None:
+        image_data.thumbnail((IMAGE_MAX_SIZE, IMAGE_MAX_SIZE))
+
         # Save to temp file for Gradio display
         fd, tmp_path = tempfile.mkstemp(suffix=".jpg")
         os.close(fd)
@@ -121,17 +126,14 @@ def append_example_message(x: gr.SelectData, history):
     return history
 
 
-def _extract_text_from_content(content: Any) -> str:
+def _extract_text_from_content(content: Any) -> str | tuple[str, str]:
     """Extract text from message content for logging."""
     if isinstance(content, str):
         return content
-    if isinstance(content, list):
-        text_parts = []
-        for item in content:
-            if isinstance(item, dict) and item.get("type") == "text":
-                text_parts.append(item.get("text", ""))
-        return " ".join(text_parts) if text_parts else "[Image]"
-    return str(content)
+    if isinstance(content, tuple) and len(content) == 2:
+        return content  # (image_path, user_text)
+
+    raise ValueError(f"Unsupported content type for text extraction: {content}")
 
 
 def _clean_history_for_display(history: list[dict[str, Any]]) -> list[dict[str, Any]]:
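
The resize added in user() relies on PIL's Image.thumbnail(), which works in place, preserves aspect ratio, and never enlarges, so images end up within the 1024*1024 bound from the commit message. A standalone sketch of that behaviour (not part of the commit):

# Sketch of the resize behaviour used in user(); image dimensions are illustrative.
from PIL import Image

IMAGE_MAX_SIZE = 1024  # mirrors the constant added to app.py

img = Image.new("RGB", (4000, 2000))             # stand-in for an uploaded image
img.thumbnail((IMAGE_MAX_SIZE, IMAGE_MAX_SIZE))  # resizes in place, keeps aspect ratio
print(img.size)                                  # (1024, 512): longest side capped at 1024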