Update app.py
app.py CHANGED
Old revision (lines removed in this commit are prefixed with "-"):

@@ -4,17 +4,16 @@ import os
 import torch
 import re
 import hashlib
-from collections import defaultdict
 
-# ✅
 os.environ["HF_HOME"] = "/data/huggingface"
 
 app = Flask(__name__, template_folder="templates")
 
-# ✅ Path to
 MODEL_DIR = "./finetuned-flan-t5"
 
-# ✅ Load tokenizer and model
 try:
     tokenizer = T5Tokenizer.from_pretrained(MODEL_DIR)
     model = T5ForConditionalGeneration.from_pretrained(MODEL_DIR)
@@ -25,168 +24,124 @@ except Exception as e:
     print(f"❌ Failed to load model: {e}")
     model_loaded = False
 
-#
 response_cache = {}
 
 def create_fallback_response(prompt, context):
-    """
     prompt_lower = prompt.lower()
-
-    # Basic scoring logic based on prompt characteristics
     clarity_score = 3
     completeness_score = 3
     alignment_score = 3
-
-    # Length-based scoring
     if len(prompt) < 20:
-        clarity_score = 1
-        completeness_score = 1
     elif len(prompt) > 100:
-        clarity_score = 4
-
-
-    # Content-based scoring
     if "write" in prompt_lower and "something" in prompt_lower:
-        clarity_score = 1
-        completeness_score = 1
-
     if "clear" in prompt_lower and "detailed" in prompt_lower:
-        clarity_score = 4
-
-
-
-
-
-
-
-    # Comments
     clarity_comment = "Clear and specific." if clarity_score >= 3 else "Could be clearer."
     completeness_comment = "Detailed enough." if completeness_score >= 3 else "Lacks context."
     alignment_comment = "Fully aligned." if alignment_score >= 4 else "Needs tone adjustment."
-
-    # Improved suggested prompt generation
     suggested = generate_improved_suggestion(prompt, prompt_lower, clarity_score, completeness_score)
-
-    return f"Scores: clarity={clarity_score}, completeness={completeness_score}, alignment={alignment_score}\nComments: clarity={clarity_comment}, completeness={completeness_comment}, alignment={alignment_comment}\nSuggested: {suggested}"
 
-
-
-
-    # If the prompt is already good, suggest minor improvements
-    if clarity_score >= 4 and completeness_score >= 4:
-        return prompt  # Keep the original if it's already well-structured
-
-    # Extract key elements from the prompt
-    action_words = []
-    if "create" in prompt_lower:
-        action_words.append("create")
-    if "design" in prompt_lower:
-        action_words.append("design")
-    if "write" in prompt_lower:
-        action_words.append("write")
-    if "build" in prompt_lower:
-        action_words.append("build")
-    if "develop" in prompt_lower:
-        action_words.append("develop")
-
-    # Extract technology/tools mentioned
-    tech_keywords = []
-    if "google sheets" in prompt_lower or "sheets" in prompt_lower:
-        tech_keywords.append("Google Sheets")
-    if "excel" in prompt_lower:
-        tech_keywords.append("Excel")
-    if "android" in prompt_lower:
-        tech_keywords.append("Android")
-    if "python" in prompt_lower:
-        tech_keywords.append("Python")
-    if "javascript" in prompt_lower or "js" in prompt_lower:
-        tech_keywords.append("JavaScript")
-    if "api" in prompt_lower:
-        tech_keywords.append("API")
-
-    # Extract purpose/functionality
-    purpose_keywords = []
-    if "track" in prompt_lower or "tracking" in prompt_lower:
-        purpose_keywords.append("tracking")
-    if "alert" in prompt_lower or "notify" in prompt_lower or "notification" in prompt_lower:
-        purpose_keywords.append("notifications")
-    if "expense" in prompt_lower or "budget" in prompt_lower:
-        purpose_keywords.append("expense management")
-    if "attendance" in prompt_lower:
-        purpose_keywords.append("attendance management")
-    if "assignment" in prompt_lower:
-        purpose_keywords.append("assignment tracking")
-    if "deadline" in prompt_lower:
-        purpose_keywords.append("deadline management")
-
-    # Generate improved suggestion based on content
-    if "google sheets" in prompt_lower and "formula" in prompt_lower:
-        if "expense" in prompt_lower and "alert" in prompt_lower:
-            return "Create a comprehensive Google Sheets formula system that tracks monthly expenses by category, calculates running totals, and includes conditional formatting to alert you when spending exceeds predefined monthly limits for each category."
-        else:
-            return "Create a detailed Google Sheets formula that [specific function] with clear step-by-step instructions and examples."
-
-    elif "android app" in prompt_lower or "android" in prompt_lower:
-        if "student" in prompt_lower and "attendance" in prompt_lower:
-            return "Design a comprehensive Android app for students that includes class attendance tracking with QR codes, assignment management with due dates, grade tracking, and push notifications for upcoming deadlines and class reminders."
-        else:
-            return "Design a detailed Android app concept that [specific functionality] with user interface mockups, feature specifications, and technical requirements."
-
-    elif "python" in prompt_lower:
-        if "api" in prompt_lower:
-            return "Write a complete Python script that integrates with [specific API] to [specific functionality], including error handling, authentication, and example usage."
-        else:
-            return "Write a comprehensive Python script that [specific functionality] with proper documentation, error handling, and example usage."
-
-    # Generic improvements for vague prompts
-    elif "write something" in prompt_lower or len(prompt) < 20:
-        return "Provide specific details about what you want to create, including the purpose, target audience, required features, and any technical constraints or preferences."
-
-    # For prompts that are somewhat clear but could be more specific
-    elif clarity_score < 4 or completeness_score < 4:
-        # Try to enhance the existing prompt
-        enhanced = prompt
-        if not any(word in prompt_lower for word in ["specific", "detailed", "comprehensive"]):
-            enhanced = enhanced.replace("Create", "Create a detailed").replace("Design", "Design a comprehensive").replace("Write", "Write a complete")
-
-        if not any(word in prompt_lower for word in ["include", "with", "that"]):
-            enhanced += " with specific examples and step-by-step instructions."
-
-        return enhanced
-
-    # Default fallback
-    else:
-        return prompt
 
 def is_valid_model_response(response):
-    """Check if
-    if not response or len(response.strip()) <
         return False
-
-
-
-
-    has_suggested = "Suggested:" in response
-
-    return has_scores and has_comments and has_suggested
-
-def get_cached_response(prompt, context):
-    """Get cached response for consistent results"""
-    cache_key = hashlib.md5(f"{prompt}|{context}".encode()).hexdigest()
-    return response_cache.get(cache_key)
-
-def cache_response(prompt, context, response):
-    """Cache response for future use"""
-    cache_key = hashlib.md5(f"{prompt}|{context}".encode()).hexdigest()
-    response_cache[cache_key] = response
-
-    # Limit cache size to prevent memory issues
-    if len(response_cache) > 1000:
-        # Remove oldest entries
-        keys_to_remove = list(response_cache.keys())[:100]
-        for key in keys_to_remove:
-            del response_cache[key]
 
 @app.route("/")
 def home():
@@ -202,70 +157,80 @@ def generate():
         if not user_prompt:
             return jsonify({"error": "Please provide a prompt"}), 400
 
-
-
-
-            return jsonify({"response": cached_response})
 
-        # Try model generation with balanced parameters
         if model_loaded:
             try:
-
-
 
-                # ✅ Tokenize & generate with balanced parameters
                 inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=256)
-
                 with torch.no_grad():
                     outputs = model.generate(
                         **inputs,
                         max_new_tokens=256,
                         do_sample=True,
-                        temperature=0.3,
-                        top_p=0.85,
-                        top_k=30,
                         num_beams=1,
                         pad_token_id=tokenizer.eos_token_id,
                         eos_token_id=tokenizer.eos_token_id,
-                        repetition_penalty=1.1,
                         length_penalty=1.0,
                         early_stopping=True,
-                        no_repeat_ngram_size=2
                     )
-
-                result = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-                # Clean up the result - remove the input prompt if it appears in output
                 if full_prompt in result:
                     result = result.replace(full_prompt, "").strip()
-
-                # Validate the response
                 if is_valid_model_response(result):
-
-
                     return jsonify({"response": result})
                 else:
-                    print(f"⚠️
-
             except Exception as e:
                 print(f"❌ Model generation failed: {e}")
-
-
-
-
-
-        return jsonify({"response": fallback_result})
-
     except Exception as e:
-        print(f"Error in generate: {
         return jsonify({"error": f"Generation failed: {str(e)}"}), 500
 
 @app.route("/clear_cache", methods=["POST"])
 def clear_cache():
-    """Clear the response cache to allow new variations"""
-    global response_cache
     response_cache.clear()
     return jsonify({"message": "Cache cleared successfully"})
 
 if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=7860)
New revision (lines added in this commit are prefixed with "+"):

@@ -4,17 +4,16 @@ import os
 import torch
 import re
 import hashlib
 
+# ✅ Ensure Hugging Face cache directory is writable
 os.environ["HF_HOME"] = "/data/huggingface"
 
 app = Flask(__name__, template_folder="templates")
 
+# ✅ Path to fine-tuned model
 MODEL_DIR = "./finetuned-flan-t5"
 
+# ✅ Load tokenizer and model at startup
 try:
     tokenizer = T5Tokenizer.from_pretrained(MODEL_DIR)
     model = T5ForConditionalGeneration.from_pretrained(MODEL_DIR)
@@ -25,168 +24,124 @@ except Exception as e:
     print(f"❌ Failed to load model: {e}")
     model_loaded = False
 
+# ✅ Response cache for consistency
 response_cache = {}
 
+def generate_improved_suggestion(prompt, prompt_lower, clarity_score, completeness_score):
+    """Generate more specific and helpful suggested prompts"""
+    if clarity_score >= 4 and completeness_score >= 4:
+        return prompt
+
+    action_words, tech_keywords, purpose_keywords = [], [], []
+
+    for word in ["create", "design", "write", "build", "develop"]:
+        if word in prompt_lower:
+            action_words.append(word)
+
+    tech_map = {
+        "google sheets": "Google Sheets",
+        "excel": "Excel",
+        "android": "Android",
+        "python": "Python",
+        "javascript": "JavaScript",
+        "js": "JavaScript",
+        "api": "API"
+    }
+    for key, val in tech_map.items():
+        if key in prompt_lower:
+            tech_keywords.append(val)
+
+    purpose_map = {
+        "track": "tracking",
+        "tracking": "tracking",
+        "alert": "notifications",
+        "notify": "notifications",
+        "notification": "notifications",
+        "expense": "expense management",
+        "budget": "expense management",
+        "attendance": "attendance management",
+        "assignment": "assignment tracking",
+        "deadline": "deadline management"
+    }
+    for key, val in purpose_map.items():
+        if key in prompt_lower:
+            purpose_keywords.append(val)
+
+    # Specific suggestion rules
+    if "google sheets" in prompt_lower and "formula" in prompt_lower:
+        if "expense" in prompt_lower and "alert" in prompt_lower:
+            return ("Create a comprehensive Google Sheets formula system that tracks monthly expenses "
+                    "by category, calculates running totals, and highlights overspending with conditional formatting.")
+        return "Create a detailed Google Sheets formula that [specific function] with clear instructions."
+
+    if "android" in prompt_lower:
+        if "student" in prompt_lower and "attendance" in prompt_lower:
+            return ("Design a complete Android student app with attendance tracking (QR codes), "
+                    "assignment deadlines, grade tracking, and push notifications.")
+        return "Design a detailed Android app concept with UI mockups, features, and technical requirements."
+
+    if "python" in prompt_lower:
+        if "api" in prompt_lower:
+            return ("Write a complete Python script integrating with [specific API], including "
+                    "authentication, error handling, and usage examples.")
+        return "Write a Python script that [specific functionality] with documentation and error handling."
+
+    if "write something" in prompt_lower or len(prompt) < 20:
+        return ("Be more specific: describe purpose, audience, required features, and any technical "
+                "constraints or preferences.")
+
+    if clarity_score < 4 or completeness_score < 4:
+        enhanced = prompt
+        if not any(w in prompt_lower for w in ["specific", "detailed", "comprehensive"]):
+            enhanced = enhanced.replace("Create", "Create a detailed").replace("Design", "Design a comprehensive").replace("Write", "Write a complete")
+        if not any(w in prompt_lower for w in ["include", "with", "that"]):
+            enhanced += " with examples and step-by-step instructions."
+        return enhanced
+
+    return prompt
+
 def create_fallback_response(prompt, context):
+    """Generate fallback response if model fails"""
     prompt_lower = prompt.lower()
+
     clarity_score = 3
     completeness_score = 3
     alignment_score = 3
+
     if len(prompt) < 20:
+        clarity_score = completeness_score = 1
     elif len(prompt) > 100:
+        clarity_score = completeness_score = 4
+
     if "write" in prompt_lower and "something" in prompt_lower:
+        clarity_score = completeness_score = 1
     if "clear" in prompt_lower and "detailed" in prompt_lower:
+        clarity_score = completeness_score = 4
+
+    if context:
+        if "professional" in context.lower():
+            alignment_score = 4
+        elif "friendly" in context.lower():
+            alignment_score = 4
+
     clarity_comment = "Clear and specific." if clarity_score >= 3 else "Could be clearer."
     completeness_comment = "Detailed enough." if completeness_score >= 3 else "Lacks context."
    alignment_comment = "Fully aligned." if alignment_score >= 4 else "Needs tone adjustment."
+
     suggested = generate_improved_suggestion(prompt, prompt_lower, clarity_score, completeness_score)
 
+    return (f"Scores: clarity={clarity_score}, completeness={completeness_score}, alignment={alignment_score}\n"
+            f"Comments: clarity={clarity_comment}, completeness={completeness_comment}, alignment={alignment_comment}\n"
+            f"Suggested: {suggested}")
 
 def is_valid_model_response(response):
+    """Check if model output matches expected format"""
+    if not response or len(response.strip()) < 20:
         return False
+    return all(key in response for key in ["Scores:", "Comments:", "Suggested:"])
+
+def get_cache_key(prompt, context):
+    return hashlib.md5(f"{prompt}|{context}".encode()).hexdigest()
 
 @app.route("/")
 def home():
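Both the model path and the fallback path are expected to emit the three-line "Scores: / Comments: / Suggested:" block that is_valid_model_response checks for. As a rough illustration of how a caller could turn that block into structured data, here is a minimal parsing sketch; the helper name, regexes, and field names are illustrative assumptions and are not part of app.py:

import re

def parse_audit_response(text):
    """Split a 'Scores / Comments / Suggested' block into a dict (illustrative helper, not in app.py)."""
    parsed = {"scores": {}, "comments": {}, "suggested": ""}
    for line in text.splitlines():
        if line.startswith("Scores:"):
            # e.g. "Scores: clarity=4, completeness=3, alignment=5"
            for key, value in re.findall(r"(\w+)=(\d)", line):
                parsed["scores"][key] = int(value)
        elif line.startswith("Comments:"):
            # e.g. "Comments: clarity=Clear and specific., completeness=Lacks context., ..."
            for key, value in re.findall(r"(\w+)=([^,]+?\.)", line):
                parsed["comments"][key] = value.strip()
        elif line.startswith("Suggested:"):
            parsed["suggested"] = line[len("Suggested:"):].strip()
    return parsed

# Example against the fallback path (assumes app.py's functions are importable):
# text = create_fallback_response("Write something", "")
# parse_audit_response(text)["scores"]  -> {'clarity': 1, 'completeness': 1, 'alignment': 3}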
@@ -202,70 +157,80 @@ def generate():
         if not user_prompt:
             return jsonify({"error": "Please provide a prompt"}), 400
 
+        cache_key = get_cache_key(user_prompt, context)
+        if cache_key in response_cache:
+            return jsonify({"response": response_cache[cache_key]})
 
         if model_loaded:
             try:
+                full_prompt = f"""
+You are an AI Prompt Auditor.
+Your job is to evaluate the given prompt and return a structured analysis.
+
+Follow this exact output format, with no extra text before or after:
+
+Scores: clarity=<1-5>, completeness=<1-5>, alignment=<1-5>
+Comments: clarity=<short comment>, completeness=<short comment>, alignment=<short comment>
+Suggested: <improved prompt>
+
+Example:
+Scores: clarity=4, completeness=3, alignment=5
+Comments: clarity=Clear and concise., completeness=Could include more context., alignment=Tone matches well.
+Suggested: Write a 500-word blog post on healthy breakfast ideas with nutritional information and recipes.
+
+Now evaluate:
+Prompt: {prompt}
+Context: {context}
+"""
 
                 inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=256)
+
                 with torch.no_grad():
                     outputs = model.generate(
                         **inputs,
                         max_new_tokens=256,
                         do_sample=True,
+                        temperature=0.3,
+                        top_p=0.85,
+                        top_k=30,
                         num_beams=1,
                         pad_token_id=tokenizer.eos_token_id,
                         eos_token_id=tokenizer.eos_token_id,
+                        repetition_penalty=1.1,
                         length_penalty=1.0,
                         early_stopping=True,
+                        no_repeat_ngram_size=2
                     )
+
+                result = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
+
                 if full_prompt in result:
                     result = result.replace(full_prompt, "").strip()
+
                 if is_valid_model_response(result):
+                    response_cache[cache_key] = result
+                    if len(response_cache) > 1000:
+                        for k in list(response_cache.keys())[:100]:
+                            del response_cache[k]
                     return jsonify({"response": result})
                 else:
+                    print(f"⚠️ Invalid model output - using fallback. Output:\n{result}")
+
             except Exception as e:
                 print(f"❌ Model generation failed: {e}")
+
+        fallback = create_fallback_response(user_prompt, context)
+        response_cache[cache_key] = fallback
+        return jsonify({"response": fallback})
+
     except Exception as e:
+        print(f"❌ Error in /generate: {e}")
         return jsonify({"error": f"Generation failed: {str(e)}"}), 500
 
 @app.route("/clear_cache", methods=["POST"])
 def clear_cache():
     response_cache.clear()
     return jsonify({"message": "Cache cleared successfully"})
 
 if __name__ == "__main__":
+    app.run(host="0.0.0.0", port=7860)
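For completeness, a small client-side sketch of how the updated endpoints might be exercised once the Space is running. The JSON field names ("prompt", "context"), the local URL, and the use of the requests library are assumptions based on the handler code above rather than anything confirmed by the visible diff:

import requests

BASE_URL = "http://localhost:7860"  # assumed local address; the app binds to port 7860

# Ask the auditor to evaluate a prompt (field names are assumed, not shown in the diff).
resp = requests.post(
    f"{BASE_URL}/generate",
    json={"prompt": "Write something about budgeting", "context": "friendly tone"},
    timeout=120,
)
print(resp.json().get("response", resp.json()))

# Repeating the identical prompt+context pair should now be served from response_cache;
# POST /clear_cache wipes it so fresh generations are possible again.
requests.post(f"{BASE_URL}/clear_cache", timeout=10)

One design note on the caching added in /generate: once response_cache exceeds 1000 entries it deletes the first 100 keys by insertion order, which is well defined on Python 3.7+ dicts; an explicit LRU structure would be a natural follow-up if eviction order ever becomes important.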