Update app.py
app.py
CHANGED
@@ -47,61 +47,7 @@ llm = ChatGroq(
     api_key=GROQ_API_KEY,
 )

-# ---
-LLM_PARSE_ERROR_MESSAGE = "I'm sorry, I couldn't process the last response correctly due to a formatting issue. Could you please rephrase or try a simpler query?"
-
-PROGRAMMING_ASSISTANT_PROMPT = """
-You are an expert programming assistant. Your role is to provide code suggestions, fix bugs, explain programming concepts, and offer contextual help based on the user's query and preferred programming language.
-
-**CONTEXT HANDLING RULES (Follow these strictly):**
-- **Conversation Summary:** At the end of every response, you MUST provide an updated, concise `conversationSummary` based on the entire chat history provided. This summary helps you maintain context.
-- **Language Adaptation:** Adjust your suggestions, code, and explanations to the programming language specified in the 'language' field of the 'AssistantState'.
-
-STRICT OUTPUT FORMAT (JSON ONLY):
-Return a single JSON object with the following keys. **The JSON object MUST be enclosed in a single ```json block.**
-- assistant_reply: string // A natural language reply to the user (short and helpful). Do NOT include code blocks here.
-- code_snippet: string // If suggesting code, provide it here in a markdown code block. **CRITICALLY, you must escape all internal newlines as '\\n' and backslashes as '\\\\'** to keep the string value valid JSON. If no code is required, use an empty string: "".
-- state_updates: object // updates to the internal state, must include: language, conversationSummary
-- suggested_tags: array of strings // a list of 1-3 relevant tags for the assistant_reply
-
-Rules:
-- ALWAYS include all four top-level keys: `assistant_reply`, `code_snippet`, `state_updates`, and `suggested_tags`.
-- ALWAYS include `assistant_reply` as a non-empty string.
-- Do NOT produce any text outside the JSON block.
-"""
-
-def extract_json_from_llm_response(raw_response: str) -> dict:
-    default = {
-        "assistant_reply": LLM_PARSE_ERROR_MESSAGE,
-        "code_snippet": "",
-        "state_updates": {"conversationSummary": "", "language": "Python"},
-        "suggested_tags": [],
-    }
-    if not raw_response or not isinstance(raw_response, str):
-        return default
-    m = re.search(r"```json\s*([\s\S]*?)\s*```", raw_response)
-    json_string = m.group(1).strip() if m else raw_response
-    first = json_string.find('{')
-    last = json_string.rfind('}')
-    candidate = json_string[first:last+1] if first != -1 and last != -1 and first < last else json_string
-    candidate = re.sub(r',\s*(?=[}\]])', '', candidate)
-    try:
-        parsed = json.loads(candidate)
-    except Exception as e:
-        logger.warning("Failed to parse JSON from LLM output: %s. Candidate: %s", e, candidate[:200])
-        return default
-    if isinstance(parsed, dict) and "assistant_reply" in parsed:
-        parsed.setdefault("code_snippet", "")
-        parsed.setdefault("state_updates", {})
-        parsed["state_updates"].setdefault("conversationSummary", "")
-        parsed["state_updates"].setdefault("language", "Python")
-        parsed.setdefault("suggested_tags", [])
-        if not parsed["assistant_reply"].strip():
-            parsed["assistant_reply"] = "I need a clearer instruction to provide a reply."
-        return parsed
-    else:
-        logger.warning("Parsed JSON missing required keys or invalid format. Returning default.")
-        return default
+# --- Helper functions ---

 def detect_language_from_text(text: str) -> Optional[str]:
     if not text:
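For reference, the block removed above implemented a tolerant JSON contract: the prompt demanded a single ```json fenced object, and `extract_json_from_llm_response` pulled out the fenced block, trimmed to the outermost braces, stripped trailing commas, and fell back to a default payload on any failure. A minimal standalone sketch of that parsing path (the sample reply and variable names are illustrative, not from the repo):

```python
import json
import re

# A made-up model reply in the old strict-JSON format (kept on one line so the
# embedded ```json fence is just part of the string).
raw = 'Sure!\n```json\n{ "assistant_reply": "Use a list comprehension.", "suggested_tags": ["python"], }\n```'

# Same steps the removed extract_json_from_llm_response performed:
m = re.search(r"```json\s*([\s\S]*?)\s*```", raw)        # pull out the fenced block
candidate = m.group(1).strip() if m else raw
candidate = candidate[candidate.find("{"):candidate.rfind("}") + 1]  # keep outermost braces
candidate = re.sub(r",\s*(?=[}\]])", "", candidate)       # drop trailing commas

parsed = json.loads(candidate)
print(parsed["assistant_reply"])  # Use a list comprehension.
```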
@@ -113,6 +59,16 @@ def detect_language_from_text(text: str) -> Optional[str]:
         return lang_match.group(2).capitalize()
     return None

+def update_summary(chat_history: List[Dict[str, str]]) -> str:
+    """
+    Placeholder for conversation summary update logic.
+    You can replace this with a call to a summarization LLM chain or heuristic.
+    For now, returns the last six messages (three exchanges) concatenated as a simple summary.
+    """
+    recent_msgs = chat_history[-6:]  # last 6 messages (user+assistant)
+    summary = " | ".join(f"{m['role']}: " + m["content"][:50].replace("\n", " ") for m in recent_msgs)
+    return summary
+
 # --- Routes ---

 @app.route("/", methods=["GET"])
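The new `update_summary` helper is a heuristic rather than an LLM summarizer: it keeps the last six messages and joins truncated `role: content` snippets with ` | `. A small standalone illustration of the string it produces (the sample history is made up):

```python
# Mirror of the added update_summary heuristic, applied to a made-up history.
chat_history = [
    {"role": "user", "content": "How do I reverse a list in Python?"},
    {"role": "assistant", "content": "Use reversed() or slice with [::-1]."},
]

recent_msgs = chat_history[-6:]  # last 6 messages, i.e. up to three exchanges
summary = " | ".join(
    f"{m['role']}: " + m["content"][:50].replace("\n", " ") for m in recent_msgs
)
print(summary)
# user: How do I reverse a list in Python? | assistant: Use reversed() or slice with [::-1].
```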
@@ -122,6 +78,7 @@ def serve_frontend():
     except Exception:
         return "<h3>frontend.html not found in static/ — please add your frontend.html there.</h3>", 404

+@app.route("/chat", methods=["POST"])
 def chat():
     data = request.get_json(force=True)
     chat_history = data.get("chat_history", [])
@@ -129,29 +86,51 @@ def chat():

     conversation_summary = assistant_state.get("conversationSummary", "")
     language = assistant_state.get("language", "Python")
+    tagged_replies = assistant_state.get("taggedReplies", [])
+
+    # Detect language from last user message
+    last_user_msg = ""
+    for msg in reversed(chat_history):
+        if msg.get("role") == "user" and msg.get("content"):
+            last_user_msg = msg["content"]
+            break
+    detected_lang = detect_language_from_text(last_user_msg)
+    if detected_lang and detected_lang.lower() != language.lower():
+        logger.info(f"Detected new language: {detected_lang}")
+        language = detected_lang

     # Build prompt with system + conversation summary + chat history
     system_prompt = f"You are a helpful programming assistant. Current language: {language}. Conversation summary: {conversation_summary}"
     messages = [{"role": "system", "content": system_prompt}]
     messages.extend(chat_history)

-
-
-
+    try:
+        llm_response = llm.invoke(messages)
+        assistant_reply = llm_response.content if hasattr(llm_response, "content") else str(llm_response)
+    except Exception as e:
+        logger.exception("LLM invocation failed")
+        return jsonify({
+            "assistant_reply": "Sorry, the assistant is currently unavailable. Please try again later.",
+            "updated_state": {
+                "conversationSummary": conversation_summary,
+                "language": language,
+                "taggedReplies": tagged_replies,
+            },
+            "chat_history": chat_history,
+        }), 500

     # Append assistant reply to chat history
     chat_history.append({"role": "assistant", "content": assistant_reply})

-    #
+    # Update conversation summary
     conversation_summary = update_summary(chat_history)

-    # Return plain text reply and updated state
     return jsonify({
         "assistant_reply": assistant_reply,
         "updated_state": {
             "conversationSummary": conversation_summary,
             "language": language,
-            "taggedReplies":
+            "taggedReplies": tagged_replies,
         },
         "chat_history": chat_history,
     })
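With the `/chat` route wired up as above, the endpoint takes the chat history plus assistant state and returns a plain-text reply along with the updated state. A rough client sketch, assuming the app is reachable locally and that the state is posted under an `assistant_state` key (that key is read inside the handler but not shown in this diff):

```python
import requests

# Hypothetical local URL; adjust host/port to wherever the Flask app runs.
URL = "http://localhost:7860/chat"

payload = {
    "chat_history": [
        {"role": "user", "content": "Write a Rust function that sums a Vec<i32>."}
    ],
    # Key name assumed from how the handler reads assistant_state.
    "assistant_state": {
        "conversationSummary": "",
        "language": "Python",
        "taggedReplies": [],
    },
}

resp = requests.post(URL, json=payload, timeout=60)
data = resp.json()
print(data["assistant_reply"])                       # plain-text model reply
print(data["updated_state"]["language"])             # e.g. "Rust" if detection fired
print(data["updated_state"]["conversationSummary"])  # heuristic summary of recent turns
```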
@@ -173,24 +152,20 @@ def tag_reply():
     if not tags:
         return jsonify({"error": "Tags list cannot be empty"}), 400

-
+    tagged_replies = assistant_state.get("taggedReplies", [])
+    tagged_replies.append({"reply": reply_content, "tags": tags})
+
+    updated_state = {
         "conversationSummary": assistant_state.get("conversationSummary", ""),
         "language": assistant_state.get("language", "Python"),
-        "taggedReplies":
+        "taggedReplies": tagged_replies,
     }

-
-        "reply": reply_content,
-        "tags": tags,
-    }
-
-    state["taggedReplies"].append(new_tagged_reply)
-
-    logger.info("Reply tagged with: %s", tags)
+    logger.info(f"Reply tagged with: {tags}")

     return jsonify({
         "message": "Reply saved and tagged successfully.",
-        "updated_state":
+        "updated_state": updated_state,
     }), 200

 @app.route("/ping", methods=["GET"])
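The tagging handler now appends `{"reply": ..., "tags": [...]}` to `taggedReplies` and echoes the updated state. The endpoint path and request field names are not visible in this diff, so the ones below are placeholders; only the response shape is taken from the code:

```python
import requests

# Endpoint path and request field names are placeholders; the diff only shows
# the handler body (reply_content, tags) and the response shape.
URL = "http://localhost:7860/tag_reply"

resp = requests.post(
    URL,
    json={"reply": "Use reversed() or slice with [::-1].", "tags": ["python", "lists"]},
    timeout=30,
)
print(resp.status_code)                       # 200 on success
print(resp.json()["message"])                 # "Reply saved and tagged successfully."
print(resp.json()["updated_state"]["taggedReplies"][-1]["tags"])  # ["python", "lists"]
```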