Update app.py
app.py CHANGED
@@ -1,122 +1,324 @@
-import gradio as gr
-import torch
-from transformers import pipeline
 import re
 
-#
-
-
 
-# Automatically initialize the model pipeline
 try:
-
-
-
-
-
-        device_map="auto" if device != -1 else None
-    )
-    model_status = f"✅ Model '{DEFAULT_MODEL_ID}' loaded successfully on {'GPU' if device != -1 else 'CPU'}!"
-except Exception as e:
-    pipe = None
-    model_status = f"❌ Error loading model: {str(e)}"
 
-
-
-
-
-
-
-
-        full_prompt = f"{system_prompt}\n\n{prompt}\n\nCode:"
-    elif task_type == "Fix Bugs":
-        system_prompt = f"You are an expert {language} debugger. Analyze the following code and fix all bugs, then provide the corrected version:"
-        full_prompt = f"{system_prompt}\n\n{prompt}\n\nFixed Code:"
-    elif task_type == "Optimize Code":
-        system_prompt = f"You are an expert {language} optimizer. Analyze and optimize the following code for better performance and readability:"
-        full_prompt = f"{system_prompt}\n\n{prompt}\n\nOptimized Code:"
-    else:
-        system_prompt = f"You are an expert {language} teacher. Explain the following code step by step:"
-        full_prompt = f"{system_prompt}\n\n{prompt}\n\nExplanation:"
 
-
-
-
-
-
-
-
 
-
-
-
-
         else:
-
 
-
-
-
-
-
 
-
 
-
-
 
-
-
-
 
-
-
-
-
-
-
-
-
-
-
-
-            choices=["Python", "JavaScript", "Java", "C++", "C#", "Go", "Rust", "TypeScript", "PHP", "Ruby"],
-            value="Python",
-            label="💻 Programming Language"
-        )
-        max_tokens = gr.Slider(
-            minimum=50,
-            maximum=1000,
-            value=256,
-            step=50,
-            label="📊 Max Tokens"
-        )
-        temperature = gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.7,
-            step=0.1,
-            label="🌡️ Temperature (Creativity)"
-        )
-    with gr.Column(scale=2):
-        prompt = gr.Textbox(
-            label="Your Code Request or Buggy Code",
-            placeholder="Example: Create a function to sort a list of dictionaries by a specific key...",
-            lines=8
         )
 
-
-
 
-
-
-        inputs=[prompt, task_type, language, max_tokens, temperature],
-        outputs=[code_output, explanation_output]
-    )
 
-
-
 import re
+import random
+import gradio as gr
+import json
+import os
+from typing import Dict, List, Any
 
+# Try to import AI libraries
+try:
+    import openai
+    OPENAI_AVAILABLE = True
+except ImportError:
+    OPENAI_AVAILABLE = False
 
 try:
+    from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+    import torch
+    TRANSFORMERS_AVAILABLE = True
+except ImportError:
+    TRANSFORMERS_AVAILABLE = False
 
+# Try to import sentence transformers for semantic search
+try:
+    from sentence_transformers import SentenceTransformer
+    import numpy as np
+    SENTENCE_TRANSFORMERS_AVAILABLE = True
+except ImportError:
+    SENTENCE_TRANSFORMERS_AVAILABLE = False
 
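An aside on the import block above: each optional dependency is probed once at import time and the result is cached in a module-level flag, so the Space still starts when a library is missing. A minimal, self-contained sketch of the same idiom (numpy stands in here for the optional dependency; this is not part of the commit):

```python
# Probe for an optional dependency once at import time, then gate on the flag.
try:
    import numpy as np
    NUMPY_AVAILABLE = True
except ImportError:
    NUMPY_AVAILABLE = False

def mean(values):
    """Average a list, using the fast path only when numpy is installed."""
    if NUMPY_AVAILABLE:
        return float(np.mean(values))
    return sum(values) / len(values)

print(mean([1, 2, 3]))  # 2.0 with or without numpy
```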
+class CodeGenius:
+    def __init__(self):
+        self.name = "CodeGenius"
+        self.user_name = ""
+        self.conversation_history = []
+        self.model_loaded = False
+        self.generator = None
+        self.tokenizer = None
+        self.model = None
+        self.embedding_model = None
+
+        # Load programming knowledge base
+        self.programming_data = self.load_programming_data()
+        self.knowledge_base = self.prepare_knowledge_base()
+
+        # Initialize embedding model for semantic search
+        self.init_embedding_model()
 
+    def load_programming_data(self) -> Dict:
+        """Load programming knowledge from JSON file"""
+        try:
+            json_path = os.path.join(os.path.dirname(__file__), 'programming_data.json')
+            with open(json_path, 'r', encoding='utf-8') as file:
+                return json.load(file)
+        except FileNotFoundError:
+            print("Programming data file not found. Using basic data.")
+            return self.get_fallback_data()
+        except json.JSONDecodeError:
+            print("Error reading programming data. Using basic data.")
+            return self.get_fallback_data()
+
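For reference, `load_programming_data` above expects a `programming_data.json` sitting next to `app.py`; its shape can be inferred from `get_fallback_data` below. A hypothetical minimal file, written from Python so the keys stay in sync (the concrete values are illustrative only):

```python
# Hypothetical minimal programming_data.json; only these keys are read by the app.
import json

sample = {
    "languages": {
        "Python": {
            "paradigm": ["Object-oriented"],
            "typing": "Dynamic",
            "use_cases": ["Automation"],
            "common_errors": [
                {"name": "NameError", "solution": "Define the variable before use"}
            ],
            "optimization": ["Prefer built-in functions"],
        }
    },
    "concepts": {
        "OOP": {
            "definition": "Organizing software around objects",
            "principles": ["Encapsulation", "Inheritance"],
        }
    },
}

with open("programming_data.json", "w", encoding="utf-8") as f:
    json.dump(sample, f, indent=2)
```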
+    def get_fallback_data(self) -> Dict:
+        """Fallback data if JSON file is not available"""
+        return {
+            "languages": {
+                "Python": {
+                    "paradigm": ["Object-oriented", "Imperative", "Functional", "Procedural"],
+                    "typing": "Dynamic",
+                    "use_cases": ["Web development", "Data science", "AI/ML", "Automation"],
+                    "common_errors": [
+                        {"name": "IndentationError", "solution": "Ensure consistent use of tabs or spaces"},
+                        {"name": "NameError", "solution": "Check if variable is defined before use"}
+                    ],
+                    "optimization": ["Use list comprehensions", "Avoid global variables", "Use built-in functions"]
+                },
+                "JavaScript": {
+                    "paradigm": ["Event-driven", "Functional", "Object-oriented"],
+                    "typing": "Dynamic",
+                    "use_cases": ["Web development", "Frontend", "Backend", "Mobile apps"],
+                    "common_errors": [
+                        {"name": "TypeError", "solution": "Check variable types before operations"},
+                        {"name": "ReferenceError", "solution": "Ensure variables/functions are in scope"}
+                    ],
+                    "optimization": ["Minimize DOM access", "Debounce events", "Use Web Workers"]
+                }
+            },
+            "concepts": {
+                "OOP": {
+                    "definition": "Object-oriented programming organizes software design around objects rather than functions and logic",
+                    "principles": ["Encapsulation", "Inheritance", "Polymorphism", "Abstraction"]
+                },
+                "Functional Programming": {
+                    "definition": "Programming paradigm that treats computation as evaluation of mathematical functions",
+                    "key_features": ["Pure functions", "Immutability", "First-class functions"]
+                }
+            }
+        }
+
+    def prepare_knowledge_base(self) -> List[Dict]:
+        """Prepare searchable knowledge base from programming data"""
+        knowledge_items = []
+
+        # Process languages data
+        for lang_name, lang_data in self.programming_data.get('languages', {}).items():
+            # Basic language info
+            knowledge_items.append({
+                'type': 'language_info',
+                'language': lang_name,
+                'content': f"{lang_name} programming language: Paradigms - {', '.join(lang_data.get('paradigm', []))}, "
+                           f"Typing - {lang_data.get('typing', 'N/A')}, "
+                           f"Use cases - {', '.join(lang_data.get('use_cases', []))}",
+                'data': lang_data
+            })
+
+            # Common errors
+            for error in lang_data.get('common_errors', []):
+                knowledge_items.append({
+                    'type': 'error',
+                    'language': lang_name,
+                    'content': f"{error.get('name', 'Unknown')} in {lang_name}: "
+                               f"Solution - {error.get('solution', 'N/A')}",
+                    'data': error
+                })
+
+            # Optimization tips
+            for tip in lang_data.get('optimization', []):
+                knowledge_items.append({
+                    'type': 'optimization',
+                    'language': lang_name,
+                    'content': f"Optimization tip for {lang_name}: {tip}",
+                    'data': tip
+                })
+
+        # Process programming concepts
+        for concept_name, concept_data in self.programming_data.get('concepts', {}).items():
+            knowledge_items.append({
+                'type': 'concept',
+                'content': f"{concept_name}: {concept_data.get('definition', 'N/A')}. "
+                           f"Key aspects: {', '.join(concept_data.get('principles', concept_data.get('key_features', [])))}",
+                'data': concept_data
+            })
+
+        return knowledge_items
+
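To make the flattening in `prepare_knowledge_base` concrete: each language contributes one `language_info` item plus one item per common error and per optimization tip, and each concept contributes one item. For the Python fallback data this yields entries shaped roughly like the following (a sketch of the structure, not captured output):

```python
# Approximate shape of two prepare_knowledge_base() entries for the fallback data.
example_items = [
    {
        "type": "language_info",
        "language": "Python",
        "content": "Python programming language: Paradigms - Object-oriented, Imperative, "
                   "Functional, Procedural, Typing - Dynamic, "
                   "Use cases - Web development, Data science, AI/ML, Automation",
        "data": {},  # full language dict omitted here
    },
    {
        "type": "error",
        "language": "Python",
        "content": "IndentationError in Python: Solution - Ensure consistent use of tabs or spaces",
        "data": {"name": "IndentationError",
                 "solution": "Ensure consistent use of tabs or spaces"},
    },
]
```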
+    def init_embedding_model(self):
+        """Initialize embedding model for semantic search"""
+        if SENTENCE_TRANSFORMERS_AVAILABLE:
+            try:
+                self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
+                # Pre-compute embeddings for knowledge base
+                self.knowledge_embeddings = self.embedding_model.encode([item['content'] for item in self.knowledge_base])
+            except Exception as e:
+                print(f"Failed to load embedding model: {e}")
+                self.embedding_model = None
         else:
+            self.embedding_model = None
+
+    def semantic_search(self, query: str, top_k: int = 3) -> List[Dict]:
+        """Perform semantic search on knowledge base"""
+        if self.embedding_model is None:
+            return self.fallback_search(query, top_k)
+
+        try:
+            query_embedding = self.embedding_model.encode([query])
+            similarities = np.dot(query_embedding, self.knowledge_embeddings.T)[0]
+            top_indices = np.argsort(similarities)[-top_k:][::-1]
+
+            results = []
+            for idx in top_indices:
+                if similarities[idx] > 0.3:  # Threshold for relevance
+                    results.append({
+                        'item': self.knowledge_base[idx],
+                        'score': float(similarities[idx])
+                    })
+
+            return results
+        except Exception as e:
+            print(f"Semantic search error: {e}")
+            return self.fallback_search(query, top_k)
+
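Worth noting about `semantic_search` above: `encode` returns unnormalized vectors by default, so `np.dot` yields raw inner products and the fixed 0.3 cutoff is not a true cosine threshold. A sketch of the normalized variant using sentence-transformers' `normalize_embeddings` flag:

```python
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")
docs = ["IndentationError in Python", "Debounce events in JavaScript"]

# With unit-length vectors, the dot product below is exactly cosine
# similarity in [-1, 1], so a 0.3 relevance cutoff becomes meaningful.
doc_vecs = model.encode(docs, normalize_embeddings=True)
query_vec = model.encode(["how to fix a python indentation error"],
                         normalize_embeddings=True)

similarities = np.dot(query_vec, doc_vecs.T)[0]
print(similarities.round(3))
```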
+    def fallback_search(self, query: str, top_k: int = 3) -> List[Dict]:
+        """Fallback search using keyword matching"""
+        query_words = set(query.lower().split())
+        results = []
 
+        for item in self.knowledge_base:
+            content_words = set(item['content'].lower().split())
+            overlap = len(query_words.intersection(content_words))
+            if overlap > 0:
+                results.append({
+                    'item': item,
+                    'score': overlap / len(query_words)
+                })
 
+        results.sort(key=lambda x: x['score'], reverse=True)
+        return results[:top_k]
+
+    def load_model(self):
+        """Load AI model for advanced queries"""
+        if self.model_loaded:
+            return True
+
+        if TRANSFORMERS_AVAILABLE:
+            try:
+                # Use a code-specific model
+                model_name = "bigcode/starcoder2-7b"
+
+                self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+                self.model = AutoModelForCausalLM.from_pretrained(
+                    model_name,
+                    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+                    device_map="auto" if torch.cuda.is_available() else None,
+                    low_cpu_mem_usage=True
+                )
+
+                # Add pad token if not present
+                if self.tokenizer.pad_token is None:
+                    self.tokenizer.pad_token = self.tokenizer.eos_token
+
+                # Device placement already comes from device_map above; also
+                # passing device= here would make transformers raise on GPU.
+                self.generator = pipeline(
+                    "text-generation",
+                    model=self.model,
+                    tokenizer=self.tokenizer,
+                    return_full_text=False
+                )
+
+                self.model_loaded = True
+                print("✅ AI model loaded successfully!")
+                return True
+
+            except Exception as e:
+                print(f"⚠️ Could not load AI model: {str(e)}")
+                return False
+        else:
+            print("🔧 Install transformers and torch for AI features")
+            return False
+
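A practical caveat on `load_model`: a 7B-parameter checkpoint like `bigcode/starcoder2-7b` needs on the order of 14 GB of weights in float16, more than a free CPU Space provides. The same recipe works with a smaller checkpoint; a sketch (the 3B model id is an assumption, any causal-LM id fits):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Same loading recipe as load_model() above, but with a ~3B checkpoint
# (roughly 6 GB in float16) that is far easier to host.
model_name = "bigcode/starcoder2-3b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    low_cpu_mem_usage=True,
)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
print(generator("def fibonacci(n):", max_new_tokens=32)[0]["generated_text"])
```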
def generate_ai_response(self, query: str, context: str = "", code: str = "") -> str:
|
| 239 |
+
"""Generate conversational AI response using programming knowledge"""
|
| 240 |
+
if not self.model_loaded:
|
| 241 |
+
if not self.load_model():
|
| 242 |
+
return self.generate_openai_style_response(query, context, code)
|
| 243 |
|
| 244 |
+
try:
|
| 245 |
+
# Create a conversational prompt for code assistance
|
| 246 |
+
system_prompt = """You are an expert programming assistant with years of experience helping developers.
|
| 247 |
+
Your job is to provide helpful, accurate code solutions, explanations, and optimizations.
|
| 248 |
+
Provide clear, concise answers with code examples when appropriate.
|
| 249 |
+
Explain complex concepts in simple terms and always consider best practices."""
|
| 250 |
|
| 251 |
+
user_prompt = f"""Based on this programming knowledge: {context}
|
| 252 |
+
And this provided code: {code}
|
| 253 |
+
Please answer this developer's question: {query}
|
| 254 |
+
Provide the best solution with explanation and consider edge cases."""
|
| 255 |
|
| 256 |
+
# Generate response
|
| 257 |
+
full_prompt = f"{system_prompt}\n\nUser: {user_prompt}\nAssistant:"
|
| 258 |
+
|
| 259 |
+
response = self.generator(
|
| 260 |
+
full_prompt,
|
| 261 |
+
max_new_tokens=300,
|
| 262 |
+
do_sample=True,
|
| 263 |
+
temperature=0.7,
|
| 264 |
+
top_p=0.9,
|
| 265 |
+
pad_token_id=self.tokenizer.eos_token_id,
|
| 266 |
+
repetition_penalty=1.1,
|
| 267 |
+
no_repeat_ngram_size=3
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 268 |
)
|
| 269 |
+
|
| 270 |
+
if response and len(response) > 0:
|
| 271 |
+
generated_text = response[0]["generated_text"]
|
| 272 |
+
# Extract only the assistant's response
|
| 273 |
+
if "Assistant:" in generated_text:
|
| 274 |
+
ai_response = generated_text.split("Assistant:")[-1].strip()
|
| 275 |
+
if len(ai_response) > 20:
|
| 276 |
+
return ai_response
|
| 277 |
+
|
| 278 |
+
except Exception as e:
|
| 279 |
+
print(f"AI generation error: {e}")
|
| 280 |
+
|
| 281 |
+
# Fallback to OpenAI-style response
|
| 282 |
+
return self.generate_openai_style_response(query, context, code)
|
| 283 |
+
|
| 284 |
+
def generate_openai_style_response(self, query: str, context: str, code: str) -> str:
|
| 285 |
+
"""Generate OpenAI-style conversational response using template"""
|
| 286 |
+
query_lower = query.lower()
|
| 287 |
+
|
| 288 |
+
# Extract key information from context
|
| 289 |
+
lang_mentioned = None
|
| 290 |
+
for lang in ['python', 'javascript', 'java', 'c++', 'go']:
|
| 291 |
+
if lang in query_lower or lang in context.lower():
|
| 292 |
+
lang_mentioned = lang
|
| 293 |
+
break
|
| 294 |
+
|
| 295 |
+
if lang_mentioned:
|
| 296 |
+
lang_data = self.programming_data.get('languages', {}).get(lang_mentioned.capitalize(), {})
|
| 297 |
+
|
| 298 |
+
if 'error' in query_lower or 'bug' in query_lower or 'fix' in query_lower:
|
| 299 |
+
return self.generate_error_response(lang_mentioned, lang_data, query, code)
|
| 300 |
+
elif 'optimiz' in query_lower or 'improve' in query_lower or 'speed' in query_lower:
|
| 301 |
+
return self.generate_optimization_response(lang_mentioned, lang_data, code)
|
| 302 |
+
elif 'explain' in query_lower or 'how does' in query_lower:
|
| 303 |
+
return self.generate_explanation_response(lang_mentioned, lang_data, code)
|
| 304 |
+
elif 'generate' in query_lower or 'write' in query_lower or 'create' in query_lower:
|
| 305 |
+
return self.generate_code_response(lang_mentioned, lang_data, query)
|
| 306 |
+
else:
|
| 307 |
+
return self.generate_general_lang_response(lang_mentioned, lang_data, query)
|
| 308 |
+
|
| 309 |
+
return self.generate_general_programming_response(query, context, code)
|
| 310 |
|
| 311 |
+
def generate_error_response(self, lang: str, lang_data: dict, query: str, code: str) -> str:
|
| 312 |
+
"""Generate detailed error explanation and solution"""
|
| 313 |
+
common_errors = lang_data.get('common_errors', [])
|
| 314 |
+
|
| 315 |
+
response = f"""Let me help you debug this {lang} code. Here's a systematic approach:
|
| 316 |
|
| 317 |
+
π **Error Analysis:**
|
| 318 |
+
First, let's identify the error type and root cause. Common {lang} errors include:
|
| 319 |
+
{', '.join([e.get('name', 'Unknown') for e in common_errors[:3]])}
|
|
|
|
|
|
|
|
|
|
| 320 |
|
| 321 |
+
π» **Code Inspection:**
|
| 322 |
+
For the provided code:
|
| 323 |
+
```{lang}
|
| 324 |
+
{code if code else '# No code provided'}
|