Update app.py
Browse files
app.py
CHANGED
@@ -273,6 +273,12 @@ def identify_required_functions(project_path, functionality_description):
|
|
273 |
return response.text
|
274 |
|
275 |
|
|
|
|
|
|
|
|
|
|
|
|
|
276 |
# Hugging Face Inference API endpoint for the model
|
277 |
API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
|
278 |
qwen = os.getenv("QWEN")
|
@@ -280,7 +286,7 @@ headers = {"Authorization": f"Bearer {qwen}"}
|
|
280 |
|
281 |
def validate_and_generate_documentation(api_url, headers, gemini_output, file_contents, functionality_description):
|
282 |
"""Uses the Hugging Face Inference API to validate functions and generate documentation."""
|
283 |
-
# Generate the prompt for the Qwen model
|
284 |
prompt = f"""
|
285 |
User-specified functionality: '{functionality_description}'
|
286 |
Functions identified by Gemini:
|
@@ -289,9 +295,10 @@ def validate_and_generate_documentation(api_url, headers, gemini_output, file_co
|
|
289 |
Project files:
|
290 |
"""
|
291 |
for file_path, content in file_contents.items():
|
292 |
-
#
|
293 |
-
|
294 |
-
|
|
|
295 |
|
296 |
prompt += """
|
297 |
Task:
|
@@ -323,10 +330,17 @@ def validate_and_generate_documentation(api_url, headers, gemini_output, file_co
|
|
323 |
|
324 |
# Handle the API response
|
325 |
if response.status_code == 200:
|
326 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
327 |
else:
|
328 |
raise ValueError(f"Error: {response.status_code}, {response.text}")
|
329 |
|
|
|
330 |
def generate_documentation_page():
|
331 |
st.subheader(f"Generate Documentation for {st.session_state.current_project}")
|
332 |
st.write("Enter the functionality or parts of the project for which you'd like to identify relevant functions.")
|
|
|
273 |
return response.text
|
274 |
|
275 |
|
276 |
+
|
277 |
+
|
278 |
+
def split_into_chunks(content, chunk_size=1000):
    """Split a string into consecutive chunks of at most `chunk_size` characters.

    Args:
        content: The string to split.
        chunk_size: Maximum length of each chunk; must be a positive integer.

    Returns:
        A list of substrings in original order; the final chunk may be shorter
        than `chunk_size`. An empty `content` yields an empty list.

    Raises:
        ValueError: If `chunk_size` is not positive. (Previously, 0 raised an
            opaque `range() arg 3 must not be zero` and negative values
            silently returned an empty list.)
    """
    if chunk_size <= 0:
        # Fail fast with a clear message instead of leaking range()'s error
        # or silently dropping all content on a negative size.
        raise ValueError(f"chunk_size must be positive, got {chunk_size}")
    return [content[i:i + chunk_size] for i in range(0, len(content), chunk_size)]
|
281 |
+
|
282 |
# Hugging Face Inference API endpoint for the model
API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct"
# API token read from the QWEN environment variable; used to build the
# Authorization header for requests to API_URL.
# NOTE(review): os.getenv returns None when QWEN is unset, which would produce
# an "Authorization: Bearer None" header downstream — confirm the deployment
# always sets QWEN.
qwen = os.getenv("QWEN")
|
|
|
286 |
|
287 |
def validate_and_generate_documentation(api_url, headers, gemini_output, file_contents, functionality_description):
|
288 |
"""Uses the Hugging Face Inference API to validate functions and generate documentation."""
|
289 |
+
# Generate the base prompt for the Qwen model
|
290 |
prompt = f"""
|
291 |
User-specified functionality: '{functionality_description}'
|
292 |
Functions identified by Gemini:
|
|
|
295 |
Project files:
|
296 |
"""
|
297 |
for file_path, content in file_contents.items():
|
298 |
+
# Split content into manageable chunks if necessary
|
299 |
+
chunks = split_into_chunks(content, chunk_size=3000) # Adjust chunk size based on model's max input length
|
300 |
+
for idx, chunk in enumerate(chunks):
|
301 |
+
prompt += f"File: {os.path.basename(file_path)} (Part {idx + 1})\n{chunk}\n\n"
|
302 |
|
303 |
prompt += """
|
304 |
Task:
|
|
|
330 |
|
331 |
# Handle the API response
|
332 |
if response.status_code == 200:
|
333 |
+
api_response = response.json()
|
334 |
+
if isinstance(api_response, list): # If response is a list, extract the first element
|
335 |
+
return api_response[0].get("generated_text", "No output generated.")
|
336 |
+
elif isinstance(api_response, dict): # Handle dict response
|
337 |
+
return api_response.get("generated_text", "No output generated.")
|
338 |
+
else:
|
339 |
+
raise ValueError("Unexpected response format from Hugging Face API.")
|
340 |
else:
|
341 |
raise ValueError(f"Error: {response.status_code}, {response.text}")
|
342 |
|
343 |
+
|
344 |
def generate_documentation_page():
|
345 |
st.subheader(f"Generate Documentation for {st.session_state.current_project}")
|
346 |
st.write("Enter the functionality or parts of the project for which you'd like to identify relevant functions.")
|