import asyncio
import json

import requests
| |
|
| |
|
def show_message(title, content):
    """Display *content* on stdout inside a titled banner.

    The banner is a blank line followed by ``--- title ---``, the content
    itself, and a fixed dashed footer.
    """
    banner = f"\n--- {title} ---"
    footer = "-----------------\n"
    for line in (banner, content, footer):
        print(line)
| |
|
| |
|
def set_processing_state(is_processing):
    """Announce on stdout whether a long-running operation has started or finished."""
    message = "Processing... Please wait." if is_processing else "Processing complete."
    print(message)
| |
|
| |
|
def _build_search_context(search_result, max_len=2000):
    """Flatten a nested search response into a '[Source N] snippet' string.

    *search_result* is the parsed JSON from the search endpoint; accumulation
    stops (appending a trailing ``...``) once *max_len* characters have been
    gathered so the LLM prompt stays bounded.
    """
    context = ""
    for query_result in search_result.get('results') or []:
        for item_index, item in enumerate(query_result.get('results') or []):
            snippet = item.get('snippet')
            if not snippet:
                continue
            context += f"[Source {item_index + 1}] {snippet}\n"
            if len(context) > max_len:
                context += "...\n"
                return context
    return context


def _extract_answer(llm_result):
    """Return the first candidate's text from a Gemini generateContent
    response, or None if the response carries no usable candidate."""
    candidates = llm_result.get('candidates') or []
    if candidates:
        parts = (candidates[0].get('content') or {}).get('parts') or []
        if parts:
            return parts[0]['text']
    return None


async def generate_solution_python(user_query):
    """Search the web for *user_query*, then ask Gemini for a grounded answer.

    Pipeline: POST the query to a local search service, fold the snippets
    into a context string, build a prompt, call the Gemini generateContent
    endpoint, and print the resulting solution. Errors are reported via
    show_message() and the console; the function returns None in all cases.
    """
    if not user_query:
        show_message("Input Required", "Please enter your query to get a solution.")
        return

    set_processing_state(True)
    response_text = ""

    try:
        print(f"Searching for information related to: {user_query}")
        search_payload = {"queries": [user_query]}
        # requests is blocking; run it in a worker thread so this coroutine
        # does not stall the event loop while the HTTP call is in flight.
        # timeout= prevents hanging forever on an unresponsive server.
        search_response = await asyncio.to_thread(
            requests.post,
            'http://localhost:8000/api/google_search',
            json=search_payload,  # sets Content-Type: application/json for us
            timeout=30,
        )
        search_response.raise_for_status()
        search_result = search_response.json()
        print("Search results received.")

        context = _build_search_context(search_result)

        prompt = f"""You are an AI assistant that provides comprehensive solutions based on the given query and additional context from open sources.

User Query: {user_query}

{context if context else 'No specific open-source information found for this query.'}

Please provide a detailed and helpful solution, incorporating the provided information where relevant. If the information is insufficient, state that and provide a general answer.
"""
        chat_history = [{"role": "user", "parts": [{"text": prompt}]}]

        print("Calling Gemini API...")
        llm_payload = {"contents": chat_history}
        # TODO(review): the API key after 'key=' is empty — supply one
        # (e.g. from an environment variable) before this call can succeed.
        gemini_api_url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key="

        gemini_response = await asyncio.to_thread(
            requests.post,
            gemini_api_url,
            json=llm_payload,
            timeout=60,
        )
        gemini_response.raise_for_status()
        llm_result = gemini_response.json()
        print("Gemini API response received.")

        answer = _extract_answer(llm_result)
        if answer is not None:
            response_text = answer
        else:
            response_text = "No solution could be generated. Please try a different query."

    except requests.exceptions.RequestException as e:
        error_message = f"Network or API error: {e}"
        print(f"Error: {error_message}")
        show_message("Generation Error", error_message)
        response_text = f"An error occurred: {error_message}. Please check the console for details."
    except Exception as e:
        # Top-level boundary: report anything unexpected rather than crash.
        error_message = f"An unexpected error occurred: {e}"
        print(f"Error: {error_message}")
        show_message("Generation Error", error_message)
        response_text = f"An error occurred: {error_message}. Please check the console for details."
    finally:
        set_processing_state(False)
        print("\n--- Solution ---")
        print(response_text)
        print("----------------\n")
| |
|
| |
|