# NOTE(review): the three lines below were Hugging Face "Spaces: Sleeping" status
# residue from a web scrape, not part of the application source.
# Standard library
import ast
import os

# Third-party
import gradio as gr
import nltk
import pandas as pd
import requests
from dotenv import load_dotenv
from nltk.tokenize import sent_tokenize

# Fetch the sentence-tokenizer models once at startup (no-op when cached).
nltk.download('punkt', quiet=True)

# Load variables from a local .env file into the process environment, then
# read the API key. os.getenv already consults os.environ, so the former
# second lookup via os.environ.get('BLACKBOX_API_KEY') was dead code and
# has been removed. May still be None if the key is not configured.
load_dotenv()
BLACKBOX_API_KEY = os.getenv("BLACKBOX_API_KEY")
class CodeAnalyzer(ast.NodeVisitor):
    """Walk a Python AST and tally simple structural metrics.

    Metrics collected (see :meth:`analyze` for the returned dict):
      * ``function_def`` -- number of function definitions (sync and async)
      * ``loop``         -- number of ``for``/``while`` loops (sync and async)
      * ``conditional``  -- number of ``if`` statements (each ``elif`` arm
        is a nested ``If`` node and counts separately)
      * ``max_depth``    -- deepest nesting of loops/conditionals reached
    """

    def __init__(self):
        self.func_count = 0     # function definitions seen
        self.loop_count = 0     # for/while loops seen
        self.cond_count = 0     # if statements seen
        self.max_depth = 0      # deepest loop/if nesting observed
        self.current_depth = 0  # nesting level at the current walk position

    def visit_FunctionDef(self, node):
        self.func_count += 1
        self.generic_visit(node)

    # Generalization: `async def` is a function definition too; the original
    # visitor silently skipped counting it.
    def visit_AsyncFunctionDef(self, node):
        self.func_count += 1
        self.generic_visit(node)

    def visit_For(self, node):
        self.loop_count += 1
        self._enter_block(node)

    # Generalization: `async for` is a loop too.
    def visit_AsyncFor(self, node):
        self.loop_count += 1
        self._enter_block(node)

    def visit_While(self, node):
        self.loop_count += 1
        self._enter_block(node)

    def visit_If(self, node):
        self.cond_count += 1
        self._enter_block(node)

    def _enter_block(self, node):
        """Descend into a loop/conditional body while tracking nesting depth."""
        self.current_depth += 1
        self.max_depth = max(self.max_depth, self.current_depth)
        self.generic_visit(node)
        self.current_depth -= 1

    def analyze(self, code_str):
        """Parse *code_str* and return the metrics as a dict.

        Non-Python input (e.g. a plain natural-language question) is
        tolerated: on SyntaxError the walk is skipped and the counters keep
        whatever was accumulated — all zeros on a fresh instance.
        """
        try:
            tree = ast.parse(code_str)
            self.visit(tree)
        except SyntaxError:
            pass  # best effort: treat unparseable text as having no structure
        return {
            'function_def': self.func_count,
            'loop': self.loop_count,
            'conditional': self.cond_count,
            'max_depth': self.max_depth
        }
class CodeCopilot:
    """Conversational code assistant combining AST metrics with AI replies."""

    def __init__(self):
        self.chat_history = []   # list of (user_input, ai_response) pairs
        self.context_window = 3  # number of past exchanges sent as context

    def get_blackbox_response(self, prompt, max_tokens=300, temperature=0.7):
        """Call the Blackbox chat-completions API and return the reply text.

        Any failure (network, HTTP status, unexpected payload shape) is
        surfaced as an "API Error: ..." string rather than an exception so
        the UI always has something to display.
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {BLACKBOX_API_KEY}"
        }
        try:
            resp = requests.post(
                "https://api.blackbox.ai/chat/completions",
                headers=headers,
                json={
                    "messages": [{"role": "user", "content": prompt}],
                    "max_tokens": max_tokens,
                    "temperature": temperature,
                    "model": "blackboxai/openai/gpt-4"
                },
                timeout=30
            )
            resp.raise_for_status()
            return resp.json()["choices"][0]["message"]["content"]
        except Exception as e:  # boundary: degrade to a user-visible message
            return f"API Error: {e}"

    def generate_suggestions(self, analysis):
        """Translate the metric dict from CodeAnalyzer into human-readable tips."""
        suggestions = []
        # Functions
        if analysis['function_def'] == 0:
            suggestions.append("π Consider defining functions to organize your code and improve reuse.")
        elif analysis['function_def'] > 3:
            suggestions.append(f"π Detected {analysis['function_def']} functions β consider grouping related functions into classes or modules.")
        # Loops
        if analysis['loop'] >= 1:
            suggestions.append(f"π {analysis['loop']} loop(s) found β check if list comprehensions or vectorized operations can simplify them.")
        # Conditionals
        if analysis['conditional'] >= 2:
            suggestions.append(f"β {analysis['conditional']} conditional statements β consider simplifying nested logic or using lookup tables.")
        # Nesting depth
        if analysis['max_depth'] > 2:
            suggestions.append(f"π¦ Maximum nesting depth of {analysis['max_depth']} detected β flatten nested blocks for readability.")
        # Default when nothing tripped a heuristic
        if not suggestions:
            suggestions.append("β Code structure looks clean based on basic analysis.")
        return "\n".join(suggestions)

    def process_input(self, user_input):
        """Analyze *user_input*, query the AI, and record the exchange.

        Returns (ai_response, analysis_dict, suggestions_text).
        """
        # Structural analysis via a fresh visitor (state is per-call).
        analyzer = CodeAnalyzer()
        analysis = analyzer.analyze(user_input)
        # Bug fix: only attach prior turns when there are any. Previously a
        # dangling "Previous conversation:" header was sent on the first turn.
        recent = self.chat_history[-self.context_window:]
        context = ""
        if recent:
            context = "\nPrevious conversation:\n" + "\n".join(
                f"User: {u}\nAI: {a}" for u, a in recent
            )
        prompt = f"You are an expert coding assistant. Analyze this code and provide improvements.\n{context}\nNew input:\n{user_input}"
        # AI response (never raises; see get_blackbox_response)
        ai_resp = self.get_blackbox_response(prompt)
        # Heuristic suggestions from the AST metrics
        sugg = self.generate_suggestions(analysis)
        self.chat_history.append((user_input, ai_resp))
        return ai_resp, analysis, sugg
# --- Application wiring: one shared copilot instance drives the whole UI. ---
copilot = CodeCopilot()


def _handle(text):
    """Gradio callback: run the copilot and shape its results for the widgets."""
    reply, metrics, tips = copilot.process_input(text)
    table = pd.DataFrame({"Metric": list(metrics.keys()), "Count": list(metrics.values())})
    return reply, table, tips


with gr.Blocks(theme=gr.themes.Soft(), title="π€ AI Code Copilot") as demo:
    gr.Markdown("""
<div style='text-align: center; margin-bottom: 1rem;'>
<h1>π€ AI Code Copilot</h1>
<p>Paste code or ask a question below to get instant analysis.</p>
</div>
""")
    with gr.Row():
        # Left column: input area and trigger button.
        with gr.Column(scale=3, min_width=300):
            code_box = gr.Textbox(label="Your Code / Question", lines=10, placeholder="Enter code here...")
            go = gr.Button("π Generate")
        # Right column: response, metrics table, and suggestions.
        with gr.Column(scale=6, min_width=500):
            gr.Markdown("**Assistant Response**")
            answer_md = gr.Markdown()
            gr.Markdown("**Pattern Analysis**")
            metrics_df = gr.Dataframe(headers=["Metric", "Count"], datatype=["str", "number"], interactive=False)
            gr.Markdown("**Suggestions**")
            tips_md = gr.Markdown()

    # Both the button and textbox-submit fire the same callback on the
    # same output widgets.
    targets = [answer_md, metrics_df, tips_md]
    go.click(fn=_handle, inputs=code_box, outputs=targets)
    code_box.submit(fn=_handle, inputs=code_box, outputs=targets)

if __name__ == "__main__":
    demo.launch()