Update app.py

app.py CHANGED
@@ -9,62 +9,26 @@ import textwrap
 import google.generativeai as genai
 import asyncio
 from typing import Generator, AsyncGenerator
+from openai import AsyncOpenAI
+import dotenv
+
+# Load environment variables
+dotenv.load_dotenv()
 
 # Metadata
-CURRENT_TIME = "2025-05-23 12:
+CURRENT_TIME = "2025-05-23 12:57:22"
 CURRENT_USER = "ErRickow"
 
-#
-
-
-
-1. Kunjungi [GitHub Token Settings](https://github.com/settings/tokens)
-2. Klik "Generate new token" > "Generate new token (classic)"
-3. Beri nama token Anda di "Note"
-4. Pilih scope:
-   - `repo` (untuk akses repository private)
-   - `read:packages` (opsional, untuk akses package)
-5. Klik "Generate token"
-6. **PENTING**: Salin token segera! Token hanya ditampilkan sekali
-
-Token diperlukan untuk:
-- Mengakses repository private
-- Clone repository dengan rate limit lebih tinggi
-- Mengakses fitur GitHub API
-"""
-
-GEMINI_API_HELP = """
-### Cara Mendapatkan Gemini API Key:
-
-1. Kunjungi [Google AI Studio](https://makersuite.google.com/app/apikey)
-2. Login dengan akun Google Anda
-3. Klik "Create API Key"
-4. Salin API Key yang dihasilkan
-
-Catatan:
-- Gemini memberikan kuota gratis setiap bulan
-- Key bisa dibuat ulang jika diperlukan
-- Monitor penggunaan di [Google Cloud Console](https://console.cloud.google.com/)
-"""
-
-OLLAMA_HELP = """
-### Cara Menggunakan Ollama:
-
-1. Install Ollama dari [ollama.ai](https://ollama.ai)
-2. Jalankan Ollama di komputer Anda
-3. Pastikan Ollama berjalan di http://localhost:11434
-
-Catatan:
-- Ollama berjalan secara lokal di komputer Anda
-- Tidak memerlukan API key
-- Ideal untuk privasi dan penggunaan offline
-"""
+# Default API Keys (fallback if user doesn't provide their own)
+DEFAULT_XAI_KEY = os.getenv("XAI_API_KEY", "xai-vfjhklL384Z4HKdItsZomqpFlXubTZJAFnISQUpV7dE8lRnWwYBVPSCxSTlu08wDbAcv720bx2dDiQ9x")
+DEFAULT_GEMINI_KEY = os.getenv("GEMINI_API_KEY")
 
 # API settings
 OLLAMA_API = os.environ.get("OLLAMA_API", "http://localhost:11434")
+XAI_BASE_URL = "https://api.x.ai/v1"
 
 # Model lists
-DEFAULT_MODELS = [
+OLLAMA_MODELS = [
     "llama2",
     "codellama",
     "mistral",
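The added imports wire in python-dotenv so the default keys can come from a `.env` file, with `os.getenv`'s second argument acting as the fallback. A minimal sketch of that pattern (the placeholder key is illustrative, not the committed one); note that because the fallback is a literal in the source, the committed default key ships in plain text with the Space:

```python
import os
import dotenv

dotenv.load_dotenv()  # read a local .env file into the environment, if present

# os.getenv returns the second argument when the variable is unset;
# this is the fallback mechanism used for DEFAULT_XAI_KEY above.
xai_key = os.getenv("XAI_API_KEY", "xai-placeholder-key")  # fake default
gemini_key = os.getenv("GEMINI_API_KEY")  # None if unset (no fallback)
```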
@@ -75,9 +39,34 @@ DEFAULT_MODELS = [
     "orca-mini"
 ]
 
+XAI_MODELS = [
+    "grok-2-latest",
+    "grok-1",
+]
+
+GEMINI_MODELS = [
+    "gemini-1.5-mini",
+    "gemini-pro-vision",
+]
+
+# Help texts
+XAI_API_HELP = """
+### Cara Mendapatkan X.AI (Grok) API Key:
+
+1. Kunjungi [X.AI Developer Portal](https://x.ai)
+2. Daftar/Login ke akun Anda
+3. Buat API Key baru
+4. Salin API Key
+
+Note:
+- Jika tidak diisi, akan menggunakan API key default
+- Masukkan API key Anda sendiri jika default mencapai limit
+"""
+
 class AIProvider:
     OLLAMA = "ollama"
     GEMINI = "gemini"
+    XAI = "xai"
 
 class RepoAnalyzer:
     def __init__(self):
@@ -85,6 +74,55 @@ class RepoAnalyzer:
         self.repo_content = {}
         self.chat_history = []
 
+    async def stream_xai_response(self, prompt: str, api_key: str = None, model: str = "grok-2-latest") -> AsyncGenerator[str, None]:
+        """Stream response dari X.AI (Grok) API"""
+        try:
+            # Use default key if none provided
+            actual_key = api_key if api_key else DEFAULT_XAI_KEY
+
+            if not actual_key:
+                yield "⚠️ API Key X.AI diperlukan. Gunakan key Anda sendiri atau tunggu reset limit default key."
+                return
+
+            client = AsyncOpenAI(
+                api_key=actual_key,
+                base_url=XAI_BASE_URL
+            )
+
+            # Prepare messages with repository context if available
+            messages = [
+                {"role": "system", "content": "Anda adalah asisten AI yang membantu menganalisis repository code. Berikan respons dalam Bahasa Indonesia."}
+            ]
+
+            if self.current_repo:
+                context = f"Repository: {self.current_repo}\n\n"
+                repo_files = "\n".join(list(self.repo_content.keys()))
+                context += f"Files in repository:\n{repo_files}\n\n"
+                messages.append({"role": "system", "content": context})
+
+            messages.append({"role": "user", "content": prompt})
+
+            stream = await client.chat.completions.create(
+                model=model,
+                messages=messages,
+                stream=True
+            )
+
+            full_response = ""
+            async for chunk in stream:
+                if chunk.choices[0].delta.content:
+                    content = chunk.choices[0].delta.content
+                    full_response += content
+                    yield content
+
+            self.chat_history.append({"role": "user", "content": prompt})
+            self.chat_history.append({"role": "assistant", "content": full_response})
+
+        except Exception as e:
+            error_msg = f"⚠️ Error dalam X.AI API: {str(e)}"
+            print(error_msg)
+            yield error_msg
+
     async def stream_gemini_response(self, prompt: str, api_key: str) -> AsyncGenerator[str, None]:
         """Stream response dari Gemini API"""
         try:
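The new `stream_xai_response` is an async generator built on x.ai's OpenAI-compatible endpoint (`api.x.ai/v1`). A minimal driver for it, assuming the class above is in scope and a valid key is configured:

```python
import asyncio

async def main():
    analyzer = RepoAnalyzer()
    # Chunks arrive as they are generated; print them as a live stream.
    async for chunk in analyzer.stream_xai_response("Jelaskan repository ini"):
        print(chunk, end="", flush=True)

asyncio.run(main())
```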
@@ -93,7 +131,7 @@ class RepoAnalyzer:
                 return
 
             genai.configure(api_key=api_key)
-            model = genai.GenerativeModel('gemini-
+            model = genai.GenerativeModel('gemini-pro')
 
             # Tambahkan konteks repository jika ada
             if self.current_repo:
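For comparison, the Gemini side streams through `google.generativeai`, where `generate_content(..., stream=True)` returns an iterable of partial chunks. A standalone sketch of that call (placeholder key, `gemini-pro` as pinned in the hunk):

```python
import google.generativeai as genai

genai.configure(api_key="YOUR_GEMINI_KEY")  # placeholder
model = genai.GenerativeModel("gemini-pro")

# With stream=True the response is iterable; each chunk carries new text.
for chunk in model.generate_content("Ringkas repository ini", stream=True):
    print(chunk.text, end="", flush=True)
```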
@@ -207,21 +245,53 @@ def create_ui():
 
         with gr.Tab("🛠️ Konfigurasi"):
             provider = gr.Radio(
-                choices=[AIProvider.GEMINI, AIProvider.OLLAMA],
+                choices=[AIProvider.XAI, AIProvider.GEMINI, AIProvider.OLLAMA],
                 label="Penyedia AI",
-                value=AIProvider.
+                value=AIProvider.XAI
             )
 
             with gr.Group() as api_settings:
+                with gr.Row():
+                    xai_key = gr.Textbox(
+                        label="X.AI (Grok) API Key",
+                        type="password",
+                        placeholder="Opsional - Klik icon (?) untuk info. Kosongkan untuk gunakan key default",
+                        show_label=True
+                    )
+                    gr.Markdown(XAI_API_HELP)
+
                 with gr.Row():
                     gemini_key = gr.Textbox(
                         label="Gemini API Key",
                         type="password",
-                        placeholder="
+                        placeholder="Opsional - Kosongkan untuk gunakan key default",
                         show_label=True
                     )
                     gr.Markdown(GEMINI_API_HELP)
 
+                # Model selection based on provider
+                with gr.Row():
+                    model_dropdown = gr.Dropdown(
+                        label="Model AI",
+                        choices=XAI_MODELS,
+                        value="grok-2-latest",
+                        interactive=True
+                    )
+
+                def update_model_list(provider_choice):
+                    if provider_choice == AIProvider.XAI:
+                        return gr.Dropdown(choices=XAI_MODELS, value="grok-2-latest")
+                    elif provider_choice == AIProvider.GEMINI:
+                        return gr.Dropdown(choices=GEMINI_MODELS, value="gemini-pro")
+                    else:  # OLLAMA
+                        return gr.Dropdown(choices=OLLAMA_MODELS, value="llama2")
+
+                provider.change(
+                    fn=update_model_list,
+                    inputs=[provider],
+                    outputs=[model_dropdown]
+                )
+
         with gr.Tab("📊 Analisis Repository"):
             with gr.Row():
                 repo_url = gr.Textbox(
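`update_model_list` uses the Gradio 4 idiom where returning a freshly constructed component from a callback is applied as an update to the component bound to `outputs`. The pattern in isolation (model lists abbreviated, names illustrative):

```python
import gradio as gr

MODELS = {
    "xai": ["grok-2-latest", "grok-1"],
    "ollama": ["llama2", "mistral"],
}

with gr.Blocks() as demo:
    provider = gr.Radio(choices=list(MODELS), value="xai", label="Provider")
    model = gr.Dropdown(choices=MODELS["xai"], value="grok-2-latest", label="Model")

    def swap(choice):
        # Returning a new Dropdown updates `model` in place.
        return gr.Dropdown(choices=MODELS[choice], value=MODELS[choice][0])

    provider.change(fn=swap, inputs=provider, outputs=model)

demo.launch()
```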
@@ -267,7 +337,7 @@ def create_ui():
                     show_label=True
                 )
 
-        async def handle_chat(message, history,
+        async def handle_chat(message, history, provider_choice, model_name, xai_key, gemini_key):
             if not analyzer.current_repo:
                 return history + [[message, "⚠️ Mohon clone repository terlebih dahulu sebelum mengajukan pertanyaan."]]
 
@@ -275,24 +345,31 @@ def create_ui():
             history.append([message, ""])
 
             full_response = ""
-
-
-
+            if provider_choice == AIProvider.XAI:
+                async for chunk in analyzer.stream_xai_response(message, xai_key, model_name):
+                    full_response += chunk
+                    history[-1][1] = full_response
+                    yield history
+            elif provider_choice == AIProvider.GEMINI:
+                async for chunk in analyzer.stream_gemini_response(message, gemini_key or DEFAULT_GEMINI_KEY):
+                    full_response += chunk
+                    history[-1][1] = full_response
+                    yield history
+            else:  # OLLAMA
+                response = analyze_with_ollama(model_name, message)
+                history[-1][1] = response
                 yield history
 
-        def handle_clone(url, token, branch_name):
-            success, result = analyzer.clone_repository(url, token, branch_name if branch_name else None)
-            return result
-
-        clone_button.click(
-            fn=handle_clone,
-            inputs=[repo_url, github_token, branch],
-            outputs=clone_status
-        )
-
         send_button.click(
             fn=handle_chat,
-            inputs=[
+            inputs=[
+                chat_input,
+                chat_history,
+                provider,
+                model_dropdown,
+                xai_key,
+                gemini_key
+            ],
             outputs=chat_history
         ).then(
             fn=lambda: gr.update(value=""),
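`handle_chat` is an async generator handler: every `yield history` re-renders the Chatbot, which is what makes the token-by-token streaming visible, and the chained `.then(fn=lambda: gr.update(value=""))` clears the textbox once the stream finishes. Both patterns in a self-contained sketch (the fake token list stands in for a provider stream):

```python
import asyncio
import gradio as gr

async def respond(message, history):
    history = history + [[message, ""]]
    for token in ["streamed ", "token ", "by ", "token"]:  # stand-in for an API stream
        await asyncio.sleep(0.1)
        history[-1][1] += token
        yield history  # each yield updates the Chatbot in place

with gr.Blocks() as demo:
    chat = gr.Chatbot()
    box = gr.Textbox()
    box.submit(
        fn=respond, inputs=[box, chat], outputs=chat
    ).then(
        fn=lambda: gr.update(value=""), outputs=box  # clear the input afterwards
    )

demo.launch()
```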
@@ -301,7 +378,14 @@ def create_ui():
 
         chat_input.submit(
             fn=handle_chat,
-            inputs=[
+            inputs=[
+                chat_input,
+                chat_history,
+                provider,
+                model_dropdown,
+                xai_key,
+                gemini_key
+            ],
             outputs=chat_history
         ).then(
             fn=lambda: gr.update(value=""),