ErNewdev0 committed on
Commit f86a66f · verified · 1 Parent(s): 9df625c

chore: new app

Files changed (1)
  1. app.py +313 -0
app.py ADDED
@@ -0,0 +1,313 @@
+ import gradio as gr
+ import os
+ import shutil
+ from pathlib import Path
+ import subprocess
+ import requests
+ import json
+ from datetime import datetime
+ import textwrap
+
+ # Metadata
+ CURRENT_TIME = "2025-05-22 22:42:10"
+ CURRENT_USER = "ErRickow"
+
+ # Ollama API settings (falls back to Ollama's default local port)
+ OLLAMA_API = os.environ.get("OLLAMA_API", "http://localhost:11434")
+
+ # Default available models
+ DEFAULT_MODELS = [
+     "llama2",
+     "codellama",
+     "mistral",
+     "neural-chat",
+     "starling-lm",
+     "dolphin-phi",
+     "phi",
+     "orca-mini"
+ ]
+
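+ # Ollama's GET /api/tags endpoint lists locally installed models, so a
+ # 200 response doubles as a health check for the server.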
+ def check_ollama_status():
+     try:
+         response = requests.get(f"{OLLAMA_API}/api/tags", timeout=10)
+         return response.status_code == 200
+     except requests.RequestException:
+         return False
+
+ def list_available_models():
+     try:
+         response = requests.get(f"{OLLAMA_API}/api/tags", timeout=10)
+         installed_models = [model['name'] for model in response.json().get('models', [])]
+         # Combine installed and default models
+         all_models = list(set(installed_models + DEFAULT_MODELS))
+         return sorted(all_models)  # Sort for better presentation
+     except (requests.RequestException, ValueError):
+         return sorted(DEFAULT_MODELS)
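+
+ # POST /api/pull streams progress as newline-delimited JSON objects
+ # (e.g. {"status": "pulling manifest"}); the loop below just logs each one.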
+ def download_model(model_name):
+     if not model_name:
+         return "Please select a model to download"
+
+     print(f"Starting download of model: {model_name}")
+     try:
+         headers = {
+             "Content-Type": "application/json",
+         }
+
+         response = requests.post(
+             f"{OLLAMA_API}/api/pull",
+             headers=headers,
+             json={"name": model_name},
+             stream=True
+         )
+
+         if response.status_code == 200:
+             for line in response.iter_lines():
+                 if line:
+                     print(f"Download progress: {line.decode()}")
+             return f"Successfully downloaded model: {model_name}"
+         else:
+             error_msg = f"Failed to download model. Status: {response.status_code}"
+             print(error_msg)
+             return error_msg
+
+     except Exception as e:
+         error_msg = f"Error downloading model: {str(e)}"
+         print(error_msg)
+         return error_msg
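+
+ # Caveat: embedding the token in the clone URL means git records it in the
+ # cloned repository's .git/config.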
+ def clone_repository(repo_url, github_token, branch=None):
+     """Clone a repository with authentication"""
+     repo_name = repo_url.split('/')[-1].replace('.git', '')
+     print(f"Cloning repository: {repo_url} to {repo_name}")
+
+     if os.path.exists(repo_name):
+         print(f"Removing existing repository: {repo_name}")
+         shutil.rmtree(repo_name)
+
+     try:
+         owner_repo = '/'.join(repo_url.split('/')[-2:])
+         auth_url = f"https://{github_token}@github.com/{owner_repo}"
+
+         cmd = ['git', 'clone']
+         if branch:
+             cmd.extend(['--branch', branch])
+         cmd.append(auth_url)
+
+         # GIT_TERMINAL_PROMPT=0 stops git from hanging on an interactive
+         # credential prompt if the token is rejected.
+         process = subprocess.run(
+             cmd,
+             capture_output=True,
+             text=True,
+             env=dict(os.environ, GIT_ASKPASS='echo', GIT_TERMINAL_PROMPT='0')
+         )
+
+         if process.returncode == 0:
+             print(f"Successfully cloned repository: {repo_name}")
+             return True, repo_name
+         else:
+             print(f"Failed to clone repository: {process.stderr}")
+             return False, process.stderr
+     except Exception as e:
+         error_msg = f"Error cloning repository: {str(e)}"
+         print(error_msg)
+         return False, error_msg
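+
+ # /api/generate with "stream": False returns a single JSON object whose
+ # "response" field holds the full completion.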
+ def analyze_with_ollama(model_name, text):
+     """Process text with Ollama model"""
+     print(f"\nAnalyzing with {model_name}...")
+     try:
+         payload = {
+             "model": model_name,
+             "prompt": text,
+             "stream": False,
+             "options": {
+                 "temperature": 0.7,
+                 "top_p": 0.9,
+                 "num_predict": 2048,  # Ollama's name for the max-tokens limit
+                 "stop": None
+             }
+         }
+
+         print("Sending request to Ollama API...")
+         response = requests.post(
+             f"{OLLAMA_API}/api/generate",
+             headers={"Content-Type": "application/json"},
+             json=payload,
+             timeout=60
+         )
+
+         print(f"Response status: {response.status_code}")
+
+         if response.status_code == 200:
+             result = response.json()
+             if 'response' in result:
+                 print("Got response from model")
+                 return result['response']
+             else:
+                 print("Unexpected response format:", result)
+                 return "Error: Unexpected response format from model"
+         else:
+             error_msg = f"API Error {response.status_code}: {response.text}"
+             print(error_msg)
+             return error_msg
+
+     except Exception as e:
+         error_msg = f"Error processing with model: {str(e)}"
+         print(error_msg)
+         return error_msg
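+
+ # Split long file contents into prompt-sized chunks. Note that textwrap.wrap
+ # replaces newlines with spaces by default, so line structure inside each
+ # chunk is flattened.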
+ def chunk_text(text, max_length=4000):
+     return textwrap.wrap(text, max_length, break_long_words=False, break_on_hyphens=False)
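+
+ # Try common encodings in order. latin-1 accepts any byte sequence, so it
+ # effectively acts as the catch-all before cp1252 is ever tried.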
+ def read_file_safely(file_path):
+     encodings = ['utf-8', 'latin-1', 'cp1252']
+     for encoding in encodings:
+         try:
+             with open(file_path, 'r', encoding=encoding) as f:
+                 content = f.read()
+                 print(f"Successfully read file with {encoding} encoding")
+                 return True, content
+         except UnicodeDecodeError:
+             continue
+         except Exception as e:
+             error_msg = f"Error reading file: {str(e)}"
+             print(error_msg)
+             return False, error_msg
+     return False, "Unable to read file with supported encodings"
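+
+ # Gradio UI: one tab for model management, one for cloning and analyzing a repo.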
+ def create_ui():
+     with gr.Blocks(title="Ollama Repository Analyzer") as app:
+         gr.Markdown(f"""
+         # Ollama Repository Analyzer
+
+         Current Time: {CURRENT_TIME}
+         User: {CURRENT_USER}
+         """)
+
+         with gr.Tab("Model Management"):
+             model_status = gr.Textbox(label="Ollama Status", interactive=False)
+             available_models = gr.Dropdown(
+                 label="Available Models",
+                 choices=DEFAULT_MODELS,
+                 interactive=True
+             )
+             download_button = gr.Button("Download Selected Model")
+             download_status = gr.Textbox(label="Download Status", interactive=False)
+
+             def update_status():
+                 status = "Connected" if check_ollama_status() else "Not Connected"
+                 models = list_available_models()
+                 return status, gr.Dropdown(choices=models)
+
+             download_button.click(
+                 fn=download_model,
+                 inputs=[available_models],
+                 outputs=[download_status]
+             )
+
+         with gr.Tab("Repository Analysis"):
+             repo_url = gr.Textbox(label="Repository URL")
+             github_token = gr.Textbox(label="GitHub Token", type="password")
+             branch = gr.Textbox(label="Branch (optional)")
+             clone_button = gr.Button("Clone Repository")
+             clone_status = gr.Textbox(label="Clone Status", interactive=False)
+
+             with gr.Row():
+                 file_list = gr.Dropdown(label="Files in Repository", multiselect=True)
+                 selected_model = gr.Dropdown(
+                     label="Select Model for Analysis",
+                     choices=DEFAULT_MODELS,
+                     interactive=True
+                 )
+
+             analyze_button = gr.Button("Analyze Selected Files")
+             debug_output = gr.Textbox(label="Debug Output", interactive=False)
+             analysis_output = gr.Markdown()
+
+             def handle_clone(url, token, branch_name):
+                 print(f"\nCloning repository: {url}")
+                 success, result = clone_repository(url, token, branch_name if branch_name else None)
+                 if success:
+                     files = [str(p) for p in Path(result).rglob('*')
+                              if p.is_file() and '.git' not in str(p)]
+                     print(f"Found {len(files)} files in repository")
+                     return f"Successfully cloned: {result}", gr.Dropdown(choices=files)
+                 return f"Clone failed: {result}", gr.Dropdown(choices=[])
+             def analyze_files(files, model_name):
+                 if not files:
+                     return "Please select files to analyze", "No files selected"
+
+                 debug_info = []
+                 results = []
+
+                 debug_info.append(f"Starting analysis with model: {model_name}")
+                 debug_info.append(f"Files to analyze: {len(files)}")
+
+                 for file_path in files:
+                     debug_info.append(f"\nProcessing file: {file_path}")
+                     success, content = read_file_safely(file_path)
+
+                     if success:
+                         chunks = chunk_text(content)
+                         debug_info.append(f"Split into {len(chunks)} chunks")
+                         analysis = []
+
+                         for i, chunk in enumerate(chunks, 1):
+                             debug_info.append(f"Analyzing chunk {i}/{len(chunks)}")
+                             prompt = f"""
+                             Analyze this code/content:
+
+                             File: {file_path}
+                             Part {i}/{len(chunks)}
+
+                             ```
+                             {chunk}
+                             ```
+
+                             Provide:
+                             1. Brief overview
+                             2. Key functionality
+                             3. Notable patterns or concerns
+                             4. Suggestions (if any)
+                             """
+
+                             response = analyze_with_ollama(model_name, prompt)
+                             debug_info.append(f"Got response of length: {len(response)}")
+                             analysis.append(response)
+
+                         results.append(f"### Analysis of {file_path}\n\n" +
+                                        "\n\n=== Next Part ===\n\n".join(analysis))
+                     else:
+                         error_msg = f"Error reading {file_path}: {content}"
+                         debug_info.append(error_msg)
+                         results.append(error_msg)
+
+                 return "\n\n---\n\n".join(results), "\n".join(debug_info)
+
+             clone_button.click(
+                 fn=handle_clone,
+                 inputs=[repo_url, github_token, branch],
+                 outputs=[clone_status, file_list]
+             )
+
+             analyze_button.click(
+                 fn=analyze_files,
+                 inputs=[file_list, selected_model],
+                 outputs=[analysis_output, debug_output]
+             )
+
+         # Refresh the connection status and model list when the app loads
+         app.load(update_status, outputs=[model_status, available_models])
+
+     return app
+
+ # Launch the app
+ if __name__ == "__main__":
+     print(f"""
+     Starting Ollama Repository Analyzer
+     Time: {CURRENT_TIME}
+     User: {CURRENT_USER}
+     """)
+
+     app = create_ui()
+     app.launch(share=True)