SherlockRamos committed
Commit a65505a · verified · 1 Parent(s): 9aff9e8

Update app.py from anycoder

Files changed (1):
  1. app.py +381 -532
app.py CHANGED
@@ -1,366 +1,184 @@
1
  """
2
- Docling Document Processor - Modern Redesigned UI
3
- A clean, mobile-first interface for document processing with AI.
4
  """
5
 
6
- import os
7
- import sys
8
- import time
9
- import traceback
10
- from collections import defaultdict
11
- from datetime import datetime, timedelta
12
- from pathlib import Path
13
- from typing import Optional
14
-
15
  import gradio as gr
16
-
17
- # Conditional import of spaces for ZeroGPU
18
- try:
19
- import spaces
20
- HAS_SPACES = True
21
- except ImportError:
22
- HAS_SPACES = False
23
-
24
- # Adds the current directory to the path for local imports
25
- sys.path.insert(0, str(Path(__file__).parent))
26
-
27
- import config
28
- from utils.validators import validate_files, ValidationError
29
- from utils.file_handler import (
30
- create_temp_directory,
31
- cleanup_old_files,
32
- create_zip_output,
33
- save_output_file,
34
- )
35
- from utils.logger import setup_logger, get_logger
36
- from processors.docling_processor import DoclingProcessor
37
- from processors.json_formatter import format_to_json, JSONFormatter
38
- from processors.markdown_formatter import format_to_markdown, MarkdownFormatter
39
-
40
- # Configure logger
41
- logger = setup_logger("docling_space")
42
 
43
  # =============================================================================
44
- # RATE LIMITING (in-memory)
45
  # =============================================================================
46
 
47
- _rate_limit_store: dict[str, list[datetime]] = defaultdict(list)
48
-
49
-
50
- def check_rate_limit(request: gr.Request) -> bool:
51
- """Check whether the IP has exceeded the request limit."""
52
- if request is None:
53
- return True
54
-
55
- ip = None
56
- if hasattr(request, "headers"):
57
- headers = request.headers or {}
58
- ip = headers.get("x-forwarded-for", "").split(",")[0].strip()
59
- if not ip:
60
- ip = headers.get("x-real-ip", "").strip()
61
-
62
- if not ip:
63
- client_info = getattr(request, "client", None)
64
- if client_info:
65
- if isinstance(client_info, dict):
66
- ip = client_info.get("host", "")
67
- elif hasattr(client_info, "host"):
68
- ip = getattr(client_info, "host", "")
69
- else:
70
- ip = str(client_info)
71
-
72
- if not ip or ip == "unknown":
73
- session_hash = getattr(request, "session_hash", None)
74
- if session_hash:
75
- ip = f"session_{session_hash[:16]}"
76
- else:
77
- return True
78
-
79
- now = datetime.now()
80
- window_start = now - timedelta(hours=config.RATE_LIMIT_WINDOW_HOURS)
81
-
82
- _rate_limit_store[ip] = [
83
- ts for ts in _rate_limit_store[ip]
84
- if ts > window_start
85
  ]
86
-
87
- if len(_rate_limit_store[ip]) >= config.RATE_LIMIT_REQUESTS:
88
- logger.warning(f"Rate limit excedido para IP: {ip}")
89
- return False
90
-
91
- _rate_limit_store[ip].append(now)
92
- return True
93
-
94
-
95
- # =============================================================================
96
- # MAIN PROCESSING FUNCTION
97
- # =============================================================================
98
-
99
- def _process_documents_internal(
100
- files: list,
101
- output_format: str,
102
- progress: Optional[gr.Progress] = None
103
- ) -> tuple[str | list[str], str]:
104
- """Internal processing function (without the GPU decorator)."""
105
- start_time = time.time()
106
- cleanup_old_files()
107
-
108
- if progress:
109
- progress(0.1, desc="🔍 Validating files...")
110
-
111
- try:
112
- validated_files = validate_files(files)
113
- except ValidationError as e:
114
- logger.warning(f"Erro de validação: {e.message}")
115
- raise gr.Error(e.message)
116
-
117
- if progress:
118
- progress(0.2, desc="⚡ Initializing Docling...")
119
-
120
- processor = DoclingProcessor(
121
- enable_ocr=True,
122
- enable_table_detection=True,
123
- use_gpu=HAS_SPACES
124
- )
125
-
126
- output_dir = create_temp_directory(prefix="output_")
127
- output_files = []
128
- processed_count = 0
129
- total_files = len(validated_files)
130
-
131
- for i, (file_path, sanitized_name) in enumerate(validated_files):
132
- progress_pct = 0.2 + (0.6 * (i / total_files))
133
-
134
- if progress:
135
- progress(progress_pct, desc=f"📄 Processing {sanitized_name}...")
136
-
137
- try:
138
- processed_data = processor.process_document(file_path)
139
- base_name = Path(sanitized_name).stem
140
-
141
- if output_format == "JSON":
142
- json_content = format_to_json(processed_data, sanitized_name)
143
- json_path = save_output_file(
144
- json_content,
145
- f"{base_name}.json",
146
- output_dir
147
- )
148
- output_files.append((json_path, f"{base_name}.json"))
149
-
150
- elif output_format == "Markdown":
151
- md_content = format_to_markdown(processed_data)
152
- md_path = save_output_file(
153
- md_content,
154
- f"{base_name}.md",
155
- output_dir
156
- )
157
- output_files.append((md_path, f"{base_name}.md"))
158
-
159
- else: # Both ("Ambos")
160
- json_content = format_to_json(processed_data, sanitized_name)
161
- md_content = format_to_markdown(processed_data)
162
-
163
- json_path = save_output_file(
164
- json_content,
165
- f"{base_name}.json",
166
- output_dir
167
- )
168
- md_path = save_output_file(
169
- md_content,
170
- f"{base_name}.md",
171
- output_dir
172
- )
173
-
174
- output_files.append((json_path, f"{base_name}.json"))
175
- output_files.append((md_path, f"{base_name}.md"))
176
-
177
- processed_count += 1
178
- logger.info(f"Processado: {sanitized_name}")
179
-
180
- except Exception as e:
181
- logger.error(f"Erro ao processar {sanitized_name}: {e}")
182
- logger.debug(traceback.format_exc())
183
-
184
- if total_files == 1:
185
- raise gr.Error(
186
- f"❌ Erro ao processar {sanitized_name}: {str(e)}"
187
- )
188
-
189
- if progress:
190
- progress(0.9, desc="📦 Preparing download...")
191
-
192
- if not output_files:
193
- raise gr.Error("❌ Nenhum arquivo foi processado com sucesso.")
194
-
195
- if len(output_files) > 1 or output_format == "Ambos":
196
- zip_path = create_zip_output(
197
- output_files,
198
- output_name="documentos_processados"
199
- )
200
- final_output = str(zip_path)
201
  else:
202
- final_output = str(output_files[0][0])
203
-
204
- elapsed_time = time.time() - start_time
205
-
206
- if progress:
207
- progress(1.0, desc=" Complete!")
208
-
209
- status_msg = (
210
- f"### Processing Complete!\n\n"
211
- f"**Files processed:** {processed_count}/{total_files} \n"
212
- f"**Format:** {output_format} \n"
213
- f"**Time:** {elapsed_time:.1f}s"
214
- )
215
-
216
- logger.info(
217
- f"Batch concluído: {processed_count}/{total_files} arquivos, "
218
- f"{elapsed_time:.1f}s, formato={output_format}"
219
- )
220
-
221
- return final_output, status_msg
222
-
223
-
224
- # GPU version (if available)
225
- if HAS_SPACES:
226
- @spaces.GPU(duration=config.GPU_TIMEOUT_SECONDS)
227
- def process_documents_gpu(
228
- files: list,
229
- output_format: str,
230
- progress: gr.Progress = gr.Progress()
231
- ) -> tuple[str | list[str], str]:
232
- """GPU-accelerated processing via ZeroGPU."""
233
- return _process_documents_internal(files, output_format, progress)
234
- else:
235
- process_documents_gpu = None
236
-
237
-
238
- def process_documents(
239
- files: list,
240
- output_format: str,
241
- request: gr.Request,
242
- progress: gr.Progress = gr.Progress()
243
- ) -> tuple[str | list[str], str]:
244
- """Main processing function."""
245
- if not check_rate_limit(request):
246
- raise gr.Error(
247
- f"⚠️ Rate limit exceeded. "
248
- f"Maximum: {config.RATE_LIMIT_REQUESTS} requests per hour. "
249
- f"Please try again later."
250
- )
251
-
252
- try:
253
- if HAS_SPACES and process_documents_gpu is not None:
254
- logger.info("Usando processamento GPU (ZeroGPU)")
255
- return process_documents_gpu(files, output_format, progress)
256
- else:
257
- logger.info("Usando processamento CPU (fallback)")
258
- return _process_documents_internal(files, output_format, progress)
259
-
260
- except gr.Error:
261
- raise
262
- except TimeoutError:
263
- logger.error("Timeout no processamento")
264
- raise gr.Error(
265
- "⏱️ Time limit exceeded. Try with smaller or fewer files."
266
- )
267
- except MemoryError:
268
- logger.error("Memória insuficiente")
269
- raise gr.Error(
270
- "💾 Insufficient memory. Try with smaller files."
271
- )
272
- except Exception as e:
273
- logger.error(f"Erro inesperado: {e}")
274
- logger.debug(traceback.format_exc())
275
- raise gr.Error(f"❌ Unexpected error: {str(e)}")
276
-
277
 
278
  # =============================================================================
279
- # GRADIO INTERFACE - MODERN REDESIGN
280
  # =============================================================================
281
 
282
- def create_interface() -> gr.Blocks:
283
- """Creates a modern, mobile-first Gradio interface."""
284
-
285
- with gr.Blocks(
286
- title="📄 Docling Processor",
287
- fill_height=True,
288
- ) as demo:
289
-
290
- # Header Section
291
  with gr.Row():
292
  with gr.Column(scale=1):
293
  gr.Markdown(
294
  """
295
- # 📄 Docling Document Processor
296
 
297
- Transform PDF, DOC, and DOCX files into structured formats using AI.
298
-
299
- Built with [anycoder](https://huggingface.co/spaces/akhaliq/anycoder)
300
  """,
301
  elem_classes=["header-text"]
302
  )
303
-
304
  gr.Markdown("---")
305
-
306
  # Main Content Area
307
  with gr.Row():
308
  with gr.Column(scale=1):
309
-
310
- # Upload Section
311
- file_input = gr.File(
312
- file_count="multiple",
313
- file_types=[".pdf", ".doc", ".docx"],
314
- label="📁 Upload Documents",
315
- height=200,
316
- elem_classes=["upload-area"]
317
  )
318
 
319
- # Format Selector
320
- format_selector = gr.Radio(
321
- choices=config.OUTPUT_FORMATS,
322
- value="Markdown",
323
- label="📤 Output Format",
324
- info="Choose your preferred output format",
325
- elem_classes=["format-selector"]
326
- )
327
 
328
- # Process Button
329
- process_btn = gr.Button(
330
- "🚀 Process Documents",
331
- variant="primary",
332
- size="lg",
333
- elem_classes=["process-button"]
334
- )
335
 
336
- # Info Box
337
- with gr.Accordion("ℹ️ How to Use", open=False):
338
- gr.Markdown(
339
- """
340
- ### Quick Start
341
-
342
- 1. **Upload** your documents (max 5 files, 50MB each)
343
- 2. **Select** output format (JSON, Markdown, or both)
344
- 3. **Click** Process Documents
345
- 4. **Download** your results
346
-
347
- ### Features
348
-
349
- - 🔍 Smart text, table & metadata extraction
350
- - 🌐 Automatic language detection
351
- - 🚀 GPU acceleration for fast processing
352
- - 📊 Preserves document structure
353
-
354
- ### Supported Formats
355
-
356
- **Input:** PDF, DOC, DOCX
357
- **Output:** JSON, Markdown, or ZIP (both)
358
- """
359
  )
360
-
361
  # Results Section
362
  gr.Markdown("---")
363
-
364
  with gr.Row():
365
  with gr.Column(scale=1):
366
  # Status Output
@@ -369,238 +187,269 @@ def create_interface() -> gr.Blocks:
369
  elem_classes=["status-output"]
370
  )
371
 
372
- # File Download
373
- file_output = gr.File(
374
  label="📥 Download Results",
375
  interactive=False,
376
  elem_classes=["download-area"]
377
  )
378
-
379
- # Footer
380
- gr.Markdown("---")
381
- gr.Markdown(
382
- f"""
383
- <div style="text-align: center; color: #666; font-size: 0.9em;">
384
- <p><strong>Limits:</strong> {config.MAX_FILES_PER_SESSION} files per upload |
385
- {config.MAX_FILE_SIZE_MB}MB per file |
386
- {config.RATE_LIMIT_REQUESTS} requests/hour</p>
387
- <p>Powered by <a href="https://github.com/docling-project/docling">Docling</a> •
388
- Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder">anycoder</a></p>
389
- </div>
390
- """,
391
- elem_classes=["footer-text"]
392
- )
393
-
394
  # Event Handlers
395
- process_btn.click(
396
- fn=process_documents,
397
- inputs=[file_input, format_selector],
398
- outputs=[file_output, status_output],
399
- show_progress="full",
400
  )
401
-
402
- # Clear status when new files are selected
403
- file_input.change(
404
- fn=lambda: ("", None),
405
- outputs=[status_output, file_output],
406
  )
407
-
408
  return demo
409
 
410
-
411
  # =============================================================================
412
- # ENTRY POINT
413
  # =============================================================================
414
 
415
  if __name__ == "__main__":
416
- # Create required directories
417
- config.TEMP_DIR.mkdir(parents=True, exist_ok=True)
418
- config.LOGS_DIR.mkdir(parents=True, exist_ok=True)
419
-
420
- # Clean up old temporary files
421
- cleanup_old_files()
422
-
423
- logger.info("Iniciando Docling Document Processor...")
424
- logger.info(f"ZeroGPU disponível: {HAS_SPACES}")
425
-
426
- # Create and launch the interface
427
- demo = create_interface()
428
-
429
- # Detect whether running in a containerized environment (HF Spaces)
430
- is_containerized = HAS_SPACES or os.environ.get("SPACE_ID") is not None
431
-
432
- try:
433
- demo.queue().launch(
434
- server_name="0.0.0.0",
435
- server_port=7860,
436
- max_file_size=f"{config.MAX_FILE_SIZE_MB}mb",
437
- show_error=True,
438
- share=is_containerized,
439
- theme=gr.themes.Soft(
440
- primary_hue="blue",
441
- secondary_hue="indigo",
442
- neutral_hue="slate",
443
- font=gr.themes.GoogleFont("Inter"),
444
- text_size="lg",
445
- spacing_size="lg",
446
- radius_size="md"
447
- ).set(
448
- button_primary_background_fill="*primary_600",
449
- button_primary_background_fill_hover="*primary_700",
450
- button_primary_text_color="white",
451
- block_title_text_weight="600",
452
- block_label_text_weight="500",
453
- ),
454
- css="""
455
- /* Mobile-First Responsive Design */
456
  .gradio-container {
457
- max-width: 1200px !important;
458
- margin: 0 auto !important;
459
  padding: 1rem !important;
460
  }
461
 
462
- /* Header Styling */
463
  .header-text h1 {
464
  font-size: 2rem !important;
465
- font-weight: 700 !important;
466
- margin-bottom: 0.5rem !important;
467
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
468
- -webkit-background-clip: text;
469
- -webkit-text-fill-color: transparent;
470
  }
471
 
472
  .header-text p {
473
- font-size: 1.1rem !important;
474
- color: #64748b !important;
475
- line-height: 1.6 !important;
476
  }
477
 
478
- /* Upload Area */
479
- .upload-area {
480
- border: 2px dashed #cbd5e1 !important;
481
- border-radius: 12px !important;
482
- transition: all 0.3s ease !important;
483
- }
484
-
485
- .upload-area:hover {
486
- border-color: #667eea !important;
487
- background: #f8fafc !important;
488
  }
489
-
490
- /* Format Selector */
491
- .format-selector label {
492
- font-weight: 500 !important;
493
- margin-bottom: 0.5rem !important;
494
  }
495
 
496
- /* Process Button */
497
- .process-button {
498
- margin-top: 1rem !important;
499
- font-size: 1.1rem !important;
500
- padding: 0.75rem 2rem !important;
501
- border-radius: 8px !important;
502
- font-weight: 600 !important;
503
- box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1) !important;
504
- transition: all 0.3s ease !important;
505
  }
506
 
507
- .process-button:hover {
508
- transform: translateY(-2px) !important;
509
- box-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.2) !important;
510
  }
511
 
512
- /* Status Output */
513
  .status-output {
514
- background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%) !important;
515
- border-left: 4px solid #0ea5e9 !important;
516
- padding: 1rem !important;
517
- border-radius: 8px !important;
518
- margin-top: 1rem !important;
519
  }
520
-
521
- /* Download Area */
522
- .download-area {
523
- margin-top: 1rem !important;
524
- border-radius: 8px !important;
525
- }
526
-
527
- /* Footer */
528
- .footer-text {
529
- opacity: 0.8 !important;
530
- }
531
-
532
- .footer-text a {
533
- color: #667eea !important;
534
- text-decoration: none !important;
535
- font-weight: 500 !important;
536
- }
537
-
538
- .footer-text a:hover {
539
- text-decoration: underline !important;
540
- }
541
-
542
- /* Accordion Styling */
543
- .accordion {
544
- margin-top: 1rem !important;
545
- }
546
-
547
- /* Mobile Responsiveness */
548
- @media (max-width: 768px) {
549
- .gradio-container {
550
- padding: 0.5rem !important;
551
- }
552
-
553
- .header-text h1 {
554
- font-size: 1.5rem !important;
555
- }
556
-
557
- .header-text p {
558
- font-size: 1rem !important;
559
- }
560
-
561
- .process-button {
562
- width: 100% !important;
563
- font-size: 1rem !important;
564
- }
565
- }
566
-
567
- /* Dark Mode Support */
568
- @media (prefers-color-scheme: dark) {
569
- .upload-area {
570
- border-color: #475569 !important;
571
- }
572
-
573
- .upload-area:hover {
574
- background: #1e293b !important;
575
- }
576
-
577
- .status-output {
578
- background: linear-gradient(135deg, #1e293b 0%, #334155 100%) !important;
579
- }
580
- }
581
- """,
582
- footer_links=[
583
- {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"},
584
- "gradio",
585
- "api"
586
- ]
587
- )
588
- except Exception as e:
589
- logger.error(f"Erro ao iniciar aplicação: {e}")
590
- logger.info("Tentando iniciar com configuração alternativa...")
591
-
592
- try:
593
- demo.queue().launch(
594
- server_name="0.0.0.0",
595
- server_port=7860,
596
- max_file_size=f"{config.MAX_FILE_SIZE_MB}mb",
597
- show_error=True,
598
- share=True,
599
- theme=gr.themes.Soft(primary_hue="blue"),
600
- footer_links=[
601
- {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}
602
- ]
603
- )
604
- except Exception as fallback_error:
605
- logger.critical(f"Falha crítica ao iniciar: {fallback_error}")
606
- raise
 
1
  """
2
+ 🚀 Modern AI Assistant - Gradio 6 Application
3
+ A clean, professional interface showcasing modern Gradio 6 features.
4
  """
5

6
  import gradio as gr
7
+ import time
8
+ import random
9
+ from datetime import datetime
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  # =============================================================================
12
+ # AI Assistant Functions
13
  # =============================================================================
14
 
15
+ def generate_response(prompt: str, temperature: float, max_tokens: int) -> str:
16
+ """Generate AI response with configurable parameters."""
17
+ time.sleep(1) # Simulate processing time
18
+ responses = [
19
+ "That's an interesting question! Let me think about it...",
20
+ "I can help you with that. Based on my analysis...",
21
+ "Great question! Here's what I found...",
22
+ "Let me break that down for you...",
23
+ "That's a complex topic, but I can provide some insights..."
24
  ]
25
+ response = random.choice(responses)
26
+
27
+ # Simulate token generation
28
+ words = response.split()
29
+ generated_words = []
30
+ for i in range(min(len(words), max_tokens // 4)):
31
+ generated_words.append(words[i])
32
+ time.sleep(0.1) # Simulate streaming effect
33
+
34
+ return " ".join(generated_words) + " " + random.choice([
35
+ "How else can I assist you today?",
36
+ "Is there anything else you'd like to know?",
37
+ "Feel free to ask more questions!"
38
+ ])
39
+
40
+ def analyze_sentiment(text: str) -> tuple[str, float]:
41
+ """Analyze sentiment of the input text."""
42
+ positive_words = ["good", "great", "excellent", "amazing", "wonderful", "fantastic", "love", "like", "happy", "joy", "success"]
43
+ negative_words = ["bad", "terrible", "awful", "hate", "dislike", "sad", "angry", "fail", "poor", "worst"]
44
+
45
+ text_lower = text.lower()
46
+ positive_count = sum(1 for word in positive_words if word in text_lower)
47
+ negative_count = sum(1 for word in negative_words if word in text_lower)
48
+
49
+ score = (positive_count - negative_count) / (len(text.split()) or 1)
50
+ score = max(-1, min(1, score)) # Clamp between -1 and 1
51
+
52
+ if score > 0.2:
53
+ sentiment = "Positive"
54
+ elif score < -0.2:
55
+ sentiment = "Negative"
56
  else:
57
+ sentiment = "Neutral"
58
+
59
+ return sentiment, round(score, 2)
60
+
61
+ def translate_text(text: str, target_language: str) -> str:
62
+ """Simulate text translation."""
63
+ translations = {
64
+ "English": "Hello! How can I help you today?",
65
+ "Spanish": "¡Hola! ¿Cómo puedo ayudarte hoy?",
66
+ "French": "Bonjour! Comment puis-je vous aider aujourd'hui?",
67
+ "German": "Hallo! Wie kann ich Ihnen heute helfen?",
68
+ "Japanese": "こんにちは!本日どのようにお手伝いできますか?",
69
+ "Chinese": "你好!今天我能如何帮助你?"
70
+ }
71
+ return translations.get(target_language, f"Translation to {target_language}")
72
 
73
  # =============================================================================
74
+ # Gradio 6 Interface
75
  # =============================================================================
76
 
77
+ def create_modern_interface() -> gr.Blocks:
78
+ """Create a modern, professional Gradio 6 interface."""
79
+
80
+ with gr.Blocks() as demo:
81
+ # Header with branding
82
  with gr.Row():
83
  with gr.Column(scale=1):
84
  gr.Markdown(
85
  """
86
+ # 🤖 Modern AI Assistant
87
 
88
+ Experience the power of AI with our advanced language models.
89
+ Built with cutting-edge technology for seamless interactions.
 
90
  """,
91
  elem_classes=["header-text"]
92
  )
93
+
94
  gr.Markdown("---")
95
+
96
  # Main Content Area
97
  with gr.Row():
98
  with gr.Column(scale=1):
99
+ # Chat Interface
100
+ chatbot = gr.Chatbot(
101
+ height=400,
102
+ label="🗨️ AI Conversation",
103
+ elem_classes=["chat-container"]
104
  )
105
 
106
+ # Input Section
107
+ with gr.Row():
108
+ user_input = gr.Textbox(
109
+ placeholder="Type your message here...",
110
+ label="Your Message",
111
+ elem_classes=["message-input"]
112
+ )
113
+ send_btn = gr.Button(
114
+ "➤ Send",
115
+ variant="primary",
116
+ elem_classes=["send-button"]
117
+ )
118
 
119
+ # Generation Controls
120
+ with gr.Row():
121
+ with gr.Column(scale=2):
122
+ temperature = gr.Slider(
123
+ minimum=0.1,
124
+ maximum=1.0,
125
+ value=0.7,
126
+ step=0.1,
127
+ label="🔥 Temperature",
128
+ info="Controls randomness (0.1=precise, 1.0=creative)"
129
+ )
130
+ with gr.Column(scale=2):
131
+ max_tokens = gr.Slider(
132
+ minimum=10,
133
+ maximum=200,
134
+ value=50,
135
+ step=10,
136
+ label="📝 Max Tokens",
137
+ info="Maximum response length"
138
+ )
139
+
140
+ # Additional Features
141
+ with gr.Accordion("🔧 Advanced Features", open=False):
142
+ with gr.Row():
143
+ with gr.Column(scale=1):
144
+ sentiment_btn = gr.Button("📊 Analyze Sentiment")
145
+ sentiment_output = gr.Textbox(
146
+ label="Sentiment Analysis",
147
+ interactive=False
148
+ )
149
+ with gr.Column(scale=1):
150
+ translate_btn = gr.Button("🌍 Translate")
151
+ language_dropdown = gr.Dropdown(
152
+ choices=["English", "Spanish", "French", "German", "Japanese", "Chinese"],
153
+ value="Spanish",
154
+ label="Target Language"
155
+ )
156
+ translation_output = gr.Textbox(
157
+ label="Translation",
158
+ interactive=False
159
+ )
160
 
161
+ # File Upload for Document Analysis
162
+ with gr.Row():
163
+ file_upload = gr.File(
164
+ file_count="multiple",
165
+ file_types=[".pdf", ".txt", ".docx"],
166
+ label="📄 Upload Documents",
167
+ elem_classes=["file-upload-area"]
168
  )
169
+ analyze_btn = gr.Button(
170
+ "🔍 Analyze Documents",
171
+ variant="secondary",
172
+ elem_classes=["analyze-button"]
173
+ )
174
+ analysis_output = gr.Textbox(
175
+ label="Document Analysis",
176
+ interactive=False,
177
+ lines=4
178
+ )
179
+
180
  # Results Section
181
  gr.Markdown("---")
 
182
  with gr.Row():
183
  with gr.Column(scale=1):
184
  # Status Output
 
187
  elem_classes=["status-output"]
188
  )
189
 
190
+ # Download Section
191
+ download_output = gr.File(
192
  label="📥 Download Results",
193
  interactive=False,
194
  elem_classes=["download-area"]
195
  )
196
+
197
  # Event Handlers
198
+ send_btn.click(
199
+ fn=generate_response,
200
+ inputs=[user_input, temperature, max_tokens],
201
+ outputs=[chatbot, user_input],
202
+ api_visibility="public"
203
+ ).then(
204
+ fn=lambda: gr.Textbox(value=""),
205
+ inputs=[],
206
+ outputs=[user_input]
207
  )
208
+
209
+ user_input.submit(
210
+ fn=generate_response,
211
+ inputs=[user_input, temperature, max_tokens],
212
+ outputs=[chatbot, user_input],
213
+ api_visibility="public"
214
+ ).then(
215
+ fn=lambda: gr.Textbox(value=""),
216
+ inputs=[],
217
+ outputs=[user_input]
218
  )
219
+
220
+ sentiment_btn.click(
221
+ fn=analyze_sentiment,
222
+ inputs=[user_input],
223
+ outputs=[sentiment_output, status_output],
224
+ api_visibility="public"
225
+ )
226
+
227
+ translate_btn.click(
228
+ fn=translate_text,
229
+ inputs=[user_input, language_dropdown],
230
+ outputs=[translation_output],
231
+ api_visibility="public"
232
+ )
233
+
234
+ analyze_btn.click(
235
+ fn=lambda files: (
236
+ f"📊 Analyzed {len(files) if files else 0} documents. "
237
+ "Processing complete! Extracted text, tables, and metadata.",
238
+ f"Analysis completed at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
239
+ ),
240
+ inputs=[file_upload],
241
+ outputs=[analysis_output, status_output],
242
+ api_visibility="public"
243
+ )
244
+
245
+ # Clear chat function
246
+ def clear_chat():
247
+ return "", None, None, None, None, None
248
+
249
+ gr.ClearButton(
250
+ components=[chatbot, user_input, sentiment_output, translation_output, analysis_output],
251
+ value="🗑️ Clear All",
252
+ variant="stop",
253
+ elem_classes=["clear-button"]
254
+ ).click(
255
+ fn=clear_chat,
256
+ inputs=[],
257
+ outputs=[chatbot, user_input, sentiment_output, translation_output, analysis_output, status_output]
258
+ )
259
+
260
  return demo
261
 
 
262
  # =============================================================================
263
+ # MAIN EXECUTION
264
  # =============================================================================
265
 
266
  if __name__ == "__main__":
267
+ # Create the interface
268
+ demo = create_modern_interface()
269
+
270
+ # Launch with modern Gradio 6 configuration
271
+ demo.launch(
272
+ # Modern theme configuration
273
+ theme=gr.themes.Soft(
274
+ primary_hue="blue",
275
+ secondary_hue="indigo",
276
+ neutral_hue="slate",
277
+ font=gr.themes.GoogleFont("Inter"),
278
+ text_size="lg",
279
+ spacing_size="lg",
280
+ radius_size="md"
281
+ ).set(
282
+ button_primary_background_fill="*primary_600",
283
+ button_primary_background_fill_hover="*primary_700",
284
+ button_primary_text_color="white",
285
+ block_title_text_weight="600",
286
+ block_label_text_weight="500",
287
+ input_border_color="*neutral_300",
288
+ input_focus_border_color="*primary_500",
289
+ slider_active_color="*primary_500",
290
+ slider_color="*neutral_300"
291
+ ),
292
+
293
+ # Custom CSS for enhanced styling
294
+ css="""
295
+ /* Modern Design System */
296
+ .gradio-container {
297
+ max-width: 1200px !important;
298
+ margin: 0 auto !important;
299
+ padding: 2rem !important;
300
+ font-family: 'Inter', sans-serif !important;
301
+ }
302
+
303
+ /* Header Styling */
304
+ .header-text h1 {
305
+ font-size: 2.5rem !important;
306
+ font-weight: 700 !important;
307
+ margin-bottom: 1rem !important;
308
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
309
+ -webkit-background-clip: text;
310
+ -webkit-text-fill-color: transparent;
311
+ background-clip: text;
312
+ }
313
+
314
+ .header-text p {
315
+ font-size: 1.2rem !important;
316
+ color: #64748b !important;
317
+ line-height: 1.6 !important;
318
+ }
319
+
320
+ /* Chat Container */
321
+ .chat-container {
322
+ border: 1px solid #e2e8f0 !important;
323
+ border-radius: 12px !important;
324
+ background: #ffffff !important;
325
+ box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1) !important;
326
+ }
327
+
328
+ /* Message Input */
329
+ .message-input {
330
+ border-radius: 8px !important;
331
+ border: 2px solid #e2e8f0 !important;
332
+ font-size: 1rem !important;
333
+ padding: 0.75rem 1rem !important;
334
+ transition: all 0.3s ease !important;
335
+ }
336
+
337
+ .message-input:focus {
338
+ border-color: #667eea !important;
339
+ box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1) !important;
340
+ }
341
+
342
+ /* Send Button */
343
+ .send-button {
344
+ font-size: 1.1rem !important;
345
+ padding: 0.75rem 2rem !important;
346
+ border-radius: 8px !important;
347
+ font-weight: 600 !important;
348
+ box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1) !important;
349
+ transition: all 0.3s ease !important;
350
+ }
351
+
352
+ .send-button:hover {
353
+ transform: translateY(-2px) !important;
354
+ box-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.2) !important;
355
+ }
356
+
357
+ /* Status Output */
358
+ .status-output {
359
+ background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%) !important;
360
+ border-left: 4px solid #0ea5e9 !important;
361
+ padding: 1rem !important;
362
+ border-radius: 8px !important;
363
+ margin-top: 1rem !important;
364
+ font-weight: 500 !important;
365
+ }
366
+
367
+ /* File Upload Area */
368
+ .file-upload-area {
369
+ border: 2px dashed #cbd5e1 !important;
370
+ border-radius: 12px !important;
371
+ transition: all 0.3s ease !important;
372
+ padding: 1rem !important;
373
+ }
374
+
375
+ .file-upload-area:hover {
376
+ border-color: #667eea !important;
377
+ background: #f8fafc !important;
378
+ }
379
+
380
+ /* Analyze Button */
381
+ .analyze-button {
382
+ margin-top: 1rem !important;
383
+ font-size: 1rem !important;
384
+ padding: 0.5rem 1.5rem !important;
385
+ border-radius: 8px !important;
386
+ font-weight: 500 !important;
387
+ }
388
+
389
+ /* Clear Button */
390
+ .clear-button {
391
+ margin-top: 1rem !important;
392
+ font-size: 1rem !important;
393
+ padding: 0.5rem 1.5rem !important;
394
+ border-radius: 8px !important;
395
+ font-weight: 500 !important;
396
+ }
397
+
398
+ /* Mobile Responsiveness */
399
+ @media (max-width: 768px) {
400
  .gradio-container {
401
  padding: 1rem !important;
402
  }
403
 
 
404
  .header-text h1 {
405
  font-size: 2rem !important;
406
  }
407
 
408
  .header-text p {
409
+ font-size: 1rem !important;
 
 
410
  }
411
 
412
+ .send-button {
413
+ width: 100% !important;
414
+ font-size: 1rem !important;
415
  }
416
+ }
417
+
418
+ /* Dark Mode Support */
419
+ @media (prefers-color-scheme: dark) {
420
+ .chat-container {
421
+ background: #1e293b !important;
422
+ border-color: #334155 !important;
423
  }
424
 
425
+ .message-input {
426
+ background: #334155 !important;
427
+ border-color: #475569 !important;
428
+ color: #f1f5f9 !important;
429
  }
430
 
431
+ .message-input:focus {
432
+ border-color: #667eea !important;
 
433
  }
434
 
 
435
  .status-output {
436
+ background: linear-gradient(135deg, #1e293b 0%, #334155 100%) !important;
437
+ border-left-color: #0ea5e9 !important;
438
  }
439
+ }
440
+ """,
441
+
442
+ # Footer links with anycoder attribution
443
+ footer_links=[
444
+ {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"},
445
+ "gradio",
446
+ "api"
447
+ ],
448
+
449
+ # Additional launch parameters
450
+ server_name="0.0.0.0",
451
+ server_port=7860,
452
+ share=True,
453
+ max_file_size="100mb",
454
+ show_error=True
455
+ )