bluenevus committed
Commit 12ce912 · verified · 1 Parent(s): 1ab6fac

Update app.py

Files changed (1):
  1. app.py +132 -78
app.py CHANGED
@@ -1,93 +1,147 @@
 import gradio as gr
-from awq import AutoAWQForCausalLM
-from transformers import AutoTokenizer, AutoConfig
-from huggingface_hub import HfApi, login
 
-def quantize_model(
-    model_id: str,
-    hf_token: str,
-    repo_name: str,
-    progress=gr.Progress(track_tqdm=True)
-):
     try:
-        # Validate credentials first
-        login(token=hf_token, add_to_git_credential=True)
-        api = HfApi(token=hf_token)
-
-        # Check model accessibility
         try:
-            api.model_info(model_id)
         except Exception as e:
-            raise ValueError(f"Model access error: {str(e)}. Check:\n1. Token permissions\n2. Model existence\n3. Accept model terms at https://huggingface.co/{model_id}")
 
-        # Load config with proper auth
-        config = AutoConfig.from_pretrained(
-            model_id,
-            token=hf_token,
-            trust_remote_code=True
-        )
-
-        # Handle Llama 3 rope_scaling
-        if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
-            config.rope_scaling = {
-                "type": config.rope_scaling.get("rope_type", "linear"),
-                "factor": config.rope_scaling.get("factor", 1.0)
-            }
-
-        # Load model with validated credentials
-        model = AutoAWQForCausalLM.from_pretrained(
-            model_id,
-            config=config,
-            token=hf_token,
-            trust_remote_code=True,
-            device_map="auto"
-        )
 
-        # Load tokenizer with same credentials
-        tokenizer = AutoTokenizer.from_pretrained(
-            model_id,
-            token=hf_token,
-            trust_remote_code=True
-        )
 
-        # Quantize with auto-detected settings
-        model.quantize(tokenizer, quant_config={
-            "zero_point": True,
-            "q_group_size": 128,
-            "w_bit": 4,
-            "version": "GEMM" if "llama" in model_id.lower() else "GEMV"
-        })
 
-        # Save and push
-        save_path = f"{model_id.split('/')[-1]}-awq"
-        model.save_quantized(save_path)
-        model.push_to_hub(repo_name, token=hf_token)
 
-        return f"✅ Success!\nSaved: {save_path}\nPushed to: {repo_name}"
 
-    except Exception as e:
-        return f"❌ Critical Error:\n{str(e)}"
-
-with gr.Blocks() as app:
-    gr.Markdown("## 🔐 Secure AutoAWQ Quantizer")
 
-    with gr.Row():
-        model_id = gr.Textbox(label="Model ID",
-            placeholder="meta-llama/Meta-Llama-3-8B-Instruct",
-            info="Must have access rights")
-        hf_token = gr.Textbox(label="HF Token",
-            type="password",
-            info="Required for gated models")
-        repo_name = gr.Textbox(label="Destination Repo",
-            info="Format: username/repo-name")
 
-    go_btn = gr.Button("Start Quantization", variant="primary")
-    output = gr.Markdown()
 
-    go_btn.click(
-        quantize_model,
-        inputs=[model_id, hf_token, repo_name],
-        outputs=output
-    )
 
-app.launch()
 import gradio as gr
+import pandas as pd
+import matplotlib.pyplot as plt
+import io
+import google.generativeai as genai
+from PIL import Image
+import ast
+import re
+import traceback
 
+def process_file(api_key, file, instructions):
+    # Configure Gemini API with error handling
     try:
+        genai.configure(api_key=api_key)
+        model = genai.GenerativeModel('gemini-2.5-pro-preview-03-25')
+    except Exception as e:
+        return [generate_error_image(f"API Config Error: {str(e)}")] * 3
+
+    # Load data with validation
+    try:
+        if file.name.endswith('.csv'):
+            df = pd.read_csv(file.name)
+        else:
+            df = pd.read_excel(file.name)
+
+        if df.empty:
+            raise ValueError("Empty dataset uploaded")
+        if len(df.columns) < 2:
+            raise ValueError("Dataset needs at least 2 columns")
+    except Exception as e:
+        return [generate_error_image(f"Data Error: {str(e)}")] * 3
+
+    # Enhanced prompt with strict formatting
+    prompt = f"""Generate 3 Python matplotlib codes with these rules:
+1. Perfect Python 3.10 syntax with 4-space indentation
+2. Complete code blocks with proper indentation
+3. Use ONLY these variables: df, plt
+4. Each visualization must:
+   - Start with: plt.figure(figsize=(16,9), dpi=120)
+   - Use plt.style.use('ggplot')
+   - Include title, axis labels, and data visualization
+   - End with plt.tight_layout()
+
+Dataset Columns: {list(df.columns)}
+Data Sample: {df.head(3).to_dict()}
+User Instructions: {instructions or 'Show general trends'}
+
+Format EXACTLY as:
+# Visualization 1
+[properly indented code]
+
+# Visualization 2
+[properly indented code]
+
+# Visualization 3
+[properly indented code]
+"""
+
+    # Get and process Gemini response
+    try:
+        response = model.generate_content(prompt)
+        code_blocks = re.split(r'# Visualization \d+\s*', response.text)[1:4]
+    except Exception as e:
+        return [generate_error_image("API Response Error")] * 3
+
+    visualizations = []
+    for i, block in enumerate(code_blocks, 1):
         try:
+            # Clean and validate code
+            cleaned_code = sanitize_code(block, df.columns)
+            img = execute_plot_code(cleaned_code, df)
+            visualizations.append(img)
         except Exception as e:
+            print(f"Visualization {i} Error:\n{traceback.format_exc()}")
+            visualizations.append(generate_error_image(f"Error in Viz {i}"))
 
+    # Ensure exactly 3 outputs
+    return visualizations + [generate_error_image("Not Generated")]*(3-len(visualizations))
+
+def sanitize_code(code_block, columns):
+    """Fix indentation and syntax issues in generated code"""
+    lines = []
+    indent_level = 0
+    indent_size = 4
+    stack = []
+
+    for line in code_block.split('\n'):
+        line = line.rstrip()
+        if not line:
+            continue
+
+        # Remove markdown artifacts
+        line = re.sub(r'^```python|```$', '', line)
 
+        # Handle indentation triggers
+        if re.match(r'^\s*(for|if|while|with|def|class|try|except|else|elif)\b', line):
+            stack.append(indent_level)
+            indent_level += 1
+        elif re.match(r'^\s*(return|pass|break|continue|raise)', line):
+            indent_level = max(0, indent_level - 1)
 
+        # Apply current indentation
+        current_indent = ' ' * (indent_level * indent_size)
+        cleaned_line = current_indent + line.lstrip()
 
+        # Check for dedent patterns
+        if re.match(r'^\s*(\}|\)|]|else:|elif |except)', line):
+            if stack:
+                indent_level = stack.pop()
+
+        lines.append(cleaned_line)
 
+    cleaned_code = '\n'.join(lines)
 
+    # Validate syntax
+    try:
+        ast.parse(cleaned_code)
+    except SyntaxError as e:
+        raise ValueError(f"Syntax Error: {str(e)}")
 
+    return cleaned_code
+
+def execute_plot_code(code, df):
+    """Safely execute plotting code with resource management"""
+    buf = io.BytesIO()
+    plt.figure(figsize=(16, 9), dpi=120)
+    plt.style.use('ggplot')
 
+    try:
+        exec(code, {'df': df, 'plt': plt})
+        plt.tight_layout()
+        plt.savefig(buf, format='png', bbox_inches='tight')
+        buf.seek(0)
+        return Image.open(buf)
+    except Exception as e:
+        raise RuntimeError(f"Execution Error: {str(e)}")
+    finally:
+        plt.close()
 
+def generate_error_image(message):
+    """Create error indication image with message"""
+    from PIL import ImageDraw, ImageFont
+
+    img = Image.new('RGB', (1920, 1080), color=(255, 255, 255))
+    try:
+        draw = ImageDraw.Draw(img)
+        font = ImageFont
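
For context, a minimal sketch of how the two new helpers could be exercised on their own, outside Gradio and the Gemini call. The toy DataFrame, the hand-written snippet, and the "from app import ..." path are illustrative assumptions, not part of this commit:

import pandas as pd
from app import sanitize_code, execute_plot_code  # assumed module name

# Toy data standing in for an uploaded CSV (illustrative only)
df = pd.DataFrame({'month': ['Jan', 'Feb', 'Mar'], 'sales': [120, 95, 140]})

# A flat snippet of the shape the prompt asks Gemini to produce
snippet = """
plt.figure(figsize=(16, 9), dpi=120)
plt.style.use('ggplot')
plt.bar(df['month'], df['sales'])
plt.title('Monthly Sales')
plt.xlabel('Month')
plt.ylabel('Sales')
plt.tight_layout()
"""

cleaned = sanitize_code(snippet, df.columns)  # re-indents and ast-validates
image = execute_plot_code(cleaned, df)        # executes and returns a PIL Image
image.save('viz1.png')

Note that execute_plot_code passes only df and plt into the exec namespace, which limits (though does not fully sandbox) what generated code can reach, and the finally: plt.close() keeps repeated runs from leaking figures.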