Update interface.py

interface.py  (+17, -14)  CHANGED
@@ -12,14 +12,16 @@ from decorators import gpu_decorator  # Make sure the path is correct
 # New imports for Yi-Coder
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
+import json
+
+# Device initialization
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-# Yi-Coder model initialization
-device = "cuda" if torch.cuda.is_available() else "cpu"
 model_path = "01-ai/Yi-Coder-9B-Chat"
 
 # Load the tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_path)
-model = AutoModelForCausalLM.from_pretrained(model_path
+model = AutoModelForCausalLM.from_pretrained(model_path).to(device).eval()
 
 def parse_bounds(bounds_str, num_params):
     try:
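The hunk above fixes two bugs at once: the old `from_pretrained` call was missing its closing parenthesis, and the model was never moved off the CPU or put into eval mode. A minimal sketch of the same initialization pattern; the half-precision `torch_dtype` is an assumption added here to keep GPU memory manageable for a 9B-parameter checkpoint, not part of this commit:

```python
# Sketch of the module-level initialization above; torch_dtype is an
# assumption added to reduce memory use, not part of the committed code.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_path = "01-ai/Yi-Coder-9B-Chat"

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16 if device.type == "cuda" else torch.float32,
).to(device).eval()
```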
@@ -38,13 +40,14 @@ def parse_bounds(bounds_str, num_params):
         upper_bounds = [np.inf] * num_params
     return lower_bounds, upper_bounds
 
-
-def generate_analysis(prompt, max_length=1024, device=None):
+def generate_analysis(prompt, max_length=1024):
     """
     Generates an analysis using the Yi-Coder-9B-Chat model.
     """
     try:
+        # Tokenize the prompt and move the tensors to the correct device
         inputs = tokenizer(prompt, return_tensors="pt").to(device)
+
         with torch.no_grad():
             outputs = model.generate(
                 **inputs,
@@ -52,6 +55,7 @@ def generate_analysis(prompt, max_length=1024, device=None):
                 eos_token_id=tokenizer.eos_token_id,
                 pad_token_id=tokenizer.eos_token_id
             )
+
         analysis = tokenizer.decode(outputs[0], skip_special_tokens=True)
         return analysis
     except Exception as e:
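With the `device` parameter dropped, `generate_analysis` now closes over the module-level `tokenizer`, `model`, and `device`, so callers no longer thread the device through. A minimal usage sketch; the prompt text is illustrative only:

```python
# Hypothetical call site; any string prompt works the same way.
prompt = "Explain when a Monod model fits growth data better than a logistic model."
analysis = generate_analysis(prompt, max_length=1024)
print(analysis)
```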
@@ -75,8 +79,7 @@ def process_and_plot(
     show_params,
     biomass_eq_count,
     substrate_eq_count,
-    product_eq_count
-    device=None
+    product_eq_count
 ):
     # Read the Excel file
     df = pd.read_excel(file.name)
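The removed pair of lines stacked `product_eq_count` and `device=None` with no comma between them, a SyntaxError, so the fix drops the now-unused `device` parameter entirely. A trimmed sketch of the corrected shape; only the parameters visible in the hunk are real, and the leading `file` is inferred from the `file.name` read below:

```python
# Trimmed, partly hypothetical signature illustrating the fix.
def process_and_plot(
    file,
    show_params,
    biomass_eq_count,
    substrate_eq_count,
    product_eq_count
):
    ...
```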
@@ -139,7 +142,7 @@ def process_and_plot(
             )
             biomass_results.append({
                 'model': main_model,
-                'y_pred': y_pred,
+                'y_pred': y_pred.tolist(),  # Convert to list for serialization
                 'equation': equation,
                 'params': main_model.params['biomass']
             })
@@ -167,7 +170,7 @@ def process_and_plot(
             )
             substrate_results.append({
                 'model': main_model,
-                'y_pred': y_pred,
+                'y_pred': y_pred.tolist(),  # Convert to list for serialization
                 'equation': equation,
                 'params': main_model.params['substrate']
             })
@@ -195,7 +198,7 @@ def process_and_plot(
             )
             product_results.append({
                 'model': main_model,
-                'y_pred': y_pred,
+                'y_pred': y_pred.tolist(),  # Convert to list for serialization
                 'equation': equation,
                 'params': main_model.params['product']
             })
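The same one-line change lands in all three hunks above: `y_pred` is a NumPy array, and the standard `json` encoder rejects arrays, so converting to a plain list here is what allows the prompt below to embed the results via `json.dumps`. A self-contained sketch of the failure and the fix:

```python
import json
import numpy as np

y_pred = np.array([0.12, 0.45, 0.93])

try:
    json.dumps({"y_pred": y_pred})  # an ndarray is not JSON-serializable
except TypeError as exc:
    print("without .tolist():", exc)

print(json.dumps({"y_pred": y_pred.tolist()}))  # plain lists serialize fine
```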
@@ -244,16 +247,16 @@ You are an expert in bioprocess modeling.
 Analyze the following experimental results and give a verdict on the quality of the models, suggesting improvements where necessary.
 
 ### Biomass:
-{biomass_results}
+{json.dumps(biomass_results, indent=2)}
 
 ### Substrate:
-{substrate_results}
+{json.dumps(substrate_results, indent=2)}
 
 ### Product:
-{product_results}
+{json.dumps(product_results, indent=2)}
 """
 
     # Generate the analysis using Yi-Coder
-    analysis = generate_analysis(prompt, max_length=1024
+    analysis = generate_analysis(prompt, max_length=1024)
 
     return image, analysis
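One caveat with the new `json.dumps` interpolations: each results dict still carries the fitted model object under `'model'` (and possibly NumPy values under `'params'`), which the standard encoder cannot serialize, so building this f-string would raise a TypeError at runtime. A hedged workaround, not part of this commit, is a small helper with a `default=` fallback:

```python
import json

def results_to_json(results):
    # default=str stringifies anything json cannot encode natively, e.g. the
    # model object stored under 'model'. Hypothetical helper, not in the commit.
    return json.dumps(results, indent=2, default=str)
```

Swapping `json.dumps(biomass_results, indent=2)` for `results_to_json(biomass_results)` (and likewise for substrate and product) would keep the prompt construction from failing on non-serializable entries.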
|