Woziii committed on
Commit
575de15
1 Parent(s): 65194e4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -23
app.py CHANGED
@@ -12,10 +12,10 @@ import time
12
  login(token=os.environ["HF_TOKEN"])
13
 
14
  # Structure hiérarchique des modèles
15
- model_hierarchy = {
16
  "meta-llama": {
17
  "Llama-2": ["7B", "13B", "70B"],
18
- "Llama-3": ["8B", "3.2-3B", "3.1-8B"]
19
  },
20
  "mistralai": {
21
  "Mistral": ["7B-v0.1", "7B-v0.3"],
@@ -30,16 +30,16 @@ model_hierarchy = {
30
  }
31
 
32
  # Langues supportées par modèle
33
- models_and_languages = {
34
  "meta-llama/Llama-2-7B": ["en"],
35
  "meta-llama/Llama-2-13B": ["en"],
36
  "meta-llama/Llama-2-70B": ["en"],
37
  "meta-llama/Llama-3-8B": ["en"],
38
- "meta-llama/Llama-3.2-3B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
39
- "meta-llama/Llama-3.1-8B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
40
  "mistralai/Mistral-7B-v0.1": ["en"],
41
- "mistralai/Mistral-7B-v0.3": ["en"],
42
  "mistralai/Mixtral-8x7B-v0.1": ["en", "fr", "it", "de", "es"],
 
43
  "google/Gemma-2B": ["en"],
44
  "google/Gemma-9B": ["en"],
45
  "google/Gemma-27B": ["en"],
@@ -52,11 +52,11 @@ model_parameters = {
52
  "meta-llama/Llama-2-13B": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
53
  "meta-llama/Llama-2-70B": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
54
  "meta-llama/Llama-3-8B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
55
- "meta-llama/Llama-3.2-3B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
56
- "meta-llama/Llama-3.1-8B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
57
  "mistralai/Mistral-7B-v0.1": {"temperature": 0.7, "top_p": 0.9, "top_k": 50},
58
- "mistralai/Mistral-7B-v0.3": {"temperature": 0.7, "top_p": 0.9, "top_k": 50},
59
  "mistralai/Mixtral-8x7B-v0.1": {"temperature": 0.8, "top_p": 0.95, "top_k": 50},
 
60
  "google/Gemma-2B": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
61
  "google/Gemma-9B": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
62
  "google/Gemma-27B": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
@@ -69,10 +69,10 @@ tokenizer = None
69
  selected_language = None
70
 
71
  def update_model_choices(company):
72
- return gr.Dropdown(choices=list(model_hierarchy[company].keys()), value=None)
73
 
74
  def update_variation_choices(company, model_name):
75
- return gr.Dropdown(choices=model_hierarchy[company][model_name], value=None)
76
 
77
  def load_model(company, model_name, variation, progress=gr.Progress()):
78
  global model, tokenizer
@@ -101,9 +101,9 @@ def load_model(company, model_name, variation, progress=gr.Progress()):
101
  tokenizer.pad_token = tokenizer.eos_token
102
 
103
  progress(1.0, desc="Modèle chargé")
104
- available_languages = models_and_languages[full_model_name]
105
- params = model_parameters[full_model_name]
106
 
 
107
  return (
108
  f"Modèle {full_model_name} chargé avec succès. Langues disponibles : {', '.join(available_languages)}",
109
  gr.Dropdown(choices=available_languages, value=available_languages[0], visible=True, interactive=True),
@@ -223,20 +223,15 @@ def reset():
223
  model = None
224
  tokenizer = None
225
  selected_language = None
226
- return (
227
- gr.Dropdown(choices=list(model_hierarchy.keys()), value=None),
228
- gr.Dropdown(visible=False),
229
- gr.Dropdown(visible=False),
230
- "", 1.0, 1.0, 50, None, None, None, None, gr.Dropdown(visible=False), ""
231
- )
232
 
233
  with gr.Blocks() as demo:
234
  gr.Markdown("# LLM&BIAS")
235
 
236
  with gr.Accordion("Sélection du modèle"):
237
- company_dropdown = gr.Dropdown(choices=list(model_hierarchy.keys()), label="Choisissez une société")
238
- model_dropdown = gr.Dropdown(label="Choisissez un modèle", visible=False)
239
- variation_dropdown = gr.Dropdown(label="Choisissez une variation", visible=False)
240
  load_button = gr.Button("Charger le modèle")
241
  load_output = gr.Textbox(label="Statut du chargement")
242
  language_dropdown = gr.Dropdown(label="Choisissez une langue", visible=False)
@@ -263,9 +258,10 @@ with gr.Blocks() as demo:
263
 
264
  company_dropdown.change(update_model_choices, inputs=[company_dropdown], outputs=[model_dropdown])
265
  model_dropdown.change(update_variation_choices, inputs=[company_dropdown, model_dropdown], outputs=[variation_dropdown])
 
266
  load_button.click(load_model,
267
  inputs=[company_dropdown, model_dropdown, variation_dropdown],
268
- outputs=[load_output, language_dropdown])
269
  language_dropdown.change(set_language, inputs=[language_dropdown], outputs=[language_output])
270
  analyze_button.click(analyze_next_token,
271
  inputs=[input_text, temperature, top_p, top_k],
 
12
  login(token=os.environ["HF_TOKEN"])
13
 
14
  # Structure hiérarchique des modèles
15
+ models_hierarchy = {
16
  "meta-llama": {
17
  "Llama-2": ["7B", "13B", "70B"],
18
+ "Llama-3": ["8B", "3.2B", "3.1B"]
19
  },
20
  "mistralai": {
21
  "Mistral": ["7B-v0.1", "7B-v0.3"],
 
30
  }
31
 
32
  # Langues supportées par modèle
33
+ models_languages = {
34
  "meta-llama/Llama-2-7B": ["en"],
35
  "meta-llama/Llama-2-13B": ["en"],
36
  "meta-llama/Llama-2-70B": ["en"],
37
  "meta-llama/Llama-3-8B": ["en"],
38
+ "meta-llama/Llama-3-3.2B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
39
+ "meta-llama/Llama-3-3.1B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
40
  "mistralai/Mistral-7B-v0.1": ["en"],
 
41
  "mistralai/Mixtral-8x7B-v0.1": ["en", "fr", "it", "de", "es"],
42
+ "mistralai/Mistral-7B-v0.3": ["en"],
43
  "google/Gemma-2B": ["en"],
44
  "google/Gemma-9B": ["en"],
45
  "google/Gemma-27B": ["en"],
 
52
  "meta-llama/Llama-2-13B": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
53
  "meta-llama/Llama-2-70B": {"temperature": 0.8, "top_p": 0.9, "top_k": 40},
54
  "meta-llama/Llama-3-8B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
55
+ "meta-llama/Llama-3-3.2B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
56
+ "meta-llama/Llama-3-3.1B": {"temperature": 0.75, "top_p": 0.9, "top_k": 50},
57
  "mistralai/Mistral-7B-v0.1": {"temperature": 0.7, "top_p": 0.9, "top_k": 50},
 
58
  "mistralai/Mixtral-8x7B-v0.1": {"temperature": 0.8, "top_p": 0.95, "top_k": 50},
59
+ "mistralai/Mistral-7B-v0.3": {"temperature": 0.7, "top_p": 0.9, "top_k": 50},
60
  "google/Gemma-2B": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
61
  "google/Gemma-9B": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
62
  "google/Gemma-27B": {"temperature": 0.7, "top_p": 0.95, "top_k": 40},
 
69
  selected_language = None
70
 
71
  def update_model_choices(company):
72
+ return list(models_hierarchy[company].keys())
73
 
74
  def update_variation_choices(company, model_name):
75
+ return models_hierarchy[company][model_name]
76
 
77
  def load_model(company, model_name, variation, progress=gr.Progress()):
78
  global model, tokenizer
 
101
  tokenizer.pad_token = tokenizer.eos_token
102
 
103
  progress(1.0, desc="Modèle chargé")
104
+ available_languages = models_languages[full_model_name]
 
105
 
106
+ params = model_parameters[full_model_name]
107
  return (
108
  f"Modèle {full_model_name} chargé avec succès. Langues disponibles : {', '.join(available_languages)}",
109
  gr.Dropdown(choices=available_languages, value=available_languages[0], visible=True, interactive=True),
 
223
  model = None
224
  tokenizer = None
225
  selected_language = None
226
+ return "", 1.0, 1.0, 50, None, None, None, None, gr.Dropdown(visible=False), ""
 
 
 
 
 
227
 
228
  with gr.Blocks() as demo:
229
  gr.Markdown("# LLM&BIAS")
230
 
231
  with gr.Accordion("Sélection du modèle"):
232
+ company_dropdown = gr.Dropdown(choices=list(models_hierarchy.keys()), label="Choisissez une société")
233
+ model_dropdown = gr.Dropdown(label="Choisissez un modèle", interactive=False)
234
+ variation_dropdown = gr.Dropdown(label="Choisissez une variation", interactive=False)
235
  load_button = gr.Button("Charger le modèle")
236
  load_output = gr.Textbox(label="Statut du chargement")
237
  language_dropdown = gr.Dropdown(label="Choisissez une langue", visible=False)
 
258
 
259
  company_dropdown.change(update_model_choices, inputs=[company_dropdown], outputs=[model_dropdown])
260
  model_dropdown.change(update_variation_choices, inputs=[company_dropdown, model_dropdown], outputs=[variation_dropdown])
261
+
262
  load_button.click(load_model,
263
  inputs=[company_dropdown, model_dropdown, variation_dropdown],
264
+ outputs=[load_output, language_dropdown, temperature, top_p, top_k])
265
  language_dropdown.change(set_language, inputs=[language_dropdown], outputs=[language_output])
266
  analyze_button.click(analyze_next_token,
267
  inputs=[input_text, temperature, top_p, top_k],