M4sterStudy committed on
Commit
3f0376d
·
verified ·
1 Parent(s): 7971d45

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -11
app.py CHANGED
@@ -1,30 +1,33 @@
import os
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Authenticate against the Hugging Face Hub using the Space secret.
hf_token = os.getenv("HF_API_TOKEN")
login(hf_token)

# Load the tokenizer and the causal-LM checkpoint.
model_name = "meta-llama/Meta-Llama-3-8B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def chat_with_llama(input_text):
    """Generate a text completion for *input_text* with the LLaMA model.

    Parameters
    ----------
    input_text : str
        The user's prompt.

    Returns
    -------
    str
        The decoded model output with special tokens stripped.
    """
    encoded = tokenizer(input_text, return_tensors="pt")
    generated = model.generate(**encoded)
    return tokenizer.decode(generated[0], skip_special_tokens=True)


# Wire the function into a minimal Gradio text-in / text-out interface.
iface = gr.Interface(
    fn=chat_with_llama,
    inputs="text",
    outputs="text",
    title="Chat con LLaMA 3",
    description="Interfaz simple para comunicarte con el modelo LLaMA 3.",
)

iface.launch()
import os
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import gradio as gr
import torch

# Authenticate against the Hugging Face Hub using the Space secret.
hf_token = os.getenv("HF_API_TOKEN")
if not hf_token:
    # Fail fast with a clear message instead of letting login(None) fail
    # with a confusing error deep inside huggingface_hub.
    raise RuntimeError("HF_API_TOKEN environment variable is not set")
login(hf_token)

# Load the tokenizer and model.
# NOTE(review): this checkpoint is a base RoBERTa model, so the sequence
# classification head loaded here is newly initialized and its predictions
# are effectively random until fine-tuned — confirm a fine-tuned
# classification checkpoint is intended.
model_name = "bertin-project/bertin-roberta-base-spanish"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def classify_text(input_text):
    """Classify *input_text* and return the predicted label as a string.

    Parameters
    ----------
    input_text : str
        Raw text to classify; truncated to the model's 512-token limit.

    Returns
    -------
    str
        The label from ``model.config.id2label`` for the argmax class when
        one is configured, otherwise the numeric class id as a string.
        Returning a string matches the Gradio ``outputs="text"`` component.
    """
    # Truncate so inputs longer than the model's maximum sequence length
    # do not raise at inference time.
    inputs = tokenizer(
        input_text, return_tensors="pt", truncation=True, max_length=512
    )
    # Inference only: skip autograd bookkeeping to save memory and time.
    with torch.no_grad():
        outputs = model(**inputs)
    predicted_class_id = torch.argmax(outputs.logits, dim=-1).item()
    # Map the class index to its configured human-readable label;
    # fall back to the raw id if no mapping is present.
    id2label = getattr(model.config, "id2label", None) or {}
    return str(id2label.get(predicted_class_id, predicted_class_id))
 
24
  # Crear la interfaz con Gradio
25
  iface = gr.Interface(
26
+ fn=classify_text,
27
  inputs="text",
28
  outputs="text",
29
+ title="Clasificador en Español con BERTin",
30
+ description="Interfaz para clasificar texto en español usando el modelo BERTin RoBERTa base."
31
  )
32
 
33
  iface.launch()