0x7o committed on
Commit
b6cdd08
1 Parent(s): dc59912

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -7,8 +7,8 @@ from threading import Thread
7
 
8
  # Loading the tokenizer and model from Hugging Face's model hub.
9
  if torch.cuda.is_available():
10
- tokenizer = AutoTokenizer.from_pretrained("0x7194633/fialka-13B-v4")
11
- model = AutoModelForCausalLM.from_pretrained("0x7194633/fialka-13B-v4", load_in_8bit=True, device_map="auto")
12
 
13
 
14
  # Defining a custom stopping criteria class for the model's text generation.
@@ -57,7 +57,7 @@ def predict(message, history):
57
 
58
  # Setting up the Gradio chat interface.
59
  gr.ChatInterface(predict,
60
- title="Fialka 13B v4",
61
  description="Внимание! Все ответы сгенерированы и могут содержать неточную информацию.",
62
  examples=['Как приготовить рыбу?', 'Кто президент США?']
63
  ).launch() # Launching the web interface.
 
7
 
8
  # Loading the tokenizer and model from Hugging Face's model hub.
9
  if torch.cuda.is_available():
10
+ tokenizer = AutoTokenizer.from_pretrained("sambanovasystems/SambaLingo-Russian-Chat")
11
+ model = AutoModelForCausalLM.from_pretrained("sambanovasystems/SambaLingo-Russian-Chat", torch_dtype=torch.float16, device_map="auto")
12
 
13
 
14
  # Defining a custom stopping criteria class for the model's text generation.
 
57
 
58
  # Setting up the Gradio chat interface.
59
  gr.ChatInterface(predict,
60
+ title="SambaLingo-Russian-Chat",
61
  description="Внимание! Все ответы сгенерированы и могут содержать неточную информацию.",
62
  examples=['Как приготовить рыбу?', 'Кто президент США?']
63
  ).launch() # Launching the web interface.