rphrp1985 committed on
Commit
ec7e05a
1 Parent(s): 215396c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -17
app.py CHANGED
@@ -13,6 +13,12 @@ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
13
  token=os.getenv('token')
14
  print('token = ',token)
15
 
 
 
 
 
 
 
16
 
17
 
18
 
@@ -26,24 +32,11 @@ def respond(
26
  temperature,
27
  top_p,
28
  ):
29
- from transformers import AutoTokenizer, AutoModelForCausalLM
30
-
31
- model_id = "CohereForAI/c4ai-command-r-plus-4bit"
32
-
33
- tokenizer = AutoTokenizer.from_pretrained(model_id, token= token)
34
- model = AutoModelForCausalLM.from_pretrained(model_id, token= token)
35
- messages = [{"role": "user", "content": "Hello, how are you?"}]
36
- input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
37
- ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
38
-
39
- gen_tokens = model.generate(
40
- input_ids,
41
- max_new_tokens=100,
42
- do_sample=True,
43
- temperature=0.3,
44
- )
45
 
46
- gen_text = tokenizer.decode(gen_tokens[0])
 
 
47
  print(gen_text)
48
  yield gen_text
49
  # for val in history:
 
13
  token=os.getenv('token')
14
  print('token = ',token)
15
 
16
+ from transformers import AutoModelForCausalLM, AutoTokenizer
17
+
18
+ model_id = "mistralai/Mistral-7B-v0.3"
19
+ tokenizer = AutoTokenizer.from_pretrained(model_id, token= token)
20
+
21
+ model = AutoModelForCausalLM.from_pretrained(model_id, token= token)
22
 
23
 
24
 
 
32
  temperature,
33
  top_p,
34
  ):
35
+ inputs = tokenizer("Hello my name is", return_tensors="pt")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
+ outputs = model.generate(**inputs, max_new_tokens=20)
38
+ gen_text=tokenizer.decode(outputs[0], skip_special_tokens=True)
39
+
40
  print(gen_text)
41
  yield gen_text
42
  # for val in history: