yentinglin committed
Commit d2040e0
1 Parent(s): b5fe987

Update README.md

Files changed (1)
  1. README.md +1 -1
README.md CHANGED
@@ -65,7 +65,7 @@ Here's how you can run the model using the `pipeline()` function from 🤗 Trans
  import torch
  from transformers import pipeline
 
- pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto")
+ pipe = pipeline("text-generation", model="yentinglin/Taiwan-LLM-7B-v2.0.1-chat", torch_dtype=torch.bfloat16, device_map="auto")
 
  # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
  messages = [
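
For reference, here is a minimal sketch of how the updated snippet could be completed, assuming the standard chat-template flow the surrounding README describes; the example messages and generation parameters below are illustrative placeholders and are not part of this diff:

```python
import torch
from transformers import pipeline

# Load the updated model in bfloat16 and let Accelerate spread it across available devices
pipe = pipeline(
    "text-generation",
    model="yentinglin/Taiwan-LLM-7B-v2.0.1-chat",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# We use the tokenizer's chat template to format each message - see
# https://huggingface.co/docs/transformers/main/en/chat_templating
# (the message contents below are placeholders, not taken from the README)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Tell me about Taiwan."},
]
prompt = pipe.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

# Generation settings are assumptions; tune them for your use case
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_p=0.95)
print(outputs[0]["generated_text"])
```

The only change this commit makes is the `model=` argument, swapping the zephyr-7b-beta checkpoint for yentinglin/Taiwan-LLM-7B-v2.0.1-chat; the rest of the pipeline usage is unchanged context.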