AlanXian committed on
Commit
5da8485
·
1 Parent(s): 8799d26

update: apollo model

Browse files
Files changed (2) hide show
  1. app.py +3 -3
  2. requirements.txt +4 -1
app.py CHANGED
@@ -12,7 +12,7 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
12
  DESCRIPTION = '''
13
  <div>
14
  <h1 style="text-align: center;">Meta Llama3 8B</h1>
15
- <p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct"><b>Meta Llama3 8b Chat</b></a>. Meta Llama3 is the new open LLM and comes in two sizes: 8b and 70b. Feel free to play with it, or duplicate to run privately!</p>
16
  <p>🔎 For more details about the Llama3 release and how to use the model with <code>transformers</code>, take a look <a href="https://huggingface.co/blog/llama3">at our blog post</a>.</p>
17
  <p>🦕 Looking for an even more powerful model? Check out the <a href="https://huggingface.co/chat/"><b>Hugging Chat</b></a> integration for Meta Llama 3 70b</p>
18
  </div>
@@ -49,8 +49,8 @@ h1 {
49
  """
50
 
51
  # Load the tokenizer and model
52
- tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
53
- model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto") # to("cuda:0")
54
  terminators = [
55
  tokenizer.eos_token_id,
56
  tokenizer.convert_tokens_to_ids("<|eot_id|>")
 
12
  DESCRIPTION = '''
13
  <div>
14
  <h1 style="text-align: center;">Meta Llama3 8B</h1>
15
+ <p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/FreedomIntelligence/Apollo-7B"><b>Apollo-7B</b></a>. Feel free to play with it, or duplicate to run privately!</p>
16
  <p>🔎 For more details about the Llama3 release and how to use the model with <code>transformers</code>, take a look <a href="https://huggingface.co/blog/llama3">at our blog post</a>.</p>
17
  <p>🦕 Looking for an even more powerful model? Check out the <a href="https://huggingface.co/chat/"><b>Hugging Chat</b></a> integration for Meta Llama 3 70b</p>
18
  </div>
 
49
  """
50
 
51
  # Load the tokenizer and model
52
+ tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/Apollo-7B")
53
+ model = AutoModelForCausalLM.from_pretrained("FreedomIntelligence/Apollo-7B", device_map="auto") # to("cuda:0")
54
  terminators = [
55
  tokenizer.eos_token_id,
56
  tokenizer.convert_tokens_to_ids("<|eot_id|>")
requirements.txt CHANGED
@@ -1 +1,4 @@
1
- huggingface_hub==0.25.2
 
 
 
 
1
+ huggingface_hub==0.25.2
2
+ accelerate
3
+ transformers
4
+ SentencePiece