ysharma (HF staff) committed on
Commit 84ef11d • 1 Parent(s): 1d27c70

Update app.py

Files changed (1)
  1. app.py +12 -20
app.py CHANGED
@@ -12,23 +12,17 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
  DESCRIPTION = '''
  <div>
  <h1 style="text-align: center;">Meta Llama3 8B</h1>
- <p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct"><b>Meta Llama3 8b Chat</b></a>. Meta Llama3 is the new open LLM and comes in two sizes: 8b and 70b. Feel free to play with it, or duplicate to run privately!</p>
- <p>🔎 For more details about the Llama3 release and how to use the model with <code>transformers</code>, take a look <a href="https://huggingface.co/blog/llama3">at our blog post</a>.</p>
- <p>🦕 Looking for an even more powerful model? Check out the <a href="https://huggingface.co/chat/"><b>Hugging Chat</b></a> integration for Meta Llama 3 70b</p>
+ <p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3"><b>mistralai/Mistral-7B-Instruct-v0.3</b></a>. The Mistral-7B-Instruct-v0.3 Large Language Model (LLM) is an instruct fine-tuned version of Mistral-7B-v0.3, which is Mistral-7B-v0.2 with an extended vocabulary. Feel free to play with it, or duplicate to run privately!</p>
+ <p>🔎 For more details about the release and how to use the model with <code>transformers</code>, visit the model card linked above.</p>
+ <p>🦕 The Instruct model has a vocabulary extended to 32768, supports the v3 tokenizer, and supports function calling.</p>
  </div>
  '''
  
- LICENSE = """
- <p/>
-
- ---
- Built with Meta Llama 3
- """
  
  PLACEHOLDER = """
  <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
- <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/8e75e61cc9bab22b7ce3dec85ab0e6db1da5d107/Meta_lockup_positive%20primary_RGB.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
- <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Meta llama3</h1>
+ <img src="https://cdn-thumbnails.huggingface.co/social-thumbnails/models/mistralai/Mistral-7B-Instruct-v0.3.png" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; ">
+ <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Mistral-7B-Instruct-v0.3 is an instruct fine-tuned version of Mistral-7B-v0.3, which is Mistral-7B-v0.2 with the vocabulary extended to 32768.</h1>
  <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
  </div>
  """
@@ -49,21 +43,21 @@ h1 {
  """
  
  # Load the tokenizer and model
- tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
- model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto") # to("cuda:0")
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
+ model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3", device_map="auto")
  terminators = [
      tokenizer.eos_token_id,
      tokenizer.convert_tokens_to_ids("<|eot_id|>")
  ]
  
  @spaces.GPU(duration=120)
- def chat_llama3_8b(message: str,
+ def chat_mistral7b_v0dot3(message: str,
                     history: list,
                     temperature: float,
                     max_new_tokens: int
                     ) -> str:
      """
-     Generate a streaming response using the llama3-8b model.
+     Generate a streaming response using the mistralai/Mistral-7B-Instruct-v0.3 model.
      Args:
          message (str): The input message.
          history (list): The conversation history used by ChatInterface.
@@ -111,7 +105,7 @@ with gr.Blocks(fill_height=True, css=css) as demo:
      gr.Markdown(DESCRIPTION)
      gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
      gr.ChatInterface(
-         fn=chat_llama3_8b,
+         fn=chat_mistral7b_v0dot3,
          chatbot=chatbot,
          fill_height=True,
          additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
@@ -139,8 +133,6 @@ with gr.Blocks(fill_height=True, css=css) as demo:
          cache_examples=False,
      )
  
-     gr.Markdown(LICENSE)
-
+
  if __name__ == "__main__":
-     demo.launch()
-
+     demo.launch()
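
For reference, here is a minimal sketch (not part of the commit) of how the model introduced by this change could be loaded and used to stream a single reply. It assumes the rest of app.py keeps the original demo's generation approach, building the prompt with the tokenizer's chat template and streaming tokens from a background thread via TextIteratorStreamer; that code is not shown in this diff, and the prompt text, sampling temperature, and max_new_tokens below are placeholder values.

```python
# Minimal sketch, not taken from app.py: the chat function body is absent from this
# diff, so the chat-template + TextIteratorStreamer flow is an assumption carried
# over from the original Llama 3 demo. Prompt and sampling values are placeholders.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

# Build the prompt from a single-turn conversation using the model's chat template.
messages = [{"role": "user", "content": "Ask me anything..."}]
input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)

# Stream decoded text as it is produced; generate() runs in a background thread.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = dict(
    input_ids=input_ids,
    streamer=streamer,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    eos_token_id=tokenizer.eos_token_id,  # Mistral's </s>; it defines no <|eot_id|> token
)
Thread(target=model.generate, kwargs=generation_kwargs).start()

for text_chunk in streamer:
    print(text_chunk, end="", flush=True)
```

Note that the Mistral v3 tokenizer does not define an <|eot_id|> token, so the `convert_tokens_to_ids("<|eot_id|>")` lookup left unchanged by this diff likely no longer maps to a meaningful terminator; the sketch therefore passes only `tokenizer.eos_token_id`.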