pcuenq (HF staff) committed
Commit d5ed6b6
1 Parent(s): 79f09a8

Adapt to Code Llama.

Files changed (2)
  1. app.py +13 -18
  2. model.py +3 -3
app.py CHANGED
@@ -6,21 +6,18 @@ import torch
 from model import get_input_token_length, run
 
 DEFAULT_SYSTEM_PROMPT = """\
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
+You are a helpful, respectful and honest assistant with a deep knowledge of code and software design. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
 """
-MAX_MAX_NEW_TOKENS = 2048
+MAX_MAX_NEW_TOKENS = 4096
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = 4000
 
 DESCRIPTION = """
-# Llama-2 13B Chat
+# Code Llama 13B Chat
 
-This Space demonstrates model [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta, a Llama 2 model with 13B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
+This Space demonstrates the model [CodeLlama-13b-Instruct](https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf) by Meta, a Code Llama model with 13B parameters fine-tuned for chat instructions and specialized in code tasks. Feel free to play with it, or duplicate it to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
 
-🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
-
-🔨 Looking for an even more powerful model? Check out the large [**70B** model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
-🐇 For a smaller model that you can run on many GPUs, check our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
+🔎 For more details about the Code Llama family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/codellama).
 
 """
@@ -28,8 +25,8 @@ LICENSE = """
 <p/>
 
 ---
-As a derivate work of [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta,
-this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/USE_POLICY.md).
+As a derivative work of Code Llama by Meta,
+this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/codellama-2-13b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/codellama-2-13b-chat/blob/main/USE_POLICY.md).
 """
 
 if not torch.cuda.is_available():
@@ -132,30 +129,28 @@ with gr.Blocks(css='style.css') as demo:
             minimum=0.1,
             maximum=4.0,
             step=0.1,
-            value=1.0,
+            value=0.1,
         )
         top_p = gr.Slider(
             label='Top-p (nucleus sampling)',
             minimum=0.05,
             maximum=1.0,
             step=0.05,
-            value=0.95,
+            value=0.9,
         )
         top_k = gr.Slider(
            label='Top-k',
            minimum=1,
            maximum=1000,
            step=1,
-           value=50,
+           value=10,
        )
 
    gr.Examples(
        examples=[
-           'Hello there! How are you doing?',
-           'Can you explain briefly to me what is the Python programming language?',
-           'Explain the plot of Cinderella in a sentence.',
-           'How many hours does it take a man to eat a Helicopter?',
-           "Write a 100-word article on 'Benefits of Open-Source in AI research'",
+           'What is the Fibonacci sequence?',
+           'Can you explain briefly what Python is good for?',
+           'How can I display a grid of images in SwiftUI?',
        ],
        inputs=textbox,
        outputs=[textbox, chatbot],
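For context, here is a minimal sketch of how the updated UI defaults (temperature 0.1, top-p 0.9, top-k 10, and the new code-oriented examples) map onto the `run` generator that `app.py` imports from `model.py`. The signature of `run` comes from the model.py diff below; the signature of `get_input_token_length` and the assumption that `run` yields the accumulated response so far are carried over from the Llama 2 chat demo this Space was adapted from, so treat this as an illustration rather than the Space's exact code.

```python
# Illustrative only: drive model.run directly with the defaults introduced in this commit.
# Assumes a CUDA GPU (model.py only loads the model when torch.cuda.is_available()).
from model import get_input_token_length, run

DEFAULT_SYSTEM_PROMPT = (
    'You are a helpful, respectful and honest assistant with a deep knowledge '
    'of code and software design.'  # shortened version of the prompt in app.py
)
MAX_INPUT_TOKEN_LENGTH = 4000

message = 'How can I display a grid of images in SwiftUI?'  # one of the new examples
chat_history: list[tuple[str, str]] = []

# Mirror the app's guard against over-long prompts
# (signature assumed from the upstream Llama 2 chat demo).
if get_input_token_length(message, chat_history, DEFAULT_SYSTEM_PROMPT) > MAX_INPUT_TOKEN_LENGTH:
    raise ValueError('Input exceeds the maximum token length.')

# run streams intermediate strings; assuming (as in the upstream demo) that each
# yielded value is the accumulated response so far, the last one is the full answer.
response = ''
for response in run(
    message,
    chat_history,
    DEFAULT_SYSTEM_PROMPT,
    max_new_tokens=1024,  # DEFAULT_MAX_NEW_TOKENS
    temperature=0.1,      # new slider default, kept low for code generation
    top_p=0.9,            # new slider default
    top_k=10,             # new slider default
):
    pass
print(response)
```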
model.py CHANGED
@@ -4,7 +4,7 @@ from typing import Iterator
 import torch
 from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
-model_id = 'meta-llama/Llama-2-13b-chat-hf'
+model_id = 'codellama/CodeLlama-13b-Instruct-hf'
 
 if torch.cuda.is_available():
     config = AutoConfig.from_pretrained(model_id)
@@ -45,8 +45,8 @@ def run(message: str,
         chat_history: list[tuple[str, str]],
         system_prompt: str,
         max_new_tokens: int = 1024,
-        temperature: float = 0.8,
-        top_p: float = 0.95,
+        temperature: float = 0.1,
+        top_p: float = 0.9,
         top_k: int = 50) -> Iterator[str]:
     prompt = get_prompt(message, chat_history, system_prompt)
     inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda')
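The model.py changes boil down to pointing the existing loading and streaming code at `codellama/CodeLlama-13b-Instruct-hf` and lowering the sampling defaults. As a standalone reference, here is a hedged sketch of that streaming pattern with plain `transformers`; the `float16` dtype, the `[INST] ... [/INST]` prompt shape, and the example prompt itself are assumptions, not taken from this diff.

```python
# Standalone sketch of the TextIteratorStreamer pattern model.run is built on,
# using the model id and sampling defaults from this commit. Illustrative only.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = 'codellama/CodeLlama-13b-Instruct-hf'

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # assumption: half precision on a single GPU
    device_map='auto',
)

# Assumed Llama-2-style instruction format; the Space builds its prompt via get_prompt().
prompt = '<s>[INST] Write a Python function that checks whether a string is a palindrome. [/INST]'
inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to(model.device)

# The streamer yields decoded text chunks as generation proceeds.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
generate_kwargs = dict(
    **inputs,
    streamer=streamer,
    max_new_tokens=1024,  # DEFAULT_MAX_NEW_TOKENS
    do_sample=True,
    temperature=0.1,      # new default in run()
    top_p=0.9,            # new default in run()
    top_k=50,             # unchanged default in run()
)

# generate() blocks, so run it in a background thread and consume the stream here.
thread = Thread(target=model.generate, kwargs=generate_kwargs)
thread.start()
for new_text in streamer:
    print(new_text, end='', flush=True)
thread.join()
```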