Spaces: Runtime error

krystian-lieber committed · commit 8b78156 · 1 parent: ad19dfc

Update to remote service

Files changed:
- app.py +6 -10
- model.py +21 -39
- requirements.txt +1 -0
app.py
CHANGED
@@ -1,9 +1,8 @@
 from typing import Iterator
 
 import gradio as gr
-import torch
 
-from model import get_input_token_length, run
+from model import run
 
 DEFAULT_SYSTEM_PROMPT = """\
 You are a helpful, respectful and honest assistant with a deep knowledge of code and software design. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
@@ -13,9 +12,9 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = 4000
 
 DESCRIPTION = """
-# Code Llama
+# Code Llama 34B Chat
 
-This Space demonstrates model [CodeLlama-
+This Space demonstrates model [CodeLlama-34b-Instruct](https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf) by Meta, a Code Llama model with 34B parameters fine-tuned for chat instructions and specialized on code tasks. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
 
 🔎 For more details about the Code Llama family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/codellama) or [the paper](https://huggingface.co/papers/2308.12950).
 
@@ -28,12 +27,9 @@ LICENSE = """
 
 ---
 As a derivate work of Code Llama by Meta,
-this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/codellama-2-
+this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/codellama-2-34b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/codellama-2-34b-chat/blob/main/USE_POLICY.md).
 """
 
-if not torch.cuda.is_available():
-    DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
-
 
 def clear_and_save_textbox(message: str) -> tuple[str, str]:
     return '', message
@@ -85,7 +81,7 @@ def process_example(message: str) -> tuple[str, list[tuple[str, str]]]:
 
 
 def check_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> None:
-    input_token_length = get_input_token_length(message, chat_history, system_prompt)
+    input_token_length = len(message) + len(chat_history)
     if input_token_length > MAX_INPUT_TOKEN_LENGTH:
         raise gr.Error(f'The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again.')
 
@@ -274,4 +270,4 @@ with gr.Blocks(css='style.css') as demo:
     api_name=False,
 )
 
-demo.queue(max_size=
+demo.queue(max_size=32).launch(debug=True)
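Note that with the tokenizer no longer loaded locally, check_input_token_length now approximates input length as len(message) + len(chat_history): the character count of the latest message plus the number of history pairs, not a true token count. A minimal sketch of a closer character-based estimate, should one be wanted; this helper is hypothetical and the 4-characters-per-token ratio is an assumption, not part of this commit:

def estimate_input_tokens(message: str,
                          chat_history: list[tuple[str, str]],
                          system_prompt: str) -> int:
    # Hypothetical helper, not in the commit: approximate tokens from
    # characters (~4 chars/token is a common rule of thumb for English/code).
    n_chars = len(system_prompt) + len(message)
    for user_text, response_text in chat_history:
        n_chars += len(user_text) + len(response_text)
    return n_chars // 4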
model.py
CHANGED
@@ -1,25 +1,19 @@
-from threading import Thread
+import os
 from typing import Iterator
 
-import torch
-from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+from text_generation import Client
 
-model_id = 'codellama/CodeLlama-
+model_id = 'codellama/CodeLlama-34b-Instruct-hf'
 
-if torch.cuda.is_available():
-    config = AutoConfig.from_pretrained(model_id)
-    config.pretraining_tp = 1
-    model = AutoModelForCausalLM.from_pretrained(
-        model_id,
-        config=config,
-        torch_dtype=torch.float16,
-        load_in_4bit=True,
-        device_map='auto',
-        use_safetensors=False,
-    )
-else:
-    model = None
-tokenizer = AutoTokenizer.from_pretrained(model_id)
+API_URL = "https://api-inference.huggingface.co/models/" + model_id
+HF_TOKEN = os.environ.get("HF_TOKEN", "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")
+
+client = Client(
+    API_URL,
+    headers={"Authorization": f"Bearer {HF_TOKEN}"},
+)
+EOS_STRING = "</s>"
+EOT_STRING = "<EOT>"
 
 
 def get_prompt(message: str, chat_history: list[tuple[str, str]],
@@ -36,12 +30,6 @@ def get_prompt(message: str, chat_history: list[tuple[str, str]],
     return ''.join(texts)
 
 
-def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:
-    prompt = get_prompt(message, chat_history, system_prompt)
-    input_ids = tokenizer([prompt], return_tensors='np', add_special_tokens=False)['input_ids']
-    return input_ids.shape[-1]
-
-
 def run(message: str,
         chat_history: list[tuple[str, str]],
         system_prompt: str,
@@ -50,26 +38,20 @@ def run(message: str,
         top_p: float = 0.9,
         top_k: int = 50) -> Iterator[str]:
     prompt = get_prompt(message, chat_history, system_prompt)
-    inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda')
 
-    streamer = TextIteratorStreamer(tokenizer,
-                                    timeout=10.,
-                                    skip_prompt=True,
-                                    skip_special_tokens=True)
     generate_kwargs = dict(
-        inputs,
-        streamer=streamer,
         max_new_tokens=max_new_tokens,
         do_sample=True,
         top_p=top_p,
         top_k=top_k,
         temperature=temperature,
-        num_beams=1,
     )
-
-    t = Thread(target=model.generate, kwargs=generate_kwargs)
-    t.start()
-
-    outputs = []
-    for text in streamer:
-        outputs.append(text)
-        yield ''.join(outputs)
+    stream = client.generate_stream(prompt, **generate_kwargs)
+    output = ""
+    for response in stream:
+        if any([end_token in response.token.text for end_token in [EOS_STRING, EOT_STRING]]):
+            return output
+        else:
+            output += response.token.text
+            yield output
+    return output
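Since run now yields the accumulated output on every streamed token (rather than joining streamer chunks locally), a caller only needs the last yielded value. A minimal usage sketch, assuming HF_TOKEN is set in the environment; the message text is illustrative, and max_new_tokens and temperature are passed explicitly because their defaults sit outside the hunks shown above:

from model import run

response = ''
for response in run(message='Write a Python function that reverses a string.',
                    chat_history=[],
                    system_prompt='You are a helpful coding assistant.',
                    max_new_tokens=256,
                    temperature=0.2):
    pass  # each yielded value is the full response so far
print(response)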
requirements.txt
CHANGED
@@ -5,4 +5,5 @@ protobuf
 scipy
 sentencepiece
 torch
+text_generation
 git+https://github.com/huggingface/transformers@main
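The new text_generation dependency provides the Client used in model.py; it talks to a text-generation-inference server, including the hosted Inference API endpoint that model.py builds from model_id. A minimal sketch of the call pattern, assuming HF_TOKEN is set in the environment (the prompt string is illustrative):

import os

from text_generation import Client

client = Client(
    'https://api-inference.huggingface.co/models/codellama/CodeLlama-34b-Instruct-hf',
    headers={'Authorization': f"Bearer {os.environ['HF_TOKEN']}"},
)
# generate_stream yields one response per generated token;
# response.token.text carries the decoded text piece.
for response in client.generate_stream('def fibonacci(n):', max_new_tokens=64):
    print(response.token.text, end='', flush=True)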