00ber committed
Commit 5f21d73
1 parent: 2bbfe89

Added token counter

app.py CHANGED
@@ -1,63 +1,53 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
+ from tokenizers import Tokenizer

- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
+ llama3_tokenizer = Tokenizer.from_file("tokenizer-llama3.json")
+ deepseek_coder_tokenizer = Tokenizer.from_file("tokenizer-deepseek-coder.json")
+
+ def get_tokenizer(model):
+     tokenizer_mapping = {
+         "meta-llama/Meta-Llama-3-8B-Instruct": llama3_tokenizer,
+         "deepseek-ai/deepseek-coder-7b-instruct-v1.5": deepseek_coder_tokenizer
+     }
+     if model not in tokenizer_mapping:
+         raise Exception(f"Model {model} not supported.")
+
+     return tokenizer_mapping[model]
+
+ def count_tokens(
+     model,
+     target_text,
  ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
+     print("###############################")
+     print(model)
+     tokenizer = get_tokenizer(model)
+     print(tokenizer)
+     toks = tokenizer.encode(target_text)
+     print(toks.ids)
+     yield len(toks.ids)
+
+ def greet(name, intensity):
+     return "Hello, " + name + "!" * int(intensity)
+
+ demo = gr.Interface(
+     fn=count_tokens,
+     inputs=[
+         gr.Dropdown(
+             [
+                 "meta-llama/Meta-Llama-3-8B-Instruct",
+                 "deepseek-ai/deepseek-coder-7b-instruct-v1.5",
+             ], label="Model"
+         ),
+         gr.Textbox(
+             label="Input",
+             info="Text to count tokens for",
+             lines=10,
          ),
      ],
+     outputs=["text"],
  )


+
  if __name__ == "__main__":
-     demo.launch()
+     demo.launch()
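The new app reduces token counting to two calls into `tokenizers`: deserialize a saved tokenizer with `Tokenizer.from_file`, then `encode` the text and take the length of the resulting ids. A minimal sketch of the same flow outside Gradio, assuming the bundled JSON files sit next to the script as they do in this repo:

```python
from tokenizers import Tokenizer

# Deserialize the tokenizer that the Space ships as a JSON file.
tok = Tokenizer.from_file("tokenizer-llama3.json")

# encode() returns an Encoding object; its .ids field holds the token ids.
encoding = tok.encode("Count the tokens in this sentence.")
print(len(encoding.ids))  # token count, matching what the app displays
```

Note that `count_tokens` yields its single result rather than returning it, so Gradio treats it as a streaming (generator) function; with one yield the behavior is indistinguishable from a plain return.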
requirements.txt CHANGED
@@ -1 +1,4 @@
- huggingface_hub==0.22.2
+ huggingface_hub==0.22.2
+ tokenizers==0.19.1
+ gradio==4.40.0
+ gradio_client==1.2.0
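Pinning `gradio_client` alongside `gradio` may simply fix a transitive dependency, but it also enables calling the Space programmatically. A hedged sketch of such a call; the Space id below is hypothetical, so substitute the real `user/space-name`:

```python
from gradio_client import Client

# Hypothetical Space id -- replace with the actual "user/space-name".
client = Client("00ber/token-counter")

count = client.predict(
    "meta-llama/Meta-Llama-3-8B-Instruct",  # model dropdown value
    "Count the tokens in this sentence.",   # input textbox value
    api_name="/predict",                    # default endpoint for gr.Interface
)
print(count)
```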
tokenizer-deepseek-coder.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer-llama3.json ADDED
The diff for this file is too large to render. See raw diff
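The two added JSON files are serialized fast tokenizers, which lets the Space count tokens without downloading the (gated) model repos at runtime. One plausible way to produce such a file, offered as an assumption about provenance rather than a step recorded in this commit, is to pull the tokenizer once with `transformers` and save its Rust backend:

```python
from transformers import AutoTokenizer

# Fetch the fast tokenizer once (the Llama 3 repo is gated, so this
# requires a Hugging Face token with accepted access).
hf_tok = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

# The underlying `tokenizers.Tokenizer` serializes to a single JSON file.
hf_tok.backend_tokenizer.save("tokenizer-llama3.json")
```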