Artples committed
Commit
cd76efc
1 Parent(s): 6f346c7

Update app.py

Files changed (1):
  app.py +78 -40
app.py CHANGED
@@ -1,49 +1,45 @@
 import os
+from threading import Thread
+from typing import Iterator
+
 import gradio as gr
 import spaces
 import torch
-from threading import Thread
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
-# Constants for model behavior
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
-# Models selection
-MODELS = {
-    "Fast-Model": "Artples/L-MChat-Small",
-    "Quality-Model": "Artples/L-MChat-7b"
-}
-
-# Description for the application
 DESCRIPTION = """\
 # L-MChat
 This Space demonstrates [L-MChat](https://huggingface.co/collections/Artples/l-mchat-663265a8351231c428318a8f) by L-AI.
 """
 
-# Check for GPU availability
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU! This demo does not work on CPU.</p>"
 
-# Load models and tokenizers
-models = {name: AutoModelForCausalLM.from_pretrained(model_id, device_map="auto") for name, model_id in MODELS.items()}
-tokenizers = {name: AutoTokenizer.from_pretrained(model_id) for name, model_id in MODELS.items()}
+model_dict = {
+    "Fast-Model": "Artples/L-MChat-Small",
+    "Quality-Model": "Artples/L-MChat-7b"
+}
 
 @spaces.GPU(enable_queue=True, duration=90)
 def generate(
-    model_choice: str,
     message: str,
     chat_history: list[tuple[str, str]],
     system_prompt: str,
+    model_choice: str,
     max_new_tokens: int = 1024,
     temperature: float = 0.1,
     top_p: float = 0.9,
     top_k: int = 50,
     repetition_penalty: float = 1.2,
-) -> str:
-    model = models[model_choice]
-    tokenizer = tokenizers[model_choice]
+) -> Iterator[str]:
+    model_id = model_dict[model_choice]
+    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    tokenizer.use_default_system_prompt = False
 
     conversation = []
     if system_prompt:
@@ -52,42 +48,84 @@ def generate(
         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
     conversation.append({"role": "user", "content": message})
 
-    input_ids = tokenizer(conversation, return_tensors="pt", truncation=True, max_length=MAX_INPUT_TOKEN_LENGTH).input_ids
+    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt", add_generation_prompt=True)
+    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
     input_ids = input_ids.to(model.device)
 
-    output_ids = model.generate(
-        input_ids,
-        max_length=input_ids.shape[1] + max_new_tokens,
-        temperature=temperature,
+    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        {"input_ids": input_ids},
+        streamer=streamer,
+        max_new_tokens=max_new_tokens,
+        do_sample=True,
         top_p=top_p,
         top_k=top_k,
+        temperature=temperature,
+        num_beams=1,
         repetition_penalty=repetition_penalty,
-        num_return_sequences=1,
     )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
 
-    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
+    outputs = []
+    for text in streamer:
+        outputs.append(text)
+        yield "".join(outputs)
 
-# Gradio Interface
-chat_interface = gr.Interface(
+chat_interface = gr.ChatInterface(
+    theme='ehristoforu/RE_Theme',
     fn=generate,
-    inputs=[
-        gr.Dropdown(label="Choose Model", choices=list(MODELS.keys()), default="Quality-Model"),
-        gr.ChatBox(),
+    additional_inputs=[
         gr.Textbox(label="System prompt", lines=6),
-        gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS),
-        gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6),
-        gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9),
-        gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50),
-        gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2),
+        gr.Radio(["Fast-Model", "Quality-Model"], label="Model", value="Fast-Model"),
+        gr.Slider(
+            label="Max new tokens",
+            minimum=1,
+            maximum=MAX_MAX_NEW_TOKENS,
+            step=1,
+            value=DEFAULT_MAX_NEW_TOKENS,
+        ),
+        gr.Slider(
+            label="Temperature",
+            minimum=0.1,
+            maximum=4.0,
+            step=0.1,
+            value=0.6,
+        ),
+        gr.Slider(
+            label="Top-p (nucleus sampling)",
+            minimum=0.05,
+            maximum=1.0,
+            step=0.05,
+            value=0.9,
+        ),
+        gr.Slider(
+            label="Top-k",
+            minimum=1,
+            maximum=1000,
+            step=1,
+            value=50,
+        ),
+        gr.Slider(
+            label="Repetition penalty",
+            minimum=1.0,
+            maximum=2.0,
+            step=0.05,
+            value=1.2,
+        ),
     ],
-    outputs="text",
-    theme='ehristoforu/RE_Theme',
+    stop_btn=None,
     examples=[
-        ["Quality-Model", "Hello there! How are you doing?", [], "Let's start the conversation.", 1024, 0.6, 0.9, 50, 1.2]
-    ]
+        ["Hello there! How are you doing?"],
+        ["Can you explain briefly to me what is the Python programming language?"],
+        ["Explain the plot of Cinderella in a sentence."],
+        ["How many hours does it take a man to eat a Helicopter?"],
+        ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
+    ],
 )
 
-# Main execution block
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
    chat_interface.render()
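
For readers skimming the diff: the core change replaces the blocking `model.generate` + `tokenizer.decode` call with threaded, token-by-token streaming, which is why `generate()` becomes a generator returning `Iterator[str]`. A minimal standalone sketch of that pattern follows (illustrative only, not part of the Space; the model id is taken from the diff, while the prompt and `max_new_tokens` are placeholders):

```python
# Minimal sketch of the streaming pattern adopted in this commit:
# generation runs on a worker thread while TextIteratorStreamer yields
# decoded text chunks to the caller. Model id comes from the diff; the
# prompt and generation settings are illustrative placeholders.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "Artples/L-MChat-Small"
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)

conversation = [{"role": "user", "content": "Hello there! How are you doing?"}]
input_ids = tokenizer.apply_chat_template(
    conversation, return_tensors="pt", add_generation_prompt=True
).to(model.device)

# skip_prompt=True keeps the echoed input out of the streamed output.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
thread = Thread(
    target=model.generate,
    kwargs=dict(input_ids=input_ids, streamer=streamer, max_new_tokens=64),
)
thread.start()

# Consume partial text as it arrives, as the Space's generate() does with yield.
for text in streamer:
    print(text, end="", flush=True)
thread.join()
```

`gr.ChatInterface` renders each successively yielded string as the progressively updated assistant reply, so the accumulated `"".join(outputs)` in the new `generate()` is what makes the response appear to type itself out.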