daniloedu committed on
Commit fd50bcf · 1 Parent(s): b96dda2

Update app.py

Files changed (1)
  1. app.py +28 -21
app.py CHANGED
@@ -1,33 +1,40 @@
 import os
-import torch
 from dotenv import load_dotenv, find_dotenv
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+import torch
 
 # Load environment variables
 _ = load_dotenv(find_dotenv())
 hf_api_key = os.environ['HF_API_KEY']
 
+model_name = "tiiuae/falcon-7b-instruct"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+text_gen_pipeline = pipeline(
+    "text-generation",
+    model=model_name,
+    tokenizer=tokenizer,
+    torch_dtype=torch.bfloat16,
+    trust_remote_code=True,
+    device_map="auto",
+)
+
 class Client:
-    def __init__(self, model_name, device_map="auto"):
-        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
-        self.model = AutoModelForCausalLM.from_pretrained(
-            model_name,
-            device_map=device_map,
-            torch_dtype=torch.float16,
-            load_in_8bit=False,
-            rope_scaling={"type": "dynamic", "factor": 2}
+    def __init__(self, pipeline):
+        self.pipeline = pipeline
+
+    def generate_text(self, prompt, max_new_tokens, temperature):
+        sequences = self.pipeline(
+            prompt,
+            max_length=max_new_tokens,
+            do_sample=True,
+            top_k=10,
+            num_return_sequences=1,
+            eos_token_id=tokenizer.eos_token_id,
         )
+        return sequences[0]['generated_text']
 
-    def generate_stream(self, prompt, max_new_tokens, stop_sequences, temperature):
-        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
-        del inputs["token_type_ids"]
-        streamer = TextStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True)
-        output = self.model.generate(**inputs, streamer=streamer, use_cache=True, max_new_tokens=float('inf'))
-        output_text = self.tokenizer.decode(output[0], skip_special_tokens=True)
-        return output_text
-
-client = Client("upstage/Llama-2-70b-instruct")
+client = Client(text_gen_pipeline)
 
 def format_chat_prompt(message, chat_history, instruction):
     prompt = f"System:{instruction}"
@@ -40,7 +47,7 @@ def format_chat_prompt(message, chat_history, instruction):
 def respond(message, chat_history, instruction, temperature=0.7):
     prompt = format_chat_prompt(message, chat_history, instruction)
     chat_history = chat_history + [[message, ""]]
-    output_text = client.generate_stream(prompt, max_new_tokens=1024, stop_sequences=["\nUser:", ""], temperature=temperature)
+    output_text = client.generate_text(prompt, max_new_tokens=1024, temperature=temperature)
     last_turn = list(chat_history.pop(-1))
     last_turn[-1] += output_text
     chat_history = chat_history + [last_turn]
@@ -49,4 +56,4 @@ def respond(message, chat_history, instruction, temperature=0.7):
 iface = gr.Interface(fn=respond, inputs=[gr.Textbox(label="Prompt"), gr.Chatbot(label="Chat History", height=240), gr.Textbox(label="System message", lines=2, value="A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."), gr.Slider(label="temperature", minimum=0.1, maximum=1, value=0.7, step=0.1)], outputs=[gr.Textbox(label="Prompt"), gr.Chatbot(label="Chat History", height=240)])
 
 if __name__ == "__main__":
-    iface.launch()
+    iface.launch()
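
A note on the new generate_text, with a hedged sketch that is not part of the commit: as committed, the method accepts max_new_tokens and temperature but passes max_new_tokens to the pipeline as max_length (which caps prompt plus completion together) and never forwards temperature; a text-generation pipeline also returns the prompt concatenated with the completion by default, so respond() appends the echoed prompt into the chat history. A minimal variant that forwards both arguments and strips the prompt might look like the following, assuming the same text_gen_pipeline built above (max_new_tokens, temperature, and return_full_text are standard transformers generate / TextGenerationPipeline options):

    def generate_text(self, prompt, max_new_tokens, temperature):
        # Sketch only (not the committed code): forward the caller's
        # sampling controls instead of dropping them.
        sequences = self.pipeline(
            prompt,
            max_new_tokens=max_new_tokens,  # cap new tokens, not prompt + completion
            do_sample=True,
            temperature=temperature,        # forwarded rather than ignored
            top_k=10,
            num_return_sequences=1,
            eos_token_id=self.pipeline.tokenizer.eos_token_id,
            return_full_text=False,         # drop the echoed prompt from the output
        )
        return sequences[0]['generated_text']

With return_full_text=False, the string appended in respond() is only the model's reply, which matches how the chat history rows are assembled.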