Tonic committed on
Commit 8de5029 (1 parent: 20b42bf)

Update app.py

Files changed (1):
  app.py +61 -114
app.py CHANGED
@@ -1,130 +1,77 @@
- import optimum
  import transformers
- from transformers import AutoConfig, AutoTokenizer, AutoModel, AutoModelForCausalLM
- from optimum.bettertransformer import BetterTransformer
  import torch
  import gradio as gr
- import json
- import os
- import shutil
- import requests
-
- # Define the device
- device = "cuda" if torch.cuda.is_available() else "cpu"
- # Define generation variables
- temperature = 0.4
- max_new_tokens = 240
- top_p = 0.92
- repetition_penalty = 1.7
-
- model_name = "OpenLLM-France/Claire-7B-0.1"
-
- tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
- model = transformers.AutoModelForCausalLM.from_pretrained(
-     model_name,
-     device_map="auto",
-     torch_dtype=torch.bfloat16,
-     load_in_4bit=True  # For efficient inference, if supported by the GPU card
- )
- model = BetterTransformer.transform(model)
-
-
- # Class to encapsulate the Falcon-based Claire chatbot
- class FalconChatBot:
-     def __init__(self, system_prompt="Le dialogue suivant est une conversation"):
-         self.system_prompt = system_prompt
-
-     def predict(self, user_message, assistant_message, temperature=0.4, max_new_tokens=700, top_p=0.99, repetition_penalty=1.9):
-         # Combine the system prompt with the assistant and user messages
-         conversation = f"{self.system_prompt} {assistant_message if assistant_message else ''} {user_message} "
-         # Encode the conversation using the tokenizer
-         input_ids = tokenizer.encode(conversation, return_tensors="pt", add_special_tokens=False)
-         input_ids = input_ids.to(device)
-         # Generate a response using the model
-         response = model.generate(
-             input_ids=input_ids,
-             use_cache=False,
-             early_stopping=False,
-             bos_token_id=model.config.bos_token_id,
-             eos_token_id=model.config.eos_token_id,
-             pad_token_id=model.config.eos_token_id,
-             temperature=temperature,
-             do_sample=True,
-             max_new_tokens=max_new_tokens,
-             top_p=top_p,
-             repetition_penalty=repetition_penalty
-         )
-         # Decode the generated response to text
-         response_text = tokenizer.decode(response[0], skip_special_tokens=True)
-         return response_text

- # Create the Falcon chatbot instance
- falcon_bot = FalconChatBot()

  # Define the Gradio interface
- title = "👋🏻Bienvenue à Tonic's 🌜🌚Claire Chat !"
- description = "Vous pouvez utiliser [🌜🌚ClaireGPT](https://huggingface.co/OpenLLM-France/Claire-7B-0.1) ou dupliquer pour l'utiliser localement ou sur Hugging Face ! [Join me on Discord to build together](https://discord.gg/VqTxc76K3u)."
- examples = [
-     [
-         "Le dialogue suivant est une conversation entre Emmanuel Macron et Elon Musk:",  # user_message
-         "[Emmanuel Macron]: Bonjour Monsieur Musk. Je vous remercie de me recevoir aujourd'hui.",  # assistant_message
-         0.9,   # temperature
-         150,   # max_new_tokens
-         0.90,  # top_p
-         1.9,   # repetition_penalty
-     ]
- ]
-
- additional_inputs = [
-     gr.Textbox("", label="Introduisez un personnage ici ou mettez en scène"),
-     gr.Slider(
-         label="Max new tokens",
-         value=100,  # default value
-         minimum=25,
-         maximum=256,
-         step=1,
-         interactive=True,
-         info="The maximum number of new tokens",
-     ),
-     gr.Slider(
-         label="Temperature",
-         value=0.7,  # default value
-         minimum=0.05,
-         maximum=1.0,
-         step=0.05,
-         interactive=True,
-         info="Higher values produce more diverse outputs",
-     ),
-     gr.Slider(
-         label="Top-p (nucleus sampling)",
-         value=0.90,
-         minimum=0.01,
-         maximum=0.99,
-         step=0.05,
-         interactive=True,
-         info="Higher values sample more low-probability tokens",
-     ),
-     gr.Slider(
-         label="Repetition penalty",
-         value=1.9,
-         minimum=1.0,
-         maximum=2.0,
-         step=0.05,
-         interactive=True,
-         info="Penalize repeated tokens",
-     )
- ]

  iface = gr.Interface(
-     fn=falcon_bot.predict,
      title=title,
      description=description,
-     examples=examples,
      inputs=[
-         gr.Textbox(label="Utilisez ce format pour initier une conversation [Personnage:]", type="text", lines=5),
-     ] + additional_inputs,
      outputs="text",
      theme="ParityError/Anime"
  )

- # Launch the Gradio interface for the Falcon model
  iface.launch()
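One caveat on the removed loader above: passing load_in_4bit=True straight to from_pretrained needs the bitsandbytes package, and newer transformers releases express the same intent through an explicit quantization config. A minimal equivalent sketch, assuming bitsandbytes is installed and a CUDA GPU is available (model name as in the removed code):

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

    model_name = "OpenLLM-France/Claire-7B-0.1"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Same 4-bit intent as load_in_4bit=True, via an explicit config
    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.bfloat16,  # matches the torch_dtype above
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map="auto",
        quantization_config=quant_config,
    )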
 
+ import os
+ import math
  import transformers
+ from transformers import AutoModelForCausalLM, AutoTokenizer
  import torch
  import gradio as gr

  # Define the Gradio interface
+ title = "Welcome to Tonic's 🐋🐳Orca-2-13B!"
+ description = "You can use [🐋🐳microsoft/Orca-2-13b](https://huggingface.co/microsoft/Orca-2-13b) or clone this space to use it locally or on Hugging Face! [Join me on Discord to build together](https://discord.gg/VqTxc76K3u)."
+
+
+ # Load the model and tokenizer
+ model_name = "microsoft/Orca-2-13b"
+ model = AutoModelForCausalLM.from_pretrained(model_name, device_map='auto')
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
+
+
+ class OrcaChatBot:
+     def __init__(self, model, tokenizer, system_message="You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."):
+         self.model = model
+         self.tokenizer = tokenizer
+         self.system_message = system_message
+         self.conversation_history = None
+
+     def predict(self, user_message, temperature=0.4, max_new_tokens=70, top_p=0.99, repetition_penalty=1.9):
+         # Prepare the prompt in the ChatML format the model expects
+         if self.conversation_history is None:
+             prompt = f"<|im_start|>system\n{self.system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
+         else:
+             prompt = self.conversation_history + f"<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
+
+         # Encode the prompt
+         inputs = self.tokenizer(prompt, return_tensors='pt', add_special_tokens=False)
+         input_ids = inputs["input_ids"].to(self.model.device)
+
+         # Generate a response (do_sample is required for temperature/top_p to take effect)
+         output_ids = self.model.generate(
+             input_ids,
+             max_length=input_ids.shape[1] + max_new_tokens,
+             do_sample=True,
+             temperature=temperature,
+             top_p=top_p,
+             repetition_penalty=repetition_penalty,
+             pad_token_id=self.tokenizer.eos_token_id
+         )
+
+         # Decode only the newly generated tokens, not the echoed prompt
+         response = self.tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
+
+         # Update the conversation history with the full sequence, special tokens included
+         self.conversation_history = self.tokenizer.decode(output_ids[0], skip_special_tokens=False)
+
+         return response
+
+ Orca_bot = OrcaChatBot(model, tokenizer)
+
+ def gradio_predict(user_message, character_intro, max_new_tokens, temperature, top_p, repetition_penalty):
+     # Prepend the character introduction (system prompt) to the user message if provided
+     full_message = f"{character_intro}\n{user_message}" if character_intro else user_message
+     return Orca_bot.predict(full_message, temperature, max_new_tokens, top_p, repetition_penalty)

  iface = gr.Interface(
+     fn=gradio_predict,
      title=title,
      description=description,
      inputs=[
+         gr.Textbox(label="Your Message", type="text", lines=3),
+         gr.Textbox(label="Introduce a Character Here or Set a Scene (system prompt)", type="text", lines=2),
+         gr.Slider(label="Max new tokens", value=1200, minimum=25, maximum=4096, step=1),
+         gr.Slider(label="Temperature", value=0.7, minimum=0.05, maximum=1.0, step=0.05),
+         gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.01, maximum=0.99, step=0.05),
+         gr.Slider(label="Repetition penalty", value=1.9, minimum=1.0, maximum=2.0, step=0.05)
+     ],
      outputs="text",
      theme="ParityError/Anime"
  )

+ # Launch the Gradio interface
  iface.launch()
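For anyone cloning this Space, a minimal smoke-test sketch of the committed OrcaChatBot class (hypothetical, not part of app.py; assumes torch, transformers, and gradio are installed and the 13B weights fit on the available hardware):

    # Hypothetical local test, not part of the commit
    bot = OrcaChatBot(model, tokenizer)

    # First turn starts a fresh ChatML conversation seeded with the system message
    print(bot.predict("Name three ocean mammals.", temperature=0.7, max_new_tokens=100))

    # predict() stores the full decoded sequence in conversation_history,
    # so the second turn can resolve "them" from the first exchange
    print(bot.predict("Which of them is the largest?", temperature=0.7, max_new_tokens=100))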