import random
import datetime
import sys
from agent.agent import SigSpace
import spaces
import gradio as gr
import os
from PIL import Image
os.environ["VLLM_USE_V1"] = "0" # Disable v1 API for now since it does not support logits processors.
# Determine the directory where the current file is located
current_dir = os.path.dirname(os.path.abspath(__file__))
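# MKL_THREADING_LAYER=GNU is commonly set to avoid OpenMP runtime clashes when
# Intel MKL (numpy/scipy) and GNU OpenMP (PyTorch) end up in the same process.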
os.environ["MKL_THREADING_LAYER"] = "GNU"
# Set an environment variable
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Create the image path - use absolute path for reliability
img_path = os.path.join(current_dir, 'img', 'SigSpace.png')
def display_image(image_path):
# Load and return the image
img = Image.open(image_path)
return img
DESCRIPTION = f'''
<div style="text-align: center;">
<h1 style="font-size: 32px; margin-bottom: 10px;">SigSpace: An AI Agent for Tahoe-100M</h1>
</div>
'''
INTRO = """
This is the intro that goes here
"""
LICENSE = """
License goes here
"""
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Agent</h1>
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Tips before using Agent:</p>
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.55;">Please click clear🗑️
(top-right) to remove previous context before submitting a new question.</p>
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.55;">Click retry🔄 (below message) to get multiple versions of the answer.</p>
</div>
"""
css = """
h1 {
text-align: center;
display: block;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
.small-button button {
font-size: 12px !important;
padding: 4px 8px !important;
height: 6px !important;
width: 4px !important;
}
.gradio-accordion {
margin-top: 0px !important;
margin-bottom: 0px !important;
}
"""
chat_css = """
.gr-button { font-size: 20px !important; } /* Enlarges button icons */
.gr-button svg { width: 32px !important; height: 32px !important; } /* Enlarges SVG icons */
"""
model_name = ''
os.environ["TOKENIZERS_PARALLELISM"] = "false"
question_examples = [
    # ['What is the IC50 value for the drug Abemaciclib in the cell line A549?'],
    ["What's the MoA of the drug Ponatinib on the HCT15 colon cancer cell line? Please synthesize results from the Tahoe-100M dataset, the JUMP dataset, and the IC50 dataset."],
    ["Natural perturbation: find the disease perturbation that has a similar effect to Glycyrrhizic acid on CVCL_0334. Use the result and what you know to explain the mechanism of action."],
    ["Mechanism of action: give me the mechanism of action for the drug Abemaciclib as provided by Tahoe."],
    ["Vision scores: what are the top 5 vision scores for cell line A549 and the drug Abemaciclib?"]
]
new_tool_files = {
'new_tool': os.path.join(current_dir, 'data', 'new_tool.json'),
}
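# Initialize the SigSpace agent from a host-local API config (path is deployment-specific).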
config_path = "/home/ubuntu/.lambda_api_config.yaml"
agent = SigSpace(config_path)
# agent.init_model()
def update_model_parameters(enable_finish, enable_rag, enable_summary,
init_rag_num, step_rag_num, skip_last_k,
summary_mode, summary_skip_last_k, summary_context_length, force_finish, seed):
# Update model instance parameters dynamically
updated_params = agent.update_parameters(
enable_finish=enable_finish,
enable_rag=enable_rag,
enable_summary=enable_summary,
init_rag_num=init_rag_num,
step_rag_num=step_rag_num,
skip_last_k=skip_last_k,
summary_mode=summary_mode,
summary_skip_last_k=summary_skip_last_k,
summary_context_length=summary_context_length,
force_finish=force_finish,
seed=seed,
)
return updated_params
def update_seed():
    # Draw a fresh random seed and push it to the agent so retries can produce different answers.
seed = random.randint(0, 10000)
updated_params = agent.update_parameters(
seed=seed,
)
return updated_params
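# Retry handler: re-seed the agent, truncate the chat history to the retried message,
# and re-run the conversation from that point.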
def handle_retry(history, retry_data: gr.RetryData, temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round):
print("Updated seed:", update_seed())
new_history = history[:retry_data.index]
previous_prompt = history[retry_data.index]['content']
print("previous_prompt", previous_prompt)
yield from agent.run_gradio_chat(new_history + [{"role": "user", "content": previous_prompt}], temperature, max_new_tokens, max_tokens, multi_agent, conversation, max_round)
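# Simple password gate for the advanced settings panel; the gated UI itself is commented out further below.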
PASSWORD = "mypassword"
# Function to check if the password is correct
def check_password(input_password):
if input_password == PASSWORD:
return gr.update(visible=True), ""
else:
return gr.update(visible=False), "Incorrect password, try again!"
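# Conversation state shared with the agent across turns.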
conversation_state = gr.State([])
# Gradio block
chatbot = gr.Chatbot(height=400, placeholder=PLACEHOLDER,
label='SigSpace', type="messages", show_copy_button=True)
with gr.Blocks(css=css) as demo:
gr.Markdown(DESCRIPTION)
# gr.Markdown(INTRO)
gr.Image(value=display_image(img_path), label="", show_label=False, height=600, width=600)
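    # Default inference parameters; the "Settings" sliders below write into these State objects.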
default_temperature = 0.3
default_max_new_tokens = 1024
default_max_tokens = 81920
default_max_round = 30
temperature_state = gr.State(value=default_temperature)
max_new_tokens_state = gr.State(value=default_max_new_tokens)
max_tokens_state = gr.State(value=default_max_tokens)
max_round_state = gr.State(value=default_max_round)
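    # Wire the chatbot's retry button to handle_retry, feeding it the current history and parameter states.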
    chatbot.retry(handle_retry,
                  inputs=[chatbot, temperature_state, max_new_tokens_state, max_tokens_state,
                          gr.Checkbox(value=False, render=False), conversation_state, max_round_state],
                  outputs=chatbot)
gr.ChatInterface(
fn=agent.run_gradio_chat,
chatbot=chatbot,
fill_height=False, fill_width=False, stop_btn=True,
additional_inputs_accordion=gr.Accordion(
label="⚙️ Inference Parameters", open=False, render=False),
additional_inputs=[
temperature_state, max_new_tokens_state, max_tokens_state,
gr.Checkbox(
label="Activate X", value=False, render=False),
conversation_state,
max_round_state,
gr.Number(label="Seed", value=100, render=False)
],
examples=question_examples,
cache_examples=False,
css=chat_css,
)
with gr.Accordion("Settings", open=False):
# Define the sliders
temperature_slider = gr.Slider(
minimum=0,
maximum=1,
step=0.1,
value=default_temperature,
label="Temperature"
)
max_new_tokens_slider = gr.Slider(
minimum=128,
maximum=4096,
step=1,
value=default_max_new_tokens,
label="Max new tokens"
)
max_tokens_slider = gr.Slider(
minimum=128,
maximum=32000,
step=1,
value=default_max_tokens,
label="Max tokens"
)
max_round_slider = gr.Slider(
minimum=0,
maximum=50,
step=1,
value=default_max_round,
label="Max round")
# Automatically update states when slider values change
temperature_slider.change(
lambda x: x, inputs=temperature_slider, outputs=temperature_state)
max_new_tokens_slider.change(
lambda x: x, inputs=max_new_tokens_slider, outputs=max_new_tokens_state)
max_tokens_slider.change(
lambda x: x, inputs=max_tokens_slider, outputs=max_tokens_state)
max_round_slider.change(
lambda x: x, inputs=max_round_slider, outputs=max_round_state)
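    # The password-protected block below (model loading and functional-parameter controls) is intentionally disabled.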
# password_input = gr.Textbox(
# label="Enter Password for More Settings", type="password")
# incorrect_message = gr.Textbox(visible=False, interactive=False)
# with gr.Accordion("⚙️ Settings", open=False, visible=False) as protected_accordion:
# with gr.Row():
# with gr.Column(scale=1):
# with gr.Accordion("⚙️ Model Loading", open=False):
# model_name_input = gr.Textbox(
# label="Enter model path", value=model_name)
# load_model_btn = gr.Button(value="Load Model")
# load_model_btn.click(
# agent.load_models, inputs=model_name_input, outputs=gr.Textbox(label="Status"))
# with gr.Column(scale=1):
# with gr.Accordion("⚙️ Functional Parameters", open=False):
# # Create Gradio components for parameter inputs
# enable_finish = gr.Checkbox(
# label="Enable Finish", value=True)
# enable_rag = gr.Checkbox(
# label="Enable RAG", value=True)
# enable_summary = gr.Checkbox(
# label="Enable Summary", value=False)
# init_rag_num = gr.Number(
# label="Initial RAG Num", value=0)
# step_rag_num = gr.Number(
# label="Step RAG Num", value=10)
# skip_last_k = gr.Number(label="Skip Last K", value=0)
# summary_mode = gr.Textbox(
# label="Summary Mode", value='step')
# summary_skip_last_k = gr.Number(
# label="Summary Skip Last K", value=0)
# summary_context_length = gr.Number(
# label="Summary Context Length", value=None)
# force_finish = gr.Checkbox(
# label="Force FinalAnswer", value=True)
# seed = gr.Number(label="Seed", value=100)
# # Button to submit and update parameters
# submit_btn = gr.Button("Update Parameters")
# # Display the updated parameters
# updated_parameters_output = gr.JSON()
# # When button is clicked, update parameters
# submit_btn.click(fn=update_model_parameters,
# inputs=[enable_finish, enable_rag, enable_summary, init_rag_num, step_rag_num, skip_last_k,
# summary_mode, summary_skip_last_k, summary_context_length, force_finish, seed],
# outputs=updated_parameters_output)
# Button to submit the password
# submit_button = gr.Button("Submit")
# # When the button is clicked, check if the password is correct
# submit_button.click(
# check_password,
# inputs=password_input,
# outputs=[protected_accordion, incorrect_message]
# )
gr.Markdown(LICENSE)
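# Launch the app when executed directly; share=True requests a public Gradio tunnel URL.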
if __name__ == "__main__":
    demo.launch(share=True)