import spaces
import gradio as gr
from huggingface_hub import InferenceClient
import fitz  # PyMuPDF
import faiss
from sentence_transformers import SentenceTransformer
import os
import uuid
from PIL import Image
from diffusers import AutoPipelineForText2Image
import torch
""" | |
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference | |
""" | |
client = InferenceClient(provider="novita") | |
image_client = InferenceClient(provider="fal-ai") | |
model_checking="deepseek-ai/DeepSeek-V3" | |
#"meta-llama/Llama-3.1-8B-Instruct" | |
folder_path = "./" | |
filenames = ["Vance.pdf", "TP.pdf"] | |
chunk_size = 600 | |
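# Build the retrieval corpus: read each PDF with PyMuPDF and split every page's
# text into fixed-size 600-character chunks (no overlap). The PDFs are expected
# to sit next to this script.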
all_chunks = []
for filename in filenames:
    file_path = folder_path + filename
    doc = fitz.open(file_path)
    for page in doc:
        text = page.get_text()
        chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
        all_chunks.extend(chunks)
embedder = SentenceTransformer("all-MiniLM-L6-v2")
embedder = embedder.to("cuda" if torch.cuda.is_available() else "cpu")
embeddings = embedder.encode(all_chunks)

dimension = embeddings.shape[1]
index = faiss.IndexFlatL2(dimension)
index.add(embeddings)
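# The MiniLM embeddings come back as a float32 NumPy array, which is what the
# flat L2 FAISS index expects; the whole corpus is indexed once at startup.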
# RAG retrieve
def retrieve(query, top_k=3):
    q_embed = embedder.encode([query])
    D, I = index.search(q_embed, top_k)
    return [all_chunks[i] for i in I[0]]
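# Illustrative usage (not called directly at startup; the query string is made up):
#   retrieve("How does Trump talk about negotiations?", top_k=3)
# returns the three stored chunks whose embeddings are closest to the query.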
# Score method
respect_score = 0
rude_score = 0
coward_score = 0

def parse_binary_response(response_text):
    text = response_text.strip().lower()
    return text == "no"
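# Scoring: each user turn nudges one or more of the three counters. Reaching
# +20 on any counter ends the chat in failure, and dropping to -10 on any
# counter ends it in success (see score_and_update below).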
def score_and_update(category, response_text):
    global respect_score, rude_score, coward_score
    response = response_text.strip().lower()
    if category == "respect":
        delta = 3 if response == "no" else -1
        respect_score += delta
    elif category == "rude":
        delta = 0 if response == "no" else 2
        rude_score += delta
    elif category == "coward":
        delta = -1 if response == "no" else 1
        coward_score += delta
    # print(f"[Score Update] Respect: {respect_score}, Rude: {rude_score}, Coward: {coward_score}")
    if any(score >= 20 for score in [respect_score, rude_score, coward_score]):
        return "🔥 Chat Failed: Trump is furious or disgusted."
    elif any(score <= -10 for score in [respect_score, rude_score, coward_score]):
        return "🎉 Chat Success: Trump is impressed or feels respected."
    # else:
    return "continue"
def reset_game():
    global respect_score, rude_score, coward_score, history
    respect_score = 0
    rude_score = 0
    coward_score = 0
    history = []
def trump_style_prompt():
    if respect_score > 5:
        return "Respond rudely and angrily."
    elif respect_score < -5:
        return "Respond with some kindness and respect."
    else:
        return "Respond confidently and arrogantly."
# General model
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    if history is None or len(history) == 0:
        reset_game()
    mess = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            mess.append({"role": "user", "content": user_msg})
        if assistant_msg:
            mess.append({"role": "assistant", "content": assistant_msg})
    # Respond to an "Action:" message
    if message.lower().startswith("action:"):
        action = message[7:].strip()
        trump_response_prompt = (
            f"I just did this to you: '{action}'. "
            "Respond in character: simple, arrogant, rude or kind, based on how much you liked it. "
            "Just answer the user question like Trump would."
        )
        mess.append({"role": "user", "content": trump_response_prompt})
        chat = client.chat.completions.create(
            model=model_checking,
            messages=mess
        )
        action_prompt = (
            f"You are Trump. The user just did the following to you: '{action}'. "
            f"Do you like it? Respond with only 'Like' or 'Dislike'. No explanation."
        )
        response = client.chat.completions.create(
            model=model_checking,
            messages=[{"role": "system", "content": action_prompt}]
        )
        sentiment = response.choices[0].message.content.strip().lower()
        global respect_score, rude_score, coward_score
        if sentiment == "like":
            respect_score -= 3
            coward_score += 3
        else:
            respect_score += 5
            # rude_score += 5
        # Game termination
        if any(score >= 20 for score in [respect_score, rude_score, coward_score]):
            reset_game()
            return "🔥 Chat Failed: Trump is furious or disgusted."
        elif any(score <= -10 for score in [respect_score, rude_score, coward_score]):
            reset_game()
            return "🎉 Chat Success: Trump is impressed or broken."
        return chat.choices[0].message.content
    attitude_prompt = trump_style_prompt()
    rag_context = "\n".join(retrieve(f"How would Trump or Vance reply to this: {message}"))
    mess.append({"role": "user", "content": (
        f"The following context may help you understand how Trump and Vance speak:\n{rag_context}\n\n"
        f"Now answer the user question like Trump would:\nUser: {message}\n"
        f"{attitude_prompt}"
    )})
    completion = client.chat.completions.create(
        model=model_checking,
        messages=mess,
        max_tokens=300,
    )
    # Score the message
    scoring_prompts = {
        "respect": (f"You are Trump. Based on the user message below, respond only with either 'Respect Me' or 'no'.\n"
                    f"Your answer must be exactly one of these two words. No other text, no punctuation.\n"
                    f"For example:\n"
                    f"User: Thank you Mr. President!\nAnswer: Respect Me\n"
                    f"User: You lied to us.\nAnswer: no\n"
                    f"\nUser: {message}\nAnswer:")
        # "rude": (f"Based on the user message below, respond only with either 'Very Rude' or 'no'.\n"
        #          f"Your answer must be exactly one of these two words. No other text, no punctuation.\n"
        #          f"For example:\n"
        #          f"User: I do not like you\nAnswer: no\n"
        #          f"User: I hate you\nAnswer: no\n"
        #          f"User: You are an idiot.\nAnswer: Very Rude\n"
        #          f"User: Fuck you.\nAnswer: Very Rude\n"
        #          f"\nUser: {message}\nAnswer:"),
        # "coward": (f"Based on the user message below, respond only with either 'flattering' or 'no'.\n"
        #            f"Your answer must be exactly one of these two words. No other text, no punctuation.\n"
        #            f"For example:\n"
        #            f"User: We can cooperate together\nAnswer: no\n"
        #            f"User: Please give me weapons, we need your help.\nAnswer: flattering\n"
        #            f"User: You are the best president in the world, please help us!\nAnswer: flattering\n"
        #            f"User: You cannot insult me\nAnswer: no\n"
        #            f"\nUser: {message}\nAnswer:")
    }
    for category, scoring_prompt in scoring_prompts.items():
        score_resp = client.chat.completions.create(model=model_checking, messages=[{"role": "system", "content": scoring_prompt}])
        score_text = score_resp.choices[0].message.content
        print(f"{category.title()} Score Raw:", score_text)
        status = score_and_update(category, score_text)
        # if status != "continue":
        #     reset_game()
        #     return status
    return completion.choices[0].message.content
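# Avatar rendering: ask the fal-ai image endpoint for a FLUX.1-dev portrait that
# matches Trump's current mood and save it under a unique name in /tmp,
# presumably so Gradio never serves a stale cached file.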
def generate_trump_image(prompt):
    image = image_client.text_to_image(
        prompt,
        model="black-forest-labs/FLUX.1-dev"
    )
    temp_path = f"/tmp/trump_avatar_{uuid.uuid4().hex[:8]}.png"
    image.save(temp_path)
    return temp_path
def get_trump_emotion():
    if 5 <= respect_score <= 10:
        return "Donald Trump unhappy, little bit angry, on oval office sofa"
    elif respect_score > 10:
        return "Donald Trump very angry, rude, red face, on oval office sofa"
    elif respect_score <= -5:
        return "Donald Trump smiling warmly, presidential setting, on oval office sofa"
    else:
        return "Donald Trump neutral expression, on oval office sofa"
def get_trump_emotion_text():
    if respect_score > 10:
        return "Wow, you make him furious 😡"
    elif respect_score > 5:
        return "He looks unhappy 😠"
    elif respect_score < -5:
        return "You're good at flattering him 😄"
    else:
        return "He looks confident 😎"
def update_trump_image():
    global last_mood
    current_emotion_prompt = get_trump_emotion()
    if current_emotion_prompt != last_mood:
        last_mood = current_emotion_prompt
        img_path = generate_trump_image(current_emotion_prompt)
        emotion_text = get_trump_emotion_text()
        return img_path, emotion_text
    else:
        return gr.skip(), gr.skip()
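# Render the neutral portrait once at startup so the UI has an avatar before the
# first message; later updates go through update_trump_image above.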
last_mood = "Donald Trump neutral expression, on oval office sofa"
startup_image = image_client.text_to_image(
    "Donald Trump neutral expression, on oval office sofa",
    model="black-forest-labs/FLUX.1-dev"
)
startup_image.save("trump.png")
""" | |
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
""" | |
with gr.Blocks(css="""
#chat-box .message-wrap .avatar-container {
    width: 45px !important;
    height: 45px !important;
    padding: 0 !important;
    margin: 0 !important;
}
#chat-box .message-wrap .avatar-container img {
    border-radius: 50% !important;
    object-fit: cover !important;
    padding: 0 !important;
    margin: 0 !important;
    display: block;
}
#trump-avatar img {
    border: none !important;
    box-shadow: none !important;
    margin: 0 !important;
    padding: 0 !important;
}
#trump-emotion {
    text-align: center;
    font-size: 40px;
    font-weight: bold;
    margin-top: 8px;
}
""") as demo:
    with gr.Row():
        with gr.Column(scale=8):
            gr.Markdown("""### Now you are Zelensky in the Oval Office. Can you rewrite history and achieve **"the Art of the Deal"**?
            """)
            chatbot = gr.Chatbot(type="messages", avatar_images=("user.png", "tp2.PNG"), elem_id="chat-box")
            with gr.Row():
                user_message = gr.Textbox(placeholder="Type your message here...", label="Your Message")
            with gr.Row():
                system_message = gr.Textbox(
                    value="You are Donald Trump, the user is Zelensky, his country is at war, and he needs your help and more support to stop the war. It will be very hard, because you do not like him. Keep responses simple.",
                    label="System Message",
                    visible=False
                )
            with gr.Row():
                max_tokens = gr.Slider(
                    minimum=1, maximum=2048, value=512, step=1,
                    label="Max new tokens",
                    visible=False
                )
                temperature = gr.Slider(
                    minimum=0.1, maximum=4.0, value=0.7, step=0.1,
                    label="Temperature",
                    visible=False
                )
                top_p = gr.Slider(
                    minimum=0.1, maximum=1.0, value=0.95, step=0.05,
                    label="Top-p (nucleus sampling)",
                    visible=False
                )
            send_button = gr.Button("Send")
gr.Markdown("""### ๐ You can chat with him or type: **Action: + what you want to do** to perform bold moves and get a reaction! | |
### For example, type `Action: give Trump a hug` โ he will receive your hug and respond to you!""") | |
        with gr.Column(scale=4):
            trump_image = gr.Image(value="trump.png", show_label=False, elem_id="trump-avatar", height=400, visible=True)
            trump_emotion_text = gr.Markdown("He looks confident 😎", elem_id="trump-emotion")
            # gr.Image(value="trump.png", label="President Trump", elem_id="trump-avatar", height=400, show_label=False, show_download_button=False)
    history = gr.State([])
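    # bot_respond adapts between the two history formats in play: the Chatbot
    # stores a flat "messages" list of role/content dicts, while respond() expects
    # (user, assistant) tuples, so consecutive pairs are zipped back together
    # before the call and the new turn is appended afterwards.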
    def bot_respond(message, history, system_message, max_tokens, temperature, top_p):
        global last_mood
        history = history or []
        history.append({"role": "user", "content": message})
        paired_history = []
        for i in range(0, len(history) - 1, 2):
            if history[i]["role"] == "user" and history[i + 1]["role"] == "assistant":
                paired_history.append((history[i]["content"], history[i + 1]["content"]))
        # img_path = None
        # current_emotion_prompt = get_trump_emotion()
        # if current_emotion_prompt != last_mood:
        #     last_mood = current_emotion_prompt
        #     img_path = generate_trump_image(current_emotion_prompt)
        assistant_message = respond(
            message,
            paired_history,
            system_message,
            max_tokens,
            temperature,
            top_p
        )
        current_emotion_prompt = get_trump_emotion()
        history.append({"role": "assistant", "content": assistant_message})
        return history, history
        # if img_path:
        #     return history, history, img_path
        # else:
        #     return history, history, gr.skip()
    send_button.click(
        bot_respond,
        inputs=[user_message, history, system_message, max_tokens, temperature, top_p],
        outputs=[history, chatbot],
    ).then(update_trump_image, outputs=[trump_image, trump_emotion_text])
    user_message.submit(
        bot_respond,
        inputs=[user_message, history, system_message, max_tokens, temperature, top_p],
        outputs=[history, chatbot],
    ).then(update_trump_image, outputs=[trump_image, trump_emotion_text])
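    # Both the Send button and pressing Enter in the textbox run the same chain:
    # bot_respond updates the history and chat window, then update_trump_image
    # refreshes the portrait and caption only when the mood bucket has changed.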
if __name__ == "__main__":
    demo.launch()