File size: 3,734 Bytes
6502c51
 
 
ef2bbfd
 
6502c51
ef2bbfd
 
6502c51
ef2bbfd
 
6502c51
ef2bbfd
 
 
 
 
6502c51
ef2bbfd
6502c51
ef2bbfd
 
6502c51
 
 
 
 
 
ef2bbfd
6502c51
ef2bbfd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6502c51
ef2bbfd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6502c51
ef2bbfd
 
6502c51
ef2bbfd
 
 
 
 
 
 
 
 
 
 
 
 
6502c51
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
# Gradio chat app that streams replies from a hosted LLM and runs each
# completed reply through Carmen's "sentience" analyzer.
import gradio as gr
from huggingface_hub import InferenceClient

# Import the Carmen module. (Ensure that the repository is installed and accessible.)
# NOTE(review): `carmen` is a project-local / third-party package, not stdlib —
# the app fails at import time if it is missing.
from carmen.sentience import analyze_sentience

# Initialize the chat client.
# Targets the hosted zephyr-7b-beta model via the Hugging Face Inference API;
# network access (and possibly an HF token in the environment) is required at call time.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def chat_and_sentience(message, history, system_message, max_tokens, temperature, top_p):
    """Stream a chat completion for *message*, then analyze the reply with Carmen.

    Args:
        message: The new user message.
        history: Prior turns as a list of (user_msg, assistant_msg) tuples
            (the ``gr.State`` value).
        system_message: System prompt for the LLM.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.

    Yields:
        ``[chat_pairs, sentiment_text, updated_state]`` — the chatbot display,
        the sentiment box contents (None until the reply is complete), and the
        conversation history to write back into ``state`` so the next turn
        actually sees prior messages.
    """
    # Rebuild the message list the LLM expects: system prompt, prior turns, new user turn.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    # Generate the chat response via streaming.
    for chat in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # BUGFIX: streamed deltas may carry content=None (e.g. role-only or
        # terminal chunks); guard so the concatenation never raises TypeError.
        token = chat.choices[0].delta.content or ""
        response += token
        # Intermediate UI update: partial reply shown, no sentiment yet,
        # state left unchanged until the turn completes.
        yield [history + [(message, response)], None, history]

    updated_history = history + [(message, response)]

    # Once the full response is assembled, perform sentience analysis using Carmen.
    # analyze_sentience is assumed to return a dict of scores, a list of
    # {'label', 'score'} items, or something printable — TODO confirm its contract.
    sentiment_results = analyze_sentience(response)

    # Normalize the result into display text, branching on the observed shape.
    if isinstance(sentiment_results, dict):
        sentiment_str = "\n".join(f"{k}: {v:.2f}" for k, v in sentiment_results.items())
    elif isinstance(sentiment_results, list):
        sentiment_str = "\n".join(
            f"{item['label']}: {item['score']:.2f}" for item in sentiment_results
        )
    else:
        sentiment_str = str(sentiment_results)

    # Final update: full chat history, the sentiment text, and — BUGFIX — the
    # updated history written back to state. Previously `state` was never an
    # output, so `history` stayed [] forever and the conversation reset each turn.
    yield [updated_history, sentiment_str, updated_history]

# Build the UI with gr.Blocks.
with gr.Blocks() as demo:
    with gr.Row():
        chatbot = gr.Chatbot(label="Chat")
    with gr.Row():
        sentiment_box = gr.Textbox(
            label="Sentience Moment Scanner",
            lines=4,
            placeholder="Emotion analysis will appear here..."
        )
    with gr.Row():
        message_input = gr.Textbox(label="Your Message")
    with gr.Row():
        system_message_input = gr.Textbox(value="You are a friendly Chatbot.", label="System Message")
    with gr.Row():
        max_tokens_slider = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max New Tokens")
    with gr.Row():
        temperature_slider = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
    with gr.Row():
        top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
    submit_btn = gr.Button("Send")

    # Conversation history lives in a gr.State so it persists across turns.
    state = gr.State([])

    # Wire up the events: clicking Send or pressing Enter runs the generator.
    # `state` is both an input (prior history) and an output (updated history),
    # matching the three values each yield produces.
    submit_btn.click(
         chat_and_sentience,
         inputs=[message_input, state, system_message_input, max_tokens_slider, temperature_slider, top_p_slider],
         outputs=[chatbot, sentiment_box, state],
         show_progress=True
    )
    message_input.submit(
         chat_and_sentience,
         inputs=[message_input, state, system_message_input, max_tokens_slider, temperature_slider, top_p_slider],
         outputs=[chatbot, sentiment_box, state],
         show_progress=True
    )

if __name__ == "__main__":
    demo.launch()