File size: 3,102 Bytes
96911b6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89661b3
96911b6
 
89661b3
 
96911b6
89661b3
 
96911b6
89661b3
 
 
96911b6
89661b3
 
96911b6
89661b3
 
96911b6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e1b9d08
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96911b6
 
e1b9d08
96911b6
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
import gradio as gr
from gradio_client import Client, handle_file
import os


# Define your Hugging Face token (make sure to set it as an environment variable)
# NOTE(review): HF_TOKEN will be None if the env var is unset; Client() then
# connects anonymously — confirm the target Space allows unauthenticated access.
HF_TOKEN = os.getenv("HF_TOKEN")  # Replace with your actual token if not using an environment variable

# Initialize the Gradio Client for the specified API
# NOTE: this performs a network call at import time to resolve the Space's API.
client = Client("mangoesai/Elections_Comparing_Agent_V2", hf_token=HF_TOKEN)

# NOTE(review): this module-level list appears unused — the UI defines its own
# choices, and `stream_chat_with_rag`'s `client_name` parameter shadows it.
client_name = ['2016 Election','2024 Election', 'Comparison two years']



def stream_chat_with_rag(
    message: str,
    client_name: str,
) -> str:
    """Forward a user question to the remote RAG endpoint and return its answer.

    Args:
        message: The user's question text.
        client_name: The election-year selection — one of "2016 Election",
            "2024 Election", or "Comparison two years".

    Returns:
        The raw answer produced by the Space's ``/process_query`` endpoint.
    """
    # Delegate to the hosted Space via the module-level gradio_client.Client.
    answer = client.predict(
        query=message,
        election_year=client_name,
        api_name="/process_query",
    )

    # Debugging aid: surface the raw API response in the server logs.
    print("Raw answer from API:")
    print(answer)

    return answer



# Create Gradio interface: question input and election-year selector on the
# left, model response on the right.
with gr.Blocks(title="Reddit Election Comments Analysis") as demo:
    gr.Markdown("# Reddit Election Comments Analysis")
    gr.Markdown("Ask questions about election-related comments and posts")

    with gr.Row():
        with gr.Column():
            # Election year selector — its value is passed through to the
            # remote endpoint as `election_year`.
            year_selector = gr.Radio(
                choices=["2016 Election", "2024 Election", "Comparison two years"],
                label="Select Election Year",
                value="2016 Election"  # Default value
            )

            query_input = gr.Textbox(
                label="Your Question",
                placeholder="Ask about election comments or posts..."
            )
            submit_btn = gr.Button("Submit")

        with gr.Column():
            output = gr.Textbox(
                label="Response",
                lines=20
            )

    # Wire the submit button: (question, year) -> remote RAG answer.
    submit_btn.click(
        fn=stream_chat_with_rag,
        inputs=[query_input, year_selector],
        outputs=output
    )

    gr.Markdown("""
    ## Example Questions:
    - Is there any comments don't like the election results
    - Summarize the main discussions about voting process
    - What are the common opinions about candidates?
    - How have people's attitudes toward the Republican Party changed in the past two years?
     """)

if __name__ == "__main__":
    demo.launch(share=True)