import json
import os
import gradio as gr
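
# API keys and LangSmith tracing settings come from environment variables (Space secrets)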
os.environ["OPENAI_API_KEY"] = os.getenv("OpenAI_KEY")
os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_PROJECT"] = os.getenv("LANGCHAIN_PROJECT")
import openai
from langsmith.wrappers import wrap_openai
from langsmith import traceable

# Auto-trace LLM calls in-context
client = wrap_openai(openai.Client())
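
# Retrieve the pre-configured Assistant and open one conversation thread shared by all requests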
FakeNewsAggregator = client.beta.assistants.retrieve(os.getenv("OPENAI_ASSISTANT_ID"))
thread = client.beta.threads.create()

# Auto-trace this function
@traceable
def FakeNewsAggregatorRequest(text):
    global FakeNewsAggregator
    global thread
    # Add the user's question to the shared thread
    message = client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content=text
    )
    # Run the assistant on the thread and poll until the run reaches a terminal state
    run = client.beta.threads.runs.create_and_poll(
        thread_id=thread.id,
        assistant_id=FakeNewsAggregator.id
    )
    if run.status == 'completed':
        # Messages are listed newest first, so data[0] is the assistant's latest reply
        messages = client.beta.threads.messages.list(
            thread_id=thread.id
        )
        gpt_response = messages.data[0].content[0].text.value
        return gpt_response
    else:
        # Surface the run status (e.g. 'failed' or 'expired') so the user sees why there is no answer
        return run.status
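
# Gradio UI: one text input, one text output, with example questions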
gr.Interface(fn=FakeNewsAggregatorRequest,
             inputs=["text"],
             outputs=["text"],
             flagging_mode='never',
             title="I am the TruthBot",
             description="I am a virtual assistant that helps you verify rumors and news",
             theme="soft",
             examples=['How to spot misinformation?', 'What are the views of Trump and Kamala on climate change?', 'Can Trump serve a third term?', 'What is disinformation?']
             ).launch(share=False)