import streamlit as st
import google.generativeai as genai
import ast
import time
import re
import os
from typing import List, Tuple, Optional


def extract_python_code(text: str) -> Optional[str]:
    """Extract the body of a ```python fenced block from model output, if present."""
    pattern = r"```python\n(.*?)```"
    match = re.search(pattern, text, re.DOTALL)
    return match.group(1).strip() if match else None
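
# Illustrative only: given a made-up fenced reply such as
#   '```python\n["fact one", "fact two"]\n```'
# extract_python_code returns the inner text '["fact one", "fact two"]'.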


def configure_genai():
    """Configure the Gemini client from the SECRET_KEY environment variable."""
    secret_key = os.getenv("SECRET_KEY")
    if not secret_key:
        st.error("API key not found. Please set the SECRET_KEY environment variable.")
        st.stop()
    genai.configure(api_key=secret_key)


def parse_gemini_response(response_text: str) -> Tuple[str, str]:
    """Parse the model's reply into a (verdict, evidence) tuple."""
    try:
        # First, try to parse the whole reply as a single two-element list.
        # The inner try lets parsing failures fall through to the regex fallback
        # instead of aborting immediately.
        try:
            parsed = ast.literal_eval(response_text)
            if isinstance(parsed, list) and len(parsed) == 2:
                return parsed[0], parsed[1]
        except (ValueError, SyntaxError):
            pass
        # If that fails, look for two separate bracketed lists in the text
        matches = re.findall(r'\[.*?\]', response_text)
        if len(matches) >= 2:
            first_list = ast.literal_eval(matches[0])
            second_list = ast.literal_eval(matches[1])
            return first_list[0], second_list[0]
        # If no valid format is found, raise an exception
        raise ValueError("Unexpected response format")
    except Exception as e:
        return "Error", f"Failed to parse response: {str(e)}"


def get_gemini_response(input_text: str) -> Tuple[str, str]:
    """Ask the model to fact-check a single statement and return (verdict, evidence)."""
    prompt = """You are a fact checker. Given a text, respond with:
1. 'true', 'false', or 'unsure' (if you are unsure or the claim falls after your knowledge cutoff)
2. Evidence in support, or 'knowledge cutoff'
Respond in this exact format: ['true/false/unsure', 'evidence or knowledge cutoff']
Example input: 'Google was founded in 1998'
Example output: ['true', 'Google was indeed founded in September 1998 by Larry Page and Sergey Brin']
Now give a response in the exact described format for the following text:
"""
    model = genai.GenerativeModel('gemini-1.5-pro')
    try:
        response = model.generate_content(prompt + input_text)
        result, evidence = parse_gemini_response(response.text)
        return result, evidence
    except Exception as e:
        return "Error", f"Failed to get or parse the model's response: {str(e)}"


def break_down_text(text: str) -> List[str]:
    """Split the input into independently checkable factual statements."""
    prompt = """Break down the following text into a list of individual factual statements that can be independently verified. Return only a Python list of strings.
Example input: "The Eiffel Tower, built in 1889, is 324 meters tall and located in Paris, France."
Example output: ["The Eiffel Tower was built in 1889", "The Eiffel Tower is 324 meters tall", "The Eiffel Tower is located in Paris, France"]
Now break down the following text:
"""
    model = genai.GenerativeModel('gemini-1.5-pro')
    response = model.generate_content(prompt + text)
    # The model may wrap its list in a ```python fence; fall back to the raw text if not.
    code = extract_python_code(response.text) or response.text
    try:
        return ast.literal_eval(code) if code else []
    except (ValueError, SyntaxError):
        st.error(f"Failed to parse the breakdown response: {response.text}")
        return []


def main():
    st.title("Fact Checker")
    configure_genai()

    text = st.text_area('Paste the text to fact check (preferably about facts before 2021)', height=150)

    if st.button("Check Facts"):
        if not text:
            st.warning("Please enter some text to fact-check.")
            return

        statements = break_down_text(text)
        if not statements:
            st.error("Failed to break down the text into checkable statements. Please try rephrasing your input.")
            return

        st.subheader("Fact Checking Results:")
        for statement in statements:
            with st.expander(statement):
                with st.spinner('Checking...'):
                    result, evidence = get_gemini_response(statement)
                    if result.lower() == "true":
                        st.success(f"Likely True: {evidence}")
                    elif result.lower() == "false":
                        st.error(f"Likely False: {evidence}")
                    elif result.lower() == "unsure":
                        st.warning(f"Uncertain: {evidence}")
                    else:
                        st.error(f"Error in fact-checking: {evidence}")
            time.sleep(3)  # Delay to avoid rate limiting


if __name__ == "__main__":
    main()
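
# To run this app locally (a sketch; assumes the packages below are installed and
# you have a valid Gemini API key; the key value shown is a placeholder):
#   pip install streamlit google-generativeai
#   export SECRET_KEY="your-gemini-api-key"
#   streamlit run app.py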