Francisco Zanartu committed
Commit 0267b0d
Parent: 7f1f7ae

add palm methods

Files changed (2)
  1. app.py +2 -1
  2. context/palm.py +149 -0
app.py CHANGED
@@ -6,6 +6,7 @@ Module for detecting fallacies in text.
 import os
 import gradio as gr
 from utils.core import HamburgerStyle
+from context.palm import rebuttal_generator
 
 rebuttal = HamburgerStyle()
 
@@ -29,7 +30,7 @@ gpt4 = gr.Interface(
     description="Single, comprehensive prompt which assigns GPT-4 the role of a climate change analyst as an expert persona to debunk misinformation",
 )
 palm = gr.Interface(
-    fn=textgen,
+    fn=rebuttal_generator,
     inputs=gr.Textbox(
         label="input myth", lines=4, placeholder="climate change misinformation"
     ),
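
With this change, the palm tab calls the new rebuttal_generator helper from context/palm.py instead of textgen. A minimal sketch of how that interface might be wired up on its own, assuming a plain Textbox output and a standard launch call (neither appears in this hunk, so both are assumptions):

import gradio as gr
from context.palm import rebuttal_generator

# Hypothetical stand-alone version of the palm tab; app.py combines it with the other interfaces.
palm = gr.Interface(
    fn=rebuttal_generator,
    inputs=gr.Textbox(
        label="input myth", lines=4, placeholder="climate change misinformation"
    ),
    outputs=gr.Textbox(label="rebuttal"),  # assumed output component
)

if __name__ == "__main__":
    palm.launch()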
context/palm.py ADDED
@@ -0,0 +1,149 @@
+"""
+Module for detecting fallacies in text.
+
+Functions:
+- rebuttal_generator: Detects fallacies in a text input by utilizing models for fallacy
+detection and semantic textual similarity, and generates a rebuttal for the fallacious claim.
+- query: Sends a query to a specified API endpoint with the provided payload and returns
+the response.
+
+Dependencies:
+- os: Provides a portable way of using operating system dependent functionality.
+- json: Provides functions for encoding and decoding JSON data.
+- requests: Allows sending HTTP requests easily.
+- langchain_google_genai: Wrapper for Google Generative AI language models.
+- auxiliar: Contains auxiliary data used in the fallacy detection process.
+
+Environment Variables:
+- HF_API_KEY: API key for accessing Hugging Face model APIs.
+- GOOGLE_API_KEY: API key for accessing Google APIs.
+
+Constants:
+- FLICC_MODEL: API endpoint for the FLICC model used for fallacy detection.
+- CARDS_MODEL: API endpoint for the CARDS model used for fallacy detection.
+- SEMANTIC_TEXTUAL_SIMILARITY: API endpoint for the model used for semantic textual similarity.
+
+Global Variables:
+- hf_api_key: API key for accessing Hugging Face model APIs.
+- google_key: API key for accessing Google APIs.
+- llm: Instance of the GoogleGenerativeAI class for text generation.
+- similarity_template: Template for generating prompts for similarity comparison.
+- FALLACY_CLAIMS: Dictionary containing fallacy labels and corresponding claims.
+- DEBUNKINGS: Dictionary containing debunkings for fallacy claims.
+- DEFINITIONS: Dictionary containing definitions for fallacy labels.
+"""
+
+import os
+import json
+import requests
+from langchain_google_genai import GoogleGenerativeAI
+from langchain.prompts import PromptTemplate
+from langchain.chains import LLMChain
+from auxiliar import (
+    FALLACY_CLAIMS,
+    DEBUNKINGS,
+    DEFINITIONS,
+    SIMILARITY_TEMPLATE,
+)
+
+hf_api_key = os.environ["HF_API_KEY"]
+google_key = os.environ["GOOGLE_API_KEY"]
+
+llm = GoogleGenerativeAI(
+    model="models/text-bison-001",
+    google_api_key=google_key,
+    temperature=0,
+    # safety_settings=safety_settings,
+)
+
+similarity_template = PromptTemplate.from_template(SIMILARITY_TEMPLATE)
+
+
+def query(payload, api_url, api_token=hf_api_key):
+    """
+    Sends a query to the specified API endpoint with the provided payload.
+
+    Args:
+        payload (dict): The payload to be sent to the API.
+        api_url (str): The URL of the API endpoint.
+        api_token (str, optional): The API token used for authentication. Defaults to hf_api_key.
+
+    Returns:
+        dict: The JSON response from the API.
+
+    Raises:
+        ValueError: If the response content cannot be decoded as UTF-8.
+
+    Example:
+        >>> query({"text": "example text"}, "https://api.example.com")
+        {'status': 'success', 'result': 'example result'}
+    """
+    headers = {"Authorization": f"Bearer {api_token}"}
+    options = {"use_cache": False, "wait_for_model": True}
+    payload = {"inputs": payload, "options": options}
+    response = requests.post(api_url, headers=headers, json=payload)
+    return json.loads(response.content.decode("utf-8"))
+
+
+FLICC_MODEL = "https://api-inference.huggingface.co/models/fzanartu/flicc"
+CARDS_MODEL = (
+    "https://api-inference.huggingface.co/models/crarojasca/BinaryAugmentedCARDS"
+)
+SEMANTIC_TEXTUAL_SIMILARITY = (
+    "https://api-inference.huggingface.co/models/sentence-transformers/all-MiniLM-L6-v2"
+)
+
+
+def rebuttal_generator(text):
+    """
+    Generates a rebuttal for a text containing a detected fallacy.
+
+    This function detects fallacies in the input text and generates a rebuttal
+    for the fallacious claim.
+
+    Args:
+        text (str): The input text containing a potentially fallacious claim.
+
+    Returns:
+        str: A rebuttal for the fallacious claim in the input text.
+
+    Raises:
+        ValueError: If no similar sentence is found.
+
+    Example:
+        >>> rebuttal_generator("This is a text containing a fallacy.")
+        'A rebuttal to the fallacy of [fallacy label]: [rebuttal]'
+    """
+
+    response = query(text, api_url=CARDS_MODEL)
+    if response[0][0].get("label") == "Contrarian":
+        response = query(text, api_url=FLICC_MODEL)
+        label = response[0][0].get("label")
+        claims = FALLACY_CLAIMS.get(label, None)
+
+        if claims:
+            data = query(
+                {"source_sentence": text, "sentences": claims},
+                api_url=SEMANTIC_TEXTUAL_SIMILARITY,
+            )
+            max_similarity = data.index(max(data))
+            chain = LLMChain(llm=llm, prompt=similarity_template, verbose=True)
+            result = chain.run(
+                {
+                    "claim": claims[max_similarity],
+                    "fallacy": label,
+                    "definition": DEFINITIONS.get(label),
+                    "example": DEBUNKINGS.get(claims[max_similarity]),
+                    "text": text,
+                }
+            )
+
+        else:
+            raise ValueError("No similar sentence found")
+    else:
+        result = "No fallacy has been detected in your text."
+
+    return result
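
Taken together, rebuttal_generator runs the CARDS classifier to decide whether the text is contrarian, the FLICC classifier to name the fallacy, a sentence-similarity model to pick the closest known claim, and finally the PaLM (text-bison-001) chain to write the rebuttal. A minimal usage sketch, assuming both API keys are set and that auxiliar is importable from the working directory (the sample myth is illustrative only):

import os

# The module reads both keys at import time, so they must exist first.
assert "HF_API_KEY" in os.environ and "GOOGLE_API_KEY" in os.environ

from context.palm import rebuttal_generator

myth = "The climate has always changed, so current warming is natural."
print(rebuttal_generator(myth))

Note that each call can issue up to three Hugging Face Inference API requests plus one PaLM completion, so latency depends on whether the hosted models are already loaded (wait_for_model is set to True).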