Francisco Zanartu
committed
Commit · fe89393
1 Parent(s): 70ded33
Change 3rd layer prompt ## FALLACY from in-context learning to dialog simulation
Browse files
- utils/core.py +11 -12
- utils/templates.py +16 -21
utils/core.py
CHANGED
@@ -49,6 +49,7 @@ class HamburgerStyle:
                 "use_cache": False,
             },
         )
+        self.chat_model = ChatHuggingFace(llm=self.llm)
         self.flicc_model = "fzanartu/flicc"
         self.card_model = "crarojasca/BinaryAugmentedCARDS"
         self.semantic_textual_similarity = "sentence-transformers/all-MiniLM-L6-v2"
@@ -61,10 +62,8 @@ class HamburgerStyle:
         ## FACT: ReAct
         prompt = REACT

-        chat_model = ChatHuggingFace(llm=self.llm)
-
         # define the agent
-        chat_model_with_stop = chat_model.bind(stop=["\nObservation"])
+        chat_model_with_stop = self.chat_model.bind(stop=["\nObservation"])
         agent = (
             {
                 "input": lambda x: x["input"],
@@ -73,7 +72,7 @@ class HamburgerStyle:
                 ),
             }
             | prompt
-            | chat_model
+            | self.chat_model
             | ReActJsonSingleInputOutputParser()
         )

@@ -105,13 +104,13 @@ class HamburgerStyle:
         ## FALLACY: Fallacy

         # 1 predict fallacy label in FLICC taxonomy
-        fallacy_label = self.endpoint_query(
+        detected_fallacy = self.endpoint_query(
             model=self.flicc_model, payload=misinformation
         )[0][0].get("label")
-        fallacy_definition = DEFINITIONS.get(fallacy_label)
+        fallacy_definition = DEFINITIONS.get(detected_fallacy)

         # 2 get all examples with the same label
-        claims = FALLACY_CLAIMS.get(fallacy_label, None)
+        claims = FALLACY_CLAIMS.get(detected_fallacy, None)

         # 3 get cosine similarity for all claims and myth
         example_myths = self.endpoint_query(
@@ -125,18 +124,18 @@ class HamburgerStyle:
         example_response = DEBUNKINGS.get(claims[max_similarity])
         fact = re.findall(r"## FALLACY:.*?(?=##)", example_response, re.DOTALL)[
             0
-        ] # get only the fallacy layer
+        ] # get only the fallacy layer from the example.
         fact = fact.replace("## FALLACY:", "")

         prompt = INCONTEXT
-        chain = prompt | self.
+        chain = prompt | self.chat_model
         return chain.invoke(
             {
                 "misinformation": misinformation,
-                "
-                "example_response": fact,
-                "fallacy": fallacy_label,
+                "detected_fallacy": detected_fallacy,
                 "fallacy_definition": fallacy_definition,
+                "Response": fact,
+                "example_myth": example_myth,
                 "factual_information": self.hamburger[1].content,
             }
         )
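For orientation, below is a minimal, hedged sketch of how the reworked FALLACY layer fits together after this change: predict the FLICC label for the myth, look up its definition, and pipe a dialog-simulation prompt into the shared chat model. The helper classify_fallacy, the FLICC_DEFINITIONS dict, the trimmed inline prompt, and the meta-llama/Llama-2-7b-chat-hf endpoint are illustrative assumptions, not the Space's actual helpers; the real code uses self.endpoint_query, DEFINITIONS, FALLACY_CLAIMS, DEBUNKINGS, and the INCONTEXT template from utils/templates.py.

# Illustrative sketch only (not the repository's exact code): classify the myth,
# look up the fallacy definition, then run a dialog-simulation prompt through a
# ChatHuggingFace model. Assumes a Hugging Face token is configured; helper names,
# the trimmed prompt, and the repo_id are assumptions made for this example.
from huggingface_hub import InferenceClient
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

# Stand-in for utils.templates.DEFINITIONS (single illustrative entry).
FLICC_DEFINITIONS = {
    "cherry picking": "focusing on selected data that confirm a position while ignoring the rest.",
}


def classify_fallacy(text: str) -> str:
    """Predict the FLICC label, analogous to self.endpoint_query(model=self.flicc_model, payload=...)."""
    scores = InferenceClient().text_classification(text, model="fzanartu/flicc")
    return max(scores, key=lambda s: s.score).label


def fallacy_layer(misinformation: str, example_myth: str, example_response: str, factual_information: str) -> str:
    detected_fallacy = classify_fallacy(misinformation)
    fallacy_definition = FLICC_DEFINITIONS.get(detected_fallacy, "")

    # Trimmed dialog-simulation prompt; the full version is the INCONTEXT template in utils/templates.py.
    prompt = PromptTemplate.from_template(
        "<s>[INST] What fallacy is contained in: {misinformation} [/INST] "
        "Your text contains {detected_fallacy} fallacy. {detected_fallacy} fallacy is {fallacy_definition} </s>"
        "<s>[INST] What is the factual evidence surrounding this misinformation?[/INST] {factual_information}</s>"
        "<s>[INST] Respond as in the example.\n"
        "Misinformation: {example_myth}\nResponse: {example_response}\n"
        "Misinformation: {misinformation}\nResponse:[/INST]"
    )
    chat_model = ChatHuggingFace(
        llm=HuggingFaceEndpoint(repo_id="meta-llama/Llama-2-7b-chat-hf", max_new_tokens=512)
    )
    chain = prompt | chat_model  # same LCEL shape as `INCONTEXT | self.chat_model`
    return chain.invoke(
        {
            "misinformation": misinformation,
            "detected_fallacy": detected_fallacy,
            "fallacy_definition": fallacy_definition,
            "example_myth": example_myth,
            "example_response": example_response,
            "factual_information": factual_information,
        }
    ).content

The keys passed to invoke must match the placeholders in whichever template is used; with a PromptTemplate, a missing variable surfaces as an error at invoke time, so the dict built in utils/core.py and the INCONTEXT placeholders in utils/templates.py need to stay in sync.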
utils/templates.py
CHANGED
@@ -32,27 +32,22 @@ Thought:{agent_scratchpad}"""
 )

 INCONTEXT = PromptTemplate.from_template(
-    """[
-
-
-
-
-
-[
-
-misinformation
-
-
-
-
-
-
-
-<<<
-misinformation: {misinformation}
-response:
->>>
-"""
+    """<s>[INST] <<SYS>>
+You are a senior climate analyst, an expert in identifying and responding to climate change misinformation.
+<</SYS>>
+What fallacy is contained in the following misinformation?
+misinformation: {misinformation} [/INST]
+Your text contains {detected_fallacy} fallacy. {detected_fallacy} fallacy is {fallacy_definition} </s>
+<s>[INST] What is the factual evidence surrounding this misinformation?[/INST]
+{factual_information}</s>
+<s>[INST] Provide a precise and concise response to this climate change misinformation.
+1. Identify the logical or argumentative fallacy in 40 words or fewer.
+2. Explicitly name the fallacy, explain why it's incorrect, and link it to factual evidence showing how it distorts reality.
+Consider the following example before providing your answer:
+Misinformation: {example_myth}
+Response: {example_response}
+Misinformation: {misinformation}
+Response:[/INST]"""
 )

 SUMMARIZATION = PromptTemplate.from_template(