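# Gradio demo that classifies a product review as "Good" or "Bad" with three approaches:
#   1. a local transformers sentiment-analysis pipeline,
#   2. Falcon-7b-instruct on the Hugging Face Hub with a Pydantic output parser,
#   3. a GPT-4 function call returning a structured sentiment.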
import os
import re
import json
import gradio as gr
from langchain import HuggingFaceHub
from langchain.output_parsers import PydanticOutputParser
from langchain import PromptTemplate
from pydantic import BaseModel, Field
from transformers import pipeline
from typing import List
import openai

# Both API keys are read from the environment.
openai.api_key = os.environ["OPENAI_API_TOKEN"]
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
# Local transformers pipeline (defaults to a DistilBERT sentiment model).
classifier = pipeline("sentiment-analysis")
# Falcon-7B-Instruct served through the Hugging Face Hub inference API.
repo_id = "tiiuae/falcon-7b-instruct"
llm = HuggingFaceHub(
    huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
    repo_id=repo_id,
    model_kwargs={"temperature": 0.1, "max_new_tokens": 300},
)

# Structured output schema used by the Falcon output parser.
class Sentiment(BaseModel):
    label: str = Field(description="Is the above sentiment 'Good' or 'Bad'?")

def pipeline_sentiment(text):
    """Score a review with the local transformers pipeline."""
    out = classifier(text)
    print(out)
    if out[0]["label"] == "NEGATIVE":
        bad_label = out[0]["score"]
        good_label = 1 - bad_label
    else:  # "POSITIVE"
        good_label = out[0]["score"]
        bad_label = 1 - good_label
    print(good_label, bad_label)
    return good_label, bad_label

def falcon_sentiment(text):
    """Ask Falcon-7B-Instruct for a Good/Bad label, parsed into the Sentiment schema."""
    parser = PydanticOutputParser(pydantic_object=Sentiment)
    prompt = PromptTemplate(
        template="Classify the following review as Good or Bad.\n{format_instructions}\n{query}\n",
        input_variables=["query"],
        partial_variables={"format_instructions": parser.get_format_instructions()},
    )
    _input = prompt.format_prompt(query=text)
    output = llm(_input.to_string())
    print("Sentiment :", output)
    try:
        # The parser expects JSON matching the Sentiment schema, e.g. {"label": "Good"}.
        parsed_output = parser.parse(output).label
        print("parsed_output", parsed_output)
    except Exception:
        # Fall back to a plain regex when the model ignores the format instructions.
        match = re.search(r"\b(Good|Bad)\b", output)
        print(match)
        parsed_output = match.group(0) if match else ""
    return parsed_output == "Good", parsed_output == "Bad"

def Find_sentiment(sentence):
    """Label a sentence as Good or Bad through a forced GPT-4 function call."""
    system_msg = {"role": "system", "content": "You are an AI that helps me label sentences based on multiple features."}
    # Initialize messages array
    messages = [system_msg]
    message = f"Sentence: {sentence}, based on the sentence, find the best sentiment to describe this sentence."
    messages.append({"role": "user", "content": message})
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4-0613",
            messages=messages,
            functions=[
                {
                    "name": "set_sentiment",
                    "description": "Set the sentiment of the sentence.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "sentiment": {
                                "type": "string",
                                "description": "A sentiment, either 'Good' or 'Bad'",
                            },
                        },
                        "required": ["sentiment"],
                    },
                }
            ],
            # Force the model to answer through the set_sentiment function.
            function_call={"name": "set_sentiment"},
        )
        assistant_msg = response["choices"][0]["message"]
        response_options = assistant_msg.to_dict()["function_call"]["arguments"]
        options = json.loads(response_options)
        return options["sentiment"]
    except openai.error.OpenAIError as e:
        print("Some error happened here.", e)
        return

def gpt4_sentiment(text):
    out = Find_sentiment(text)
    return out == "Good", out == "Bad"


def sentence_builder(Model, Text):
    """Route the review text to the selected model and return a label/confidence dict."""
    if Model == "Sentiment analysis pipeline":
        good_label, bad_label = pipeline_sentiment(Text)
    elif Model == "Falcon-7b-instruct":
        good_label, bad_label = falcon_sentiment(Text)
    elif Model == "GPT-4 Function call":
        good_label, bad_label = gpt4_sentiment(Text)
    print(Model, Text)
    print({"Good": good_label, "Bad": bad_label})
    return {"Good": good_label, "Bad": bad_label}
demo = gr.Interface(
    sentence_builder,
    [
        gr.Dropdown(
            ["Sentiment analysis pipeline", "Falcon-7b-instruct", "GPT-4 Function call"],
            label="Model",
            info="Which model to use",
        ),
        gr.Textbox(
            label="Text",
            info="Review text",
            lines=2,
            value="I'm not sure about the origin of this product, it seems suspicious.",
        ),
    ],
    "label",
    examples=[
        ["Sentiment analysis pipeline", "The product broke! Great..."],
        ["Sentiment analysis pipeline", "Not sure if I like it or not."],
        ["Sentiment analysis pipeline", "This product is just a toy."],
        ["Sentiment analysis pipeline", "Bought a TV, received an iPad..."],
        ["Sentiment analysis pipeline", "Could have found the same on wish.com."],
        ["Sentiment analysis pipeline", "They did a wonderful job at ripping us off."],
    ],
)

if __name__ == "__main__":
    demo.launch()
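
# To run this script locally (the filename below is illustrative), both API keys must be
# exported first:
#   export OPENAI_API_TOKEN=...
#   export HUGGINGFACEHUB_API_TOKEN=...
#   python app.py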