import os
import re
import json

import gradio as gr
import openai
from langchain import HuggingFaceHub, PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from transformers import pipeline

# API credentials are read from the environment.
openai.api_key = os.environ["OPENAI_API_TOKEN"]
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]

# Back-end 1: local transformers sentiment-analysis pipeline.
classifier = pipeline("sentiment-analysis")

# Back-end 2: Falcon-7b-instruct served through the Hugging Face Hub.
repo_id = "tiiuae/falcon-7b-instruct"
llm = HuggingFaceHub(
    huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
    repo_id=repo_id,
    model_kwargs={"temperature": 0.1, "max_new_tokens": 300},
)


class Sentiment(BaseModel):
    label: str = Field(description="Is the above sentiment 'Good' or 'Bad'?")


def pipeline_sentiment(text):
    """Classify with the transformers pipeline and return (good, bad) scores."""
    out = classifier(text)
    print(out)
    if out[0]["label"] == "NEGATIVE":
        bad_label = out[0]["score"]
        good_label = 1 - bad_label
    else:  # "POSITIVE"
        good_label = out[0]["score"]
        bad_label = 1 - good_label
    print(good_label, bad_label)
    return good_label, bad_label


def falcon_sentiment(text):
    """Ask Falcon-7b-instruct for a Good/Bad label and parse its answer."""
    parser = PydanticOutputParser(pydantic_object=Sentiment)
    prompt = PromptTemplate(
        template="Classify the following review as Good or Bad.\n{format_instructions}\n{query}\n",
        input_variables=["query"],
        partial_variables={"format_instructions": parser.get_format_instructions()},
    )
    _input = prompt.format_prompt(query=text)
    output = llm(_input.to_string())
    print("Sentiment :", output)
    try:
        # Keep only the label string from the parsed Pydantic object.
        parsed_output = parser.parse(output).label
        print("parsed_output", parsed_output)
    except Exception:
        # Fall back to a regex search when the model ignores the format instructions.
        match = re.search(r"\b(Good|Bad)\b", output)
        print(match)
        print(match.group(0))
        parsed_output = match.group(0)
    return parsed_output == "Good", parsed_output == "Bad"


def Find_sentiment(sentence):
    """Label the sentence as Good or Bad through a GPT-4 function call."""
    system_msg = {
        "role": "system",
        "content": "You are an AI that helps me label sentences based on multiple features",
    }
    # Initialize messages array
    messages = [system_msg]
    message = f"Sentence: {sentence}. Based on the sentence, find the best sentiment to describe it."
    messages.append({"role": "user", "content": message})
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4-0613",
            messages=messages,
            functions=[
                {
                    "name": "set_sentiment",
                    "description": "Set the sentiment of the sentence.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "sentiment": {
                                "type": "string",
                                "description": "A sentiment, either 'Good' or 'Bad'",
                            },
                        },
                        "required": ["sentiment"],
                    },
                }
            ],
            function_call={"name": "set_sentiment"},
        )
        assistant_msg = response["choices"][0]["message"]
        response_options = assistant_msg.to_dict()["function_call"]["arguments"]
        options = json.loads(response_options)
        return options["sentiment"]
    except openai.error.OpenAIError as e:
        print("OpenAI API error:", e)
        return


def gpt4_sentiment(text):
    """Classify with GPT-4 function calling and return (is_good, is_bad)."""
    out = Find_sentiment(text)
    return out == "Good", out == "Bad"


def sentence_builder(Model, Text):
    """Dispatch the review text to the selected model and return label scores."""
    if Model == "Sentiment analysis pipeline":
        good_label, bad_label = pipeline_sentiment(Text)
    elif Model == "Falcon-7b-instruct":
        good_label, bad_label = falcon_sentiment(Text)
    elif Model == "GPT-4 Function call":
        good_label, bad_label = gpt4_sentiment(Text)
    print(Model, Text)
    print({"Good": good_label, "Bad": bad_label})
    return {"Good": good_label, "Bad": bad_label}


demo = gr.Interface(
    sentence_builder,
    [
        gr.Dropdown(
            ["Sentiment analysis pipeline", "Falcon-7b-instruct", "GPT-4 Function call"],
            label="Model",
            info="Which model to use",
        ),
        gr.Textbox(
            label="Text",
            info="Review text",
            lines=2,
            value="I'm not sure about the origin of this product, it seems suspicious.",
        ),
    ],
    "label",
    examples=[
        ["Sentiment analysis pipeline", "The product broke ! Great ..."],
        ["Sentiment analysis pipeline", "Not sure if I like it or not."],
        ["Sentiment analysis pipeline", "This product is just a toy."],
        ["Sentiment analysis pipeline", "Bought a TV, received an Ipad..."],
        ["Sentiment analysis pipeline", "Could have found the same on wish.com ."],
        ["Sentiment analysis pipeline", "They did a wonderfull job at ripping us."],
    ],
)

if __name__ == "__main__":
    demo.launch()