"""Gradio demo: classify an uploaded/webcam image as hot dog or not."""

import gradio as gr
from transformers import pipeline

# NOTE(review): a large commented-out LangChain/OpenAI chatbot block was removed
# from this file — it contained a hard-coded OpenAI API key. If that key was
# ever real, rotate/revoke it immediately; never commit secrets to source.

# Bind the model under its own name — do NOT rebind the imported `pipeline`
# factory with its return value (the original shadowed it).
classifier = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")


def predict(input_img):
    """Run the hot-dog classifier on *input_img*.

    Parameters
    ----------
    input_img : PIL.Image.Image
        Image provided by the Gradio ``Image`` input (``type="pil"``).

    Returns
    -------
    tuple
        ``(input_img, scores)`` where ``scores`` maps each predicted label
        to its confidence, as expected by the ``gr.Label`` output.
    """
    predictions = classifier(input_img)
    return input_img, {p["label"]: p["score"] for p in predictions}


gradio_app = gr.Interface(
    predict,
    inputs=gr.Image(
        label="Select hot dog candidate",
        sources=["upload", "webcam"],
        type="pil",
    ),
    outputs=[
        gr.Image(label="Processed Image"),
        gr.Label(label="Result", num_top_classes=2),
    ],
    title="Hot Dog? Or Not?",
)

if __name__ == "__main__":
    gradio_app.launch()