kumshing-wilson-huang committed
Commit 18bdfd9
1 Parent(s): e4c6ebe
Files changed (2)
  1. app.py +35 -15
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,20 +1,40 @@
-from langchain.chat_models import ChatOpenAI
-from langchain.schema import AIMessage, HumanMessage
-import openai
-import os
 import gradio as gr
+from transformers import pipeline
 
-os.environ["OPENAI_API_KEY"] = "sk-proj-KyHyIzEDLKe94DhrSq6LT3BlbkFJynj2NRPnnRwdDSV3XaZY" # Replace with your key
+pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
 
-llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')
+def predict(input_img):
+    predictions = pipeline(input_img)
+    return input_img, {p["label"]: p["score"] for p in predictions}
 
-def predict(message, history):
-    history_langchain_format = []
-    for human, ai in history:
-        history_langchain_format.append(HumanMessage(content=human))
-        history_langchain_format.append(AIMessage(content=ai))
-    history_langchain_format.append(HumanMessage(content=message))
-    gpt_response = llm(history_langchain_format)
-    return gpt_response.content
+gradio_app = gr.Interface(
+    predict,
+    inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
+    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
+    title="Hot Dog? Or Not?",
+)
 
-gr.ChatInterface(predict).launch()
+if __name__ == "__main__":
+    gradio_app.launch()
+
+
+# from langchain.chat_models import ChatOpenAI
+# from langchain.schema import AIMessage, HumanMessage
+# import openai
+# import os
+# import gradio as gr
+#
+# os.environ["OPENAI_API_KEY"] = "sk-proj-KyHyIzEDLKe94DhrSq6LT3BlbkFJynj2NRPnnRwdDSV3XaZY" # Replace with your key
+#
+# llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')
+#
+# def predict(message, history):
+#     history_langchain_format = []
+#     for human, ai in history:
+#         history_langchain_format.append(HumanMessage(content=human))
+#         history_langchain_format.append(AIMessage(content=ai))
+#     history_langchain_format.append(HumanMessage(content=message))
+#     gpt_response = llm(history_langchain_format)
+#     return gpt_response.content
+#
+# gr.ChatInterface(predict).launch()
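
Note on the new predict(): the dict comprehension reshapes the image-classification pipeline's list-of-dicts output into the {label: score} mapping that gr.Label expects. A minimal illustration of that reshaping, using made-up placeholder scores rather than real model output:

# Shape of the pipeline output that predict() consumes (scores are placeholders).
predictions = [
    {"label": "hot dog", "score": 0.93},
    {"label": "not hot dog", "score": 0.07},
]
result = {p["label"]: p["score"] for p in predictions}
# result == {"hot dog": 0.93, "not hot dog": 0.07}, which gr.Label renders directly.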
requirements.txt CHANGED
@@ -1,4 +1,4 @@
 transformers
 torch
 openai
-langchain-community
+langchain
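
For a quick local check of the new app, a minimal sketch: it assumes the dependencies above plus pillow are installed, that app.py is importable from the working directory, and that sample.jpg is any image you supply (the file name and the printed labels are assumptions, not part of the commit). Importing app.py downloads the model on first use and builds the Gradio interface, but does not launch it, since launch() sits behind the __main__ guard.

# Local sanity check of predict() from the new app.py (sketch; file name is hypothetical).
from PIL import Image

from app import predict  # module import builds gr.Interface but does not call launch()

img = Image.open("sample.jpg")        # any local image; the path is an assumption
processed_img, scores = predict(img)  # returns the original image and a {label: score} dict
print(scores)                         # e.g. {"hot dog": 0.97, "not hot dog": 0.03}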