quocdat25 committed
Commit: fae0e8a
1 Parent(s): 75d6b68

Upload folder using huggingface_hub

Files changed (3):
  1. README.md +2 -8
  2. main.py +48 -0
  3. requirements.txt +13 -0
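The commit message indicates the Space's folder was pushed with the huggingface_hub library. As context only, here is a minimal sketch of how such an upload is typically done with HfApi.upload_folder; the local folder path and Space id below are illustrative assumptions, not values taken from this commit:

from huggingface_hub import HfApi

api = HfApi()  # token is picked up from the cached login or the HF_TOKEN env var

# Hypothetical folder path and Space id, for illustration only.
api.upload_folder(
    folder_path="./guess_what_I_am_thinking",      # assumed local folder
    repo_id="quocdat25/guess_what_I_am_thinking",  # assumed Space id
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)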
README.md CHANGED
@@ -1,12 +1,6 @@
 ---
-title: Guess What I Am Thinking
-emoji: 👁
-colorFrom: blue
-colorTo: blue
+title: guess_what_I_am_thinking
+app_file: main.py
 sdk: gradio
 sdk_version: 4.26.0
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
main.py ADDED
@@ -0,0 +1,48 @@
+from langchain_community.llms import HuggingFaceEndpoint
+from langchain.prompts import PromptTemplate
+from langchain.schema import AIMessage, HumanMessage
+from langchain.chains import LLMChain
+import gradio as gr
+import os
+
+from dotenv import load_dotenv
+
+# Load HUGGINGFACEHUB_API_TOKEN (and any other secrets) from a local .env file.
+load_dotenv()
+
+# Remote Hugging Face inference endpoint for the instruct-tuned Mistral 7B model.
+repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
+llm = HuggingFaceEndpoint(
+    repo_id=repo_id,
+    # huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
+)
+
+# Prompt for the guessing game: the bot is only ever "thinking" about the Eras Tour.
+template = """You're a good chatbot, and you're thinking only about the Eras Tour.
+You're playing a game with the user: they have to guess what you're thinking. If their text doesn't contain words like Taylor Swift or Eras Tour, give them a short, funny response like: Bitch!. User: {question}
+Answer: ."""
+prompt = PromptTemplate.from_template(template=template)
+llm_chain = LLMChain(llm=llm, prompt=prompt)
+
+
+def predict(message, history):
+    # Only the latest message is sent to the chain; the commented-out lines are
+    # the unused variant that would replay the full Gradio chat history.
+    history_langchain_format = []
+    # for human, ai in history:
+    #     history_langchain_format.append(HumanMessage(content=human))
+    #     history_langchain_format.append(AIMessage(content=ai))
+    # history_langchain_format.append(HumanMessage(content=message))
+    # gpt_response = llm(history_langchain_format)
+    response = llm_chain.invoke(message)['text']
+    return response
+
+
+gr.ChatInterface(predict).launch()
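main.py imports AIMessage and HumanMessage and carries a commented-out block that would replay the chat history, but the committed predict() only forwards the latest message. As a non-authoritative sketch of what that history-aware path could look like, assuming langchain_community's ChatHuggingFace wrapper around the same endpoint (not part of this commit):

from langchain_community.chat_models import ChatHuggingFace
from langchain.schema import AIMessage, HumanMessage

# Assumption: reuse the `llm` HuggingFaceEndpoint defined in main.py above.
chat_model = ChatHuggingFace(llm=llm)

def predict_with_history(message, history):
    # Convert Gradio's (user, assistant) pairs into LangChain chat messages.
    messages = []
    for human, ai in history:
        messages.append(HumanMessage(content=human))
        messages.append(AIMessage(content=ai))
    messages.append(HumanMessage(content=message))
    # invoke() returns an AIMessage; its .content holds the model's reply text.
    return chat_model.invoke(messages).content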
requirements.txt ADDED
@@ -0,0 +1,13 @@
+langchain
+crewai
+wolframalpha
+duckduckgo-search
+crewai[tools]
+beautifulsoup4
+requests
+lxml
+langchain-community
+huggingface_hub
+google-api-python-client
+google-auth-httplib2
+google-auth-oauthlib