quocdat25 committed
Commit 3ecfb12
1 Parent(s): 9c17e9c

Upload folder using huggingface_hub

.ipynb_checkpoints/test_gradio-checkpoint.py CHANGED
@@ -1,24 +1,41 @@
- # import gradio as gr
-
- # def greet(name, intensity):
- #     return "Hello, " + name + "!" * int(intensity)
-
- # def llm_inference(next):
-
-
- # demo = gr.Interface(
- #     fn=greet,
- #     inputs=["text"],
- #     outputs=["text"],
- # )
-
- # demo.launch(share=True)
-
- import random
-
- def random_response(message, history):
-     return random.choice(["Yes", "No"])
-
- import gradio as gr
-
- gr.ChatInterface(random_response).launch()
+ from langchain_community.llms import HuggingFaceEndpoint
+ from langchain.chains import LLMChain
+ from langchain.prompts import PromptTemplate
+ from langchain.schema import AIMessage, HumanMessage
+ import gradio as gr
+ import os
+
+ from dotenv import load_dotenv
+
+ # Read HUGGINGFACEHUB_API_TOKEN from a local .env file.
+ load_dotenv()
+ HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
+
+ repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
+ llm = HuggingFaceEndpoint(
+     repo_id=repo_id,
+     huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
+ )
+
+ template = """You're a good chatbot. Answer this request: {question}
+ Answer: Let's think step by step."""
+ prompt = PromptTemplate.from_template(template=template)
+
+ # The chain must be built after the prompt is defined.
+ llm_chain = LLMChain(llm=llm, prompt=prompt)
+
+ question = "What is Gradio?"  # placeholder question for a quick smoke test
+ print(llm_chain.invoke(question)['text'])
+
+
+ def predict(message, history):
+     history_langchain_format = []
+     # for human, ai in history:
+     #     history_langchain_format.append(HumanMessage(content=human))
+     #     history_langchain_format.append(AIMessage(content=ai))
+     # history_langchain_format.append(HumanMessage(content=message))
+     # gpt_response = llm(history_langchain_format)
+     response = llm_chain.invoke(message)['text']
+     return response
+
+ gr.ChatInterface(predict).launch()

test_gradio.py CHANGED
@@ -1,24 +1,41 @@
- # import gradio as gr
-
- # def greet(name, intensity):
- #     return "Hello, " + name + "!" * int(intensity)
-
- # def llm_inference(next):
-
-
- # demo = gr.Interface(
- #     fn=greet,
- #     inputs=["text"],
- #     outputs=["text"],
- # )
-
- # demo.launch(share=True)
-
- import random
-
- def random_response(message, history):
-     return random.choice(["Yes", "No"])
-
- import gradio as gr
-
- gr.ChatInterface(random_response).launch()
+ from langchain_community.llms import HuggingFaceEndpoint
+ from langchain.chains import LLMChain
+ from langchain.prompts import PromptTemplate
+ from langchain.schema import AIMessage, HumanMessage
+ import gradio as gr
+ import os
+
+ from dotenv import load_dotenv
+
+ # Read HUGGINGFACEHUB_API_TOKEN from a local .env file.
+ load_dotenv()
+ HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
+
+ repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
+ llm = HuggingFaceEndpoint(
+     repo_id=repo_id,
+     huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
+ )
+
+ template = """You're a good chatbot. Answer this request: {question}
+ Answer: Let's think step by step."""
+ prompt = PromptTemplate.from_template(template=template)
+
+ # The chain must be built after the prompt is defined.
+ llm_chain = LLMChain(llm=llm, prompt=prompt)
+
+ question = "What is Gradio?"  # placeholder question for a quick smoke test
+ print(llm_chain.invoke(question)['text'])
+
+
+ def predict(message, history):
+     history_langchain_format = []
+     # for human, ai in history:
+     #     history_langchain_format.append(HumanMessage(content=human))
+     #     history_langchain_format.append(AIMessage(content=ai))
+     # history_langchain_format.append(HumanMessage(content=message))
+     # gpt_response = llm(history_langchain_format)
+     response = llm_chain.invoke(message)['text']
+     return response
+
+ gr.ChatInterface(predict).launch()
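
Note: the uploaded script expects HUGGINGFACEHUB_API_TOKEN to be defined in a local .env file, which load_dotenv() reads at startup. A minimal sketch of that setup, assuming the variable name used in the script (the token value and the assert are illustrative, not part of the commit):

    # .env (one line, placed next to test_gradio.py):
    # HUGGINGFACEHUB_API_TOKEN=hf_your_token_here
    import os
    from dotenv import load_dotenv

    load_dotenv()  # populates os.environ from the .env file
    assert os.getenv("HUGGINGFACEHUB_API_TOKEN"), "HUGGINGFACEHUB_API_TOKEN missing from .env"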