Alexander Casimir Fischer committed
Commit: 3d8a869
1 Parent(s): 95330ab

app.py CHANGED
@@ -1,18 +1,20 @@
 #importing dependencies
 import os
-from keys import
+from keys import token
 
 import streamlit as st
-from
+from transformers import pipeline
+from langchain.llms import HuggingFaceHub
 from langchain.prompts import PromptTemplate
 from langchain.chains import LLMChain
 from langchain.tools import WikipediaQueryRun
 from langchain.utilities import WikipediaAPIWrapper
 
+
 wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
 
-os.environ["OPENAI_API_KEY"] = apikey
 os.environ["HUGGINGFACEHUB_API_TOKEN"] = token
+repo_id = "tiiuae/falcon-40b"
 
 #app framework
 st.title("🛕Gurubot AI")
@@ -95,12 +97,16 @@ yoda_grammar = PromptTemplate(
 #llms
 #llm = OpenAI(temperature=0.9)
 #llm_facts = OpenAI(temperature=0)
-llm = HuggingFaceHub
-
-
-
-
-
+llm = HuggingFaceHub(
+    repo_id=repo_id, model_kwargs={"temperature": 0.9, "max_length": 500}
+)
+llm_facts = HuggingFaceHub(
+    repo_id=repo_id, model_kwargs={"temperature": 0.0, "max_length": 500}
+)
+main_chain = LLMChain(prompt=context, llm=llm)
+yoda_grammar_chain = LLMChain(prompt=yoda_grammar, llm=llm_facts)
+keyword_chain = LLMChain(prompt=find_keyword, llm=llm_facts)
+wiki_chain = LLMChain(prompt=context, llm=llm_facts)
 
 
 #answer on screen if prompt is entered
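Note: the keys module imported in the first hunk is not part of this commit. A minimal sketch of what it presumably contains, with the token value left elided:

    # keys.py -- assumed shape, not included in the commit
    token = "hf_..."  # personal Hugging Face Hub access token, kept out of version control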
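For orientation, a minimal sketch of how the four chains and the wikipedia tool from this commit might be wired together under the "#answer on screen if prompt is entered" comment. The text_input label and the single-input .run() calls are assumptions: the context, find_keyword, and yoda_grammar PromptTemplates are defined outside this diff, and wiki_chain is omitted because the inputs of its context template are not shown.

    # Hypothetical wiring, not part of this commit
    user_prompt = st.text_input("Ask the guru a question")

    if user_prompt:
        # Distill the question to a search keyword, then query Wikipedia
        keyword = keyword_chain.run(user_prompt)
        wiki_research = wikipedia.run(keyword)
        with st.expander("Wikipedia research"):
            st.write(wiki_research)

        # The creative model (temperature 0.9) drafts the answer; the
        # deterministic model (temperature 0.0) recasts it in Yoda grammar
        answer = main_chain.run(user_prompt)
        yoda_answer = yoda_grammar_chain.run(answer)

        st.write(yoda_answer)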