# NOTE(review): the three lines below are paste artifacts (not code);
# commented out so the module parses:
# Spaces:
# Runtime error
# Runtime error
from environs import Env

# Load configuration: prefer the developer's local .env.llm file, then fall
# back to whatever .env the repository/CI provides.
env = Env()
try:
    env.read_env("/Users/kanasani/Documents/api_keys/.env.llm")
    print("Using local .env.llm file")
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are not
    # swallowed; any failure to read the local file falls back to the default.
    env.read_env()
    print(".env file from repo secrets is used")

import openai

# Configure the module-global openai client for Azure OpenAI.
openai.api_type = env("API_TYPE")
openai.api_base = env("API_BASE")
openai.api_version = env("API_VERSION")
openai.api_key = env("AZURE_OPENAI_KEY")
def check_password():
    import streamlit as st

    """Returns `True` if the user had the correct password."""

    def password_entered():
        """Checks whether a password entered by the user is correct."""
        is_correct = st.session_state["password"] == env("st_password")
        st.session_state["password_correct"] = is_correct
        if is_correct:
            del st.session_state["password"]  # don't store password

    # First run: no verdict recorded yet, so just render the password field.
    if "password_correct" not in st.session_state:
        st.text_input(
            "Password", type="password", on_change=password_entered, key="password"
        )
        return False

    # A previous attempt failed: render the field again plus an error banner.
    if not st.session_state["password_correct"]:
        st.text_input(
            "Password", type="password", on_change=password_entered, key="password"
        )
        st.error("π Password incorrect")
        return False

    # Password was correct.
    return True
def submit_prompt_to_gpt(input_list_of_prompts):
    """Send a chat-message list to the configured Azure OpenAI deployment.

    Args:
        input_list_of_prompts: list of chat messages (role/content dicts).

    Returns:
        The assistant's reply text from the first choice.
    """
    completion = openai.ChatCompletion.create(
        engine=env("DEPLOYMENT_NAME"),
        messages=input_list_of_prompts,
        temperature=1,
        max_tokens=256,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    return completion["choices"][0]["message"]["content"]
def get_hf_embeddings():
    """Build a HuggingFace Hub embeddings client (all-mpnet-base-v2)."""
    from langchain.embeddings import HuggingFaceHubEmbeddings

    return HuggingFaceHubEmbeddings(
        repo_id="sentence-transformers/all-mpnet-base-v2",
        task="feature-extraction",
        huggingfacehub_api_token=env("HUGGINGFACEHUB_API_TOKEN"),
    )
def get_openAI_chat_model():
    """Return an AzureChatOpenAI chat model configured from environment settings.

    All connection settings (deployment, API base/version/type/key) are read
    from the module-level `env` loader.
    """
    # The former function-local `import openai` was unused here (the module
    # already imports and configures openai at the top level), so it is removed.
    from langchain.chat_models.azure_openai import AzureChatOpenAI

    chat_model = AzureChatOpenAI(
        deployment_name=env("DEPLOYMENT_NAME"),
        openai_api_version=env("API_VERSION"),
        openai_api_base=env("API_BASE"),
        openai_api_type=env("API_TYPE"),
        openai_api_key=env("AZURE_OPENAI_KEY"),
        verbose=True,
    )
    return chat_model
def get_hf_model(repo_id="google/flan-t5-xxl"):
    """Return a HuggingFaceHub LLM for *repo_id* (default: flan-t5-xxl)."""
    from langchain import HuggingFaceHub

    generation_kwargs = {"temperature": 0.1, "max_length": 1024}
    return HuggingFaceHub(
        repo_id=repo_id,
        model_kwargs=generation_kwargs,
        huggingfacehub_api_token=env("HUGGINGFACEHUB_API_TOKEN"),
    )
def get_local_gpt4_model(model="GPT4All-13B-snoozy.ggmlv3.q4_0.bin"):
    """Return a local GPT4All LLM loaded from the .models/ directory.

    NOTE(review): the prefix is ".models/" (hidden dir), not "./models/" —
    presumably intentional; confirm against where the weights actually live.
    """
    from langchain.llms import GPT4All

    model_path = ".models/" + model
    return GPT4All(model=model_path, verbose=True)
def set_LangChain_tracking(project="Chat with your PDF"):
    """Point LangChain tracing at *project* via the LANGCHAIN_PROJECT env var."""
    import os

    os.environ["LANGCHAIN_PROJECT"] = project
    print("LangChain tracking is set to : ", project)
def unset_LangChain_tracking():
    """Remove every LangChain tracing variable from the process environment.

    Safe to call repeatedly; missing variables are ignored.
    """
    import os

    for var_name in (
        "LANGCHAIN_API_KEY",
        "LANGCHAIN_TRACING_V2",
        "LANGCHAIN_ENDPOINT",
        "LANGCHAIN_PROJECT",
    ):
        os.environ.pop(var_name, None)
    print("LangChain tracking is removed .")