File size: 3,662 Bytes
e6f8d33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
# Environment / credential bootstrap: prefer a developer-local .env file,
# fall back to environment variables provided by the deployment (repo secrets).
from environs import Env

env = Env()

try:
    env.read_env("/Users/kanasani/Documents/api_keys/.env.llm")
    print("Using local .env.llm file")
except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt still propagate
    env.read_env()
    print(".env file from repo secrets is used")

import openai

# Configure the global openai client for the Azure OpenAI endpoint.
openai.api_type = env("API_TYPE")
openai.api_base = env("API_BASE")
openai.api_version = env("API_VERSION")
openai.api_key = env("AZURE_OPENAI_KEY")

def check_password():
    """Returns `True` if the user had the correct password."""
    # NOTE: the docstring must be the first statement (before the import) to be
    # recognized as the function's __doc__; it previously sat after the import
    # as a no-op string expression.
    import streamlit as st

    def password_entered():
        """Checks whether a password entered by the user is correct."""
        # Compare against the password configured via the environment.
        if st.session_state["password"] == env("st_password"):
            st.session_state["password_correct"] = True
            del st.session_state["password"]  # don't store password
        else:
            st.session_state["password_correct"] = False

    if "password_correct" not in st.session_state:
        # First run, show input for password.
        st.text_input(
            "Password", type="password", on_change=password_entered, key="password"
        )
        return False
    elif not st.session_state["password_correct"]:
        # Password not correct, show input + error.
        st.text_input(
            "Password", type="password", on_change=password_entered, key="password"
        )
        st.error("😕 Password incorrect")
        return False
    else:
        # Password correct.
        return True

def submit_prompt_to_gpt(input_list_of_prompts):
    """Send a chat-message list to the configured Azure OpenAI deployment.

    Args:
        input_list_of_prompts: list of chat messages in the OpenAI
            ``{"role": ..., "content": ...}`` format.

    Returns:
        str: the assistant text from the first completion choice.
    """
    completion = openai.ChatCompletion.create(
        engine=env("DEPLOYMENT_NAME"),
        messages=input_list_of_prompts,
        temperature=1,
        max_tokens=256,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    # Only the first choice's message text is surfaced to callers.
    first_choice = completion["choices"][0]
    return first_choice["message"]["content"]


def get_hf_embeddings():
    """Return a HuggingFace Hub embeddings client (all-mpnet-base-v2)."""
    from langchain.embeddings import HuggingFaceHubEmbeddings

    # Token comes from the environment loaded at module import time.
    return HuggingFaceHubEmbeddings(
        repo_id="sentence-transformers/all-mpnet-base-v2",
        task="feature-extraction",
        huggingfacehub_api_token=env("HUGGINGFACEHUB_API_TOKEN"),
    )

def get_openAI_chat_model():
    """Create a LangChain AzureChatOpenAI chat model from the environment config.

    Returns:
        AzureChatOpenAI: chat model bound to the configured Azure deployment.
    """
    from langchain.chat_models.azure_openai import AzureChatOpenAI

    # The previous local `import openai` was unused here — AzureChatOpenAI
    # receives every credential explicitly below — so it has been removed.
    chat_model = AzureChatOpenAI(
        deployment_name=env("DEPLOYMENT_NAME"),
        openai_api_version=env("API_VERSION"),
        openai_api_base=env("API_BASE"),
        openai_api_type=env("API_TYPE"),
        openai_api_key=env("AZURE_OPENAI_KEY"),
        verbose=True,
    )
    return chat_model

def get_hf_model(repo_id="google/flan-t5-xxl"):
    """Return a HuggingFace Hub LLM handle for *repo_id*.

    Args:
        repo_id: HuggingFace Hub model repository to use.
    """
    from langchain import HuggingFaceHub

    model_config = {"temperature": 0.1, "max_length": 1024}
    return HuggingFaceHub(
        repo_id=repo_id,
        model_kwargs=model_config,
        huggingfacehub_api_token=env("HUGGINGFACEHUB_API_TOKEN"),
    )

def get_local_gpt4_model(model="GPT4All-13B-snoozy.ggmlv3.q4_0.bin"):
    """Load a local GPT4All model file from the `.models/` directory.

    Args:
        model: filename of the GGML model under `.models/`.
    """
    from langchain.llms import GPT4All

    # NOTE(review): path is relative to the working directory — confirm `.models/`
    # (not `./models/`) is the intended location.
    model_path = ".models/" + model
    return GPT4All(model=model_path, verbose=True)

def set_LangChain_tracking(project="Chat with your PDF"):
    """Point LangChain tracing at *project* via the LANGCHAIN_PROJECT env var.

    Args:
        project: project name reported to LangChain tracing.
    """
    import os

    os.environ["LANGCHAIN_PROJECT"] = project
    print("LangChain tracking is set to : ", project)

def unset_LangChain_tracking():
    """Remove every LangChain tracing variable from the process environment."""
    import os

    # pop(..., None) keeps this idempotent when a variable is already absent.
    for var in (
        "LANGCHAIN_API_KEY",
        "LANGCHAIN_TRACING_V2",
        "LANGCHAIN_ENDPOINT",
        "LANGCHAIN_PROJECT",
    ):
        os.environ.pop(var, None)
    print("LangChain tracking is removed .")