Update helpers/foundation_models.py

helpers/foundation_models.py  CHANGED  (+100 -6)
@@ -1,10 +1,11 @@
 import os
-from typing import List, Tuple
+from typing import List, Tuple, Dict, Any
 
 import openai
 import streamlit as st
 from langchain.agents import AgentType, initialize_agent, load_tools
 from langchain.llms import OpenAI as l_OpenAI
+import requests
 
 OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
 SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]
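
Both keys are read eagerly at import time, so the module raises a bare KeyError when either variable is unset. A minimal pre-flight guard along these lines (a sketch, not part of this commit) would fail faster and more readably:

    import os

    # Hypothetical startup guard (not in this commit): report which key is
    # missing instead of raising a bare KeyError at import time.
    for key in ("OPENAI_API_KEY", "SERPAPI_API_KEY"):
        if key not in os.environ:
            raise RuntimeError(f"Set {key} before starting the Streamlit app.")

The second hunk rewrites call_langchain and adds two new helpers: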
@@ -43,13 +44,106 @@ def call_chatgpt(query: str, model: str = "gpt-3.5-turbo") -> str:
 
 
 def call_langchain(prompt: str) -> str:
-    llm = l_OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)
-    tools = load_tools(
+    """
+    Initializes a language model, loads additional tools, initializes an agent
+    with those tools, and runs the agent on the given prompt to produce a text
+    response.
+
+    Args:
+        prompt (str): The input text prompt that the agent will process.
+
+    Returns:
+        str: The text output produced by the agent after processing the prompt.
+    """
+
+    # Initialize the OpenAI language model with temperature 0 (deterministic
+    # output) and the OpenAI API key.
+    llm = l_OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)  # type: ignore
+
+    # Load the tools the agent can call: 'serpapi' for search-engine results
+    # and 'llm-math' for arithmetic, with the SerpAPI key they require.
+    tools = load_tools(  # type: ignore
         ["serpapi", "llm-math"], llm=llm, serpapi_api_key=SERPAPI_API_KEY
     )
-    agent = initialize_agent(
+
+    # Initialize a ZERO_SHOT_REACT_DESCRIPTION agent over the tools and the
+    # language model, with verbose logging enabled.
+    agent = initialize_agent(  # type: ignore
         tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
     )
-    output = agent.run(prompt)
+
+    # Run the agent on the prompt and capture its final answer.
+    output: str = agent.run(prompt)
 
     return output
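
For reference, a round trip through the rewritten agent looks like this (a sketch; it assumes both API keys are set and the serpapi extra is installed, and the exact answer depends on live search results):

    from helpers.foundation_models import call_langchain

    # The agent searches the web and does the arithmetic before answering.
    answer = call_langchain("What is the square root of the year the Eiffel Tower opened?")
    print(answer)

The hunk continues with a new query helper for a hosted inference endpoint: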
+
+
+def query(payload: Dict[str, Any]) -> List[Dict[str, Any]]:
+    """
+    Sends a JSON payload to a predefined API URL and returns the JSON response.
+
+    Args:
+        payload (Dict[str, Any]): The JSON payload to be sent to the API.
+
+    Returns:
+        List[Dict[str, Any]]: The JSON response received from the API; the
+        text-generation endpoint answers with a list of generation dicts.
+    """
+
+    # API endpoint URL (a Hugging Face Inference Endpoint)
+    API_URL = "https://sks7h7h5qkhoxwxo.us-east-1.aws.endpoints.huggingface.cloud"
+
+    # Headers declaring that both the request and the response bodies are JSON
+    headers = {
+        "Accept": "application/json",
+        "Content-Type": "application/json"
+    }
+
+    # Send a POST request with the JSON payload and headers
+    response = requests.post(API_URL, headers=headers, json=payload)
+
+    # Return the decoded JSON response
+    return response.json()
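
The payload and response follow the Hugging Face text-generation convention, where the endpoint answers with a list of generation dicts. A hypothetical round trip (the endpoint must be running; a protected endpoint would also need an Authorization header, which this commit does not send):

    result = query({"inputs": "Hello, who are you?", "parameters": {"max_new_tokens": 64}})

    # Expected shape for a text-generation endpoint: [{"generated_text": "..."}]
    print(result[0]["generated_text"])

The last addition wraps query for the deployed Llama 2 7B model: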
+
+def llama2_7b_ysa(prompt: str) -> str:
+    """
+    Queries the hosted model and retrieves the generated text for a prompt.
+
+    This function sends a prompt to the llama2_7b endpoint through `query` and
+    extracts the generated text from the model's response. It is tailored to a
+    response structure that is a list of dictionaries, where the first
+    dictionary carries the key 'generated_text'.
+
+    Parameters:
+        prompt (str): The text prompt to send to the model.
+
+    Returns:
+        str: The generated text response from the model.
+
+    Note:
+        - Relies on the module-level `query` helper to perform the request.
+        - The 'parameters' dictionary is sent empty here but can be filled
+          with request parameters as the model API allows.
+    """
+
+    # Build the payload with the prompt and an empty parameter set
+    query_payload: Dict[str, Any] = {
+        "inputs": prompt,
+        "parameters": {}
+    }
+
+    # Send the query to the endpoint and keep the raw response
+    output = query(query_payload)
+
+    # Extract 'generated_text' from the first item in the response list
+    response: str = output[0]['generated_text']
+
+    return response
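
A minimal smoke test for the wrapper (hypothetical prompt; assumes the endpoint is awake and reachable):

    from helpers.foundation_models import llama2_7b_ysa

    try:
        print(llama2_7b_ysa("Tell me about yourself."))
    except (KeyError, IndexError, TypeError) as err:
        # An error payload (for example while the endpoint is scaled to zero)
        # does not have the [{"generated_text": ...}] shape, so indexing fails.
        print(f"Endpoint did not return a generation: {err}")

Note that llama2_7b_ysa indexes the response unconditionally, so any non-generation payload surfaces as one of the exceptions above rather than as a clear error message.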