misc: Change to use Azure OpenAI
- app.py +1 -1
- service_provider_config.py +12 -11
app.py
CHANGED
@@ -38,7 +38,7 @@ MODEL_NAME = ChatbotVersion.CHATGPT_4O.value
 
 CHUNK_SIZE = 8191
 LLM, EMBED_MODEL = get_service_provider_config(
-    service_provider=ServiceProvider.OPENAI, model_name=MODEL_NAME)
+    service_provider=ServiceProvider.AZURE, model_name=MODEL_NAME)
 
 # LLM = Ollama(model="llama3.1:latest", request_timeout=60.0, context_window=10000)
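The only change in app.py is the provider argument: the module-level get_service_provider_config call now requests Azure-backed objects instead of the plain OpenAI ones. A minimal sketch of how the returned pair is typically wired into LlamaIndex follows; the Settings lines and the service_provider_config import are assumptions, since the rest of app.py sits outside this hunk:

from llama_index.core import Settings

from schemas import ChatbotVersion, ServiceProvider
from service_provider_config import get_service_provider_config

# Per the hunk header, app.py pins MODEL_NAME to the GPT-4o chatbot version.
MODEL_NAME = ChatbotVersion.CHATGPT_4O.value

LLM, EMBED_MODEL = get_service_provider_config(
    service_provider=ServiceProvider.AZURE, model_name=MODEL_NAME)

# Assumed follow-up (not shown in the hunk): register the pair globally so
# downstream index/query code picks up the Azure-backed models.
Settings.llm = LLM
Settings.embed_model = EMBED_MODEL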
service_provider_config.py
CHANGED
@@ -1,7 +1,9 @@
+import os
+
 from dotenv import load_dotenv
 from llama_index.embeddings.openai import OpenAIEmbedding
 from llama_index.llms.openai import OpenAI
-from llama_index.
+from llama_index.llms.azure_openai import AzureOpenAI
 from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
 from schemas import ServiceProvider, ChatbotVersion
 
@@ -9,21 +11,21 @@ load_dotenv()
 
 def get_service_provider_config(service_provider: ServiceProvider, model_name: str=ChatbotVersion.CHATGPT_35.value):
     if service_provider == ServiceProvider.AZURE:
-        return get_azure_openai_config()
+        return get_azure_openai_config(model_name = model_name)
     if service_provider == ServiceProvider.OPENAI:
         llm = OpenAI(model=model_name)
         embed_model = OpenAIEmbedding()
         return llm, embed_model
 
-
-def get_azure_openai_config():
-    api_key = "
-    azure_endpoint = "https
-    api_version = "
+# The engine name needs to be the same as the deployment name in Azure.
+def get_azure_openai_config(model_name: str):
+    api_key = os.getenv("AZURE_OPENAI_API_KEY")
+    azure_endpoint = "https://awesumcare.openai.azure.com/"
+    api_version = "2024-10-01-preview"
 
     llm = AzureOpenAI(
-
-
+        engine=model_name,
+        model=model_name,
         api_key=api_key,
         azure_endpoint=azure_endpoint,
         api_version=api_version,
@@ -31,8 +33,7 @@ def get_azure_openai_config():
 
     # You need to deploy your own embedding model as well as your own chat completion model
     embed_model = AzureOpenAIEmbedding(
-
-        deployment_name="my-custom-embedding",
+        deployment_name="text-embedding-ada-002",
         api_key=api_key,
         azure_endpoint=azure_endpoint,
         api_version=api_version,
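Reading the right-hand side of the diff as a whole, the Azure branch of service_provider_config.py now looks roughly like the sketch below. The closing lines of both constructor calls and the final return statement fall outside the hunks, so those parts are assumptions that mirror the OpenAI branch:

import os

from dotenv import load_dotenv
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding

load_dotenv()


def get_azure_openai_config(model_name: str):
    # Only the API key comes from the environment; the endpoint and API
    # version stay hardcoded in this commit.
    api_key = os.getenv("AZURE_OPENAI_API_KEY")
    azure_endpoint = "https://awesumcare.openai.azure.com/"
    api_version = "2024-10-01-preview"

    # The engine name needs to be the same as the deployment name in Azure,
    # so the chat deployment is expected to carry the model's own name.
    llm = AzureOpenAI(
        engine=model_name,
        model=model_name,
        api_key=api_key,
        azure_endpoint=azure_endpoint,
        api_version=api_version,
    )

    # You need to deploy your own embedding model as well as your own chat
    # completion model; the embedding deployment is assumed to be named
    # text-embedding-ada-002, matching the diff.
    embed_model = AzureOpenAIEmbedding(
        deployment_name="text-embedding-ada-002",
        api_key=api_key,
        azure_endpoint=azure_endpoint,
        api_version=api_version,
    )

    # Return shape assumed to mirror the OpenAI branch: (llm, embed_model).
    return llm, embed_model

Pulling only the key from the environment keeps the secret out of the repo while leaving the endpoint and API version visible in code; moving those two into environment variables as well would be the natural next step if the Space ever needs to point at a different Azure resource.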