ray committed on
Commit
29e9295
1 Parent(s): 693929a

add schema and service provider config

Files changed (2)
  1. schemas.py +13 -0
  2. service_provider_config.py +40 -0
schemas.py ADDED
@@ -0,0 +1,13 @@
+ from enum import Enum
+
+
+ class ChatbotVersion(str, Enum):
+     # enum of supported chatbot versions and their OpenAI model name strings
+     CHATGPT_35 = "gpt-3.5-turbo-1106"
+     CHATGPT_4 = "gpt-4-1106-preview"
+
+
+ class ServiceProvider(str, Enum):
+     # enum of supported service providers and their string identifiers
+     OPENAI = "openai"
+     AZURE = "azure"
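Since both enums subclass str, their members behave as plain strings wherever a model name or provider id is expected. A minimal illustrative check (not part of this commit; assumes schemas.py is importable):

from schemas import ChatbotVersion, ServiceProvider

# str-mixin enum members compare equal to their underlying string values
assert ChatbotVersion.CHATGPT_35 == "gpt-3.5-turbo-1106"
assert isinstance(ServiceProvider.OPENAI, str)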
service_provider_config.py ADDED
@@ -0,0 +1,40 @@
+ from dotenv import load_dotenv
+ from llama_index import OpenAIEmbedding
+ from llama_index.llms import OpenAI
+ from llama_index.llms import AzureOpenAI
+ from llama_index.embeddings import AzureOpenAIEmbedding
+ from schemas import ServiceProvider, ChatbotVersion
+
+ load_dotenv()
+
+ def get_service_provider_config(service_provider: ServiceProvider):
+     if service_provider == ServiceProvider.AZURE:
+         return get_azure_openai_config()
+     if service_provider == ServiceProvider.OPENAI:
+         llm = OpenAI(model=ChatbotVersion.CHATGPT_35)
+         embed_model = OpenAIEmbedding()
+         return llm, embed_model
+
+
+ def get_azure_openai_config():
+     api_key = "<api-key>"
+     azure_endpoint = "https://<your-resource-name>.openai.azure.com/"
+     api_version = "2023-07-01-preview"
+
+     llm = AzureOpenAI(
+         model="gpt-35-turbo-16k",
+         deployment_name="my-custom-llm",
+         api_key=api_key,
+         azure_endpoint=azure_endpoint,
+         api_version=api_version,
+     )
+
+     # You need to deploy your own embedding model as well as your own chat completion model
+     embed_model = AzureOpenAIEmbedding(
+         model="text-embedding-ada-002",
+         deployment_name="my-custom-embedding",
+         api_key=api_key,
+         azure_endpoint=azure_endpoint,
+         api_version=api_version,
+     )
+     return llm, embed_model
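A minimal usage sketch (illustrative only, not part of this commit): the caller selects a provider via the ServiceProvider enum and receives an (llm, embed_model) pair. It assumes the .env file loaded by load_dotenv() supplies the OpenAI credentials (e.g. OPENAI_API_KEY).

from schemas import ServiceProvider
from service_provider_config import get_service_provider_config

# assumes OPENAI_API_KEY is available via the environment / .env file
llm, embed_model = get_service_provider_config(ServiceProvider.OPENAI)
print(llm.complete("Say hello in one short sentence."))
print(len(embed_model.get_text_embedding("hello")))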