mattoofahad commited on
Commit
6ce2da5
1 Parent(s): 28a4b1b

replacing openai completion with litellm

Browse files
requirements.txt CHANGED
@@ -1,5 +1,5 @@
1
  streamlit==1.37.1
2
- openai==1.40.6
3
  discord-webhook==1.3.1
4
  python-dotenv==1.0.1
5
  colorama==0.4.6
 
1
  streamlit==1.37.1
2
+ litellm==1.47.0
3
  discord-webhook==1.3.1
4
  python-dotenv==1.0.1
5
  colorama==0.4.6
src/utils/constants.py CHANGED
@@ -1,27 +1,28 @@
1
- """Module doc string"""
2
-
3
- from .config import OPENAI_API_KEY
4
-
5
-
6
- class ConstantVariables:
7
- """Module doc string"""
8
-
9
- model_list_tuple = (
10
- "gpt-4o",
11
- "gpt-4o-mini",
12
- "gpt-4-turbo",
13
- "gpt-4",
14
- "gpt-3.5-turbo",
15
- )
16
- default_model = "gpt-4o-mini"
17
-
18
- max_tokens = 180
19
- min_token = 20
20
- step = 20
21
- default = round(((max_tokens + min_token) / 2) / step) * step
22
- default_token = max(min_token, min(max_tokens, default))
23
-
24
- if OPENAI_API_KEY != "NO_KEY":
25
- api_key = OPENAI_API_KEY
26
- else:
27
- api_key = ""
 
 
1
+ """Module doc string"""
2
+
3
+ from .config import OPENAI_API_KEY
4
+
5
+
6
+ class ConstantVariables:
7
+ """Module doc string"""
8
+
9
+ model_list_tuple = (
10
+ "gpt-4o",
11
+ "gpt-4o-mini",
12
+ "gpt-4-turbo",
13
+ "gpt-3.5-turbo",
14
+ "o1-preview",
15
+ "o1-mini"
16
+ )
17
+ default_model = "gpt-4o-mini"
18
+
19
+ max_tokens = 180
20
+ min_token = 20
21
+ step = 20
22
+ default = round(((max_tokens + min_token) / 2) / step) * step
23
+ default_token = max(min_token, min(max_tokens, default))
24
+
25
+ if OPENAI_API_KEY != "NO_KEY":
26
+ api_key = OPENAI_API_KEY
27
+ else:
28
+ api_key = ""
src/utils/openai_utils.py CHANGED
@@ -1,70 +1,73 @@
1
- """Module doc string"""
2
-
3
- import openai
4
- import streamlit as st
5
- from openai import OpenAI
6
-
7
- from .logs import log_execution_time, logger
8
-
9
-
10
- class OpenAIFunctions:
11
- """Module doc string"""
12
-
13
- @log_execution_time
14
- @staticmethod
15
- def invoke_model():
16
- """_summary_"""
17
- logger.debug("OpenAI invoked")
18
- client = OpenAI(api_key=st.session_state.openai_api_key)
19
- with st.chat_message("assistant"):
20
- stream = client.chat.completions.create(
21
- model=st.session_state["openai_model"],
22
- messages=[
23
- {"role": m["role"], "content": m["content"]}
24
- for m in st.session_state.messages
25
- ],
26
- max_tokens=st.session_state["openai_maxtokens"],
27
- stream=True,
28
- stream_options={"include_usage": True},
29
- )
30
-
31
- def stream_data():
32
- for chunk in stream:
33
- if chunk.choices != []:
34
- word = chunk.choices[0].delta.content
35
- if word is not None:
36
- yield word
37
- if chunk.usage is not None:
38
- yield {
39
- "completion_tokens": chunk.usage.completion_tokens,
40
- "prompt_tokens": chunk.usage.prompt_tokens,
41
- "total_tokens": chunk.usage.total_tokens,
42
- }
43
-
44
- return st.write_stream(stream_data)
45
-
46
- @log_execution_time
47
- @staticmethod
48
- def check_openai_api_key():
49
- """_summary_"""
50
- logger.info("Checking OpenAI Key")
51
- try:
52
- client = OpenAI(api_key=st.session_state.openai_api_key)
53
- client.models.list()
54
- logger.debug("OpenAI key Working")
55
- return True
56
- except openai.AuthenticationError as auth_error:
57
- with st.chat_message("assistant"):
58
- st.error(str(auth_error))
59
- logger.error("AuthenticationError: %s", auth_error)
60
- return False
61
- except openai.OpenAIError as openai_error:
62
- with st.chat_message("assistant"):
63
- st.error(str(openai_error))
64
- logger.error("OpenAIError: %s", openai_error)
65
- return False
66
- except Exception as general_error:
67
- with st.chat_message("assistant"):
68
- st.error(str(general_error))
69
- logger.error("Unexpected error: %s", general_error)
70
- return False
 
 
 
 
1
+ """Module doc string"""
2
+
3
+ import openai
4
+ import streamlit as st
5
+ from litellm import completion
6
+ from openai import OpenAI
7
+
8
+ from .logs import log_execution_time, logger
9
+
10
+
11
+ class OpenAIFunctions:
12
+ """Module doc string"""
13
+
14
+ @log_execution_time
15
+ @staticmethod
16
+ def invoke_model():
17
+ """_summary_"""
18
+ logger.debug("OpenAI invoked")
19
+ with st.chat_message("assistant"):
20
+ messages = [
21
+ {"role": m["role"], "content": m["content"]}
22
+ for m in st.session_state.messages
23
+ ]
24
+
25
+ stream = completion(
26
+ api_key=st.session_state.openai_api_key,
27
+ model=st.session_state["openai_model"],
28
+ messages=messages,
29
+ max_tokens=st.session_state["openai_maxtokens"],
30
+ stream=True,
31
+ stream_options={"include_usage": True},
32
+ )
33
+
34
+ def stream_data():
35
+ for chunk in stream:
36
+ if chunk.choices != []:
37
+ word = chunk.choices[0].delta.content
38
+ if word is not None:
39
+ yield word
40
+ if hasattr(chunk, "usage"):
41
+ yield {
42
+ "completion_tokens": chunk.usage.completion_tokens,
43
+ "prompt_tokens": chunk.usage.prompt_tokens,
44
+ "total_tokens": chunk.usage.total_tokens,
45
+ }
46
+
47
+ return st.write_stream(stream_data)
48
+
49
+ @log_execution_time
50
+ @staticmethod
51
+ def check_openai_api_key():
52
+ """_summary_"""
53
+ logger.info("Checking OpenAI Key")
54
+ try:
55
+ client = OpenAI(api_key=st.session_state.openai_api_key)
56
+ client.models.list()
57
+ logger.debug("OpenAI key Working")
58
+ return True
59
+ except openai.AuthenticationError as auth_error:
60
+ with st.chat_message("assistant"):
61
+ st.error(str(auth_error))
62
+ logger.error("AuthenticationError: %s", auth_error)
63
+ return False
64
+ except openai.OpenAIError as openai_error:
65
+ with st.chat_message("assistant"):
66
+ st.error(str(openai_error))
67
+ logger.error("OpenAIError: %s", openai_error)
68
+ return False
69
+ except Exception as general_error:
70
+ with st.chat_message("assistant"):
71
+ st.error(str(general_error))
72
+ logger.error("Unexpected error: %s", general_error)
73
+ return False