IliaLarchenko committed
Commit 72b491a
1 Parent(s): 6fcae9a

Added Anthropic LLMs support

Files changed (2)
  1. api/llm.py +54 -13
  2. requirements.txt +1 -0
api/llm.py CHANGED
@@ -1,5 +1,6 @@
 import os
 from openai import OpenAI
+import anthropic
 from utils.errors import APIError
 from typing import List, Dict, Generator, Optional, Tuple
 
@@ -37,7 +38,13 @@ class PromptManager:
 class LLMManager:
     def __init__(self, config, prompts: Dict[str, str]):
         self.config = config
-        self.client = OpenAI(base_url=config.llm.url, api_key=config.llm.key)
+        self.llm_type = config.llm.type
+        if self.llm_type == "ANTHROPIC_API":
+            self.client = anthropic.Anthropic(api_key=config.llm.key)
+        else:
+            # All other API types are expected to support the OpenAI format.
+            self.client = OpenAI(base_url=config.llm.url, api_key=config.llm.key)
+
         self.prompt_manager = PromptManager(prompts)
 
         self.status = self.test_llm(stream=False)
@@ -50,21 +57,55 @@ class LLMManager:
         if stream is None:
             stream = self.streaming
         try:
-            if not stream:
-                response = self.client.chat.completions.create(
-                    model=self.config.llm.name, messages=messages, temperature=1, max_tokens=2000
-                )
-                yield response.choices[0].message.content.strip()
-            else:
-                response = self.client.chat.completions.create(
-                    model=self.config.llm.name, messages=messages, temperature=1, stream=True, max_tokens=2000
-                )
-                for chunk in response:
-                    if chunk.choices[0].delta.content:
-                        yield chunk.choices[0].delta.content
+            if self.llm_type == "OPENAI_API":
+                return self._get_text_openai(messages, stream)
+            elif self.llm_type == "ANTHROPIC_API":
+                return self._get_text_anthropic(messages, stream)
         except Exception as e:
             raise APIError(f"LLM Get Text Error: Unexpected error: {e}")
 
+    def _get_text_openai(self, messages: List[Dict[str, str]], stream: bool) -> Generator[str, None, None]:
+        if not stream:
+            response = self.client.chat.completions.create(model=self.config.llm.name, messages=messages, temperature=1, max_tokens=2000)
+            yield response.choices[0].message.content.strip()
+        else:
+            response = self.client.chat.completions.create(
+                model=self.config.llm.name, messages=messages, temperature=1, stream=True, max_tokens=2000
+            )
+            for chunk in response:
+                if chunk.choices[0].delta.content:
+                    yield chunk.choices[0].delta.content
+
+    def _get_text_anthropic(self, messages: List[Dict[str, str]], stream: bool) -> Generator[str, None, None]:
+        # Convert the messages to the Anthropic format on every call.
+        # This is not optimal; the Anthropic format could be used from the start,
+        # but that would duplicate code, so it is left as is for now.
+        system_message = None
+        consolidated_messages = []
+
+        for message in messages:
+            if message["role"] == "system":
+                if system_message is None:
+                    system_message = message["content"]
+                else:
+                    system_message += "\n" + message["content"]
+            else:
+                if consolidated_messages and consolidated_messages[-1]["role"] == message["role"]:
+                    consolidated_messages[-1]["content"] += "\n" + message["content"]
+                else:
+                    consolidated_messages.append(message.copy())
+
+        if not stream:
+            response = self.client.messages.create(
+                model=self.config.llm.name, max_tokens=2000, temperature=1, system=system_message, messages=consolidated_messages
+            )
+            yield response.content[0].text
+        else:
+            with self.client.messages.stream(
+                model=self.config.llm.name, max_tokens=2000, temperature=1, system=system_message, messages=consolidated_messages
+            ) as stream:
+                yield from stream.text_stream
+
     def test_llm(self, stream=False) -> bool:
         """
         Test the LLM connection with or without streaming.
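
Note on the conversion in _get_text_anthropic: the Anthropic Messages API takes the system prompt as a separate system argument and expects user/assistant turns without repeated roles, so the method folds all system messages into one string and merges consecutive messages that share a role. The standalone sketch below reproduces that transformation outside the class for illustration; to_anthropic_format is a hypothetical helper name, not part of this commit.

from typing import Dict, List, Optional, Tuple


def to_anthropic_format(messages: List[Dict[str, str]]) -> Tuple[Optional[str], List[Dict[str, str]]]:
    # Mirror of the logic in _get_text_anthropic: collect every "system"
    # message into a single system string and merge consecutive messages
    # that share the same role.
    system_message: Optional[str] = None
    consolidated: List[Dict[str, str]] = []
    for message in messages:
        if message["role"] == "system":
            system_message = message["content"] if system_message is None else system_message + "\n" + message["content"]
        elif consolidated and consolidated[-1]["role"] == message["role"]:
            consolidated[-1]["content"] += "\n" + message["content"]
        else:
            consolidated.append(message.copy())
    return system_message, consolidated


if __name__ == "__main__":
    example = [
        {"role": "system", "content": "You are an interviewer."},
        {"role": "user", "content": "Hello!"},
        {"role": "user", "content": "Can we start the interview?"},
        {"role": "assistant", "content": "Sure, let's begin."},
    ]
    system, turns = to_anthropic_format(example)
    print(system)  # "You are an interviewer."
    print(turns)   # the two consecutive user messages are merged into one turn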
requirements.txt CHANGED
@@ -5,3 +5,4 @@ pytest==8.2.0
 webrtcvad==2.0.10
 setuptools==69.5.1
 transformers==4.40.0
+anthropic==0.30.1
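
To exercise the new code path, the config object only needs the llm fields that LLMManager reads in this diff: type, name, key, and url. The snippet below is a minimal sketch using SimpleNamespace; the project's real config loader may build this object differently (for example, it may also set the streaming flag), and the model name, API key, and prompts dict are placeholders.

from types import SimpleNamespace

# Hypothetical wiring, for illustration only.
config = SimpleNamespace(
    llm=SimpleNamespace(
        type="ANTHROPIC_API",            # routes LLMManager to anthropic.Anthropic
        name="claude-3-haiku-20240307",  # example model name passed to messages.create
        key="sk-ant-...",                # placeholder API key
        url=None,                        # only used by the OpenAI-compatible branch
    )
)

# manager = LLMManager(config, prompts={...})  # prompts dict as expected by PromptManager
# for chunk in manager.get_text(messages, stream=True):
#     print(chunk, end="")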