from .BaseLLM import BaseLLM
import os

from openai import OpenAI
import tiktoken

# Token counts are approximated with the gpt-4o tokenizer; OpenRouter serves
# many different models, so these counts are estimates, not exact usage.
encoding = tiktoken.encoding_for_model("gpt-4o")

class OpenRouter(BaseLLM):
    def __init__(self, model="deepseek/deepseek-r1:free"):
        super().__init__()
        # OpenRouter exposes an OpenAI-compatible API, so the standard OpenAI
        # client works once pointed at the OpenRouter base URL.
        self.client = OpenAI(
            api_key=os.getenv("OPENROUTER_API_KEY"),
            base_url="https://openrouter.ai/api/v1",
        )
        self.model_name = model
        self.messages = []
        self.in_token = 0
        self.out_token = 0

    def initialize_message(self):
        self.messages = []

    def ai_message(self, payload):
        # OpenAI-compatible endpoints expect the role "assistant", not "ai".
        self.messages.append({"role": "assistant", "content": payload})

    def system_message(self, payload):
        self.messages.append({"role": "system", "content": payload})

    def user_message(self, payload):
        self.messages.append({"role": "user", "content": payload})

    def get_response(self, temperature=0.8):
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=self.messages,
            temperature=temperature,
        )
        return completion.choices[0].message.content

    def chat(self, text, temperature=0.8):
        # Each call starts a fresh conversation; no history is carried over.
        self.initialize_message()
        self.user_message(text)
        response = self.get_response(temperature=temperature)
        print("In", self.count_token(text))
        print("Out", self.count_token(response))
        self.in_token += self.count_token(text)
        self.out_token += self.count_token(response)
        return response

    def print_prompt(self):
        for message in self.messages:
            print(message)

    def count_token(self, text):
        return len(encoding.encode(text))
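
# A minimal usage sketch (hypothetical; assumes OPENROUTER_API_KEY is set in
# the environment and that this module is imported from its package, since the
# relative import of BaseLLM prevents running this file directly as a script):
#
#     llm = OpenRouter(model="deepseek/deepseek-r1:free")
#     reply = llm.chat("Summarize what OpenRouter does in one sentence.")
#     print(reply)
#     print("Totals:", llm.in_token, "tokens in,", llm.out_token, "tokens out")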