# starsaround's picture
# Update models_for_langchain/model.py
# 383c5d9
# raw
# history blame
# 2.37 kB
#
# NOTE: the six lines above are Hugging Face file-viewer page chrome that was
# scraped along with the source; they are kept as comments so the file parses.
from typing import Any, List, Mapping, Optional
from g4f.Provider import (
Ails,
You,
Bing,
Yqcloud,
Theb,
Aichat,
Bard,
Vercel,
Forefront,
Lockchat,
Liaobots,
H2o,
ChatgptLogin,
DeepAi,
GetGpt,
AItianhu,
EasyChat,
Acytoo,
DfeHub,
AiService,
Wewordle,
ChatgptAi,
)
from g4f import Provider
import g4f
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
# Map provider names (plain strings) to their g4f Provider classes.
# Built by looking each name up on the Provider module, so the mapping
# cannot drift out of sync with the classes the module actually exposes.
provider_dict = {
    name: getattr(Provider, name)
    for name in (
        'Ails', 'You', 'Bing', 'Yqcloud', 'Theb', 'Aichat', 'Bard',
        'Vercel', 'Forefront', 'Lockchat', 'Liaobots', 'H2o',
        'ChatgptLogin', 'DeepAi', 'GetGpt', 'AItianhu', 'EasyChat',
        'Acytoo', 'DfeHub', 'AiService', 'Wewordle', 'ChatgptAi',
    )
}
# Per-provider credential payloads handed to g4f for providers that
# require authentication (keyed by provider name).
provider_auth_settings = {
    'Bard': {
        'cookie': "",
    },
}
class CustomLLM(LLM):
    """LangChain LLM wrapper around the free g4f chat providers.

    The backend is selected by name (``provider_name``) and resolved via
    ``provider_dict`` at call time; credentials for providers that need
    them are taken from ``provider_auth_settings`` under the same name.
    """

    # Model identifier forwarded to g4f.ChatCompletion.create.
    model_name: str = "gpt-3.5-turbo"
    # Key into provider_dict / provider_auth_settings.
    provider_name: str = "GetGpt"

    @property
    def _llm_type(self) -> str:
        """Type tag LangChain uses to identify this LLM implementation."""
        return "custom"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        model_name='gpt-3.5-turbo',  # unused; kept for backward compatibility
        provider=GetGpt,             # unused; kept for backward compatibility
    ) -> str:
        """Send *prompt* to the configured g4f provider and return its reply.

        Args:
            prompt: The user message to send.
            stop: Stop sequences (currently not forwarded to g4f).
            run_manager: Optional LangChain callback manager (unused).

        Returns:
            The provider's response as a single string, even when the
            provider streams its output.
        """
        provider_cls = provider_dict[self.provider_name]
        # Force-enable the provider even if g4f has marked it as down.
        provider_cls.working = True

        # BUG FIX: auth settings were previously looked up with the literal
        # key 'provider' instead of the configured provider name, which
        # raised KeyError for every provider that needs authentication.
        auth = (provider_auth_settings.get(self.provider_name)
                if provider_cls.needs_auth else None)

        bot_msg = g4f.ChatCompletion.create(
            model=self.model_name,
            provider=provider_cls,
            messages=[{"role": "user", "content": prompt}],
            stream=provider_cls.supports_stream,
            auth=auth,
        )
        # BUG FIX: with stream=True g4f yields a generator of text chunks,
        # but callers (and the annotation) expect a plain str — join them.
        if provider_cls.supports_stream:
            return "".join(bot_msg)
        return bot_msg

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters.

        BUG FIX: previously returned a hard-coded ``{"model:": "gpt-3.5-turbo"}``
        (note the stray colon in the key) regardless of configuration.
        """
        return {"model_name": self.model_name,
                "provider_name": self.provider_name}