# NOTE: this entire test module is commented out / disabled.
# It contains langchain ChatLiteLLM integration tests (gpt-3.5-turbo,
# claude-2, palm/chat-bison) kept for reference; none of it executes.
# import os
# import sys, os
# import traceback
# from dotenv import load_dotenv
# load_dotenv()
# import os, io
# sys.path.insert(
# 0, os.path.abspath("../..")
# ) # Adds the parent directory to the system path
# import pytest
# import litellm
# from litellm import embedding, completion, text_completion, completion_cost
# from langchain.chat_models import ChatLiteLLM
# from langchain.prompts.chat import (
# ChatPromptTemplate,
# SystemMessagePromptTemplate,
# AIMessagePromptTemplate,
# HumanMessagePromptTemplate,
# )
# from langchain.schema import AIMessage, HumanMessage, SystemMessage
# def test_chat_gpt():
# try:
# chat = ChatLiteLLM(model="gpt-3.5-turbo", max_tokens=10)
# messages = [
# HumanMessage(
# content="what model are you"
# )
# ]
# resp = chat(messages)
# print(resp)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# # test_chat_gpt()
# def test_claude():
# try:
# chat = ChatLiteLLM(model="claude-2", max_tokens=10)
# messages = [
# HumanMessage(
# content="what model are you"
# )
# ]
# resp = chat(messages)
# print(resp)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# # test_claude()
# def test_palm():
# try:
# chat = ChatLiteLLM(model="palm/chat-bison", max_tokens=10)
# messages = [
# HumanMessage(
# content="what model are you"
# )
# ]
# resp = chat(messages)
# print(resp)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# # test_palm()
# # def test_openai_with_params():
# # try:
# # api_key = os.environ["OPENAI_API_KEY"]
# # os.environ.pop("OPENAI_API_KEY")
# # print("testing openai with params")
# # llm = ChatLiteLLM(
# # model="gpt-3.5-turbo",
# # openai_api_key=api_key,
# # # Prefer using None which is the default value, endpoint could be empty string
# # openai_api_base= None,
# # max_tokens=20,
# # temperature=0.5,
# # request_timeout=10,
# # model_kwargs={
# # "frequency_penalty": 0,
# # "presence_penalty": 0,
# # },
# # verbose=True,
# # max_retries=0,
# # )
# # messages = [
# # HumanMessage(
# # content="what model are you"
# # )
# # ]
# # resp = llm(messages)
# # print(resp)
# # except Exception as e:
# # pytest.fail(f"Error occurred: {e}")
# # test_openai_with_params()
|