import os
import sys

from dotenv import load_dotenv

load_dotenv()

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the repo root (two levels up) to sys.path so the local litellm package resolves
import pytest

from litellm.llms.prompt_templates.factory import prompt_factory


def test_prompt_formatting():
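    """Mistral instruct template: a system + user exchange should render as [INST] blocks."""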
    try:
        prompt = prompt_factory(
            model="mistralai/Mistral-7B-Instruct-v0.1",
            messages=[
                {"role": "system", "content": "Be a good bot"},
                {"role": "user", "content": "Hello world"},
            ],
        )
        assert (
            prompt == "<s>[INST] Be a good bot [/INST]</s> [INST] Hello world [/INST]"
        )
    except Exception as e:
        pytest.fail(f"An exception occurred: {str(e)}")


def test_prompt_formatting_custom_model():
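    """Hugging Face provider path: prompt_factory should build a prompt for a custom model without raising."""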
    try:
        prompt = prompt_factory(
            model="ehartford/dolphin-2.5-mixtral-8x7b",
            messages=[
                {"role": "system", "content": "Be a good bot"},
                {"role": "user", "content": "Hello world"},
            ],
            custom_llm_provider="huggingface",
        )
        print(f"prompt: {prompt}")
    except Exception as e:
        pytest.fail(f"An exception occurred: {str(e)}")


# test_prompt_formatting_custom_model()
# def logger_fn(user_model_dict):
#     return
#     print(f"user_model_dict: {user_model_dict}")

# messages=[{"role": "user", "content": "Write me a function to print hello world"}]

# # test if the first-party prompt templates work
# def test_huggingface_supported_models():
#     model = "huggingface/WizardLM/WizardCoder-Python-34B-V1.0"
#     response = completion(model=model, messages=messages, max_tokens=256, api_base="https://ji16r2iys9a8rjk2.us-east-1.aws.endpoints.huggingface.cloud", logger_fn=logger_fn)
#     print(response['choices'][0]['message']['content'])
#     return response

# test_huggingface_supported_models()

# # test if a custom prompt template works
# litellm.register_prompt_template(
# 	model="togethercomputer/LLaMA-2-7B-32K",
# 	roles={"system":"", "assistant":"Assistant:", "user":"User:"},
# 	pre_message_sep= "\n",
# 	post_message_sep= "\n"
# )
# def test_huggingface_custom_model():
#     model = "huggingface/togethercomputer/LLaMA-2-7B-32K"
#     response = completion(model=model, messages=messages, api_base="https://ecd4sb5n09bo4ei2.us-east-1.aws.endpoints.huggingface.cloud", logger_fn=logger_fn)
#     print(response['choices'][0]['message']['content'])
#     return response

# test_huggingface_custom_model()