# NOTE: removed non-Python scrape artifacts that preceded the code
# (a "File size" header, git short hashes, and a copied line-number gutter);
# they were not part of the program and made the file syntactically invalid.
import json
import os
from typing import Union

import requests

def generate(message: str, model: str='meta-llama/Meta-Llama-3-70B-Instruct', system_prompt: str = "Be Helpful and Friendly. Keep your response straightfoward, short and concise", max_tokens: int = 512, temperature: float = 0.7) -> Union[str, None]:
    """
    Send one chat message to a DeepInfra-hosted LLM and return its reply.

    Parameters:
        - message (str): The user message sent to the model.
        - model (str): The model identifier. Options used by this script include:
            - "meta-llama/Meta-Llama-3-70B-Instruct"
            - "meta-llama/Meta-Llama-3-8B-Instruct"
            - "mistralai/Mixtral-8x22B-Instruct-v0.1"
            - "mistralai/Mixtral-8x22B-v0.1"
            - "microsoft/WizardLM-2-8x22B"
            - "microsoft/WizardLM-2-7B"
            - "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1"
            - "google/gemma-1.1-7b-it"
            - "databricks/dbrx-instruct"
            - "mistralai/Mixtral-8x7B-Instruct-v0.1"
            - "mistralai/Mistral-7B-Instruct-v0.2"
            - "meta-llama/Llama-2-70b-chat-hf"
            - "cognitivecomputations/dolphin-2.6-mixtral-8x7b"
        - system_prompt (str): System message steering the reply. Defaults to
          "Be Helpful and Friendly. Keep your response straightfoward, short and concise".
        - max_tokens (int): Upper bound on generated tokens. Defaults to 512.
        - temperature (float): Sampling temperature. Defaults to 0.7.

    Returns:
        - Union[str, None]: The model's reply text on success, otherwise None.
    """
    url = "https://api.deepinfra.com/v1/openai/chat/completions"
    # SECURITY: a bearer JWT was hard-coded here (and is committed to source
    # control, so it should be considered leaked and revoked). Prefer the
    # DEEPINFRA_API_KEY environment variable; the old literal remains only as
    # a backward-compatible fallback.
    token = os.environ.get(
        "DEEPINFRA_API_KEY",
        "jwt:eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJnaDoxMjE3NTEyOTkiLCJleHAiOjE3MTcwODExMzN9.L51dMtUEJgsWb684EkNAyXIsznZqyJWdrbcPboM2ZgI",
    )
    headers = {"Authorization": f"Bearer {token}"}
    payload = {
        'model': model,
        'messages': [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": message},
        ],
        'temperature': temperature,
        'max_tokens': max_tokens,
        'stop': [],
        'stream': False,
    }

    try:
        # json= lets requests serialize and set Content-Type; a timeout keeps
        # the call from hanging indefinitely on a stalled connection.
        result = requests.post(url, headers=headers, json=payload, timeout=30)
        # Surface HTTP errors (401/429/5xx) explicitly instead of letting them
        # appear as a KeyError while indexing the error body below.
        result.raise_for_status()
        return result.json()['choices'][0]['message']['content']
    except (requests.RequestException, KeyError, IndexError, ValueError) as e:
        # Narrow catch: network/HTTP failures, unexpected response shape,
        # or a non-JSON body. Anything else is a programming error and raises.
        print("Error:", e)
        return None


if __name__ == "__main__":

    model_names = [
        "meta-llama/Meta-Llama-3-70B-Instruct",
        "meta-llama/Meta-Llama-3-8B-Instruct",
        "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "mistralai/Mixtral-8x22B-v0.1",
        "microsoft/WizardLM-2-8x22B",
        "microsoft/WizardLM-2-7B",
        "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
        "google/gemma-1.1-7b-it",
        "databricks/dbrx-instruct",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "mistralai/Mistral-7B-Instruct-v0.2",
        "meta-llama/Llama-2-70b-chat-hf",
        "cognitivecomputations/dolphin-2.6-mixtral-8x7b"
    ]

    # The probe prompt is the same for every model, so define it once
    # outside the loop.
    prompt = "Introduce yourself and tell who made you and about your owner company"

    for name in model_names:
        # BUG FIX: the original called generate(name, messages, ...), passing
        # the model name as `message` and the prompt as `model` — every
        # iteration queried the DEFAULT model with the model name as its
        # prompt. Keyword arguments make the mapping explicit.
        response = generate(message=prompt, model=name, system_prompt="Respond very concisely and shortly")

        print(f"• Model: {name} -", response)