import os
from typing import Union

from dotenv import load_dotenv
from langchain_groq import ChatGroq

class LlamaModel:
    """
    Wraps a Groq-hosted Llama chat model for text generation.

    Args:
        model: The name of the Groq model to use.
            Defaults to 'llama3-70b-8192'.
    """
    def __init__(self, model: str = 'llama3-70b-8192'):
        # Load the Groq API key from a local .env file.
        load_dotenv()
        self.model = ChatGroq(
            groq_api_key=os.getenv('groq_llama70_key'),
            model=model,
        )
    def execute(self, prompt: str) -> Union[dict, str]:
        """
        Send a prompt to the model.

        Returns:
            On success, a dict with the generated 'text' and the
            response metadata under 'meta'; on failure, an error
            message string.
        """
        try:
            response = self.model.invoke(prompt)
            return {'text': response.content, 'meta': response.response_metadata}
        except Exception as e:
            return f"An error occurred: {e}"