|
|
|
from src.logger.logger import logging |
|
from src.exception.exception import customexception |
|
import sys |
|
from langchain_huggingface import HuggingFaceEndpoint |
|
|
|
|
|
# Hugging Face Hub model id used by TextProcessor's endpoint client.
repo_id="mistralai/Mistral-7B-Instruct-v0.3"
|
|
|
|
|
class TextProcessor:
    """Generate text responses from a Hugging Face hosted Mistral model."""

    def __init__(self, hf_token):
        """Create the HuggingFaceEndpoint LLM client.

        Args:
            hf_token: Hugging Face API token used to authenticate against
                the inference endpoint.
        """
        self.llm = HuggingFaceEndpoint(
            repo_id=repo_id,
            # FIX: hf_token was accepted but never used, so the endpoint
            # silently fell back to ambient credentials. Pass it explicitly.
            huggingfacehub_api_token=hf_token,
            max_new_tokens=512,
            top_k=10,
            top_p=0.95,
            typical_p=0.95,
            temperature=0.01,  # near-greedy decoding for stable outputs
            repetition_penalty=1.03,
            streaming=False,
        )
        logging.info("LLM model for text generation created.")

    def generate_response(self, input_text):
        """Return the model completion for ``input_text``.

        Raises:
            customexception: wraps any error raised by the endpoint call.
        """
        try:
            response = self.llm.invoke(input_text)
            # FIX: log success only after the call completes; the original
            # logged before invoking, so failures still claimed success.
            logging.info("Text response generated.")
            return response
        except Exception as e:
            raise customexception(e, sys)