import os

from langchain_huggingface import HuggingFaceEndpoint
import streamlit as st
# Default Hugging Face Hub repo id for the instruct model used by this app.
model_id="mistralai/Mistral-7B-Instruct-v0.3"
def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.1):
    """Build a ``HuggingFaceEndpoint`` client for serverless HF inference.

    Args:
        model_id: Hugging Face Hub repo id to query (defaults to the
            module-level ``model_id`` constant).
        max_new_tokens: Maximum number of tokens the endpoint may generate.
        temperature: Sampling temperature passed through to the endpoint.

    Returns:
        A configured ``HuggingFaceEndpoint`` instance.
    """
    # NOTE(review): os.getenv returns None when HF_TOKEN is unset, so a
    # missing token only fails later, at request time — consider validating
    # the environment variable here and raising early.
    llm = HuggingFaceEndpoint(
        repo_id=model_id,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        token=os.getenv("HF_TOKEN"),
    )
    return llm