#%%
import os
from dotenv import load_dotenv
load_dotenv('../../.env')
from langchain_huggingface import HuggingFaceEndpoint
from langchain_core.runnables import RunnablePassthrough
import schemas
# The *_formatted prompts are used by the chains below; they are expected to
# be defined in prompts.py alongside raw_prompt.
from prompts import (
    raw_prompt,
    raw_prompt_formatted,
    history_prompt_formatted,
    standalone_prompt_formatted,
    rag_prompt_formatted,
    format_context,
    tokenizer,
)
from data_indexing import DataIndexer
llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
    huggingfacehub_api_token=os.environ['HF_TOKEN'],
    max_new_tokens=512,
    stop_sequences=[tokenizer.eos_token],
    streaming=True,
)
simple_chain = (raw_prompt | llm).with_types(input_type=schemas.UserQuestion)
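
# %%
# Quick smoke test for simple_chain (a sketch: the input key below is an
# assumption about schemas.UserQuestion, which is defined elsewhere).
# for chunk in simple_chain.stream({'question': 'What is LangChain?'}):
#     print(chunk, end='', flush=True)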
# %%
data_indexer = DataIndexer()

# formatted_chain: the chat-template-formatted prompt piped into the LLM.
formatted_chain = (raw_prompt_formatted | llm).with_types(input_type=schemas.UserQuestion)

# history_chain: answers the latest question given the conversation history.
history_chain = (history_prompt_formatted | llm).with_types(input_type=schemas.HistoryInput)

# standalone_chain: rewrites the latest question into a standalone question
# that no longer depends on the chat history.
standalone_chain = (standalone_prompt_formatted | llm).with_types(input_type=schemas.HistoryInput)

input_1 = RunnablePassthrough.assign(new_question=standalone_chain)
input_2 = {
    'context': lambda x: format_context(data_indexer.search(x['new_question'])),
    'standalone_question': lambda x: x['new_question']
}
input_to_rag_chain = input_1 | input_2

# rag_chain: condense the question, retrieve context, then answer.
rag_chain = (input_to_rag_chain | rag_prompt_formatted | llm).with_types(input_type=schemas.HistoryInput)

# filtered_rag_chain: same as rag_chain but with hybrid_search=True, which is
# assumed here to be a keyword argument of DataIndexer.search.
filtered_input = RunnablePassthrough.assign(new_question=standalone_chain) | {
    'context': lambda x: format_context(
        data_indexer.search(x['new_question'], hybrid_search=True)
    ),
    'standalone_question': lambda x: x['new_question']
}
filtered_rag_chain = (filtered_input | rag_prompt_formatted | llm).with_types(input_type=schemas.HistoryInput)
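
# %%
# Example invocation of rag_chain (a sketch: the field names are assumptions
# about schemas.HistoryInput, which is defined elsewhere).
# rag_chain.invoke({
#     'question': 'Can you remind me which model we picked?',
#     'chat_history': 'User: Which LLM are we using?\nAssistant: Meta-Llama-3-8B-Instruct.',
# })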