import json
import re
from datetime import datetime
from threading import Thread

import faiss
import gradio as gr
import numpy as np
import torch
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TextIteratorStreamer,
)


class DocumentRetrievalAndGeneration:
    def __init__(self, embedding_model_name, lm_model_id, data_folder):
        self.all_splits = self.load_documents(data_folder)
        self.embeddings = SentenceTransformer(embedding_model_name)
        self.gpu_index = self.create_faiss_index()
        self.tokenizer, self.model = self.initialize_llm(lm_model_id)

    def load_documents(self, folder_path):
        """Load every text file in the folder and split it into overlapping chunks."""
        loader = DirectoryLoader(folder_path, loader_cls=TextLoader)
        documents = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=250)
        all_splits = text_splitter.split_documents(documents)
        print("Number of documents:", len(documents))
        print("Number of splits:", len(all_splits))
        for split in all_splits[:3]:  # preview the first few chunks
            print(split.page_content)
        return all_splits

    def create_faiss_index(self):
        """Embed every chunk and build a GPU-backed FAISS L2 index."""
        all_texts = [split.page_content for split in self.all_splits]
        embeddings = self.embeddings.encode(all_texts, convert_to_tensor=True).cpu().numpy()
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings)
        gpu_resource = faiss.StandardGpuResources()
        gpu_index = faiss.index_cpu_to_gpu(gpu_resource, 0, index)
        return gpu_index

    def initialize_llm(self, model_id):
        """Load the tokenizer and a 4-bit-quantized causal LM."""
        quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            quantization_config=quantization_config,
        )
        return tokenizer, model

    def generate_response_with_timeout(self, input_ids, max_new_tokens=1000):
        """Run generation on a background thread and collect streamed tokens,
        with a 60-second streamer timeout."""
        try:
            streamer = TextIteratorStreamer(
                self.tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True
            )
            generate_kwargs = dict(
                input_ids=input_ids,
                max_new_tokens=max_new_tokens,
                do_sample=True,
                top_p=1.0,
                top_k=20,
                temperature=0.8,
                repetition_penalty=1.2,
                eos_token_id=[128001, 128008, 128009],  # Llama 3.1 end-of-turn tokens
                streamer=streamer,
            )
            thread = Thread(target=self.model.generate, kwargs=generate_kwargs)
            thread.start()
            generated_text = ""
            for new_text in streamer:
                generated_text += new_text
            return generated_text
        except Exception as e:
            print(f"Error in generate_response_with_timeout: {str(e)}")
            return "Text generation process encountered an error"

    def query_and_generate_response(self, query):
        similarity_threshold = 1.0  # maximum L2 distance for a chunk to count as relevant
        query_embedding = self.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
        distances, indices = self.gpu_index.search(np.array([query_embedding]), k=3)
        print("Distances:", distances, "indices:", indices)

        # Keep only the retrieved chunks that fall within the distance threshold
        filtered_results = [
            idx for idx, distance in zip(indices[0], distances[0])
            if distance <= similarity_threshold
        ]

        content = ""
        for idx in filtered_results:
            content += "-" * 50 + "\n"
            content += self.all_splits[idx].page_content + "\n"
            print("CHUNK", idx)
            print(self.all_splits[idx].page_content)
            print("############################")

        conversation = [
            {"role": "system", "content": "You are a knowledgeable assistant with access to a comprehensive database."},
            {"role": "user", "content": f"""
I need you to answer my question and provide related information in a specific format.
I have provided the retrieved document chunks below; choose the most suitable chunks for answering the query.
{content}
RETURN ONLY THE SOLUTION, without additional comments, sign-offs, retrieved chunks, references to any ticket, or extra phrases. Be direct and to the point.
IF THERE IS NO RELATABLE ANSWER IN THE RETRIEVED CHUNKS, RETURN "NO SOLUTION AVAILABLE".
Here's my question:
Query: {query}
Solution==>
"""},
        ]
        input_ids = self.tokenizer.apply_chat_template(conversation, return_tensors="pt").to(self.model.device)

        start_time = datetime.now()
        generated_response = self.generate_response_with_timeout(input_ids)
        elapsed_time = datetime.now() - start_time
        print("Generated response:", generated_response)
        print("Time elapsed:", elapsed_time)
        print("Device in use:", self.model.device)

        solution_text = generated_response.strip()
        if "Solution:" in solution_text:
            solution_text = solution_text.split("Solution:", 1)[1].strip()
        # Post-processing to remove a leading "assistant" prefix
        solution_text = re.sub(r'^assistant\s*', '', solution_text, flags=re.IGNORECASE).strip()
        return solution_text, content

    def qa_infer_gradio(self, query):
        # Returns (solution, retrieved content); Gradio maps the tuple to the two output boxes
        return self.query_and_generate_response(query)


if __name__ == "__main__":
    embedding_model_name = 'flax-sentence-embeddings/all_datasets_v3_MiniLM-L12'
    lm_model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
    data_folder = 'sample_embedding_folder2'
    doc_retrieval_gen = DocumentRetrievalAndGeneration(embedding_model_name, lm_model_id, data_folder)

    def launch_interface():
        css_code = """
            .gradio-container {
                background-color: #daccdb;
            }
            button {
                background-color: #927fc7;
                color: black;
                border: 1px solid black;
                padding: 10px;
                margin-right: 10px;
                font-size: 16px;
                font-weight: bold;
            }
        """
        EXAMPLES = [
            "On which devices can the VIP and CSI2 modules operate simultaneously?",
            "I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for CortexM4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?",
            "Could you clarify the maximum number of cameras that can be connected simultaneously to the video input ports on the TDA2x SoC, considering it supports up to 10 multiplexed input ports and includes 3 dedicated video input modules?",
        ]

        # Sample queries for the dropdown come from a JSON list stored in ticketNames.txt
        file_path = "ticketNames.txt"
        with open(file_path, "r") as file:
            ticket_names = json.loads(file.read())
        dropdown = gr.Dropdown(label="Sample queries", choices=ticket_names)

        tab1 = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[gr.Textbox(label="QUERY", placeholder="Enter your query here")],
            outputs=[gr.Textbox(label="RESPONSE"), gr.Textbox(label="RELATED QUERIES")],
            allow_flagging='never',
            examples=EXAMPLES,
            cache_examples=False,
            css=css_code,
        )
        tab2 = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[dropdown],
            outputs=[gr.Textbox(label="SOLUTION"), gr.Textbox(label="RELATED QUERIES")],
            allow_flagging='never',
            css=css_code,
        )
        gr.TabbedInterface(
            [tab1, tab2],
            ["Textbox Input", "FAQs"],
            title="TI E2E FORUM",
            css=css_code,
        ).launch(debug=True)

    launch_interface()