import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from utils.convert_embedding import GetEmbedding
import random
import pickle
import os
from utils.rag import RAG
from faster_whisper import WhisperModel


def process(user_query: str):
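    """Answer a query by cosine-similarity lookup against precomputed
    embeddings; fall back to the RAG pipeline when no close match is found."""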
    # dump_user_question(user_query)
    user_embedding = GetEmbedding([user_query]).user_query_emb()
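    # Load the precomputed embeddings and their corresponding answers from
    # the pickle files.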
# with open(r"all_mix_embedding.pkl","rb") as f: | |
# load_embedding = pickle.load(f) | |
# with open(r"all_answers.pkl","rb") as f: | |
# ans = pickle.load(f) | |
# similarity_scores = cosine_similarity(user_embedding, load_embedding) | |
# index = np.argmax(similarity_scores) | |
# answer = ans[index] | |
# score = similarity_scores[0,index] | |
# print(f"Index : {index}:\tscore:{score} \tquery: {user_query}") | |
    if float(score) > 0.60:
        final_output = random.choice(answer)
    else:
        final_output = RAG().pipeline(query=user_query)
    return final_output


def audio_process(audio):
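    """Transcribe an audio file with faster-whisper and route the resulting
    text through process()."""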
    try:
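        # "medium.en" is an English-only Whisper model; transcribe() yields
        # segments lazily along with transcription metadata.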
        model = WhisperModel(model_size_or_path="medium.en")
        segments, info = model.transcribe(audio)
        transcription = " ".join(seg.text.strip() for seg in segments)
        result = process(user_query=transcription)
        return result
    except Exception as e:
        print("Error:", e)
        return str(e)


if __name__ == "__main__":
    res = audio_process(r"C:\Users\lalit\Documents\Sound recordings\who_is_lalit.m4a")
    print(res)
    # for _ in range(3):
    #     user = input("How can I help you? \n")
    #     result = process(user)
    #     print(result)
    # with open(r"data\question_data.pkl", "rb") as f:
    #     que = pickle.load(f)
    #     print(que)