Luciferalive committed on
Commit
55216cd
1 Parent(s): 3a8c659

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +153 -0
app.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import io
3
+ import re
4
+ import numpy as np
5
+ import pytesseract
6
+ from PIL import Image
7
+ from typing import List
8
+ from sentence_transformers import SentenceTransformer
9
+ from langchain_community.vectorstores import Chroma
10
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
11
+ from langchain_community.embeddings import SentenceTransformerEmbeddings
12
+ from groq import Groq
13
+ import gradio as gr
14
+ import requests
15
+
16
# Ensure the Tesseract OCR path is set correctly
pytesseract.pytesseract.tesseract_cmd = r'/usr/bin/tesseract'

# Read the Groq API key from the environment instead of hard-coding it.
# NOTE(review): the original commit embedded a literal API key in source —
# that key is compromised and must be rotated on the Groq dashboard.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
20
+
21
def preprocess_text(text):
    """Normalize raw text: lowercase ASCII words separated by single spaces.

    Newlines become spaces, non-ASCII runs become a single space, the text is
    lowercased, punctuation is stripped, and whitespace is squeezed.
    Returns "" if normalization fails for any reason.
    """
    try:
        flattened = text.replace('\n', ' ').replace('\r', ' ')
        ascii_only = re.sub(r'[^\x00-\x7F]+', ' ', flattened)
        no_punct = re.sub(r'[^\w\s]', '', ascii_only.lower())
        return re.sub(r'\s+', ' ', no_punct).strip()
    except Exception as exc:
        print("Failed to preprocess text:", exc)
        return ""
32
+
33
def fetch_text_file_from_huggingface_space():
    """Download the knowledge-base text file from the Hugging Face Space.

    Returns the file's text content, or "" on any network/HTTP failure.
    """
    # Use the /raw/ endpoint: the /blob/ URL serves the HTML file-viewer
    # page, not the file's plain-text contents.
    url = "https://huggingface.co/spaces/Luciferalive/goosev9/raw/main/extracted_text.txt"
    try:
        # A timeout prevents the app from hanging forever on a stalled request.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        print("Successfully downloaded the text file")
        return response.text
    except Exception as e:
        print(f"Failed to download the text file: {e}")
        return ""
44
+
45
def create_vector_store(text_content):
    """Split *text_content* into overlapping chunks and index them in Chroma.

    Chunks are 1000 characters with 200-character overlap, embedded with
    all-mpnet-base-v2. Returns the Chroma store, or None if no chunks
    could be produced.
    """
    embedder = SentenceTransformerEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_text(text_content)
    if not chunks:
        print("No text chunks created.")
        return None

    store = Chroma.from_texts(chunks, embedder, collection_name="insurance_cosine")
    print("Vector DB Successfully Created!")
    return store
56
+
57
def load_vector_store():
    """Open the existing "insurance_cosine" Chroma collection.

    Returns the Chroma handle, or None if it cannot be opened.
    """
    embedder = SentenceTransformerEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    try:
        store = Chroma(embedding_function=embedder, collection_name="insurance_cosine")
        print("Vector DB Successfully Loaded!")
        return store
    except Exception as exc:
        print("Failed to load Vector DB:", exc)
        return None
66
+
67
def answer_query(query):
    """Retrieve relevant chunks for *query* and ask the Groq LLM to answer.

    Performs a similarity search against the Chroma store, injects the
    matched chunk texts into the system prompt, and calls the
    llama3-8b-8192 model. Returns the stripped answer string, or None
    when the store is unavailable, nothing matches, or the call raises.
    """
    try:
        store = load_vector_store()
        if not store:
            return None

        matches = store.similarity_search(query)
        print(f"\n\nDocuments retrieved: {len(matches)}")

        if not matches:
            print("No documents match the query.")
            return None

        # Join all retrieved chunk texts into a single context string.
        context = " ".join(doc.page_content for doc in matches)

        client = Groq(api_key=GROQ_API_KEY)
        template = """
        ### [INST] Instruction:
        You are an AI assistant named Goose. Your purpose is to provide accurate, relevant, and helpful information to users in a friendly, warm, and supportive manner, similar to ChatGPT. When responding to queries, please keep the following guidelines in mind:
        - When someone says hi, or small talk, only respond in a sentence.
        - Retrieve relevant information from your knowledge base to formulate accurate and informative responses.
        - Always maintain a positive, friendly, and encouraging tone in your interactions with users.
        - Strictly write crisp and clear answers, don't write unnecessary stuff.
        - Only answer the asked question, don't hallucinate or print any pre-information.
        - After providing the answer, always ask for any other help needed in the next paragraph.
        - Writing in bullet format is our top preference.
        Remember, your goal is to be a reliable, friendly, and supportive AI assistant that provides accurate information while creating a positive user experience, just like ChatGPT. Adapt your communication style to best suit each user's needs and preferences.
        ### Docs: {docs}
        ### Question: {question}
        """

        # The context and question go in the system message; the raw query
        # is also sent as the user turn.
        completion = client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": template.format(docs=context, question=query),
                },
                {
                    "role": "user",
                    "content": query,
                },
            ],
            model="llama3-8b-8192",
        )

        return completion.choices[0].message.content.strip()
    except Exception as exc:
        print("An error occurred while getting the answer: ", str(exc))
        return None
118
+
119
def process_query(query):
    """Run *query* through answer_query and format the outcome for the UI.

    Returns "Answer: ..." on success, "No answer found." when answer_query
    yields nothing, or an error message if an exception escapes.
    """
    try:
        answer = answer_query(query)
        return "Answer: " + answer if answer else "No answer found."
    except Exception as exc:
        print("An error occurred while getting the answer: ", str(exc))
        return "An error occurred: " + str(exc)
129
+
130
+
131
# Set up the Gradio interface
def launch_assistant():
    """Fetch the knowledge base, build the vector index, and start the Gradio UI.

    Aborts early (with a console message) if the text download or the
    vector-store build fails.
    """
    content = fetch_text_file_from_huggingface_space()
    if not content.strip():
        print("No text content fetched.")
        return

    if not create_vector_store(content):
        print("Failed to create Vector DB.")
        return

    ui = gr.Interface(
        fn=process_query,
        inputs=gr.Textbox(lines=7, label="Enter your question"),
        outputs="text",
        title="Goose AI Assistant",
        description="Ask a question and get an answer from the AI assistant.",
    )
    ui.launch()

# Launched at import time: Hugging Face Spaces executes app.py directly.
launch_assistant()