ThisIs-Developer committed on
Commit
8a555dd
β€’
1 Parent(s): f7a8aad

Delete app.py

Files changed (1)
  1. app.py +0 -81
app.py DELETED
@@ -1,81 +0,0 @@
- import streamlit as st
- import os
- from langchain.document_loaders.csv_loader import CSVLoader
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- from langchain.embeddings import HuggingFaceEmbeddings
- from langchain.vectorstores import FAISS
- from langchain.llms import CTransformers
- from langchain.chains import ConversationalRetrievalChain
-
- def add_vertical_space(spaces=1):
-     for _ in range(spaces):
-         st.sidebar.markdown("---")
-
- def main():
-     st.set_page_config(page_title="Llama-2-GGML CSV Chatbot", layout="wide")
-     st.title("Llama-2-GGML CSV Chatbot")
-
-     st.sidebar.title("About")
-     st.sidebar.markdown('''
-     The Llama-2-GGML CSV Chatbot uses the **Llama-2-7B-Chat-GGML** model.
-
-     ### 🔄Bot evolving, stay tuned!
-
-     ## Useful Links 🔗
-
-     - **Model:** [Llama-2-7B-Chat-GGML](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main) 📚
-     - **GitHub:** [ThisIs-Developer/Llama-2-GGML-CSV-Chatbot](https://github.com/ThisIs-Developer/Llama-2-GGML-CSV-Chatbot) 💬
-     ''')
-
-     DB_FAISS_PATH = "vectorstore/db_faiss"
-     TEMP_DIR = "temp"
-
-     if not os.path.exists(TEMP_DIR):
-         os.makedirs(TEMP_DIR)
-
-     uploaded_file = st.sidebar.file_uploader("Upload CSV file", type=['csv'], help="Upload a CSV file")
-
-     add_vertical_space(1)
-     st.sidebar.markdown('Made by [@ThisIs-Developer](https://huggingface.co/ThisIs-Developer)')
-
-     if uploaded_file is not None:
-         file_path = os.path.join(TEMP_DIR, uploaded_file.name)
-         with open(file_path, "wb") as f:
-             f.write(uploaded_file.getvalue())
-
-         st.write(f"Uploaded file: {uploaded_file.name}")
-         st.write("Processing CSV file...")
-
-         loader = CSVLoader(file_path=file_path, encoding="utf-8", csv_args={'delimiter': ','})
-         data = loader.load()
-
-         text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
-         text_chunks = text_splitter.split_documents(data)
-
-         st.write(f"Total text chunks: {len(text_chunks)}")
-
-         embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
-         docsearch = FAISS.from_documents(text_chunks, embeddings)
-         docsearch.save_local(DB_FAISS_PATH)
-
-         llm = CTransformers(model="models/llama-2-7b-chat.ggmlv3.q4_0.bin",
-                             model_type="llama",
-                             max_new_tokens=512,
-                             temperature=0.1)
-
-         qa = ConversationalRetrievalChain.from_llm(llm, retriever=docsearch.as_retriever())
-
-         st.write("### Enter your query:")
-         query = st.text_input("Input Prompt:")
-         if query:
-             with st.spinner("Processing your question..."):
-                 chat_history = []
-                 result = qa({"question": query, "chat_history": chat_history})
-                 st.write("---")
-                 st.write("### Response:")
-                 st.write(f"> {result['answer']}")
-
-         os.remove(file_path)
-
- if __name__ == "__main__":
-     main()
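
The deleted script saves its FAISS index to vectorstore/db_faiss with save_local but rebuilds it from the uploaded CSV on every run and never reads it back. Below is a minimal sketch of reloading that saved index and querying it outside Streamlit, assuming the same legacy langchain and ctransformers releases these imports come from; the model path mirrors the one hard-coded above, and the question string is illustrative.

# Minimal sketch: reload the FAISS index that app.py saved and query it once.
# Assumes the legacy langchain APIs imported above and a local GGML model file.
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.chains import ConversationalRetrievalChain

DB_FAISS_PATH = "vectorstore/db_faiss"  # folder app.py writes with save_local
MODEL_PATH = "models/llama-2-7b-chat.ggmlv3.q4_0.bin"  # same model file app.py expects

# The embedding model must match the one used when the index was built.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
docsearch = FAISS.load_local(DB_FAISS_PATH, embeddings)

llm = CTransformers(model=MODEL_PATH, model_type="llama",
                    max_new_tokens=512, temperature=0.1)
qa = ConversationalRetrievalChain.from_llm(llm, retriever=docsearch.as_retriever())

# Single-turn query; chat_history would carry prior (question, answer) pairs.
result = qa({"question": "Summarize the uploaded CSV.", "chat_history": []})
print(result["answer"])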