import streamlit as st
import time
import pandas as pd
from transformers import pipeline
import plotly.express as px

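# Page header and attribution link.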
st.subheader("AI CSV and XLSX Data Analyzer", divider="blue")
st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")

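# Expandable panel with usage notes for the app.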
expander = st.expander("**Important notes on the AI CSV and XLSX Data Analyzer**")
expander.write(
    """
- **Supported File Formats:** This app accepts files in .csv and .xlsx formats.
- **How to Use:** Upload your file first. Select two different columns from your data to visualize in a Tree Map. Then type your question into the text area provided and click the 'Retrieve your answer' button.
- **Tree Map:** Your uploaded data is presented in an interactive Tree Map for visual exploration. Click on any area within the map to access specific data insights.
- **Usage Limits:** You can ask up to 5 questions.
- **Subscription Management:** This app offers a one-day free trial, followed by a one-day subscription that expires after 24 hours. If you are interested in building your own AI CSV and XLSX Data Analyzer, we invite you to explore our NLP Web App Store on our website. You can select your desired features, place your order, and we will deliver your custom app in five business days. If you wish to delete your account with us, please contact us at info@nlpblogs.com
- **Customization:** To change the app's background color to white or black, click the three-dot menu on the right-hand side of the app, go to Settings and then Choose app theme, colors and fonts.
- **File Handling and Errors:** (a) The app may provide an inaccurate answer if the information is missing from the relevant cell. (b) The app may display an error message if your file has errors, date values or float numbers (e.g., 0.5, 1.2, 4.5).

For any errors or inquiries, please contact us at info@nlpblogs.com
    """
)

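# Sidebar: a short description of question answering and links to related NLP web apps.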
with st.sidebar:
    container = st.container(border=True)
    container.write(
        "**Question-Answering (QA)** is the task of retrieving the answer to a question from a given text (knowledge base), which is used as context."
    )
    st.subheader("Related NLP Web Apps", divider="blue")
    st.link_button(
        "AI Google Sheet Data Analyzer",
        "https://nlpblogs.com/shop/table-question-answering-qa/google-sheet-qa-demo-app/",
        type="primary",
    )

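# Track how many questions have been asked across Streamlit reruns and cap them.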
if "question_attempts" not in st.session_state:
    st.session_state["question_attempts"] = 0
max_attempts = 5

upload_file = st.file_uploader(
    "Upload your file. Accepted file formats include: .csv, .xlsx", type=["csv", "xlsx"]
)

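# Read and validate the uploaded file, then render the Tree Map and a preview of the data.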
if upload_file is not None:
    file_extension = upload_file.name.split(".")[-1].lower()
    try:
        if file_extension == "csv":
            df_original = pd.read_csv(upload_file, na_filter=False)
        elif file_extension == "xlsx":
            df_original = pd.read_excel(upload_file, na_filter=False)
        else:
            st.warning("Unsupported file type.")
            st.stop()

        # na_filter=False reads blank cells as empty strings rather than NaN,
        # so check for both when detecting missing values.
        if df_original.isnull().values.any() or df_original.eq("").values.any():
            st.error("Error: The file contains missing values.")
            st.stop()
        else:
            st.session_state.df_original = df_original

            all_columns = df_original.columns.tolist()
            st.divider()
            st.write(
                "Select two different columns from your data to visualize in a **Tree Map**. "
            )

            parent_column = st.selectbox("Select the parent column:", all_columns)
            # Default the value column to the second column (when available) so the
            # two selections do not start out identical.
            value_column = st.selectbox(
                "Select the value column:",
                all_columns,
                index=1 if len(all_columns) > 1 else 0,
            )
            if parent_column and value_column:
                if parent_column == value_column:
                    st.warning(
                        "Warning: You have selected the same column for both the parent and value column. Please select two different columns from your data."
                    )
                else:
                    df_treemap = df_original.copy()

                    path_columns = [px.Constant("all"), parent_column, value_column]
                    fig = px.treemap(df_treemap, path=path_columns)
                    fig.update_layout(margin=dict(t=50, l=25, r=25, b=25))
                    st.subheader("Tree Map", divider="blue")
                    st.plotly_chart(fig)

            st.subheader("Uploaded File", divider="blue")
            st.dataframe(df_original, key="uploaded_dataframe")
            st.write(f"_Number of rows_: {df_original.shape[0]}")
            st.write(f"_Number of columns_: {df_original.shape[1]}")

    except pd.errors.ParserError:
        st.error("Error: The CSV file is not readable or is incorrectly formatted.")
        st.stop()
    except UnicodeDecodeError:
        st.error("Error: The CSV file could not be decoded.")
        st.stop()
    except ValueError:
        st.error("Error: The Excel file is not readable or is incorrectly formatted.")
        st.stop()
    except Exception as e:
        st.error(f"An unexpected error occurred: {e}")
        st.stop()

st.divider()


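# Callback that clears the question text input.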
def clear_question():
    st.session_state["question"] = ""


question = st.text_input(
    "Type your question here and then press **Retrieve your answer**:", key="question"
)
st.button("Clear question", on_click=clear_question)

# --- Sampling Implementation ---
SAMPLE_SIZE = 500  # Define the number of rows to sample
if "df_original" in st.session_state:
    df_for_qa = st.session_state.df_original
    if df_for_qa.shape[0] > SAMPLE_SIZE:
        st.warning(f"The uploaded file has {df_for_qa.shape[0]} rows. For faster processing and to avoid memory issues, a sample of {SAMPLE_SIZE} rows will be used for question answering.")
        df_for_qa = df_for_qa.sample(n=SAMPLE_SIZE, random_state=42) # Set random_state for reproducibility
    else:
        st.info("The file is within the row limit; the entire dataset will be used for question answering.")
else:
    df_for_qa = None
# --- End of Sampling Implementation ---

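# Loading the TAPEX table-QA pipeline is expensive, so cache a single instance
# with st.cache_resource and reuse it across Streamlit reruns instead of
# re-initialising the model for every question.
@st.cache_resource
def load_tqa_pipeline():
    return pipeline(
        task="table-question-answering",
        model="microsoft/tapex-large-finetuned-wtq",
    )

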
if st.button("Retrieve your answer"):
    if st.session_state["question_attempts"] >= max_attempts:
        st.error(f"You have asked {max_attempts} questions. Maximum question attempts reached.")
        st.stop()
    st.session_state["question_attempts"] += 1

    with st.spinner("Wait for it...", show_time=True):
        time.sleep(2)  # Brief pause so the spinner is visible before inference starts

        if df_for_qa is not None:
            try:
                tqa = load_tqa_pipeline()
                answer = tqa(table=df_for_qa, query=question)["answer"]
                st.write(answer)
            except Exception as e:
                st.error(f"An error occurred during question answering: {e}")
        else:
            st.warning("Please upload a file first.")

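# Footer: show how many of the allowed questions have been used.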
st.divider()
st.write(
    f"Number of questions asked: {st.session_state['question_attempts']}/{max_attempts}"
)