|
import pandas as pd |
|
import streamlit as st |
|
import matplotlib.pyplot as plt |
|
import numpy as np |
|
from second import double_main |
|
from multiple import multiple_main |
|
from multiple import display_story_points_stats |
|
from jira_integration import render_jira_login, JIRA_SERVER |
|
from weekly import generate_weekly_report |
|
from pre import preprocess_uploaded_file, add_app_description |
|
from multi_env_compare import multi_env_compare_main |
|
import multiple_env_loader |
|
|
|
|
|
|
|
def _group_scenarios_by_area(filtered_scenarios, selected_status):
    """Group scenarios by 'Functional area' with status-appropriate columns.

    For 'Failed' the table includes the error details (and 'Failed Step'
    when that column exists in the data); for 'Passed' only the name and
    duration are shown.

    Args:
        filtered_scenarios: DataFrame of scenarios already filtered by
            status and functional area.
        selected_status: 'Failed' or 'Passed' (value of the status radio).

    Returns:
        DataFrame produced by groupby/apply, indexed by functional area.
    """
    if selected_status == 'Failed':
        columns = ['Scenario Name', 'Error Message', 'Time spent(m:s)']
        # 'Failed Step' is optional in the source file; include it only when present.
        if 'Failed Step' in filtered_scenarios.columns:
            columns.insert(2, 'Failed Step')
    else:
        columns = ['Scenario Name', 'Time spent(m:s)']
    return filtered_scenarios.groupby('Functional area')[columns].apply(
        lambda x: x.reset_index(drop=True))


def _plot_area_counts(error_counts, selected_status):
    """Render a bar chart of per-functional-area scenario counts.

    Args:
        error_counts: Series from value_counts() — area name -> count.
        selected_status: status label used in the chart title.
    """
    # Use an explicit figure instead of pyplot's implicit global one:
    # st.pyplot(plt) is deprecated, and unclosed figures accumulate
    # across Streamlit reruns.
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.bar(error_counts.index, error_counts.values)
    ax.set_xlabel('Functional Area')
    ax.set_ylabel('Number of Failures')
    ax.set_title(f"Number of '{selected_status}' scenarios by Functional Area")
    plt.setp(ax.get_xticklabels(), rotation=45, ha='right')

    # Integer y-axis with a little headroom above the tallest bar.
    y_max = int(max(error_counts.values)) + 1
    ax.set_ylim(0, y_max)
    ax.set_yticks(range(0, y_max, 1))

    # Annotate each bar with its exact count.
    for i, count in enumerate(error_counts.values):
        ax.text(i, count, str(count), ha='center', va='bottom')

    fig.tight_layout()
    st.pyplot(fig)
    plt.close(fig)  # free the figure so repeated reruns don't leak memory


def single_main(uploaded_file):
    """Render the single-file report view.

    Preprocesses the uploaded report, shows pass/fail counts, lets the user
    pick a status and functional areas, then renders the grouped scenario
    table, per-area average durations, and (for failures) a bar chart.

    Args:
        uploaded_file: Streamlit UploadedFile, or None when nothing has been
            uploaded yet (in which case nothing is rendered).
    """
    if uploaded_file is None:
        return

    data = preprocess_uploaded_file(uploaded_file)

    # Quick overview of the parsed data.
    st.write("Data shape:", data.shape)
    st.write("Unique functional areas:", data['Functional area'].nunique())
    st.write("Sample of data:", data.head())

    failed_scenarios = data[data['Status'] == 'FAILED']
    passed_scenarios = data[data['Status'] == 'PASSED']

    st.markdown(f"Failing scenarios Count: {len(failed_scenarios)}")
    st.markdown(f"Passing scenarios Count: {len(passed_scenarios)}")

    selected_status = st.radio("Select a status", ['Failed', 'Passed'])

    # The radio can only yield these two values; the mapping replaces the
    # original if/elif/else whose else-branch left `unique_areas` unbound.
    status_to_scenarios = {'Failed': failed_scenarios, 'Passed': passed_scenarios}
    selected_scenarios = status_to_scenarios.get(selected_status)

    if selected_scenarios is None:
        st.write("### No scenarios with status 'failed' found.")
        return

    unique_areas = np.append(selected_scenarios['Functional area'].unique(), "All")

    st.markdown(f"### Scenarios with status '{selected_status}' grouped by functional area:")

    selected_functional_areas = st.multiselect("Select functional areas", unique_areas, ["All"])

    if "All" in selected_functional_areas:
        filtered_scenarios = selected_scenarios
    else:
        filtered_scenarios = selected_scenarios[selected_scenarios['Functional area'].isin(selected_functional_areas)]

    if not selected_functional_areas:
        st.error("Please select at least one functional area.")
        return

    st.write(f"Number of filtered scenarios: {len(filtered_scenarios)}")

    # Per-area mean duration, formatted MM:SS for display.
    average_time_spent_seconds = filtered_scenarios.groupby('Functional area')['Time spent'].mean().reset_index()
    average_time_spent_seconds['Time spent'] = pd.to_datetime(
        average_time_spent_seconds['Time spent'], unit='s').dt.strftime('%M:%S')

    # Earliest start per area, used below to order the summary chronologically.
    start_datetime_group = filtered_scenarios.groupby('Functional area')['Start datetime'].min().reset_index()
    average_time_spent_seconds = average_time_spent_seconds.merge(start_datetime_group, on='Functional area')

    grouped_filtered_scenarios = _group_scenarios_by_area(filtered_scenarios, selected_status)
    grouped_filtered_scenarios.reset_index(inplace=True)

    # groupby/apply can leave an artificial 'level_1' index column; drop it.
    if 'level_1' in grouped_filtered_scenarios.columns:
        grouped_filtered_scenarios.drop(columns=['level_1'], inplace=True)

    grouped_filtered_scenarios.index = grouped_filtered_scenarios.index + 1  # 1-based display index
    st.dataframe(grouped_filtered_scenarios)

    average_time_spent_seconds = average_time_spent_seconds.sort_values(by='Start datetime')

    st.markdown("### Average Time Spent on Each Functional Area")
    average_time_spent_seconds.index = average_time_spent_seconds.index + 1
    st.dataframe(average_time_spent_seconds)

    # Chart only failures, and skip the chart for very large result sets.
    if selected_status != 'Passed' and len(grouped_filtered_scenarios) <= 400:
        st.write(f"### Bar graph showing number of '{selected_status}' scenarios in each functional area:")
        error_counts = grouped_filtered_scenarios['Functional area'].value_counts()
        if not error_counts.empty:
            _plot_area_counts(error_counts, selected_status)
        else:
            st.info(f"No '{selected_status}' scenarios found to display in the graph.")
|
|
|
|
|
|
|
def _compare_mode():
    """Render Compare mode: diff two uploaded report files via double_main.

    Offers either a single multi-file uploader or two labelled uploaders
    (older vs. newer file). Runs the comparison once both files are present.
    """
    st.sidebar.markdown("### Upload Files for Comparison")

    upload_option = st.sidebar.radio("Upload method", ["Single uploader", "Two separate uploaders"], key="compare_upload_method")

    if upload_option == "Single uploader":
        uploaded_files = st.sidebar.file_uploader("Upload CSV or XLSX files for comparison", type=["csv", "xlsx"], accept_multiple_files=True)
        if uploaded_files:
            if len(uploaded_files) < 2:
                st.warning("Please upload at least two files for comparison.")
            else:
                # When more than two files arrive, warn and — as the message
                # promises — compare the first two (the original warned but
                # then skipped the comparison entirely).
                if len(uploaded_files) > 2:
                    st.warning("More than two files uploaded. Only the first two will be used for comparison.")
                with st.spinner('Processing...'):
                    double_main(uploaded_files[0], uploaded_files[1])
                st.success('Comparison Complete!')
    else:
        col1, col2 = st.sidebar.columns(2)
        with col1:
            uploaded_file1 = st.file_uploader("Upload older CSV/XLSX file", type=["csv", "xlsx"], key="file1")
        with col2:
            uploaded_file2 = st.file_uploader("Upload newer CSV/XLSX file", type=["csv", "xlsx"], key="file2")

        if uploaded_file1 is not None and uploaded_file2 is not None:
            with st.spinner('Processing...'):
                double_main(uploaded_file1, uploaded_file2)
            st.success('Comparison Complete!')
        elif uploaded_file1 is not None or uploaded_file2 is not None:
            st.warning("Please upload both files for comparison.")


def main():
    """App entry point: init session state, render the sidebar, dispatch modes.

    Modes: Multi, Compare, Weekly, Multi-Env Compare, Auto Environment Loader.
    Streamlit reruns this function on every interaction, so all defaults are
    guarded by presence checks in session_state.
    """
    add_app_description()

    # One-time session-state defaults (replaces seven copy-pasted guards).
    session_defaults = {
        'jira_server': JIRA_SERVER,
        'is_authenticated': False,
        'jira_client': None,
        'sprint_data_initialized': False,
        'force_sprint_refresh': False,
        'sprint_data_cache': None,
        'last_sprint_fetch': None,
        'mode': 'multi',
        'selected_mode': 'Multi',
    }
    for key, value in session_defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value

    mode_options = ["Multi", "Compare", "Weekly", "Multi-Env Compare", "Auto Environment Loader"]

    with st.sidebar:
        selected_mode = st.selectbox(
            "Select Mode",
            mode_options,
            index=mode_options.index(st.session_state.get("selected_mode", "Multi"))
        )

        # Persist the selection; 'mode' is the lowercase dispatch key.
        st.session_state["selected_mode"] = selected_mode
        st.session_state["mode"] = selected_mode.lower()
        st.markdown(f'## Current mode: {st.session_state["mode"].title()} mode')

        st.markdown("---")

        with st.expander("Jira Integration (Optional)", expanded=True):
            st.session_state.is_authenticated = render_jira_login()

        if st.session_state.is_authenticated and st.session_state.jira_client:
            st.markdown("---")
            with st.expander("Sprint Progress", expanded=True):
                if st.button("π Refresh Sprint Data", key="refresh_sprint_sidebar_app"):
                    st.session_state.force_sprint_refresh = True
                display_story_points_stats(force_refresh=st.session_state.force_sprint_refresh)
                # Reset so the next rerun doesn't force a refetch again.
                st.session_state.force_sprint_refresh = False

    # Dispatch on the lowercase mode key set above. (The original also
    # re-initialized 'selected_mode' here, but it is always set by the
    # sidebar before this point, so that guard was dead code.)
    mode = st.session_state["mode"]
    if mode == "multi":
        multiple_main()
    elif mode == "compare":
        _compare_mode()
    elif mode == "weekly":
        uploaded_files = st.sidebar.file_uploader("Upload CSV or XLSX files for Weekly Report", type=["csv", "xlsx"], accept_multiple_files=True)
        if uploaded_files:
            generate_weekly_report(uploaded_files)
    elif mode == "multi-env compare":
        multi_env_compare_main()
    elif mode == "auto environment loader":
        multiple_env_loader.main()
|
|
|
# Script entry point (run via `streamlit run <this file>` or `python <this file>`).
if __name__ == "__main__":

    main()