import streamlit as st
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
import nltk
from nltk.corpus import stopwords
from nltk import FreqDist
import re
import os
import base64
from graphviz import Digraph
from io import BytesIO
import networkx as nx
import matplotlib.pyplot as plt
st.set_page_config(
    page_title="Transcript EDA with NLTK",
    page_icon="📝",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        'Get Help': 'https://huggingface.co/awacke1',
        'Report a bug': "https://huggingface.co/awacke1",
        'About': "https://huggingface.co/awacke1"
    }
)
st.markdown('''
1. **Transcript Insights Using Exploratory Data Analysis (EDA)**: unveil hidden patterns and insights in your transcripts.
2. **Natural Language Toolkit (NLTK)**: your compass in the vast landscape of NLP.
3. **Transcript Analysis**: speech recognition and thematic extraction turn audiovisual content into actionable insights.
''')
# Download required NLTK data on first run
def download_nltk_data():
    try:
        nltk.data.find('tokenizers/punkt')
        nltk.data.find('corpora/stopwords')
    except LookupError:
        with st.spinner('Downloading required NLTK data...'):
            nltk.download('punkt')
            nltk.download('stopwords')
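            # Assumption: newer NLTK releases (3.9+) also look for the 'punkt_tab'
            # resource when tokenizing; fetching it alongside 'punkt' keeps
            # word_tokenize working across versions.
            nltk.download('punkt_tab')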
        st.success('NLTK data is ready!')

download_nltk_data()
# Remove timestamps
def remove_timestamps(text):
    return re.sub(r'\d{1,2}:\d{2}\n.*\n', '', text)
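# The pattern above drops each "M:SS" / "MM:SS" timestamp line together with the
# line that follows it. Hypothetical example:
#   remove_timestamps("0:05\nSpeaker One\nHello there\n")  ->  "Hello there\n"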
# Extract high information words
def extract_high_information_words(text, top_n=10):
    try:
        words = [word.lower() for word in nltk.word_tokenize(text) if word.isalpha()]
        stop_words = set(stopwords.words('english'))
        filtered_words = [word for word in words if word not in stop_words]
        return [word for word, _ in FreqDist(filtered_words).most_common(top_n)]
    except Exception as e:
        st.error(f"Error in extract_high_information_words: {str(e)}")
        return []
# Create relationship graph
def create_relationship_graph(words):
    graph = Digraph()
    for i, word in enumerate(words):
        graph.node(str(i), word)
        if i > 0:
            graph.edge(str(i-1), str(i), label=word)
    return graph
# Display relationship graph
def display_relationship_graph(words):
    try:
        graph = create_relationship_graph(words)
        st.graphviz_chart(graph)
    except Exception as e:
        st.error(f"Error displaying relationship graph: {str(e)}")
# Extract context words (the word before and after each high-information word)
def extract_context_words(text, high_information_words):
    words = nltk.word_tokenize(text)
    return [(words[i-1] if i > 0 else None, word, words[i+1] if i < len(words)-1 else None)
            for i, word in enumerate(words) if word.lower() in high_information_words]
# Create context graph
def create_context_graph(context_words):
    graph = Digraph()
    for i, (before, high, after) in enumerate(context_words):
        if before:
            graph.node(f'before{i}', before, shape='box')
            graph.edge(f'before{i}', f'high{i}', label=before)
        graph.node(f'high{i}', high, shape='ellipse')
        if after:
            graph.node(f'after{i}', after, shape='diamond')
            graph.edge(f'high{i}', f'after{i}', label=after)
    return graph
# Display context graph
def display_context_graph(context_words):
    try:
        graph = create_context_graph(context_words)
        st.graphviz_chart(graph)
    except Exception as e:
        st.error(f"Error displaying context graph: {str(e)}")
# Display context table
def display_context_table(context_words):
    table = "| Before | High Info Word | After |\n|--------|----------------|-------|\n"
    table += "\n".join(f"| {b if b else ''} | {h} | {a if a else ''} |" for b, h, a in context_words)
    st.markdown(table)
# Load example files
def load_example_files():
    excluded_files = {'freeze.txt', 'requirements.txt', 'packages.txt', 'pre-requirements.txt'}
    example_files = [f for f in os.listdir() if f.endswith('.txt') and f not in excluded_files]
    if example_files:
        selected_file = st.selectbox("Select an example file:", example_files)
        if st.button(f"Load {selected_file}"):
            with open(selected_file, 'r', encoding="utf-8") as file:
                return file.read()
    else:
        st.write("No suitable example files found.")
    return None
# Cluster sentences with TF-IDF and KMeans, ordering each cluster by similarity to its centroid
def cluster_sentences(sentences, num_clusters):
    sentences = [s for s in sentences if len(s) > 10]
    num_clusters = min(num_clusters, len(sentences))
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(sentences)
    kmeans = KMeans(n_clusters=num_clusters, random_state=42)
    kmeans.fit(X)
    clustered_sentences = [[] for _ in range(num_clusters)]
    for i, label in enumerate(kmeans.labels_):
        similarity = linear_kernel(kmeans.cluster_centers_[label:label+1], X[i:i+1]).flatten()[0]
        clustered_sentences[label].append((similarity, sentences[i]))
    return [[s for _, s in sorted(cluster, reverse=True)] for cluster in clustered_sentences]
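# Usage sketch (hypothetical input): cluster_sentences(sentence_list, 3) returns three
# lists of sentences, each sorted from most to least similar to its cluster centroid.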
# Get text file download link
def get_text_file_download_link(text_to_download, filename='Output.txt', button_label="Save"):
    b64 = base64.b64encode(text_to_download.encode()).decode()
    return f'<a href="data:file/txt;base64,{b64}" download="{filename}" style="margin-top:20px;">{button_label}</a>'
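# Usage sketch: this helper is not wired into the UI below; to use it, render the
# returned anchor tag with st.markdown(link_html, unsafe_allow_html=True) so the
# browser shows it as a clickable download link.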
# Get high info words per cluster
def get_high_info_words_per_cluster(cluster_sentences, num_words=5):
    return [extract_high_information_words(" ".join(cluster), num_words) for cluster in cluster_sentences]
# Plot a word-similarity graph for each cluster
def plot_cluster_words(cluster_sentences):
    for i, cluster in enumerate(cluster_sentences):
        # Lowercase before matching so capitalized words are not silently dropped
        words = re.findall(r'\b[a-z]{4,}\b', " ".join(cluster).lower())
        word_freq = FreqDist(words)
        top_words = [word for word, _ in word_freq.most_common(20)]
        if not top_words:
            continue  # nothing to plot for this cluster
        vectorizer = TfidfVectorizer()
        X = vectorizer.fit_transform(top_words)
        similarity_matrix = cosine_similarity(X.toarray())
        G = nx.from_numpy_array(similarity_matrix)
        pos = nx.spring_layout(G, k=0.5)
        plt.figure(figsize=(8, 6))
        nx.draw_networkx(G, pos, node_size=500, font_size=12, font_weight='bold', with_labels=True,
                         labels={j: word for j, word in enumerate(top_words)},
                         node_color='skyblue', edge_color='gray')
        plt.axis('off')
        plt.title(f"Cluster {i+1} Word Arrangement")
        st.pyplot(plt)
        plt.close()
        st.markdown(f"**Cluster {i+1} Details:**")
        st.markdown(f"Top Words: {', '.join(top_words)}")
        st.markdown(f"Number of Sentences: {len(cluster)}")
        st.markdown("---")
# Main code for UI
uploaded_file = st.file_uploader("Choose a .txt file", type=['txt'])
example_text = load_example_files()

if example_text:
    file_text = example_text
elif uploaded_file:
    file_text = uploaded_file.read().decode("utf-8")
else:
    file_text = ""
if file_text:
    text_without_timestamps = remove_timestamps(file_text)
    top_words = extract_high_information_words(text_without_timestamps, 10)

    with st.expander("Top 10 High Information Words"):
        st.write(top_words)
    with st.expander("Relationship Graph"):
        display_relationship_graph(top_words)

    context_words = extract_context_words(text_without_timestamps, top_words)
    with st.expander("Context Graph"):
        display_context_graph(context_words)
    with st.expander("Context Table"):
        display_context_table(context_words)

    sentences = [line.strip() for line in file_text.split('\n') if len(line.strip()) > 10]
    num_sentences = len(sentences)
    st.write(f"Total Sentences: {num_sentences}")

    num_clusters = st.slider("Number of Clusters", min_value=2, max_value=10, value=5)
    clustered_sentences = cluster_sentences(sentences, num_clusters)
    col1, col2 = st.columns(2)
    with col1:
        st.subheader("Original Text")
        original_text = "\n".join(sentences)
        st.text_area("Original Sentences", value=original_text, height=400)
    with col2:
        st.subheader("Clustered Text")
        clusters = ""
        clustered_text = ""
        cluster_high_info_words = get_high_info_words_per_cluster(clustered_sentences)
        for i, cluster in enumerate(clustered_sentences):
            cluster_text = "\n".join(cluster)
            high_info_words = ", ".join(cluster_high_info_words[i])
            clusters += f"Cluster {i+1} (High Info Words: {high_info_words})\n"
            clustered_text += f"Cluster {i+1} (High Info Words: {high_info_words}):\n{cluster_text}\n\n"
        st.text_area("Clusters", value=clusters, height=200)
        st.text_area("Clustered Sentences", value=clustered_text, height=200)
    # Sanity check: every input sentence should appear in exactly one cluster
    clustered_sentences_flat = [sentence for cluster in clustered_sentences for sentence in cluster]
    if set(sentences) == set(clustered_sentences_flat):
        st.write("✅ All sentences are accounted for in the clustered output.")
    else:
        st.write("⚠️ Some sentences are missing in the clustered output.")

    plot_cluster_words(clustered_sentences)

st.markdown("For more information and updates, visit our [help page](https://huggingface.co/awacke1).")