# %% import datetime import gradio as gr from io import BytesIO import numpy as np import numpy as np import tiktoken import pandas as pd from utils.functions import load_csv from wordcloud import WordCloud import matplotlib.pyplot as plt plt.switch_backend('Agg') import requests import umap from sklearn.feature_extraction.text import TfidfVectorizer import hdbscan import plotly.express as px import plotly.graph_objects as go import plotly.express as px from langchain.chat_models import ChatOpenAI import os from langchain.agents import load_tools from langchain.agents import initialize_agent, create_pandas_dataframe_agent from langchain.llms import OpenAI from langchain.text_splitter import RecursiveCharacterTextSplitter import gradio as gr import openai import pandas as pd import numpy as np import re import whisper import openai import pandas as pd import networkx as nx import matplotlib.pyplot as plt import networkx as nx import matplotlib.pyplot as plt from langchain import OpenAI, PromptTemplate, LLMChain from langchain.agents import load_tools from langchain.agents import initialize_agent from langchain.llms import OpenAI from langchain.document_loaders import YoutubeLoader from langchain.chains.summarize import load_summarize_chain from langchain.text_splitter import RecursiveCharacterTextSplitter, TokenTextSplitter from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification from langchain.document_loaders import YoutubeLoader import time import re import pinecone import pandas as pd from sentence_transformers import SentenceTransformer, util #import numpy as np from langchain.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings from sklearn.metrics import silhouette_score import torch import nltk nltk.download('vader_lexicon') from dotenv import load_dotenv load_dotenv() gradio_css = os.getenv("GRADIO_CSS") gradio_js = os.getenv("GRADIO_JS") rubik_backend = os.getenv("RUBIK_BACKEND") openapi_key = os.getenv("OPENAI_API_KEY") wolfram_alpha_appid = os.getenv("WOLFRAM_ALPHA_APPID") #for versioning ra = np.random.randint(1000000) os.environ['OPENAI_API_KEY'] = openapi_key os.environ['WOLFRAM_ALPHA_APPID'] = wolfram_alpha_appid def get_key(item): return item['label'] def get_emotion_bertweet(dataset): tokenizer4 = AutoTokenizer.from_pretrained("finiteautomata/bertweet-base-emotion-analysis", truncation=True) model4 = AutoModelForSequenceClassification.from_pretrained("finiteautomata/bertweet-base-emotion-analysis") nlp = pipeline('sentiment-analysis', model=model4, tokenizer=tokenizer4, top_k=6, truncation=True, device=device) top_emotion = [] # apply emotion model on data and get the labels and scores for i in range(len(dataset)): label = [] score = [] jsonfile = (nlp(dataset['translated_text'].iloc[i])) jsonfile[0].sort(key=get_key) for j in range(0, 6): jsonfile2 = np.array(jsonfile) label.append(jsonfile2[0][j]['label']) score.append(jsonfile2[0][j]['score']) top_emotion.append(label[score.index(max(score))]) dataset['top_emotion_bertweet'] = top_emotion print(jsonfile2) return dataset model_name = "sentence-transformers/all-MiniLM-L6-v2" hf = HuggingFaceEmbeddings(model_name=model_name) embeddings = OpenAIEmbeddings() # pinecone.init( # api_key='ENTER API KEY HERE', # environment='us-central1-gcp' # ) # index_name = 'openaigradio' def markdown_to_html(md_string): # html_string = markdown.markdown(md_string) return md_string tokenizer4 = AutoTokenizer.from_pretrained("finiteautomata/bertweet-base-emotion-analysis", truncation=True) model4 = AutoModelForSequenceClassification.from_pretrained("finiteautomata/bertweet-base-emotion-analysis") openai.api_key = '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' model_whisp = whisper.load_model("base") llm = OpenAI(temperature=0.2, model_name='text-davinci-003', max_tokens=1000, top_p=1) model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2') # check if cpu or gpu device = 'cuda' if torch.cuda.is_available() else 'cpu' model = model.to(device) # %% Industries = ['Agriculture', 'Automobile Manufacturing', 'Banking and Finance', 'Biotechnology', 'Chemicals and Petrochemicals', 'Construction and Engineering', 'Consumer Goods and Retail', 'Education', 'Electronics', 'Energy (Oil, Gas, Coal, and Renewable Energy)', 'Entertainment and Media', 'Food and Beverage', 'Healthcare and Pharmaceuticals', 'Hospitality, Travel, and Tourism', 'Information Technology (IT) and Software', 'Insurance', 'Manufacturing (various sectors)', 'Mining and Metals', 'Real Estate', 'Renewable Energy (Solar, Wind, Hydro, Geothermal)', 'Telecommunications', 'Textiles and Apparel', 'Transportation and Logistics', 'Utilities (Electricity, Water, Gas)', 'Waste Management and Environmental Services'] def get_memory(): memory_string = '' for i,j in memory.items(): print(i, j) memory_string += str(j) + '\n' return memory_string def check_words_in_string(word_list, input_string, case=False): input_string = input_string.lower() # Convert words to lowercase if case is False word_list = [word.lower() if case else word for word in word_list] # Check if any word is in the input_string result = any(word in input_string for word in word_list) # check if True if result: return True else: return False # Will be used by the Langchain chatbot words = ['rows', 'data', 'length', 'dataset','plot', 'col','columns','column', 'max', 'min', 'minimum', 'maximum', 'visualize','visualise','represent','graph','chart','plot','diagram','illustrate','show','depict','display','count','number','sum','total','aggregate','trend','pattern','distribution','average','linechart','scatter','barchart','piechart','histogram','boxplot','heatmap','correlation','regression','forecast','predict'] memory = {'agent':[], 'user':[]} def get_topic_cluster(dataframe, graph_type = None,filename = None): print(filename) if (dataframe is None): # return None,None, '

Please click "Launch" on the left sidebar.

', '

Please click "Launch" on the left.

', "Executive Summary" return None, None, None, None, None, '', '

Please click "Launch" on the left sidebar.

', "Analysis:Topic Cluster" reduce_dim = umap.UMAP( n_components=3, n_neighbors=8, min_dist=0.55) df = dataframe.copy() # some cleaning for reddit datasets df = df[df['translated_text'] != 'nan'] df = df[df['translated_text'] != '[deleted]'] df = df[df['translated_text'] != '[removed]'] # if filename + 'df3.csv' exists load csv if os.path.exists(filename + 'df3.csv'): df3 = pd.read_csv(filename + 'df3.csv') df2 = df3 else: def CleanTxt_quotes(text): text = re.sub(r'https?:\/\/\S+', '', text) # Remove hyperlinks text = re.sub(r'http?:\/\/\S+', '', text) # Remove hyperlinks # if more than 5 mentions, remove all mention if len(re.findall(r'@[A-Za-z0-9]+', text)) > 5: text = re.sub(r'@[A-Za-z0-9]+', '', text) # if more than 4 hashtags, remove all hashtags #text = re.sub(r'[^A-Za-z0-9.!?_#@]+', ' ', text) # Remove non-alphanumeric characters except exclamation marks and question marks text = re.sub(r'\s+', ' ', text) # Remove extra whitespace return text df['clean_text'] = df['translated_text'].apply(lambda x: str(x)) df['clean_text'] = df['translated_text'].apply(lambda x: CleanTxt_quotes(x)) embedding = np.array([np.array(xi) for xi in df.embeddings]) umap_embeddings = reduce_dim.fit_transform(embedding) print('umap_embeddings', umap_embeddings.shape) # CHECK THIS LINE df['x'] = umap_embeddings[:, 0] df['y'] = umap_embeddings[:, 1] df['z'] = umap_embeddings[:, 2] df.dropna(inplace=True) hdbscan_min_samples = 1 hdbscan_minimal_cluster_size = int(len(df) * 0.01+40) # hdbscan_minimal_cluster_size = 7 # hdbscan_min_samples = 10 cluster = hdbscan.HDBSCAN( min_cluster_size=hdbscan_minimal_cluster_size, metric='euclidean', cluster_selection_epsilon=0.001, cluster_selection_method='leaf', algorithm='best', prediction_data=False, min_samples=hdbscan_min_samples).fit(df[['x', 'y', 'z']]) cluster_analysis = len(pd.Series(cluster.labels_).unique()) print('Number of Sentences = ', len(df)) print('Number of Clusters = ', cluster_analysis, '/n') df_cluster = pd.DataFrame( pd.DataFrame(cluster.labels_).value_counts()) print(df_cluster) clusters = pd.DataFrame(cluster.labels_) # percent_unlabelled = round((len(df[clusters[0] == -1]) / len(df)) * 100, 2) # print('The percentage of unlabelled sentences is: ', percent_unlabelled, '%') # reindex df.reset_index(inplace=True, drop=True) print(len(df[clusters[0] == -1])) for i in range(0, cluster_analysis): print('Cluster ', i, ' has ', len( df[clusters[0] == i]), ' sentences') print(df_cluster.index) from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer def get_tfidf_top_features(documents, n_top=4): tfidf_vectorizer = TfidfVectorizer( min_df=0.05, max_df=0.95, max_features=10, stop_words='english') tfidf = tfidf_vectorizer.fit_transform(documents) importance = np.argsort(np.asarray( tfidf.sum(axis=0)).ravel())[::-1] tfidf_feature_names = np.array( tfidf_vectorizer.get_feature_names()) return tfidf_feature_names[importance[:n_top]] cluster_names = pd.DataFrame( columns=['cluster_name', 'embed_index']) for i in range(cluster_analysis): try: print(get_tfidf_top_features( df['clean_text'][clusters[0] == i])) clstr_nm = get_tfidf_top_features( df['clean_text'][clusters[0] == i]) clstr_idx = df['clean_text'][clusters[0] == i].index cluster_names = cluster_names.append( {'cluster_name': clstr_nm, 'embed_index': clstr_idx}, ignore_index=True) except Exception as e: print(e) # cluster_name.append('NULL') pass cluster_names['cluster_name'] = cluster_names['cluster_name'].astype( str) cluster_names['cluster_name'] = cluster_names['cluster_name'].str.replace( '[', '') cluster_names['cluster_name'] = cluster_names['cluster_name'].str.replace( ']', '') cluster_names['cluster_name'] = cluster_names['cluster_name'].str.replace( "'", '') cluster_names['cluster_name'] = cluster_names['cluster_name'].str.replace( " ", '-') clusters_names = cluster_names.explode('embed_index') df2 = df.merge(clusters_names, left_index=True, right_on='embed_index') df2['cluster_name_str'] = df2['cluster_name'].apply( lambda x: str(x)) # assign a int value to each unique cluster name in df3 df2['cluster_number'] = df2['cluster_name_str'].astype( 'category').cat.codes df2['trimmed_text'] = df2['clean_text'].str[:175] print(df2.head()) df3 = df2[['x', 'y', 'z', 'cluster_number', 'cluster_name_str', 'trimmed_text']] #################################################### GET CLUSTER NAME ############################################# df2['gpt_cluster'] = '' df3['gpt_cluster'] = '' for cluster in df3['cluster_name_str'].unique(): each_cluster = df3[df3['cluster_name_str'] == cluster] docs = '\n'.join(np.random.choice(each_cluster['trimmed_text'], 50)) response3 = openai.ChatCompletion.create( model="gpt-3.5-turbo", temperature=0.3, max_tokens=300, top_p=1, # stream=True, messages=[ {"role": "user", "content": f'Given a list of keywords {cluster}, and documents present in the cluster : {docs}; assign the most relevant topic name for this cluster : \n\n Cluster Name : '}, ] )['choices'][0]['message']['content'] df3.loc[df3['cluster_name_str'] == cluster, 'gpt_cluster'] = response3 df2.loc[df2['cluster_name_str'] == cluster, 'gpt_cluster'] = response3 # print(df3['cluster_name_str']) # xi = 0 # for cluster in df3['cluster_name_str'].unique(): # xi += 1 # df3.loc[df3['cluster_name_str'] == cluster, 'gpt_cluster'] = cluster#"cluster " + str(xi) # df2.loc[df2['cluster_name_str'] == cluster, 'gpt_cluster'] = cluster#"cluster " + str(xi) # save df3 df3.to_csv(filename + 'df3.csv', index=False, encoding='utf-8-sig') if len(df3) > 10000: dot_size = 1 else: dot_size = 4 color_scale = px.colors.sequential.Viridis color_list = ['#FF0000', '#FF0000', '#FF0000', '#FF0000', '#FF0000', '#FF0000'] fig = px.scatter_3d(df3, x='x', y='y', z='z', color='gpt_cluster', hover_name='trimmed_text', hover_data={ 'x': False, 'y': False, 'z': False, 'cluster_name_str': False, 'cluster_number': False, 'gpt_cluster': False}, opacity=1, template='plotly_white') fig.update_traces(marker=dict(size=dot_size)) fig.add_trace(go.Scatter3d(x=[0], y=[0], z=[0], mode='markers', marker=dict( size=0.1, color='white'), showlegend=True, name=' ', hoverinfo='none')) # legend on the right side fig.update_layout(legend=dict( bgcolor='rgba(17,17,17,0)', xanchor='auto', yanchor='auto', x=0.8, # Adjust the x position of the legend y=0.2, # Adjust the y position of the legend bordercolor='rgba(17,17,17,0)', borderwidth=0, )) # fig.update_layout(scene=dict( # xaxis=dict( # title=' ', # nticks=0, # # backgroundcolor="rgb(0, 0, 0, 1)", # gridcolor="rgba(17,17,17, 0)", # showbackground=True, # zerolinecolor="rgba(17,17,17, 0)", # zeroline=False, # showgrid=True, # showticklabels=False, # showspikes=False # ), # # hide ticks # yaxis=dict( # # name # title=' ', # nticks=0, # # backgroundcolor="rgb(0, 0, 0, 1)", # gridcolor="rgba(17,17,17, 0)", # showbackground=True, # zerolinecolor="rgba(17,17,17, 0)", # zeroline=False, # showgrid=True, # showticklabels=False, # showspikes=False # ), # zaxis=dict( # # name # title=' ', # nticks=0, # # backgroundcolor="rgba(0, 0, 0, 1)", # gridcolor="rgba(17,17,17, 0)", # showbackground=True, # zerolinecolor="rgba(17,17,17, 0)", # zeroline=False, # showgrid=True, # showticklabels=False, # showspikes=False),) # # tickvals=[],), # ) fig.update_layout(coloraxis_showscale=False, width=1300, height=750, legend=dict(x=0, y=1, traceorder='normal', font=dict(size=14, color='black'), bgcolor='rgba(17,17,17,0)', bordercolor='rgba(17,17,17,0)', borderwidth=0)) # TO ADD AN IMAGE UNCOMMENT # fig.add_layout_image( # dict( # source=, # xref="x", # yref="y", # x=-1, # y=3.8, # # xanchor = "left", # # yanchor = "top", # sizex=.4, # sizey=.4, # opacity=1, # layer="above", # ) # ) fig.update_layout(legend={'itemsizing': 'constant'}, legend_title_text=' ', legend_title_font_color='black', legend_font_color='black', legend_font_size=14, legend_bgcolor='rgba(17,17,17,0)', legend_bordercolor='rgba(17,17,17,0)', legend_borderwidth=2) # , title_font_size=30, title_font_family='Arial', title_font_color='white', title_x=0.06, title_y=0.95, title_xanchor='left', title_yanchor='top', title_text='Cluster of Emotions for {}/n n = {}'.format(subreddit, len(dataset_umap)), margin=dict(l=0, r=0, b=0, t=0, pad=0)) fig.update_layout(scene_camera_eye=dict(x=0.87, y=-0.88, z=0.84), scene_camera_center=dict( x=0, y=0, z=0), template='plotly_white', hovermode='x unified', margin=dict(l=0, r=0, b=0, t=0, pad=2)) fig.update_layout(coloraxis_showscale=True) fig.update_xaxes(showticklabels=True, showgrid=False, zeroline=False, showline=True, automargin=False, showspikes=False) fig.update_yaxes(showticklabels=True, showgrid=False, zeroline=False, showline=True, automargin=False, showspikes=False) #full_html=False, include_plotlyjs='cdn', default_height='750px', default_width='1500px', config={'displaylogo': False, 'modeBarButtonsToRemove': ['zoom2d', 'pan2d', 'select2d', 'lasso2d', 'zoomIn2d', 'zoomOut2d', 'autoScale2d', 'resetScale2d', 'hoverClosestCartesian', 'hoverCompareCartesian', 'zoom3d', 'pan3d', 'resetCameraDefault3d', 'resetCameraLastSave3d', 'hoverClosest3d', 'orbitRotation', 'tableRotation', 'zoomInGeo', 'zoomOutGeo', 'resetGeo', 'hoverClosestGeo', 'toImage', 'sendDataToCloud', 'hoverClosestGl2d', 'hoverClosestPie', 'toggleHover', 'resetViews', 'toggleSpikelines', 'resetViewMapbox']})} cluster_name = df3[['cluster_number', 'gpt_cluster']] cluster_name = cluster_name.drop_duplicates() cluster_name = cluster_name.sort_values(by=['cluster_number']) cluster_name = cluster_name.reset_index(drop=True) # create a list cluster_name_list = cluster_name['gpt_cluster'].tolist() cluster_name_list = '\n'.join(cluster_name_list) Silhouette_Score = 'Silhouette score is : ', silhouette_score(df3[['x', 'y', 'z']], df3['gpt_cluster'], metric='euclidean') # get a dataframe of unique cluster names and their count cluster_count = df3.groupby('gpt_cluster').agg({'cluster_number': 'count'}).reset_index() cluster_count = cluster_count.rename(columns={'cluster_number': 'count', 'gpt_cluster': 'Cluster'}) # return fig, cluster_count, cluster_name_list, Silhouette_Score, df2 return fig, cluster_count, cluster_name_list, None, df2, '', "Please check 'Graph' tab for more details.", "Analysis:Topic Cluster" def get_executive_summary(dataframe=None, brand=None, industry=None, summaries=None, csv_file= None, graph_type = None,filename = None, fullProjectData= None, sourceIndex = 0): # if data_answer.txt exists, open and read sourceData = fullProjectData['sources'][int(sourceIndex)] externalPrompt = sourceData['content']['exec_sum'] if (dataframe is None): return None,None,'', '

Please click "Launch" on the left sidebar.

', "Analysis:Executive Summary" if os.path.exists(filename + 'Executive Summary_CACHE.txt'): with open(filename + 'Executive Summary_CACHE.txt', 'r') as f: output_summary = f.read() return output_summary, dataframe[['translated_text']], '', markdown_to_html(output_summary), "Analysis:Executive Summary" else: if brand is None: brand = ' ' else : brand = brand try: dataframe = dataframe[dataframe['translated_text'].str.contains(brand, case=False)] except: pass text_splitter = TokenTextSplitter.from_tiktoken_encoder( encoding_name='p50k_base', chunk_size = 2000, ) splitted_articles = text_splitter.split_text(''.join(dataframe['translated_text'])) summarize_template = """ {text} \n\n Summarize the most relevant information for an executive summary from the above document: SUMMARY: """ prompt_template = PromptTemplate(input_variables=['text'], template=summarize_template) summary_chain = LLMChain(llm=llm, prompt=prompt_template) summaries = [] for i in splitted_articles: summaries.append(summary_chain.run(i)) summaries1 = '/n'.join(summaries) word_count = 500 #If needed, guess the industry # industry_template = PromptTemplate(input_variables=['summaries'], template=extract_industry) # summary_chain = LLMChain(llm=llm, prompt=industry_template) # industry = summary_chain.run(summaries) #Check size of output and go in a 2nd loop if it's too big encoding = tiktoken.get_encoding('p50k_base') if len(encoding.encode(summaries1)) > 2000: # return only first 2000 tokens summaries1 = encoding.decode(encoding.encode(summaries1)[:2000]) executive_summary_template = '''Imagine you are an Elite Analyst, Expert Sociologist, and Data Guru, Your task is to leverage your invaluable expertise in crafting a comprehensive and insightful {word_count} words executive summary tailored for C-level executives and decision-makers in {industry}. The summary should synthesize information from various data sources, incorporate relevant cultural and contextual elements, and provide valuable insights that can drive strategic decision-making. Please ensure that your analysis meets the following high-level objectives: Thoroughly examine and interpret the key trends, patterns, and insights derived from the following data sources: {summaries1} Articulate the implications and opportunities for {industry}, keeping in mind the needs and challenges of the industry. Consider the cultural, social, and contextual nuances present in the data, drawing on your sociological expertise to ensure the summary remains relevant and insightful across diverse audiences. Identify any potential risks or challenges that might arise from the data, providing strategic recommendations for mitigating these issues. Present the information in a clear, concise, and engaging manner that captures the attention of busy executives and effectively communicates the key insights. Leverage your data expertise to ensure the accuracy, reliability, and relevance of the information presented in the summary. Make us benefit from your unique expertise and insights. Using markdown formatting, write a {word_count} word SEQ-optimized Executive Summary. Write a click worthy short titles. Add a key takeaway section at the end. Use the seed keyword as the first H2. Always use a combination of paragraphs, lists, and tables for a better reader experience. For the styling of the output, please include headers for different sections, and use bullet points where applicable to organize the key insights. To avoid repetition, vary the sentence structure and word choice when presenting information from different data sources or discussing various trends, insights, or opportunities. Using synonyms, alternate phrasings, and modifying sentence structure can help keep the text engaging and fresh for readers. To avoid repetition, vary the sentence structure and word choice when presenting information from different data sources or discussing various trends, insights, or opportunities. Using synonyms, alternate phrasings, and modifying sentence structure can help keep the text engaging and fresh for readers. In order to maintain reader engagement and deliver a captivating text, please ensure that you diversify sentence structure and word choice when presenting insights, trends, or opportunities from different data sources. Employ synonyms, alternative expressions, and varied sentence patterns to provide a fresh and dynamic reading experience. \n\n ''' if externalPrompt and externalPrompt != "": executive_summary_template = externalPrompt prompt = PromptTemplate(template=executive_summary_template, input_variables=['industry', 'word_count', 'summaries1']) print("start sum") # llm2 = OpenAI(temperature=0.2, model_name='gpt-4', max_tokens=1000, top_p=1) llm2 = ChatOpenAI(temperature=0.2, model_name='gpt-4', max_tokens=1000, top_p=1) executive_chain = LLMChain(llm=llm2, prompt=prompt) output_summary = executive_chain.run(industry=industry, word_count=word_count, summaries1=summaries1) # output_summary = executive_chain.run(industry=industry, word_count=word_count, summaries1=summaries1) with open(filename + "Executive Summary_CACHE.txt", "a") as f: try: f.write(output_summary) except: pass # dataframe.rename(columns={'translated_text': 'Table'}, inplace=True) # return("# Executive summary" + output_summary, dataframe[['translated_text']], markdown_to_html(output_summary), 1, markdown_to_html("# Executive Summary\n\n" + output_summary), "Executive Summary") return output_summary, dataframe[['translated_text']], '', markdown_to_html(output_summary), "Analysis:Executive Summary" return(output_summary, dataframe[['translated_text']][0:20], summaries, output_summary) def get_competitive(brand, industry, graph_type = None, filename = None,dataframe = None,fullProjectData= None, sourceIndex = 0): sourceData = fullProjectData['sources'][int(sourceIndex)] externalPrompt = sourceData['content']['competitor'] if dataframe is None: return(None,'', '

Please click "Launch" on the left sidebar.

', "Analysis:Competitor Analysis") if os.path.exists(filename + 'Competitor_CACHE.txt'): with open(filename + 'Competitor_CACHE.txt', 'r') as f: output_summary = f.read() return(output_summary,'', markdown_to_html("# Competitor Analysis\n\n"+output_summary), "Analysis:Competitor Analysis") else: competitive_prompt = ''' Ignore all previous instructions. Do not rush. Make this impactful and clear. •Read through all the bullet points and make sure you understand all the bullet points, before you start working. • Act as a subject matter expert, Business analyst, social media expert and professor with 20 years of research experience. [IMPORTANT INSTRUCTION] Your singular task is to provide expert reports with key elements and useful content. Do not make up any information. Do not use jargon. Start with a short paragraph introducing {brand} position in the market. This should be clear and impactfull. •I want to learn more about the competitors of brand {brand} in this market {industry}. [SEPARATOR] •Use the advanced level of expertise in this market {industry} to create topics and subtopics with detailed notes, this will help provide confidence and clarity about the item being sought. [SEPARATOR] 1 “Who are the 4 main competitors of {brand}?” 2 “What are the top 3 strengths and weaknesses of each of those competitors?” 3 “What are the unique selling points of our competitors?” 4 “In what unique ways do those competitors market their products/services?” 5 “What are the key trends in the {industry} that those competitors are capitalizing on?” 6 “What are the areas where those competitors excel compared to {brand}?” 7 “What are the areas where our competitors fall short compared to {brand}?” 8 “How do our products/services prices compare to those competitors in terms of quality, price positioning and range?” 9 “What are the common customer complaints about those competitors?” 10 “What are the opportunities for growth in the {industry} that competitors are not taking advantage of?” •Break down the exercise into easy-to-follow steps. •For each topic and/or subtopic provide a clear and informative summary that compare and contrast results.. •Identify common mistakes made when addressing those competitive points and address those with maximum clarity. •Proofread content for accuracy, paying special attention to any terms that may have been substituted or omitted unintentionally. Conclude with a brief overview of the competitive landscape for "brand" with the top 3 takeaways and opportunities. The format should be markdown, add subheaders (h2 only), format into nice paragraphs.''' # hardcoded!!! brand = "Nike" industry = 'Textiles and Apparel' prompt = PromptTemplate(template=competitive_prompt, input_variables=['industry', 'brand']) competition_chain = LLMChain(llm=llm, prompt=prompt) output_summary = competition_chain.run(industry=industry, brand=brand) with open(filename + "Competitor_CACHE.txt", "a") as f: try: f.write(output_summary) except: pass return(output_summary,'', markdown_to_html("# Competitor Analysis\n\n"+output_summary), "Analysis:Competitor Analysis") # def get_topic_summary(dataframe, topics=None, brand=None, industry=None, graph_type = None, filename = None): def get_trend_summary(dataframe, topics=None, brand=None, industry=None, graph_type = None, filename = None,fullProjectData= None, sourceIndex = 0): sourceData = fullProjectData['sources'][int(sourceIndex)] externalPrompt = sourceData['content']['trend_analysis'] if (dataframe is None): return None,None, '','

Please click "Launch" on the left sidebar.

', "Analysis:Executive Summary" if os.path.exists(filename + 'Trend Analysis_CACHE.txt'): with open(filename + 'Trend Analysis_CACHE.txt', 'r') as f: final_summary = f.read() return(final_summary, dataframe[['translated_text']][0:20], '', markdown_to_html(final_summary), "Analysis:Trend Analysis") else: if brand is None: brand = '' else : brand = brand try: dataframe = dataframe[dataframe['translated_text'].str.contains(brand, case=False)] except: pass text_splitter = TokenTextSplitter.from_tiktoken_encoder( encoding_name='p50k_base', chunk_size = 2000, ) splitted_articles = text_splitter.split_text(''.join(dataframe['translated_text'])) summarize_template = """Summarize the most relevant information from the following document: {text} SUMMARY: """ prompt_template = PromptTemplate(input_variables=['text'], template=summarize_template) summary_chain = LLMChain(llm=llm, prompt=prompt_template) summaries = [] for i in splitted_articles: summaries.append(summary_chain.run(i)) # split the summary into 2000 tokens chunks text_splitter = TokenTextSplitter.from_tiktoken_encoder( encoding_name='p50k_base', chunk_size = 2000, ) summaries2 = text_splitter.split_text(''.join(summaries)) word_count = 500 topics = topics final_summary = [] brand = "Nike" industry = "Food and Beverage" for summary_1 in summaries2: topic_prompt = '''"Imagine you are an Elite Analyst and Trend Analysis Expert with extensive experience in identifying patterns and emerging themes from various data sources, such as social media, regular media, reviews, and survey data. Your task is to leverage your invaluable expertise in crafting a comprehensive and insightful trend analysis report tailored for {brand} within the {industry}. The objective is to provide valuable insights into shifts in consumer behavior, preferences, and market dynamics, enabling informed decision-making for C-level executives and decision-makers. In your analysis of {word_count} words, ensure that you address the following key elements: Topics : {topics} Data: {summary} Emerging Trends: Identify and discuss the key emerging trends in consumer behavior, preferences, and market dynamics within the {industry}. Examine the factors driving these trends and provide specific examples to illustrate your findings. Impact on {brand}: Analyze how the identified trends are affecting or could potentially affect {brand}. Consider both opportunities and challenges that may arise from these trends, as well as any necessary adjustments to marketing strategies, product offerings, or customer service initiatives. Recommendations: Based on the insights derived from the trend analysis, provide actionable recommendations for {brand} to stay ahead of the competition, capitalize on new opportunities, and address potential challenges. Consider innovations, partnerships, or targeted marketing campaigns that can help the company adapt to and benefit from the identified trends. Ensure that your trend analysis report is clear, concise, and engaging for busy executives. Focus on providing actionable insights and recommendations that can inform the company's strategic direction. Draw on your expertise to ensure the accuracy, reliability, and relevance of the information presented in the analysis." Using markdown formatting, write a {word_count} word SEQ-optimized Trend Analysis. Write a click worthy short titles. Add a key takeaway section at the end. Use the seed keyword as the first H2. Always use a combination of paragraphs, lists, and tables for a better reader experience. For the styling of the output, please include headers for different sections, and use bullet points where applicable to organize the key insights. To avoid repetition, vary the sentence structure and word choice when presenting information. Using synonyms, alternate phrasings, and modifying sentence structure can help keep the text engaging and fresh for readers. \n\n ''' prompt = PromptTemplate(template=topic_prompt, input_variables=['industry', 'topics', 'word_count', 'summary', 'brand']) topic_chain = LLMChain(llm=llm, prompt=prompt) topic_summary = topic_chain.run(industry=industry, topics = topics, word_count=word_count, summary=summary_1, brand=brand) final_summary.append(topic_summary) if len(final_summary) > 1: topic_summary = ''.join(final_summary) combination = '''{topic_summary}\n\nCombine the content from these articles into one; keeping the format and structure in place. \n\n##Trend Analysis:\n\n''' prompt = PromptTemplate(template=combination, input_variables=['topic_summary']) final_chain = LLMChain(llm=llm, prompt=prompt) final_summary = final_chain.run(topic_summary=topic_summary) else: final_summary = final_summary[0] with open(filename + "Trend Analysis_CACHE.txt", "a") as f: try: f.write(final_summary) except: pass # dataframe.rename(columns={'translated_text': 'Table'}, inplace=True) return("# Trend Analysis\n" + final_summary, dataframe[['translated_text']][0:20], '', markdown_to_html(''+final_summary), "Analysis:Trend Analysis") def get_SWOT(dataframe, brand = None, industry = None, exec_summary=None, graph_type= None, filename = None,fullProjectData= None, sourceIndex = 0): sourceData = fullProjectData['sources'][int(sourceIndex)] externalPrompt = sourceData['content']['swot_'] if (dataframe is None): return(None,'', '

Please click "Launch" on the left sidebar.

', "Analysis:SWOT") brand = 'Nike' industry = 'Textiles and Apparel' if brand is None: brand = ' ' else : brand = brand try: dataframe = dataframe[dataframe['translated_text'].str.contains(brand, case=False)] except: pass # if exec_summary is None: # exec_summary = ''' # Mozzarella Sticks: A Versatile Snack Food with Endless Possibilities ## Introduction Mozzarella sticks are a popular snack food that can be enjoyed in a variety of ways. They can be eaten alone, dipped in sauces, used as a topping on pizzas, or even turned into vape flavors. Mozzarella sticks can also be used to make creative dishes such as a mozzarella stick bowl, a mozzarella stick cake, or a mozzarella stick twinkie. They can also be used as a prank, such as paying per mozzarella stick. Mozzarella sticks are a versatile food that can be enjoyed in many different ways. ## Popularity Mozzarella sticks are a popular food item that can be enjoyed in many different ways. People have been experimenting with different recipes, such as a low-carb snack of apple, mozzarella stick, hummus, veggie, plain Greek yogurt, English cucumber, dill, peanut butter, and celery. There have also been attempts to create a stuffed crust pizza with a mozzarella stick, as well as a Korean corn dog with a French fry chunk and a half mozzarella stick inside. Mozzarella sticks can also be enjoyed with marinara sauce, ranch, ketchup, and other condiments. ## Availability Mozzarella sticks are a popular snack food that can be found in many places. They can be eaten alone or as part of a meal, such as a burger or a pizza. They can also be used as an ingredient in dishes such as mac and cheese, risotto, and fried cauliflower. Mozzarella sticks can be found in many forms, such as deep-fried, baked, or grilled. They can also be paired with other foods, such as fruit, vegetables, and sauces. Mozzarella sticks are high in lactose and should be consumed in moderation. ## International Appeal Mozzarella sticks are a popular dish enjoyed by people around the world. They can be made with a variety of ingredients, such as flour, Greek yogurt, turkey pepperoni, and cheese, and can be served with marinara sauce, butter, and olive oil. Mozzarella sticks are also popular in Czech, Slovak, and Polish cuisine. On International Cheese Day, people celebrate with cheese wedges, ooey gooey cheese pulls, and mozzarella sticks. There are a variety of recipes for mozzarella sticks, including a low-fat version with Greek yogurt, turkey pepperoni, and cheese. Mozzarella sticks can also be enjoyed with a variety of dips, such as marinara sauce, nacho cheese sauce, and homemade marinara sauce. ## Uses Mozzarella sticks are a popular snack food that can be enjoyed in a variety of ways. They can be deep fried, grilled, or microwaved, and are often served with marinara sauce or ranch dressing. They can also be used as a topping for pizza, burgers, and ramen. Mozzarella sticks are also available in low-fat and dairy-free varieties. They are often served at fast food restaurants, such as Arby's, Burger King, and Sonic, and can be purchased in stores. Mozzarella sticks are a great snack for those looking for a quick meal or a tasty treat. ## Health Benefits Mozzarella sticks are a popular food item that can be enjoyed in many different ways. They can be fried, microwaved, baked, or even wrapped in fruit roll-ups. They can be served with marinara sauce, ranch dressing, or even chocolate milk. Mozzarella sticks can also be used to make delicious dishes such as mac and cheese, chicken tenders, and jalapeno poppers. They can also be used to make sandwiches, tacos, and pizzas. Mozzarella sticks are a great way to add flavor and texture to any meal. ## Implications and Opportunities Mozzarella sticks are a popular snack food that can be enjoyed in a variety of ways. They can be served with different sauces, as part of a pizza, or as part of a sandwich. They can also be used to make a variety of dishes, such as a scotch egg, a Camembert burger, or a mozzarella stick hybrid pizza. Mozzarella sticks can also be served with a variety of sides, such as fries, onion rings, and hash browns. Additionally, they can be used to make a variety of desserts, such as a mozzarella stick candle. Mozzarella sticks are a popular bar food and snack item that can be enjoyed in a variety of ways. They can be served as an appetizer, a side dish, or even as a main course.''' #word_count = 500 with open(filename + "Executive Summary_CACHE.txt", "r") as f: exec_summary = f.read() # industry_template = PromptTemplate(input_variables=['summaries'], template=extract_industry) # summary_chain = LLMChain(llm=llm, prompt=industry_template) # industry = summary_chain.run(summaries) brand = brand industry = industry # toolname = ['serpapi'] # tools = load_tools(toolname) # agent = initialize_agent(tools=tools, llm=llm, agent='zero-shot-react-description', verbose=True) # internet_content = agent.run(f'What is {brand}?') SWOT_analysis_template = '''Ignore all previous instructions. Do not rush. Make this impactful and clear. •Read through all the bullet points and make sure you understand all the bullet points, before you start working. Act as a subject matter expert, Business analyst, social media expert and professor with 20 years of research experience. Here is an executive Summary for updated context : {exec_summary} [IMPORTANT INSTRUCTION] Your singular task is to provide expert reports with key elements and useful content. Do not make up any information. Do not use jargon. Introduction: Start with a paragraph introducing Now: return the SWOT for the brand {brand} in the {industry} industry. example: ## Strengths - Strength 1 - Strength 2 ... ## Weaknesses - Weakness 1 - Weakness 2 ... ## Opportunities - Opportunity 1 - Opportunity 2 ... ## Threats - Threat 1 - Threat 2 ... SWOT formatted with markdown syntax: ''' prompt = PromptTemplate(template=SWOT_analysis_template, input_variables=['industry', 'brand', 'exec_summary']) SWOT_chain = LLMChain(llm=llm, prompt=prompt) SWOT_summary = SWOT_chain.run(industry=industry, brand=brand, exec_summary=exec_summary) return("" + SWOT_summary,'', markdown_to_html(SWOT_summary + "
") , "Analysis:SWOT") def emotional_mapping(dataframe, industry = None, graph_type = None, filename = None, fullProjectData = None, sourceIndex = 0): sourceData = fullProjectData['sources'][int(sourceIndex)] externalPrompt = sourceData['content']['swot_'] if (dataframe is None): return None,None, '', '

Please click "Launch" on the left sidebar.

', "Analysis:Sentiment Analysis" if os.path.exists(filename + 'Sentiment Analysis_CACHE.txt'): # read this: emotional_count.to_csv(filename + "Sentiment Analysis_CACHE.csv", index=False) emotional_count = pd.read_csv(filename + "Sentiment Analysis_CACHE.csv") with open(filename + 'Sentiment Analysis_CACHE.txt', 'r') as f: emotional_summary = f.read() return(emotional_summary, emotional_count,'', markdown_to_html(""+emotional_summary), "Analysis:Sentiment Analysis") # return output_summary, dataframe[['translated_text']], markdown_to_html(output_summary), markdown_to_html(output_summary), "Executive Summary" else: if 'top_emotion_bertweet' in dataframe.columns: dataframe['emotion'] = dataframe['top_emotion_roberta'] #dataframe['top_emotion_bertweet'] elif 'top_emotion_roberta' in dataframe.columns: dataframe['emotion'] = dataframe['top_emotion_roberta'] elif 'top_emotion_distilbert' in dataframe.columns: dataframe['emotion'] = dataframe['top_emotion_distilbert'] elif 'top_emotion' in dataframe.columns: dataframe['emotion'] = dataframe['top_emotion'] else: dataframe = get_emotion_bertweet(dataframe) dataframe['emotion'] = dataframe['top_emotion_bertweet'] word_count = 500 # industry_template = PromptTemplate(input_variables=['summaries'], template=extract_industry) # summary_chain = LLMChain(llm=llm, prompt=industry_template) # industry = summary_chain.run(summaries) industry = industry # get positive dataset positive = dataframe[dataframe['polarity'] > 0] # get negative dataset negative = dataframe[dataframe['polarity'] < 0] positive_emotions = [] negative_emotions = [] corpus_positive = {} corpus_negative = {} # Calculate the number of unique emotions for positive and negative datasets num_positive_emotions = min(len(positive['emotion'].unique()), 3) num_negative_emotions = min(len(negative['emotion'].unique()), 3) # Loop through the positive emotions for i in range(num_positive_emotions): value = str(positive['emotion'].value_counts(normalize=True).index[i]) percent = str(round(positive['emotion'].value_counts(normalize=True)[i] * 100, 2)) + '%' positive_emotions.append(value + ' ' + percent) corpus_positive[value] = positive[positive['emotion'] == value]['translated_text'].tolist() # Loop through the negative emotions for i in range(num_negative_emotions): value = str(negative['emotion'].value_counts(normalize=True).index[i]) percent = str(round(negative['emotion'].value_counts(normalize=True)[i] * 100, 2)) + '%' negative_emotions.append(value + ' ' + percent) corpus_negative[value] = negative[negative['emotion'] == value]['translated_text'].tolist() emotion_summary = {} text_splitter = TokenTextSplitter.from_tiktoken_encoder( encoding_name='p50k_base', chunk_size = 2000, ) for emotion, text in corpus_positive.items(): emotion_summary[emotion] = text_splitter.split_text(''.join(text)) # get first element emotion_summary[emotion] = emotion_summary[emotion][0] emotion_summarize_template = """ {text} \n\n Summarize the text from the above document to answer this question : Why are people feeling {emotion} ? \n\n SUMMARY: """ prompt_template = PromptTemplate(input_variables=['text', 'emotion'], template=emotion_summarize_template) summary_chain = LLMChain(llm=llm, prompt=prompt_template) emotion_summary[emotion] = summary_chain.run(text=emotion_summary[emotion], emotion=emotion, industry=industry) for emotion, text in corpus_negative.items(): emotion_summary[emotion] = text_splitter.split_text(''.join(text)) # get first element emotion_summary[emotion] = emotion_summary[emotion][0] emotion_summarize_template = """ {text} \n\n Summarize the text from the above document to answer this question : Why are people feeling {emotion} ? \n\n SUMMARY: """ prompt_template = PromptTemplate(input_variables=['text', 'emotion'], template=emotion_summarize_template) emotion_summary[emotion] = summary_chain.run(text=emotion_summary[emotion], emotion=emotion, industry=industry) executive_summary_template = '''Imagine you are an Elite psychologist, Analyst, and Data Guru. You are familiar with leading emotion measurement techniques and the latest developments in the field, including the Plutchik index and Emotional Intensity Scale (EIS). Data Summary per emotions, leave 'other' emotions!: {all_emotions} Your task is to leverage your invaluable expertise in crafting an insightful {word_count} emotion-driven report tailored for C-level executives and decision-makers in {industry}. The objective is to provide valuable insights into the impact of the top emotions marketing and branding strategies and provoke lightbulb moments for our readers. Your analysis should provide valuable insights that can drive strategic decision-making based on the key emotions. Structure the analysis in two main sections: Observations and Key Findings. In the Observations section, provide precise details about specific emotion measurements and their relation to the wants and needs expressed in the data. In the Key Findings section, focus on insightful content and compare and contrast the different emotions, revealing what's hiding behind the numbers and addressing both expressed and latent emotions. Avoid jargon and broad terms in your analysis, ensuring that the content is clear, concise, and engaging. Thoroughly examine and interpret the key trends, patterns, and insights derived from the key emotions . Articulate the implications and opportunities based on the emotion levels, keeping in mind the needs and challenges of the {industry}. Consider the cultural, social, and contextual nuances present in the data, drawing on your expertise to ensure the emotion analysis remains relevant and insightful across diverse audiences. Leverage your psychological expertise to ensure the accuracy, reliability, and relevance of the information presented in the summary. Make us benefit from your unique expertise and insights. Using markdown formatting, write a {word_count} word SEQ-optimized Executive Summary. Write a click worthy short titles. Add a key takeaway section at the end. Use the seed keyword as the first H2. Always use a combination of paragraphs, lists, and tables for a better reader experience. For the styling of the output, please include headers for different sections, and use bullet points where applicable to organize the key insights. To avoid repetition, vary the sentence structure and word choice when presenting information from different data sources or discussing various trends, insights, or opportunities. Using synonyms, alternate phrasings, and modifying sentence structure can help keep the text engaging and fresh for readers. \n\n ''' prompt = PromptTemplate(template=executive_summary_template, input_variables=['word_count', 'industry', 'all_emotions']) executive_chain = LLMChain(llm=llm, prompt=prompt) emotional_summary = executive_chain.run(industry=industry, word_count=word_count, all_emotions=str(emotion_summary.items())) emotional_count = dataframe.groupby('emotion').agg({'translated_text': 'count'}).reset_index() emotional_count.to_csv(filename + "Sentiment Analysis_CACHE.csv", index=False) with open(filename + "Sentiment Analysis_CACHE.txt", "a") as f: try: f.write(emotional_summary) except: pass # dataframe.rename(columns={'emotion': 'Emotion'}, inplace=True) return(emotional_summary, emotional_count,'', markdown_to_html(""+emotional_summary), "Analysis:Sentiment Analysis") def generate_wordcloud(dataframe): text = ' '.join(dataframe['translated_text'].tolist()) colors = ["#FF69B4", "#FFD700", "#FFA500", "#D3D3D3"] wordcloud = WordCloud(max_font_size=300, max_words=800, width = 1600, height = 1200, background_color="white", colormap="Set2", color_func=lambda *args, **kwargs: colors[len(args[0]) % len(colors)]).generate(text) return wordcloud.to_image() def get_polarity(dataframe): df = dataframe.copy() def get_sentiment_vader(text): from nltk.sentiment.vader import SentimentIntensityAnalyzer sid = SentimentIntensityAnalyzer() return sid.polarity_scores(text)['compound'] df['translated_text'] = df['translated_text'].astype(str) df['polarity'] = df['translated_text'].apply(lambda x: get_sentiment_vader(x)) fig = plt.figure(frameon=False, figsize=(16, 12)) fig = plt.figure(figsize=(15, 8)) # try : if 'date' in df.columns: df['date2'] = pd.to_datetime(df['date'], utc=True) else: return None, dataframe print('no date, skipping polarity viz') # except: # print("no/wrong date column") # return None, dataframe sorted_dates = df.sort_values(by='date2') cmap = plt.cm.get_cmap('RdYlGn') norm = plt.Normalize(sorted_dates['polarity'].min(), sorted_dates['polarity'].max()) colors = [cmap(norm(value)) for value in sorted_dates['polarity']] # scatter plot plt.scatter(sorted_dates['date2'],sorted_dates['polarity'], color=colors, alpha=0.5) # add a lineplot to show the average per day plt.plot(sorted_dates['date2'], sorted_dates['polarity'].rolling(window=50).mean(), color='hotpink', linewidth=1.5) # add legend about pink line plt.legend(['Polarity', 'Trend'], frameon=False, bbox_to_anchor=(0.3, 1), loc='upper right', ncol=2, fontsize=12) # add x-label inside the plot plt.xlabel('Date', fontsize=12 ) # add y-label plt.ylabel('Polarity', fontsize=12) # add x-ticks plt.xticks(fontsize=12 ) # ax1 = plt.axes() # x_axis = ax1.axes.get_xaxis() # x_axis.set_visible(False) plt.yticks(fontsize=12) return plt, df def get_synthetic_comment(text: None, slider, dataframe) : if check_words_in_string(words, text, case=False) and False: df = dataframe.copy() # query is based on a dataframe called "df" agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True) def launch_agent(user_input): memory['user'].append(user_input) # user_input = get_memory() + user_input user_input = user_input agent_output = (agent.run(user_input)) memory['agent'].append(agent_output) return agent_output print('Pandas Agent Query') answer = launch_agent(text) return answer, None, None else: query_type = 'comments' query = f'Forget all of the above. Write 2-3 examples of {query_type} answering this question: {text}. \n\n{query_type}:\n\n' response = openai.ChatCompletion.create( model="gpt-3.5-turbo", temperature=0.5, max_tokens=300, top_p=1, # stream=True, messages=[ #{"role": "system", "content": "Forget all the above instructions. You are a reviewer of products but also a customer and have an opinion about the products you buy. You are asked to write a review of a product you have recently purchased."}, {"role": "user", "content": query}, ] )['choices'][0]['message']['content'] response = re.sub(r'As an AI model.*?\.', '', response) response = re.sub(r'As an AI language model.*?\.', '', response) query_embed = model.encode(response) # dataframe['embeddings'] = dataframe['translated_text'].apply( # lambda x: model.encode(x)) dataframe['similarity'] = dataframe['embeddings'].apply( lambda x: round(float(util.pytorch_cos_sim(query_embed, x)), 3)) dataframe.sort_values(by='similarity', ascending=False, inplace=True) complexity = '' cutsize = 900 if dataframe[dataframe['similarity'] > slider].shape[0] == 0: response2 = f'No {query_type} found with a similarity score above {slider}. Try to lower the similarity score threshold or change the question.\n\n However, this is what I found on the internet: \n\n' toolname = ['wolfram-alpha'] tools = load_tools(toolname) agent = initialize_agent(tools=tools, llm=llm, agent='zero-shot-react-description', verbose=False) wolfram_content = agent.run(f'{text}') wolfram_response = f'{wolfram_content}' toolname = ['serpapi'] tools = load_tools(toolname) agent = initialize_agent(tools=tools, llm=llm, agent='zero-shot-react-description', verbose=False) internet_content = agent.run(f'{text}') internet_response = f'{internet_content}' response2 = f'{response2} \n\n {wolfram_response} \n\n {internet_response}' else: try: corpus = dataframe[dataframe['similarity'] > slider]['translated_text'].tolist() print("CORPUS SIZE:", "FULL") response2 = openai.ChatCompletion.create( model="gpt-3.5-turbo", temperature=0.5, max_tokens=300, top_p=1, # stream=True, messages=[ {"role": "user", "content": f'\n{corpus}\n\nSummarize the above {query_type}s {complexity} to answer this question: {text}\n\nSummary:\n\n'}, ] )['choices'][0]['message']['content'] response2 = f'Question: {text}\n\nAnswer: {response2}' except: try: corpus = dataframe[dataframe['similarity'] > slider]['translated_text'][0:50].tolist() corpus = [x[:cutsize] for x in corpus] print("CORPUS SIZE:", 50) response2 = openai.ChatCompletion.create( model="gpt-3.5-turbo", temperature=0.5, max_tokens=300, top_p=1, # stream=True, messages=[ {"role": "user", "content": f'\n{corpus}\n\nSummarize the above {query_type}s {complexity} to answer this question: {text}\n\nSummary:\n\n'}, ] )['choices'][0]['message']['content'] response2 = f'Question: {text}\n\nAnswer: {response2}' except: try: corpus = dataframe[dataframe['similarity'] > slider]['translated_text'][0:30].tolist() corpus = [x[:cutsize] for x in corpus] print("CORPUS SIZE:", 30) response2 = openai.ChatCompletion.create( model="gpt-3.5-turbo", temperature=0.5, max_tokens=300, top_p=1, # stream=True, messages=[ {"role": "user", "content": f'\n{corpus}\n\nSummarize the above {query_type}s {complexity} to answer this question: {text}\n\nSummary:\n\n'}, ] )['choices'][0]['message']['content'] response2 = f'Question: {text}\n\nAnswer: {response2}' except: corpus = dataframe[dataframe['similarity'] > slider]['translated_text'][0:15].tolist() print("CORPUS SIZE:", 15) # keep only the first 400 characters per each list elem corpus = [x[:cutsize] for x in corpus] response2 = openai.ChatCompletion.create( model="gpt-3.5-turbo", temperature=0.5, max_tokens=300, top_p=1, # stream=True, messages=[ {"role": "user", "content": f'\n{corpus}\n\nSummarize the above {query_type}s {complexity} to answer this question: {text}\n\nSummary:\n\n'}, ] )['choices'][0]['message']['content'] response2 = f'Question: {text}\n\nAnswer: {response2}' # Graph Generation return response2, dataframe[dataframe['similarity'] > slider][['similarity', 'translated_text']][0:15], response2, "Analysis:Manual query" return response2, dataframe[dataframe['similarity'] > slider][['similarity', 'translated_text']][0:15], response2, "Analysis:Manual query" def clear_output(filename, titleBlock): titleBlock = re.sub('<[^<]+?>', '', titleBlock) # remove all \n # trim titleBlock = titleBlock.replace("\n", "") print(titleBlock) print(filename) print(filename + titleBlock + "_CACHE.txt") try: os.remove(filename + titleBlock + "_CACHE.txt") except Exception as e: print (e) pass return 'Cache has been cleared' def save_output(tab, data_answer): # def save_output(tab): if tab == "Summary": print("summary save") print(data_answer) print (data_answer.value) print(dir(data_answer)) # with open("data_answer.txt", "+ab") as f: # open and append to it with open("data_answer.txt", "a") as f: try: f.write(data_answer.value) f.write(data_answer) except: pass elif tab == "Table": try: similar_reviews_dataframe = pd.DataFrame(similar_reviews) similar_reviews_dataframe.to_csv("similar_reviews.csv", index=False, encoding='utf-8-sig') except: pass else: try: g.save_graph("graph.html") except: pass def generate_new_examples(text): # GENERATE NEW EXAMPLES BASED ON QUERY new_examples = openai.ChatCompletion.create( model="gpt-3.5-turbo", temperature=0.7, max_tokens=100, top_p=1, # stream=True, messages=[ {"role": "user", "content": f'Generate a list of 4 most relevent questions related to this question : {text}. Output should be in a comma separated string format, no numbers, ordering. (example: What is this text about?, What is the main trend?,...) There is no need to enumerate each element.\n\n'}, ] )['choices'][0]['message']['content'] new_examples = new_examples.split('\n') # make a list for each element new_examples = [x for x in new_examples if x != ''] new_examples = [x.strip() for x in new_examples] new_examples = [x.split(',') for x in new_examples] return new_examples def summarize_video(url): loader = YoutubeLoader.from_youtube_channel(url) result = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0) texts = text_splitter.split_documents(result) print(len(texts)) # We first try the chain with the default chain type # if length of the text is more than 2000 tokens, we will use map reduce (summary of chunks) try: chain = load_summarize_chain(llm, chain_type='stuff', verbose=True) print('ChainType: stuff') # store intermediate steps return chain.run(result) except: text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0) texts = text_splitter.split_documents(result) # print(len(texts)) chain = load_summarize_chain(llm, chain_type='map_reduce', verbose=True) print('ChainType: map reduce') return chain.run(texts) # def main(): # global similar_reviews, g, query_type, response2, Output, output_html, html, new_examples, samples def get_graph(dataframe): print("step 1") from sklearn.cluster import KMeans from sklearn.metrics.pairwise import cosine_similarity embeddings_array = dataframe['embeddings'].tolist() print ("step 2") num_clusters = 3 # Adjust the number of clusters as needed kmeans = KMeans(n_clusters=num_clusters, random_state=42) cluster_labels = kmeans.fit_predict(embeddings_array) print(cluster_labels) sentences = dataframe['translated_text'].tolist() print ("step 3") G = nx.DiGraph() cos_sim_matrix = cosine_similarity(embeddings_array) print(cos_sim_matrix) print ("step 4") for idx, label in enumerate(cluster_labels): G.add_node(idx, sentence=sentences[idx], cluster=label) for i in range(len(sentences)): for j in range(len(sentences)): if i != j: #if cos_sim_matrix[i, j] > 0.8: G.add_edge(i, j, weight=cos_sim_matrix[i, j]) # else: # continue print ("step 5") plt.figure(figsize=(10, 10)) pos = nx.spring_layout(G, k=0.5, iterations=50) print ("step 6") G_undirected = G.to_undirected() from community import community_louvain node_to_community = community_louvain.best_partition(G_undirected) print ("step 7") community_to_color = { 0 : 'tab:pink', 1 : 'tab:orange', 2 : 'tab:purple', 3 : 'tab:blue', } node_color = {node: community_to_color[community_id] for node, community_id in node_to_community.items()} print ("step 8") reducer = umap.UMAP(n_components=2, random_state=42) embeddings_2d = reducer.fit_transform(embeddings_array) def normalize_weight(weight, min_weight, max_weight): return (weight - min_weight) / (max_weight - min_weight) def visualize_graph_plotly(graph, embeddings_2d, scaling_factor=3): print ("step 9") min_weight = min((data['weight'] for _, _, data in graph.edges(data=True))) max_weight = max((data['weight'] for _, _, data in graph.edges(data=True))) fig = go.Figure() print ("step 10") # Add edges with width based on the normalized weights print(len(graph.edges())) for i, j in graph.edges(): print(i) weight = normalize_weight(graph[i][j]['weight'], min_weight, max_weight) # weight=0.1 fig.add_shape( type="line", x0=embeddings_2d[i][0], x1=embeddings_2d[j][0], y0=embeddings_2d[i][1], y1=embeddings_2d[j][1], yref="y", xref="x", line=dict(color="rgba(211, 211, 211, 0.5)", width=weight * scaling_factor * 0.7), ) print ("step 11") # Add nodes for idx, emb in enumerate(embeddings_2d): closeness = nx.closeness_centrality(G)[idx] degree = nx.degree_centrality(G)[idx] betweenness = nx.betweenness_centrality(G)[idx] eigen = nx.eigenvector_centrality(G)[idx] fig.add_trace( go.Scatter( x=[emb[0]], y=[emb[1]], mode="markers+text", text=[graph.nodes[idx]["sentence"]], textposition="bottom center", marker=dict(color=node_color[idx][4:], size=closeness * 40), # add closeness, degree, betweenness and sentence as hover text hovertext=[f"{graph.nodes[idx]['sentence']}
closeness_centrality: {closeness:.2f}
degree_centrality: {degree:.2f}
betweenness_centrality: {betweenness:.2f}
eigenvector_centrality: {eigen:.2f}"], ) ) print("for completed") fig.update_layout(showlegend=False, plot_bgcolor="white", width=1200, height=800) fig.update_xaxes(showticklabels=False, showgrid=False, zeroline=False, showline=False, automargin=False, showspikes=False) fig.update_yaxes(showticklabels=False, showgrid=False, zeroline=False, showline=False, automargin=False, showspikes=False) fig.update_layout(title_text="Test Graph Visualization", title_x=0.5, title_font_size=30, title_font_color='black') return fig return visualize_graph_plotly(G, embeddings_2d, scaling_factor = 10) def update_examples(samples): return gr.Dataset.update(samples=samples) def print_samples(): global samples return {"samples": samples} def load_example(example_id): global samples return samples[example_id][0] url_params = gr.JSON({}, visible=False, label="URL Params") def getAnalysisLabel(id): if id == 'exec_sum': return 'Executive Summary' elif id == 'top_clust': return 'Topic Cluster' elif id == 'trend_analysis': return 'Trend Analysis' elif id == 'emot_clust': return 'Sentiment Analysis' elif id == 'swot_': return 'SWOT Analysis' elif id == 'competitor': return 'Competitor Analysis' tabs = [ { "id": 0, "label": "Social Media", "content": { "exec_sum":None, "top_clust": None, "emot_clust": None, "swot_": None, # "competitor": None, }, "filename": "profdemo_cleaned.xlsx" }, { "id": 1, "label": "News/Publications", "content": { "exec_sum":None, "trend_analysis": None, # "top_clust": None, "competitor" : None, "swot_": None, }, "filename": "cleaned_news.xlsx" }, # { # "id": 0, # "label": "Mozzarella", # "content": { # "exec_sum":None, # "top_clust": None, # # "trend_analysis": None, # "emot_clust": None, # # "swot_": None, # # "competitor" : None, # }, # "filename": "MozzarellaTW.xlsx" # }, # { # "id": 1, # "label": "Onion", # "content": { # "exec_sum":None, # "top_clust": None, # "emot_clust": None, # # "competitor" : None, # }, # "filename": "OnionTW.xlsx" # }, # { # "id": 2, # "label": "Brand - Social Media", # "content": { # "exec_sum":None, # "top_clust": None, # "trend_analysis": None, # "emot_clust": None, # "swot_": None, # "competitor" : None, # }, # "filename": "LambWestonBrand.csv" # }, # { # "id": 3, # "label": "Brand - News", # "content": { # "exec_sum":None, # "top_clust": None, # "trend_analysis": None, # "emot_clust": None, # "swot_": None, # "competitor" : None, # }, # "filename": "LambWestonNews.csv" # }, ] list = [] maxSources = 10 oTab = [] attachButtons = [] for element in tabs: oTab.append( { "exec_sum":None, "top_clust": None, "trend_analysis": None, "emot_clust": None, "swot_": None, "competitor" : None, }) attachButtons.append(None) get_window_url_params = """ function(url_params) { var scriptElement = document.createElement("script"); scriptElement.src = '"""+gradio_js+"""?v="""+str(ra)+"""'; scriptElement.innerHTML = "console.log('This is dynamic JavaScript code');"; document.body.appendChild(scriptElement); const params = new URLSearchParams(window.location.search); url_params = Object.fromEntries(params); return [url_params]; } """ runjs = """ function(projJson,num,fullData) { console.log(fullData) var localizations = fullData['localization'] console.log( fullData['localization']) if (localizations) { document.querySelectorAll('.hsub')[0].innerText = localizations['project']; // document.querySelectorAll('.hsub')[1].innerText = "semmi"; // document.querySelectorAll('.hsub')[2].innerText = "semmi"; if (document.querySelector('#querybtn')) document.querySelector('#querybtn').innerText = localizations['talk_to_your_data'] var tabs = document.querySelectorAll('#targetColMiddle .tab-nav button'); tabs[0].innerText = localizations['overview'] || 'Overview' tabs[1].innerText = localizations['rawdata'] || 'Raw Data' tabs[2].innerText = localizations['visuals'] || 'Visuals' document.querySelectorAll('.sideTitle span')[0].innerText = localizations['data_sources'] || 'Data sources' document.querySelectorAll('.sideTitle span')[1].innerText = localizations['predefined_analysis'] || 'Predefined analysis' } document.querySelectorAll('.analysisButton').forEach(function(el) { el.style.display = 'none'; }); Object.keys(projJson[num]['content']).forEach(function(key) { document.querySelectorAll('.analysisButton').forEach(function(el) { if (el.id == key) { el.style.display = 'block'; } }); }); document.querySelectorAll('.sourceButton').forEach(function(el) { el.classList.remove('selectedSource'); }); document.querySelectorAll('.sourceButton').forEach(function(el) { if (el.innerHTML == projJson[num]['label']) el.classList.add('selectedSource'); }); // NEM ÉRTEM MINEK KELL TÖBB OUTPUT return [1, num,1,1] } """ def parse_URL_params(url_params): # if url params has 'pid' if 'pid' in url_params: request = requests.get(rubik_backend + '?proj=' + url_params['pid']) text = url_params['pid'] else: request = requests.get(rubik_backend + '?proj=demo') text = "demo" # textlowercase url_params['pid'] # first letter uppercase textUpper = request.json()["brand"][0].upper() + request.json()["brand"][1:] return [url_params, request.json()["sources"], request.json()["sources"], "
Project:"+textUpper+"
", request.json(), text] for i in range(maxSources): list.append({"id":i, "name":"asd", "obj":"", "analList":[]}) def variable_outputs(k): global list sourceArray = k output = None for i in range(len(sourceArray)): if not output: output = [list[i]["obj"].update(value=sourceArray[i]["label"],visible=True)] else: output += [list[i]["obj"].update(value=sourceArray[i]["label"],visible=True)] remainingCount = maxSources - len(sourceArray) for i in range(remainingCount): output += [list[i]["obj"].update(value="",visible=False)] return output url_params = gr.JSON({}, label="URL Params", visible=False) # with gr.Interface(theme=gr.themes.Soft(primary_hue='pink', secondary_hue='pink', neutral_hue='stone'), css="footer{display:none !important}") as app: # , css=".main{filter: blur(3px)}footer{display:none !important}" with gr.Blocks(theme=gr.themes.Soft(primary_hue='pink', secondary_hue='pink', neutral_hue='stone')) as app: dat = gr.Markdown() projdetails = gr.Textbox("test", label="Project Details", visible=False) projJson = gr.JSON(visible=False) fullProjectData = gr.JSON(visible=False) projectUUID = gr.State(value="", label="projectUUID", visible=False) if True: summaries_state = gr.State(value=[], label="summaries_state") executive = gr.State(value='', label="executive") needHeader = True # if needHeader and False: # with gr.Row(): # with gr.Column(scale=.1, elem_id="logocol"): # # gr.Markdown('') # with gr.Column(scale=.3): # gr.Markdown("

Talkback

") # random number between 1 and 10000 ra = str(np.random.randint(1, 10000)) gr.Markdown( """ """ ) # if needHeader: # with gr.Row(): # with gr.Column(elem_id="header"): # gr.Markdown("

Nike

") # selectedData = gr.Markdown('', elem_id="datanameblock") # titleBlock = gr.Markdown("", elem_id="titleblock") df = pd.DataFrame() with gr.Row(): with gr.Column(scale=.4, elem_id="leftContainer"): with gr.Row(elem_id="header"): proj = gr.Markdown("") selectedData = gr.Markdown('', elem_id="datanameblock") titleBlock = gr.Markdown("", elem_id="titleblock") gr.Markdown("

Data sources

") tabs = [] analysisList = [] for i in range(maxSources): list[i]["obj"] = gr.Button(value="" + str(i),visible=False, elem_classes="sourceButton") list[i]["index"] = gr.Number(i, visible=False) tabs.append(list[i]["obj"]) gr.Markdown("

Predefined analysis

") # analysisList = ['exec_sum', 'top_clust', 'emot_clust', 'swot_', 'trend_analysis', 'competitor'] oa = { 'exec_sum': gr.Button("Executive Summary", elem_id="exec_sum", elem_classes=["analysisButton"]), 'top_clust': gr.Button("Topic Clusters", elem_id="top_clust", elem_classes=["analysisButton"]), 'emot_clust': gr.Button("Sentiment Analysis", elem_id="emot_clust", elem_classes=["analysisButton"]), 'swot_': gr.Button("SWOT Analysis", elem_id="swot_", elem_classes=["analysisButton"]), 'trend_analysis': gr.Button("Trend Analysis", elem_id="trend_analysis", elem_classes=["analysisButton"]), 'competitor': gr.Button("Competitor Analysis", elem_id="competitor", elem_classes=["analysisButton"]), } newcluster = gr.Button("New Cluster", elem_id="newcluster", elem_classes=["newcluster"], visible=False) newgraph = gr.Button("New graph", elem_id="newcluster", elem_classes=["newgraph"], visible=False) # for i in range(len(analysisList)): # gr.Button(analysisList[i], elem_classes=["analysisButton"], elem_id=analysisList[i]) # iterate throu oa gr.Button("Talk to your data", elem_id="querybtn") projJson.change(variable_outputs, projJson, tabs) gr.Markdown(f'url params value: {url_params.value}', visible=False) threshold = gr.Slider(minimum=0, maximum=1, label="Threshold", visible=False) csv_file = gr.File(label="File (csv, excel, h5..)", elem_id="fupload",visible=False) brand = gr.State('brand') industry = gr.State('industry') csvs = gr.State('csvnum') filename = gr.State('filename') graph_type = gr.State('graph_type') # THE DATASET data_storage = gr.State(label="data_storage") # TOPICS LIST list_of_topics = gr.State(label="list_of_topics") # add image output with gr.Tab("Word Cloud"): Imaj = gr.Image(label="Word Cloud") with gr.Tab("Polarity"): Polarity = gr.Plot(label="Polarity") app.load( fn=parse_URL_params, inputs=[url_params], outputs=[url_params, projJson, projdetails, proj, fullProjectData,projectUUID], _js=get_window_url_params ) url_params.render() with gr.Column(scale=2, elem_id="targetColMiddle"): graphHelper = gr.Markdown("") with gr.Tab("Overview", elem_id="overviewTab"): tab = 'Summary' data_answer = gr.Textbox(visible=False, elem_id="hiddenTextbox") # gr.Textbox.style(data_answer) formattedres = gr.Markdown("") with gr.Row(): with gr.Column(scale=6): # add empty space pass with gr.Column(scale=.5, min_width=100): clear_button = gr.Button("Clear", visible=True, elem_id="clear_button") clear_button.click(fn=clear_output, inputs=[filename, titleBlock], outputs=[formattedres]) # save_button = gr.Button("Save", visible=True, elem_id="save_button") # save_button.click(lambda: save_output("Summary", data_answer)) tab = gr.Tab("Raw data", elem_id="rawdataTab") with tab: tab = 'Table' similar_reviews = gr.Dataframe(label="Table", type="pandas", max_rows=20, overflow_row_behaviour='paginate', show_label=False) with gr.Row(): with gr.Column(scale=6): # add empty space pass # with gr.Column(scale=.5, min_width=100): # save_button = gr.Button("Save", visible=True) # save_button.click(lambda: save_output("Table", data_answer)) with gr.Tab("Visuals"): tab = 'Graph' graph = gr.Plot(elem_id="cluster", label="") graph2 = gr.Plot(elem_id="cluster2", label="", visible=False) # with gr.Tab("Word Cloud"): Imaj = gr.Image(label="Word Cloud", elem_id="vwordcloud") # with gr.Tab("Polarity"): Polarity = gr.Plot(label="Polarity", elem_id="vpolarity") with gr.Row(): with gr.Column(scale=1): clearbutton = gr.Button("Remove and regenerate", visible=True, elem_id="cleardrop_button") clearkeepbutton = gr.Button("Keep and regenerate", visible=True, elem_id="clearkeep_button") with gr.Row(): with gr.Column(scale=6): # add empty space pass # with gr.Column(scale=.5, min_width=100): # save_button = gr.Button("Save", visible=True) # gr.Button.style(save_button, color="secondary") # save_button.click(lambda: save_output("Graph", data_answer)) with gr.Row(elem_id="query"): with gr.Column(scale=1, elem_id='query1'): data_answerQuery = gr.Textbox(label="", lines=10, visible=False) # gr.Textbox.style(data_answer) formattedresQuery = gr.Markdown("") query = gr.Textbox(lines=1, placeholder="Start typing your question...", label=" ") gr.Textbox.style(query) submit_button = gr.Button("Submit", elem_id='submit') gr.Button.style(submit_button, color="secondary") with gr.Column(scale=.1, elem_id='query2'): samples = [["What insights can we take from this data?", "2 What insights can we take from this data?"]] examples = gr.Dataset(samples=samples, components=[query], type="index", label="Some hints for your next question, select which one you prefer.") def update_examples(query): global samples samples = generate_new_examples(query) return gr.Dataset.update(samples=samples) def print_samples(): global samples return {"samples": samples} def load_example(example_id): global samples return samples[example_id][0] def changesource(projJson, num): # print("switching") return 1 def doJS(projJson, num,fulldat): # print("doing js") return 1, num sourcelabel = gr.TextArea(elem_id="activeSource", visible=False) sourceIndex = gr.Number(visible=False) xy = gr.State() for i in range(maxSources): num = list[i]["index"] list[i]["obj"].click(doJS,inputs=[projJson, num, fullProjectData], outputs=[xy, sourceIndex], _js=runjs).then( load_csv, inputs=[projJson, num, fullProjectData, projectUUID], outputs=[data_storage, data_answer, similar_reviews, formattedres, filename, selectedData]).then( generate_wordcloud, inputs=[data_storage], outputs=[Imaj]).then( get_polarity, inputs=[data_storage], outputs=[Polarity, data_storage]) # SUMMARIZE VIDEO CONTENT def checkData(filename): print(filename) dat = gr.State(label="dat") oa['exec_sum'].click(fn=checkData, inputs=[filename], outputs=[]).then(get_executive_summary, inputs=[data_storage, brand, industry,summaries_state, csvs, graph_type,filename, fullProjectData, sourceIndex], outputs=[data_answer, similar_reviews, graphHelper, formattedres, titleBlock]) oa['top_clust'].click(get_topic_cluster, inputs=[data_storage, graph_type,filename], outputs=[graph, similar_reviews, list_of_topics, query, dat, graphHelper,formattedres,titleBlock]) oa['emot_clust'].click(emotional_mapping, inputs=[data_storage, industry, graph_type,filename, fullProjectData, sourceIndex], outputs=[data_answer, similar_reviews, graphHelper,formattedres,titleBlock]) oa['swot_'].click(get_executive_summary, inputs=[data_storage, brand, industry,summaries_state, csvs, graph_type,filename, fullProjectData, sourceIndex], outputs=[data_answer, similar_reviews, graphHelper, formattedres, titleBlock]).then(get_SWOT, inputs=[data_storage, brand, industry, executive, graph_type,filename, fullProjectData, sourceIndex], outputs=[data_answer, graphHelper,formattedres,titleBlock]) oa['trend_analysis'].click(get_trend_summary, inputs=[data_storage, list_of_topics, brand, industry, graph_type,filename, fullProjectData, sourceIndex], outputs=[data_answer, similar_reviews, graphHelper,formattedres,titleBlock]) oa['competitor'].click(get_competitive, inputs=[brand, industry, graph_type,filename,data_storage, fullProjectData, sourceIndex], outputs=[data_answer, graphHelper,formattedres,titleBlock]) def clear_data(filename): if os.path.exists(filename + 'df3.csv'): os.remove(filename + 'df3.csv') def rename_data_timestamp(filename): if os.path.exists(filename + 'df3.csv'): os.rename(filename + 'df3.csv', filename + str(datetime.datetime.now()) + '.csv') clearbutton.click(clear_data, inputs=[filename], outputs=[]).then(get_topic_cluster, inputs=[data_storage, graph_type,filename], outputs=[graph, similar_reviews, list_of_topics, query, dat, graphHelper,formattedres,titleBlock]) clearkeepbutton.click(rename_data_timestamp, inputs=[filename], outputs=[]).then(get_topic_cluster, inputs=[data_storage, graph_type,filename], outputs=[graph, similar_reviews, list_of_topics, query, dat, graphHelper,formattedres,titleBlock]) # app.launch(share=True, server_name="0.0.0.0", server_port=7860) def get_new_topic(data_storage, graph_type,filename): from bertopic import BERTopic from bertopic.representation import OpenAI from sklearn.cluster import KMeans from sklearn.feature_extraction.text import CountVectorizer import pandas as pd # import openai # openai.api_key = 'sk-2Ulixq prompt = """ I have a topic that contains the following documents: [DOCUMENTS] The topic is described by the following keywords: [KEYWORDS] Based on the information above, extract a short topic label in the following format: topic: """ vectorizer_model=CountVectorizer(stop_words="english") # df = pd.read_excel('C:/Users/sinan/Downloads/profdemo_cleaned (1).xlsx') df = data_storage df['translated_text'] = df['translated_text'].apply(lambda x: str(x)) docs = df['translated_text'].tolist() representation_model = OpenAI(model="gpt-3.5-turbo", delay_in_seconds=.5, chat=True) if len(docs) < 100: cluster_model = KMeans(n_clusters=3) topic_model = BERTopic(hdbscan_model=cluster_model, representation_model=representation_model, vectorizer_model=vectorizer_model) else: cluster_model = KMeans(n_clusters=6) # representation_model = 'bert-base-nli-mean-tokens' n_gram_range = (1, 1) # set the range of n-grams to be considered min_topic_size = 10 # set the minimum number of documents in each topic topic_model = BERTopic(hdbscan_model=cluster_model, representation_model=representation_model, n_gram_range=n_gram_range, min_topic_size=min_topic_size) # topic_model = BERTopic(representation_model=representation_model, nr_topics=8) topics, probs = topic_model.fit_transform(docs) return topic_model.visualize_documents(docs, width=1200, height=800, title='Topic Clustering', hide_annotations=True) newcluster.click(get_new_topic, inputs=[data_storage, graph_type,filename], outputs=[graph2]) newgraph.click(get_graph,inputs=[data_storage], outputs=[graph2]) # 1. ADD QUESTIONS TO THE QUERY examples.click(load_example, inputs=[examples], outputs=[query]) # UNCOMMENT FOR TEXT TO SPEECH OUTPUT submit_button.click(get_synthetic_comment, inputs=[query, threshold, data_storage ], outputs=[data_answer, similar_reviews, formattedresQuery, titleBlock]).success(update_examples, inputs=[query], outputs=[examples]) def same_auth(username, password): return username == password if __name__ == "__main__": # app.launch(share=True, server_name="0.0.0.0", server_port=7860, auth=same_auth, auth_message="Please enter the same username and password") app.launch(share=True, server_name="0.0.0.0", server_port=7860) # if __name__ == "__main__": # main()