import pandas as pd
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer

# Excel files to analyse: negative ("差评") reviews, split into three
# price bands (< $25, $25–$50, > $50). Paths are relative to this script.
# NOTE(review): the directory segment '../data./评论数据' has a trailing dot
# after 'data' — looks like a typo for '../data/评论数据'; confirm the actual
# on-disk layout before changing these literals.
file_paths = [
    '../data./评论数据/25美元以下_差评.xlsx',
    '../data./评论数据/25至50美元_差评.xlsx',
    '../data./评论数据/50美元以上_差评.xlsx'
]


def print_top_words(model, feature_names, n_top_words):
    """Print the ``n_top_words`` highest-weighted words of every topic.

    ``model`` must expose ``components_`` (one weight vector per topic, as
    produced by a fitted sklearn LDA); ``feature_names`` maps a column index
    back to its vocabulary term. One line is printed per topic, followed by
    a separator line.
    """
    for idx, weights in enumerate(model.components_):
        # Indices of the n_top_words largest weights, highest first.
        top_indices = weights.argsort()[:-n_top_words - 1:-1]
        words = (feature_names[i] for i in top_indices)
        print("Topic #%d: " % idx + " ".join(words))
    print("******************************************************")


# LDA hyper-parameters — loop-invariant, so defined once up front.
N_FEATURES = 1000   # vocabulary size cap for CountVectorizer
N_TOPICS = 3        # number of topics to extract per file
N_TOP_WORDS = 5     # number of top words to display per topic

# Run an independent LDA topic model over each price band's reviews.
for file in file_paths:
    reviews_df = pd.read_excel(file)

    # Column index 2 is the THIRD column (0-based indexing).
    # NOTE(review): the original comment claimed "second column" — confirm
    # which column actually holds the long review text.
    long_reviews = reviews_df.iloc[:, 2]
    # Replace missing cells, then coerce every cell to str so that numeric
    # values cannot crash CountVectorizer (it requires string documents).
    all_reviews = long_reviews.fillna("").astype(str)

    # Bag-of-words counts: English stop words removed, terms appearing in
    # >95% of documents or fewer than 2 documents are dropped.
    tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
                                    max_features=N_FEATURES,
                                    stop_words='english')
    tf = tf_vectorizer.fit_transform(all_reviews)

    # Online variational Bayes LDA; fixed random_state for reproducibility.
    lda = LatentDirichletAllocation(n_components=N_TOPICS, max_iter=5,
                                    learning_method='online',
                                    learning_offset=50., random_state=0)
    lda.fit(tf)

    # Report the top words characterising each discovered topic.
    tf_feature_names = tf_vectorizer.get_feature_names_out()
    print_top_words(lda, tf_feature_names, N_TOP_WORDS)
