import pandas as pd
from collections import defaultdict
import jieba
from tqdm import tqdm


def process_and_save_comments(comments_list, stopwords, output_prefix):
    """Segment comments with jieba, accumulate like-weighted word counts, and save results.

    Args:
        comments_list: iterable of (comment, like_count) pairs; NaN comments are skipped.
        stopwords: collection of words to filter out (converted to a set internally).
        output_prefix: path prefix for the two output files
            (``<prefix>_word_freq.csv`` and ``<prefix>_cleaned_texts.txt``).

    Returns:
        (word_weight, cleaned_texts): a defaultdict mapping word -> summed like
        weight, and the list of cleaned, space-joined comment strings.
    """
    # Build the set once: O(1) membership in the per-word hot loop below,
    # regardless of whether callers pass a list or a set.
    stopword_set = set(stopwords)
    word_weight = defaultdict(int)
    cleaned_texts = []

    for comment, like in tqdm(comments_list, desc="处理评论", unit="条"):
        if pd.isna(comment):
            continue
        # A NaN like-count would poison the integer weights (NaN propagates
        # through +=); treat missing likes as 0.
        weight = 0 if pd.isna(like) else int(like)
        # str() guards against non-string cells (e.g. numeric comments read
        # by pandas), which would otherwise raise AttributeError on .strip().
        words = jieba.lcut(str(comment).strip())
        filtered = [word for word in words if word not in stopword_set and len(word) >= 2]
        cleaned_texts.append(" ".join(filtered))
        for word in filtered:
            word_weight[word] += weight

    # Persist the word-frequency table.
    word_freq_df = pd.DataFrame(list(word_weight.items()), columns=["Word", "Weight"])
    word_freq_df.to_csv(f"{output_prefix}_word_freq.csv", index=False)
    print(f"词频统计结果已保存至 {output_prefix}_word_freq.csv")

    # Persist the cleaned texts, one comment per line.
    with open(f"{output_prefix}_cleaned_texts.txt", "w", encoding="utf-8") as f:
        f.write("\n".join(cleaned_texts))
    print(f"清洗后的文本已保存至 {output_prefix}_cleaned_texts.txt")

    return word_weight, cleaned_texts


def load_cleaned_texts(file_path):
    """Read cleaned comment lines from *file_path*, dropping blank lines.

    Args:
        file_path: path to a UTF-8 text file, one cleaned comment per line.

    Returns:
        List of stripped, non-empty lines in file order.
    """
    texts = []
    with open(file_path, "r", encoding="utf-8") as fh:
        for raw_line in fh:
            stripped = raw_line.strip()
            if stripped:
                texts.append(stripped)
    return texts