import os
import sys
import django
from django.core.exceptions import AppRegistryNotReady

# Add the project root to sys.path so the Django project package can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Point Django at the project's settings module
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_vue_api.settings'
# Initialize Django (loads the app registry); must run before importing any models
try:
    django.setup()
except AppRegistryNotReady:
    print("Django is not ready. Check your settings.")
    sys.exit(1)


# Import models only after django.setup() has completed
from car.models import CarsComment, Car
import jieba.analyse
from collections import Counter
def load_stopwords(filepath):
    """Load a stopword file (one word per line, UTF-8) into a set.

    Returns a set rather than a list so the ``word not in stopwords``
    checks in the keyword-filtering loop are O(1) instead of O(n).
    Blank lines are skipped.
    """
    with open(filepath, 'r', encoding='utf-8') as f:
        # Iterate the file directly; no need to materialize readlines()
        return {line.strip() for line in f if line.strip()}

# # Run the user-comment analysis
# user_comment_analysis()
def analyze_comments_by_car():
    """Compute and persist the top-25 keywords for every commented car.

    For each distinct ``car_id`` found in CarsComment, extracts TF-IDF
    keywords (jieba, topK=20 per text) from both the ``satisfied`` and
    ``dissatisfied`` comment fields, drops stopwords, tallies frequencies,
    and saves the 25 most common words as a ``{word: count}`` dict on the
    car's ``key_word`` field.

    NOTE(review): the dictionary/stopword paths are relative to the
    current working directory — run this script from the project root.
    """
    # Load the custom dictionary so domain-specific terms segment correctly
    jieba.load_userdict('./car_word/custom_dict.txt')
    # Hoist stopwords into a set once: membership is tested for every
    # extracted keyword, and a list would make each test O(n)
    stopwords = set(load_stopwords("./car_word/baidu_stopwords.txt"))
    # All distinct car ids that have at least one comment
    car_ids = set(CarsComment.objects.values_list('car_id', flat=True))
    for car_id in car_ids:
        car_comments = CarsComment.objects.filter(car_id=car_id)
        word_counter = Counter()
        for comment in car_comments:
            # Same extraction for both the positive and negative texts
            for text in (comment.satisfied, comment.dissatisfied):
                keywords = [
                    word
                    for word in jieba.analyse.extract_tags(
                        text, topK=20, withWeight=False, allowPOS=())
                    if word not in stopwords
                ]
                word_counter.update(keywords)
        # Persist the 25 most frequent words as {word: count}
        car = Car.objects.get(id=car_id)
        car.key_word = dict(word_counter.most_common(25))
        car.save()


# Run the per-car comment analysis
# analyze_comments_by_car()


# from sklearn.feature_extraction.text import TfidfVectorizer
# import jieba
# from collections import Counter
# from module import CarsComment, Car
#
#
# def analyze_comments_by_car():
#     # 加载自定义词典
#     jieba.load_userdict('./car_word/custom_dict.txt')
#     # 获取所有的汽车类别
#     car_ids = set(CarsComment.objects.values_list('car_id', flat=True))
#     for car_id in car_ids:
#         # 获取当前汽车类别的所有评论
#         car_comments = CarsComment.objects.filter(car_id=car_id)
#         all_comments = []
#         for comment in car_comments:
#             all_comments.append(comment.satisfied)
#             all_comments.append(comment.dissatisfied)
#         # 将文本转换为TF-IDF特征向量
#         vectorizer = TfidfVectorizer(tokenizer=jieba.cut)
#         tfidf_matrix = vectorizer.fit_transform(all_comments)
#         # 获取词汇表和对应的TF-IDF权重
#         feature_names = vectorizer.get_feature_names_out()
#         tfidf_values = tfidf_matrix.sum(axis=0).A1
#         # 获取权重最大的词汇
#         top_keywords = [feature_names[i] for i in tfidf_values.argsort()[-25:][::-1]]
#         # 将关键词存储到对应汽车类别的Car对象中的keyword字段中
#         car = Car.objects.get(id=car_id)
#         car.key_word = dict(Counter(top_keywords))
#         car.save()
#
#









