import math
from collections import Counter
import re
import jieba

class TFIDF:
    """TF-IDF model over a corpus of (Chinese) documents.

    Documents are tokenized with jieba, stored as word lists, and a shared
    vocabulary plus a cached IDF table are maintained across all added
    documents.
    """

    def __init__(self):
        # Tokenized documents (each a list of words).
        self.documents = []
        # Union of all words seen across documents.
        self.vocab = set()
        # Cached IDF values; lazily (re)computed by calculate_idf().
        self.idf_dict = {}

    def preprocess_text(self, text):
        """Tokenize and clean a raw text string.

        Strips punctuation/special characters, segments with jieba, drops
        whitespace-only tokens, and lowercases (affects Latin-script tokens).
        A stop-word list could be applied here as an extension.
        """
        # Remove punctuation and special characters.
        text = re.sub(r'[^\w\s]', '', text)
        # Chinese word segmentation via jieba.
        words = jieba.lcut(text)
        return [word.lower() for word in words if word.strip()]

    def add_document(self, document):
        """Tokenize a raw document and add it to the corpus."""
        words = self.preprocess_text(document)
        self.documents.append(words)
        self.vocab.update(words)
        # BUGFIX: invalidate the cached IDF table — it is stale once the
        # corpus changes, and calculate_tfidf() would otherwise keep using it.
        self.idf_dict = {}

    def calculate_tf(self, document):
        """Return {word: term frequency} for a tokenized document.

        Returns an empty dict for an empty document (the original divided
        by len(document) and raised ZeroDivisionError).
        """
        if not document:
            return {}
        total = len(document)
        return {word: count / total for word, count in Counter(document).items()}

    def calculate_idf(self):
        """(Re)compute smoothed IDF for every word in the vocabulary.

        Uses idf = log(N / (1 + df)) + 1, where df is the number of
        documents containing the word.
        """
        total_docs = len(self.documents)
        # Count document frequency in one pass over the corpus instead of
        # scanning every document per vocabulary word (was O(V * D * L)).
        doc_freq = Counter()
        for doc in self.documents:
            doc_freq.update(set(doc))
        self.idf_dict = {
            word: math.log(total_docs / (1 + doc_freq[word])) + 1
            for word in self.vocab
        }

    def calculate_tfidf(self, document):
        """Return {word: tf-idf} for a tokenized document.

        Words absent from the corpus vocabulary are skipped (no IDF known).
        """
        if not self.idf_dict:
            self.calculate_idf()
        tf_dict = self.calculate_tf(document)
        return {
            word: tf * self.idf_dict[word]
            for word, tf in tf_dict.items()
            if word in self.idf_dict
        }

    def get_keywords(self, document, top_k=5):
        """Return the top_k (word, tf-idf) pairs, highest score first."""
        tfidf_dict = self.calculate_tfidf(document)
        ranked = sorted(tfidf_dict.items(), key=lambda x: x[1], reverse=True)
        return ranked[:top_k]

    def calculate_similarity(self, doc1, doc2):
        """Cosine similarity between two tokenized documents' TF-IDF vectors."""
        tfidf1 = self.calculate_tfidf(doc1)
        tfidf2 = self.calculate_tfidf(doc2)
        # Only words present in both vectors contribute to the dot product;
        # iterating the smaller dict avoids a full-vocabulary scan.
        if len(tfidf1) > len(tfidf2):
            tfidf1, tfidf2 = tfidf2, tfidf1
        dot_product = sum(v * tfidf2.get(w, 0) for w, v in tfidf1.items())
        norm1 = math.sqrt(sum(v ** 2 for v in tfidf1.values()))
        norm2 = math.sqrt(sum(v ** 2 for v in tfidf2.values()))
        # Guard against zero-length vectors (e.g. empty documents).
        if norm1 == 0 or norm2 == 0:
            return 0
        return dot_product / (norm1 * norm2)

    def get_feature_vector(self, document):
        """Return the document's TF-IDF vector over the sorted vocabulary.

        Sorting the vocabulary keeps feature order consistent across calls.
        """
        tfidf_dict = self.calculate_tfidf(document)
        return [tfidf_dict.get(word, 0) for word in sorted(self.vocab)]