import re
import math
from collections import defaultdict, Counter
from typing import List, Dict, Any

class TextVectorizer:
    """In-memory text vectorizer and similarity-search store.

    Splits documents into word chunks, vectorizes each chunk as a raw
    term-frequency bag-of-words or as a smoothed TF-IDF dict, and supports
    cosine / BM25 ranked retrieval plus per-source listing and deletion.
    """

    def __init__(self, use_tfidf=True):
        """Initialize the vectorizer.

        :param use_tfidf: when True (default) vectors are TF-IDF dicts,
                          otherwise raw term-frequency Counters.
        """
        # Vocabulary and document-frequency statistics.
        self.vocab = set()
        self.document_freq = defaultdict(int)  # word -> number of chunks containing it
        self.total_documents = 0               # number of vectorize_and_store calls
        self.use_tfidf = use_tfidf

        # Parallel in-memory stores: chunk text, its vector, its metadata.
        self.text_chunks = []
        self.vectors = []  # term-frequency Counters or TF-IDF dicts
        self.metadata = []

    def preprocess_text(self, text):
        """Lowercase, strip punctuation, isolate digit runs, split on whitespace.

        Keeps word characters, whitespace and CJK ideographs. Contiguous
        Chinese text is NOT word-segmented here — a tool such as jieba
        would be needed for that.
        """
        text = text.lower()
        # Keep word chars, whitespace and the CJK Unified Ideographs range.
        text = re.sub(r'[^\w\s\u4e00-\u9fff]', '', text)
        # Surround digit runs with spaces so numbers / phone numbers become
        # standalone tokens even when glued to letters (e.g. "123abc").
        text = re.sub(r'(\d+)', r' \1 ', text)
        return [word for word in text.split() if word]

    def split_text(self, text: str, chunk_size: int = 100, chunk_overlap: int = 20, semantic_split=False) -> List[str]:
        """Split text into chunks of at most ``chunk_size`` words.

        :param text: input text
        :param chunk_size: maximum number of words per chunk
        :param chunk_overlap: number of words shared between adjacent chunks
        :param semantic_split: split on sentence boundaries when True
        :return: list of chunk strings (words joined by single spaces)
        """
        # BUG FIX: a non-positive step (chunk_overlap >= chunk_size) used to
        # either raise ValueError (step 0) or silently produce NO chunks
        # (negative range step), dropping the text entirely. Clamp to >= 1.
        step = max(1, chunk_size - chunk_overlap)

        if not semantic_split:
            # Simple word-count based split with overlap.
            words = self.preprocess_text(text)
            return [' '.join(words[i:i + chunk_size])
                    for i in range(0, len(words), step)]

        # Sentence-based split (simple heuristic; an NLP library such as
        # spaCy could give a more robust semantic segmentation).
        sentences = [s.strip() for s in re.split(r'[。！？；\n]', text) if s.strip()]

        chunks = []
        current_chunk = []
        current_length = 0

        for sentence in sentences:
            sentence_words = self.preprocess_text(sentence)
            sentence_length = len(sentence_words)

            if current_length + sentence_length > chunk_size:
                # Flush the running chunk, keeping the configured overlap.
                if current_chunk:
                    chunks.append(' '.join(current_chunk))
                    current_chunk = current_chunk[-chunk_overlap:] if chunk_overlap > 0 else []
                    current_length = len(current_chunk)
                if sentence_length > chunk_size:
                    # The sentence alone exceeds chunk_size: hard-split it.
                    # BUG FIX: reset the running chunk first — the original
                    # kept the previous overlap words here, and they ended up
                    # glued out-of-order AFTER the hard-split sentence.
                    current_chunk = []
                    current_length = 0
                    for i in range(0, sentence_length, step):
                        chunks.append(' '.join(sentence_words[i:i + chunk_size]))
                else:
                    current_chunk.extend(sentence_words)
                    current_length += sentence_length
            else:
                current_chunk.extend(sentence_words)
                current_length += sentence_length

        if current_chunk:
            chunks.append(' '.join(current_chunk))

        return chunks

    def _weight_terms(self, tf):
        """Turn a term-frequency mapping into the configured vector form.

        Returns *tf* unchanged in bag-of-words mode, otherwise a dict of
        smoothed TF-IDF weights.
        """
        if not self.use_tfidf:
            return tf

        tfidf = {}
        for word, count in tf.items():
            # Smoothed IDF: +1 in numerator and denominator, +1 on the log,
            # so unseen terms never get a zero or negative weight.
            # BUG FIX: use .get() instead of defaultdict indexing — the
            # original inserted a 0 entry into document_freq for every
            # unseen word, growing the stats dict without bound.
            idf = math.log((self.total_documents + 1) / (self.document_freq.get(word, 0) + 1)) + 1
            tfidf[word] = count * idf
        return tfidf

    def vectorize_text(self, text):
        """Vectorize *text*, registering its words in the vocabulary."""
        words = self.preprocess_text(text)
        self.vocab.update(words)
        return self._weight_terms(Counter(words))

    def vectorize_and_store(self, text: str, metadata: Dict[str, Any] = None, chunk_size: int = 100, chunk_overlap: int = 20, semantic_split=False) -> Dict[str, Any]:
        """Split, vectorize and store *text* in memory.

        :param text: input text
        :param metadata: metadata attached to every chunk of this text
        :param chunk_size: maximum number of words per chunk
        :param chunk_overlap: number of words shared between adjacent chunks
        :param semantic_split: split on sentence boundaries when True
        :return: summary dict with success flag, message and counts
        """
        if metadata is None:
            metadata = {}

        chunks = self.split_text(text, chunk_size=chunk_size, chunk_overlap=chunk_overlap, semantic_split=semantic_split)

        # Update corpus statistics.
        # NOTE(review): document_freq is counted per CHUNK while
        # total_documents counts per store() call — the IDF mixes the two
        # granularities. Kept as-is to preserve existing scores; confirm
        # intent before changing.
        self.total_documents += 1
        for chunk in chunks:
            for word in set(self.preprocess_text(chunk)):
                self.document_freq[word] += 1

        chunk_vectors = [self.vectorize_text(chunk) for chunk in chunks]

        # Append to the parallel stores; chunk_index is the global position.
        start_index = len(self.text_chunks)
        for i, (chunk, vector) in enumerate(zip(chunks, chunk_vectors)):
            self.text_chunks.append(chunk)
            self.vectors.append(vector)
            self.metadata.append({**metadata, "chunk_index": start_index + i})

        return {
            "success": True,
            "message": f"成功处理并存储 {len(chunks)} 个文本块",
            "chunk_count": len(chunks),
            "total_chunks": len(self.text_chunks),
            "vocab_size": len(self.vocab)
        }

    def _cosine_similarity(self, vec1, vec2):
        """Cosine similarity between two sparse dict vectors."""
        # Only shared keys contribute to the dot product.
        common = vec1.keys() & vec2.keys()
        dot_product = sum(vec1[word] * vec2[word] for word in common)
        norm1 = math.sqrt(sum(v * v for v in vec1.values()))
        norm2 = math.sqrt(sum(v * v for v in vec2.values()))
        # Guard against division by zero for empty vectors.
        if norm1 == 0 or norm2 == 0:
            return 0.0
        return dot_product / (norm1 * norm2)

    def _bm25_similarity(self, query_vector, doc_vector, k1=1.5, b=0.75):
        """Okapi BM25 score of *doc_vector* for the terms in *query_vector*."""
        doc_length = sum(doc_vector.values())
        # Average stored-chunk length; fall back to 1 when nothing is stored
        # (or all stored vectors are empty) to avoid division by zero.
        if self.vectors:
            avg_doc_length = sum(sum(vec.values()) for vec in self.vectors) / len(self.vectors) or 1.0
        else:
            avg_doc_length = 1.0

        score = 0
        for word in query_vector:
            if word not in doc_vector:
                continue

            tf = doc_vector[word]
            df = self.document_freq.get(word, 0)
            # Smoothed BM25 IDF (the +1 keeps it non-negative).
            idf = math.log((self.total_documents - df + 0.5) / (df + 0.5) + 1)

            # Standard BM25 term saturation / length normalization.
            score += idf * (tf * (k1 + 1)) / (tf + k1 * (1 - b + b * doc_length / avg_doc_length))

        return score

    def query(self, query_text: str, n_results: int = 5, similarity_metric="cosine") -> List[Dict[str, Any]]:
        """Return the stored chunks most similar to *query_text*.

        :param query_text: query text
        :param n_results: maximum number of results
        :param similarity_metric: "cosine" (default) or "bm25"
        :return: list of {"text", "metadata", "similarity"} dicts,
                 best match first
        """
        if not self.vectors:
            return []

        # BUG FIX: vectorize the query WITHOUT going through
        # vectorize_text(), which registers query words in the vocabulary —
        # a read-only query must not mutate the stored model.
        query_vector = self._weight_terms(Counter(self.preprocess_text(query_text)))

        use_bm25 = similarity_metric.lower() == "bm25"
        similarities = []
        for i, vec in enumerate(self.vectors):
            if use_bm25:
                similarity = self._bm25_similarity(query_vector, vec)
            else:  # default: cosine similarity
                similarity = self._cosine_similarity(vec, query_vector)
            similarities.append((i, similarity))

        # Best matches first.
        similarities.sort(key=lambda x: x[1], reverse=True)

        return [
            {
                "text": self.text_chunks[i],
                "metadata": self.metadata[i],
                "similarity": similarity
            }
            for i, similarity in similarities[:n_results]
        ]

    def list_documents(self) -> List[Dict[str, Any]]:
        """List the unique stored documents, keyed by their 'source' metadata.

        :return: one entry per source with its metadata (minus chunk_index)
                 and chunk count
        """
        # Single pass over the metadata (the original rescanned the whole
        # list once per source, O(n^2)).
        chunk_counts = {}   # source -> number of chunks
        first_meta = {}     # source -> metadata of the source's first chunk
        for meta in self.metadata:
            if 'source' not in meta:
                continue
            source = meta['source']
            if source not in chunk_counts:
                chunk_counts[source] = 0
                first_meta[source] = meta
            chunk_counts[source] += 1

        return [
            {
                "source": source,
                # The first chunk's metadata stands in for the document's,
                # with the per-chunk index stripped.
                "metadata": {k: v for k, v in first_meta[source].items() if k != 'chunk_index'},
                "chunk_count": count
            }
            for source, count in chunk_counts.items()
        ]

    def delete_document(self, source: str) -> Dict[str, Any]:
        """Delete every stored chunk whose metadata 'source' equals *source*.

        :param source: document source identifier
        :return: result dict with success flag and message
        """
        indices_to_delete = [i for i, meta in enumerate(self.metadata) if meta.get('source') == source]

        if not indices_to_delete:
            return {
                "success": False,
                "message": f"未找到源为 {source} 的文档"
            }

        # Delete in descending index order so earlier deletions do not
        # shift the remaining indices.
        # NOTE(review): document_freq / total_documents are NOT rolled back
        # here, so IDF stats go stale after deletion — confirm whether that
        # is acceptable before relying on post-delete scores.
        for i in sorted(indices_to_delete, reverse=True):
            del self.text_chunks[i]
            del self.vectors[i]
            del self.metadata[i]

        return {
            "success": True,
            "message": f"成功删除源为 {source} 的文档，共删除 {len(indices_to_delete)} 个块"
        }

# Example usage
if __name__ == "__main__":
    # Set up the vectorizer with default TF-IDF weighting.
    vectorizer = TextVectorizer()

    # Sample text to index.
    sample_text = "这是一个示例文本，用于测试文本向量化和存储功能。文本将被分割成块，每个块会被向量化并存储到内存中。"

    # Vectorize and store it with some metadata.
    result = vectorizer.vectorize_and_store(
        sample_text,
        metadata={"source": "example.txt", "author": "test"}
    )

    print(result)

    # Query example.
    query_result = vectorizer.query("测试文本")
    print("\n查询结果:")
    for item in query_result:
        # BUG FIX: query() returns "text" / "metadata" / "similarity" keys;
        # the original indexed nonexistent "id" and "document" keys and
        # raised KeyError on the first result.
        print(f"文本: {item['text']}")
        print(f"元数据: {item['metadata']}")
        print(f"相似度: {item['similarity']:.4f}")
        print("---")