# rag_system.py - 基于MindSpore的RAG系统
import os
import re
import jieba
import numpy as np
import mindspore as ms
from mindspore import nn, Tensor
import mindspore.ops as ops
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# --- MindSpore模型定义（用于文本处理）---
class TextEncoder(nn.Cell):
    """Bidirectional-LSTM sentence encoder.

    Pipeline: token ids -> embedding -> BiLSTM -> last time step -> dense
    projection to a fixed 256-dimensional vector.
    """

    def __init__(self, vocab_size=50000, embedding_dim=256, hidden_dim=512):
        super(TextEncoder, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True, bidirectional=True)
        # Bidirectional LSTM concatenates both directions, hence hidden_dim * 2 in.
        self.fc = nn.Dense(hidden_dim * 2, 256)

    def construct(self, x):
        """Encode a batch of token-id sequences into 256-d vectors."""
        hidden_states, _ = self.lstm(self.embedding(x))
        # Summarize each sequence by its final time step (both directions).
        return self.fc(hidden_states[:, -1, :])

# --- RAG系统类 ---
class RAGSystem:
    """TF-IDF based document retrieval for retrieval-augmented generation.

    Workflow: load_documents() -> build_vector_index() ->
    retrieve_relevant_chunks().  The MindSpore pieces
    (initialize_mindspore_models / process_with_mindspore) are optional,
    best-effort extras; retrieval itself is pure scikit-learn.
    """

    # Common Chinese function words excluded from the TF-IDF vocabulary.
    _CHINESE_STOP_WORDS = ['的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '那', '什么', '还', '我们', '吧', '把', '又', '可以', '给', '已经', '吗', '她', '他', '它', '然后', '但是', '因为', '所以', '如果', '虽然', '可是', '不过', '而且', '或者', '还是', '为了']

    def __init__(self, doc_path="cangjie_doc_content.txt"):
        self.doc_path = doc_path       # path of the UTF-8 plain-text source document
        self.documents = []            # raw lines of the document
        self.document_chunks = []      # semantic chunks used for indexing
        self.vectorizer = None         # fitted TfidfVectorizer, set by build_vector_index
        self.tfidf_matrix = None       # chunk-by-term sparse TF-IDF matrix
        self.encoder = None            # optional MindSpore TextEncoder

    def load_documents(self):
        """Read self.doc_path and split it into semantic chunks.

        Returns:
            bool: True on success, False if the file could not be read.
        """
        try:
            with open(self.doc_path, 'r', encoding='utf-8') as f:
                content = f.read()
        except OSError as e:
            print(f"❌ 加载文档失败: {e}")
            return False

        # Improved chunking: keep natural paragraph / sentence boundaries.
        self.document_chunks = self._split_into_chunks_improved(content)
        self.documents = content.split('\n')

        print(f"✅ 加载文档成功，共{len(self.document_chunks)}个块")
        return True

    def _split_into_chunks_improved(self, text, chunk_size=300):
        """Split text into chunks of at most ~chunk_size characters.

        Paragraphs (blank-line separated) are kept whole when short enough;
        longer paragraphs are re-packed sentence by sentence so that chunk
        boundaries fall on sentence ends and semantics stay intact.
        """
        chunks = []
        for para in re.split(r'\n\s*\n', text):
            para = para.strip()
            if not para:
                continue
            if len(para) <= chunk_size:
                # Short paragraph: one chunk as-is.
                chunks.append(para)
            else:
                chunks.extend(self._pack_sentences(para, chunk_size))
        return chunks

    def _pack_sentences(self, para, chunk_size):
        """Greedily pack the sentences of one long paragraph into chunks."""
        chunks = []
        current, current_len = [], 0
        # Split on both Chinese and ASCII sentence-ending punctuation.
        for sentence in re.split(r'[。！？；\.\!\?;]', para):
            sentence = sentence.strip()
            if not sentence:
                continue
            if current_len + len(sentence) <= chunk_size:
                current.append(sentence)
                current_len += len(sentence)
            else:
                if current:
                    chunks.append('。'.join(current) + '。')
                current, current_len = [sentence], len(sentence)
        if current:
            chunks.append('。'.join(current) + '。')
        return chunks

    def build_vector_index(self):
        """Fit a TF-IDF index over self.document_chunks.

        Returns:
            bool: True on success, False when there is nothing to index
            or fitting fails.
        """
        if not self.document_chunks:
            print("❌ 没有文档块可供索引")
            return False

        try:
            # jieba performs Chinese word segmentation; token_pattern=None
            # silences sklearn's warning about the unused default pattern
            # when a custom tokenizer is supplied.
            self.vectorizer = TfidfVectorizer(
                max_features=10000,
                stop_words=self._CHINESE_STOP_WORDS,
                tokenizer=jieba.lcut,
                token_pattern=None,
            )
            self.tfidf_matrix = self.vectorizer.fit_transform(self.document_chunks)
            print("✅ 向量索引构建成功")
            return True
        except Exception as e:
            print(f"❌ 构建向量索引失败: {e}")
            return False

    def retrieve_relevant_chunks(self, query, top_k=5):
        """Return up to top_k chunks most similar to the query.

        Returns:
            list[dict]: each entry has 'content' (str) and 'similarity'
            (float).  A single placeholder entry is returned when the
            index is missing, nothing matches, or retrieval fails, so
            callers always receive the same shape.
        """
        if self.tfidf_matrix is None or self.vectorizer is None:
            # Fixed: same dict shape as the success path (was a bare string list).
            return [{"content": "文档索引未建立，请先构建索引", "similarity": 0}]

        try:
            # Cosine similarity between the query and every chunk.
            query_vec = self.vectorizer.transform([query])
            similarities = cosine_similarity(query_vec, self.tfidf_matrix).flatten()

            print(f"🔍 相似度分布: 最高{similarities.max():.3f}, 平均{similarities.mean():.3f}")

            # Indices of the top_k highest-similarity chunks, best first.
            top_indices = similarities.argsort()[-top_k:][::-1]

            # Low threshold (0.01) keeps weak but non-zero matches.
            relevant_chunks = [
                {'content': self.document_chunks[idx], 'similarity': similarities[idx]}
                for idx in top_indices
                if similarities[idx] > 0.01
            ]

            if not relevant_chunks:
                # No chunk cleared the threshold: fall back to the best three.
                relevant_chunks = [
                    {'content': self.document_chunks[idx], 'similarity': similarities[idx]}
                    for idx in top_indices[:3]
                ]

            return relevant_chunks if relevant_chunks else [{"content": "未找到高度相关文档内容，将基于一般知识回答", "similarity": 0}]
        except Exception as e:
            print(f"❌ 检索失败: {e}")
            return [{"content": "检索过程中出现错误", "similarity": 0}]

    def initialize_mindspore_models(self):
        """Best-effort construction of the MindSpore text encoder.

        Deliberately returns True even on failure: the TF-IDF pipeline is
        fully functional without the encoder, so initialization problems
        only degrade optional features instead of aborting the system.
        """
        try:
            self.encoder = TextEncoder()
            print("✅ MindSpore模型初始化成功")
        except Exception as e:
            print(f"⚠️ MindSpore模型初始化失败: {e}")
        return True

    def process_with_mindspore(self, text):
        """Compute simple token statistics for text via MindSpore tensors.

        Only the first 100 characters are analyzed to bound jieba cost.

        Returns:
            dict: word_count / avg_word_length / max_word_length plus a
            'processed_with_mindspore' flag, or an 'error' entry on failure.
        """
        try:
            words = jieba.lcut(text[:100])
            word_lengths = [len(word) for word in words]

            if word_lengths:
                lengths_tensor = Tensor(word_lengths, dtype=ms.float32)
                # Fixed: Tensor.mean()/Tensor.max() are the stable MindSpore
                # reduction API; ops.reduce_mean/reduce_max are not
                # functional-API names (the primitives are ReduceMean/ReduceMax).
                avg_length = lengths_tensor.mean().asnumpy().item()
                max_length = lengths_tensor.max().asnumpy().item()
            else:
                avg_length = 0
                max_length = 0

            return {
                "word_count": len(words),
                "avg_word_length": round(avg_length, 2),
                # Word lengths are integers; undo the float32 round-trip.
                "max_word_length": int(max_length),
                "processed_with_mindspore": True,
            }
        except Exception as e:
            return {"error": str(e), "processed_with_mindspore": False}