# rag.py
import os
import shutil
import PyPDF2
import docx
import numpy as np
from sentence_transformers import SentenceTransformer, CrossEncoder
from sklearn.metrics.pairwise import cosine_similarity
import torch
print("CUDA可用:", torch.cuda.is_available())
# BUG FIX: torch.cuda.current_device() raises RuntimeError when no CUDA
# device is present, which previously crashed this module at import time
# on CPU-only machines. Only query the device index when CUDA is available.
if torch.cuda.is_available():
    print("当前设备:", torch.cuda.current_device())
else:
    print("当前设备:", "cpu")


class FileManager:
    """Manage uploaded files: store them under an upload directory with
    sanitized, collision-free names, and read back textual content from
    .txt/.md, .pdf and .docx files."""

    def __init__(self, upload_dir="uploads"):
        # Absolute path so later joins are unambiguous regardless of CWD.
        self.upload_dir = os.path.abspath(upload_dir)
        os.makedirs(self.upload_dir, exist_ok=True)
        print(f"上传目录初始化为：{self.upload_dir}")

    def save_uploaded_file(self, temp_path):
        """Copy *temp_path* into the upload directory.

        The filename is sanitized (alphanumerics, '_', '-', '.' kept;
        everything else replaced by '_') and a numeric suffix is appended
        on name collisions.

        Returns True on success, False on any failure. Errors are logged,
        never raised — callers rely on the boolean contract.
        """
        try:
            if not os.path.isfile(temp_path):
                print(f"临时文件无效：{temp_path}")
                return False

            # Sanitize the original filename.
            original_name = os.path.basename(temp_path)
            safe_name = "".join(
                c if c.isalnum() or c in ('_', '-', '.') else '_'
                for c in original_name
            )

            # Resolve name collisions with an incrementing counter.
            # (base, ext) is loop-invariant — computed once, not per iteration.
            dest_path = os.path.join(self.upload_dir, safe_name)
            base, ext = os.path.splitext(safe_name)
            counter = 1
            while os.path.exists(dest_path):
                dest_path = os.path.join(self.upload_dir, f"{base}_{counter}{ext}")
                counter += 1

            print(f"尝试将文件从 {temp_path} 复制到 {dest_path}")
            # BUG FIX: shutil.copy2 straight to dest_path is NOT atomic — a
            # concurrent reader could observe a half-written file. Copy to a
            # temp name in the same directory, then os.replace() it into
            # place (atomic rename on both POSIX and Windows).
            tmp_dest = dest_path + ".part"
            shutil.copy2(temp_path, tmp_dest)
            os.chmod(tmp_dest, 0o644)  # normalize permissions before publishing
            os.replace(tmp_dest, dest_path)
            print(f"文件保存成功：{dest_path}")
            return True
        except Exception as e:
            print(f"保存失败：{str(e)}")
            import traceback
            traceback.print_exc()  # full stack trace for diagnosis
            return False

    def read_file(self, file_path):
        """Return the text content of *file_path*.

        Supports .txt/.md (UTF-8), .pdf (PyPDF2) and .docx (python-docx).
        On any problem an explanatory string is returned instead of raising,
        matching the non-raising error style of save_uploaded_file.
        """
        try:
            if not os.path.exists(file_path):
                return f"文件不存在: {file_path}"

            _, ext = os.path.splitext(file_path)
            ext = ext.lower()

            if ext in ('.txt', '.md'):
                with open(file_path, "r", encoding="utf-8") as f:
                    return f.read()
            elif ext == '.pdf':
                text = ""
                with open(file_path, 'rb') as f:
                    reader = PyPDF2.PdfReader(f)
                    for page in reader.pages:
                        # extract_text() may return None (e.g. image-only pages)
                        text += page.extract_text() or ""
                return text.strip() if text else "无法提取 PDF 内容"
            elif ext == '.docx':
                # NOTE(review): only paragraph text is extracted; table
                # content in .docx files is skipped — confirm acceptable.
                doc = docx.Document(file_path)
                return '\n'.join(paragraph.text for paragraph in doc.paragraphs)
            else:
                return f"不支持的文件类型: {ext}"
        except Exception as e:
            return f"读取文件出错：{str(e)}"


class RAGManager:
    """Two-stage retrieval: bi-encoder recall, then cross-encoder rerank."""

    def __init__(self):
        # Stage 1: lightweight bi-encoder for coarse candidate retrieval.
        self.embedder = SentenceTransformer('../model/shibing624/text2vec-base-chinese')
        # Stage 2: cross-encoder that scores (query, chunk) pairs for reranking.
        self.reranker = CrossEncoder('../model/cross-encoder/ms-marco-MiniLM-L-6-v2')
        self.chunks = []        # text chunks of the currently loaded document
        self.embeddings = None  # ndarray of chunk embeddings, or None if unset

    def chunk_text(self, text, chunk_size=500):
        """Split *text* into fixed-size chunks (last one may be shorter),
        remember them on the instance, and return the list."""
        self.chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
        return self.chunks

    def generate_embeddings(self):
        """Encode all current chunks into self.embeddings.

        BUG FIX: when there are no chunks, reset embeddings to None instead
        of silently keeping a stale array from a previous document — stale
        embeddings would make retrieve() index self.chunks out of range.
        """
        if not self.chunks:
            self.embeddings = None
            return
        self.embeddings = self.embedder.encode(self.chunks)

    def retrieve(self, query, top_k=10, rerank_top_k=3):
        """Return up to *rerank_top_k* chunks most relevant to *query*.

        BUG FIX: returns [] when no document has been chunked/embedded yet
        (previously crashed because self.embeddings was None).
        """
        if self.embeddings is None or not self.chunks:
            return []

        # Stage 1: coarse retrieval — cosine similarity over all chunks,
        # keep the top_k highest-scoring candidates.
        query_embedding = self.embedder.encode([query])
        similarities = cosine_similarity(query_embedding, self.embeddings)[0]
        top_indices = similarities.argsort()[-top_k:][::-1]
        candidate_chunks = [self.chunks[i] for i in top_indices]

        # Stage 2: rerank candidates with the cross-encoder.
        if self.reranker:
            pairs = [(query, chunk) for chunk in candidate_chunks]
            scores = self.reranker.predict(pairs)
            reranked = sorted(zip(candidate_chunks, scores), key=lambda x: x[1], reverse=True)
            return [chunk for chunk, _score in reranked[:rerank_top_k]]

        return candidate_chunks[:rerank_top_k]
