# ingestion.py
import os
import glob
from dotenv import load_dotenv
from pymilvus import connections, FieldSchema, CollectionSchema, DataType, Collection, utility
import docx2txt
import PyPDF2
import markdown
from langchain.text_splitter import RecursiveCharacterTextSplitter
import dashscope  # 直接使用dashscope库

# Load environment variables from a local .env file.
load_dotenv()

# Configure the Tongyi Qianwen (DashScope) API key from the environment.
dashscope.api_key = os.getenv('DASHSCOPE_API_KEY')

# Smoke-test the DashScope API at import time so a missing/invalid key is
# reported immediately instead of failing later mid-ingestion.
try:
    # Issue a minimal embedding request to verify connectivity.
    from dashscope import TextEmbedding
    test_response = TextEmbedding.call(
        model="text-embedding-v2",
        input=["测试文本"]
    )
    print("通义千问API连接测试成功！")
    print(f"测试响应: {test_response}")
except Exception as e:
    print(f"通义千问API连接测试失败: {str(e)}")
    # Print the full traceback to make debugging easier.
    import traceback
    traceback.print_exc()

# Connect to Milvus; host/port are overridable via environment variables.
connections.connect(
    host=os.getenv('MILVUS_HOST', 'localhost'),
    port=os.getenv('MILVUS_PORT', '19530')
)

# Target collection name and embedding dimensionality.
collection_name = os.getenv('MILVUS_COLLECTION_NAME', 'contract_documents_v1')
dimension = 1536  # output dimension of the text-embedding-v2 model

def create_milvus_collection():
    """(Re)create the Milvus collection used for contract documents.

    Any existing collection with the configured name is dropped first, so
    each ingestion run starts from a clean slate.

    Returns:
        The freshly created `Collection`, with an HNSW index on its
        vector field.
    """
    # Drop a stale collection of the same name, if present.
    if utility.has_collection(collection_name):
        print(f"集合 '{collection_name}' 已存在，正在删除...")
        utility.drop_collection(collection_name)
        print("旧集合已删除。")

    # Schema: auto-generated int64 primary key, source file name,
    # the raw text chunk, and its embedding vector.
    schema = CollectionSchema(
        fields=[
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="file_name", dtype=DataType.VARCHAR, max_length=200),
            FieldSchema(name="text_chunk", dtype=DataType.VARCHAR, max_length=65535),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=dimension),
        ],
        description="Contract documents collection",
    )
    collection = Collection(name=collection_name, schema=schema)

    # HNSW index with L2 distance on the embedding vectors.
    collection.create_index(
        field_name="vector",
        index_params={
            "index_type": "HNSW",
            "metric_type": "L2",
            "params": {"M": 8, "efConstruction": 128},
        },
    )
    print(f"Milvus集合 '{collection_name}' 创建成功。")
    return collection

def get_embedding(text: str, model: str = "text-embedding-v2"):
    """Generate an embedding vector for *text* via the DashScope API.

    Args:
        text: The text to embed. Empty or whitespace-only text (or None)
            is rejected without calling the API.
        model: DashScope embedding model name. The default
            "text-embedding-v2" produces 1536-dimensional vectors
            (matching the module-level `dimension`).

    Returns:
        The embedding as a list of floats, or None when the text is
        empty, the API returns no embeddings, or an error occurs.
    """
    # Fast path outside the try block: an empty input is not an API error,
    # so it should not fall into the generic exception handler below.
    if not text or not text.strip():
        print("文本内容为空，无法生成Embedding")
        return None

    try:
        # Imported lazily so the module can load even when only the
        # non-embedding paths are exercised.
        from dashscope import TextEmbedding

        response = TextEmbedding.call(
            model=model,
            input=[text]
        )

        # Guard every level of the response shape: a failed call may return
        # a None/empty output or a missing 'embeddings' key; use .get so a
        # missing key yields the documented None instead of a KeyError.
        # NOTE(review): response.output is treated as dict-like elsewhere in
        # this file — confirm against the dashscope SDK version in use.
        if not response or not response.output:
            print("Embedding API返回空结果")
            return None
        embeddings = response.output.get('embeddings')
        if not embeddings:
            print("Embedding API返回空结果")
            return None

        return embeddings[0]['embedding']
    except Exception as e:
        print(f"生成Embedding时出错: {str(e)}")
        # Print the full traceback to make debugging easier.
        import traceback
        traceback.print_exc()
        return None

def _extract_text(file_path, ext):
    """Extract raw text from a single file, dispatching on its extension.

    Returns the extracted text, or None when the file cannot be read
    (the caller skips such files).
    """
    if ext == '.pdf':
        # Collect per-page text and join once to avoid quadratic string
        # concatenation on large PDFs.
        pages = []
        with open(file_path, 'rb') as f:
            reader = PyPDF2.PdfReader(f)
            print(f"  PDF文件共有 {len(reader.pages)} 页")
            for i, page in enumerate(reader.pages):
                page_text = page.extract_text() or ""
                pages.append(page_text)
                print(f"  提取第 {i+1} 页，长度: {len(page_text)} 字符")
        text = "".join(pages)
        print(f"  PDF文件总文本长度: {len(text)} 字符")
        return text

    if ext == '.docx':
        text = docx2txt.process(file_path)
        print(f"  DOCX文件文本长度: {len(text)} 字符")
        return text

    if ext == '.txt':
        with open(file_path, 'r', encoding='utf-8') as f:
            text = f.read()
        print(f"  TXT文件文本长度: {len(text)} 字符")
        return text

    if ext == '.md':
        with open(file_path, 'r', encoding='utf-8') as f:
            text = f.read()
        print(f"  MD文件文本长度: {len(text)} 字符")
        return text

    # Any other extension (e.g. legacy .doc): best-effort read as UTF-8 text.
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            text = f.read()
        print(f"  其他文件文本长度: {len(text)} 字符")
        return text
    except Exception as e:
        print(f"无法处理文件 {file_path}，跳过。错误: {str(e)}")
        return None


def process_documents(directory_path: str):
    """Recursively scan *directory_path*, extract text from each supported
    document, and split it into overlapping chunks ready for embedding.

    Supported formats: PDF, DOCX/DOC, TXT and Markdown; anything else is
    attempted as plain UTF-8 text. Unreadable or empty files are reported
    and skipped.

    Returns:
        A list of langchain `Document` chunks, each carrying its source
        file name in `metadata['file_name']`.
    """
    supported_extensions = ['*.pdf', '*.docx', '*.doc', '*.txt', '*.md']
    file_paths = []
    for pattern in supported_extensions:
        file_paths.extend(glob.glob(os.path.join(directory_path, '**', pattern), recursive=True))

    print(f"找到 {len(file_paths)} 个文件进行处理。")

    # ~800-character chunks with 100-character overlap so context is
    # preserved across chunk boundaries.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=800,
        chunk_overlap=100
    )

    all_chunks = []
    from langchain_core.documents import Document

    for file_path in file_paths:
        try:
            print(f"正在处理: {file_path}")
            ext = os.path.splitext(file_path)[1].lower()
            file_name = os.path.basename(file_path)

            text = _extract_text(file_path, ext)
            if text is None:
                # Extraction already reported the failure; skip this file.
                continue

            if text.strip():
                document = Document(page_content=text, metadata={'file_name': file_name})
                chunks = text_splitter.split_documents([document])
                print(f"  文档分割完成，生成 {len(chunks)} 个文本块")
                all_chunks.extend(chunks)
            else:
                print(f"文件 {file_path} 内容为空，跳过。")

        except Exception as e:
            print(f"处理文件 {file_path} 时出错: {str(e)}")
            import traceback
            traceback.print_exc()

    print(f"总共分割成 {len(all_chunks)} 个文本块。")
    return all_chunks

def store_in_milvus(collection, chunks):
    """Embed every text chunk and bulk-insert the results into Milvus.

    Args:
        collection: The target Milvus `Collection`.
        chunks: langchain `Document` chunks produced by `process_documents`.

    Chunks whose embedding fails are silently skipped (the embedding
    helper already logs the error).
    """
    file_names, text_chunks, vectors = [], [], []
    total = len(chunks)

    for idx, chunk in enumerate(chunks):
        # Progress report every 50 chunks.
        if idx % 50 == 0:
            print(f"正在处理第 {idx}/{total} 个文本块...")

        content = chunk.page_content
        embedding = get_embedding(content)
        if embedding is None:
            continue

        file_names.append(chunk.metadata['file_name'])
        text_chunks.append(content)
        vectors.append(embedding)

    if not vectors:
        print("没有生成有效的向量数据。")
        return

    # Column-oriented bulk insert: one list per schema field.
    print(f"开始向Milvus插入 {len(vectors)} 条数据...")
    insert_result = collection.insert([file_names, text_chunks, vectors])
    collection.flush()  # make the freshly inserted rows searchable
    print(f"成功插入 {len(vectors)} 条数据到Milvus。")
    print(f"插入的ID范围: {insert_result.primary_keys}")

def main():
    """Run the full ingestion pipeline.

    (Re)creates the Milvus collection, chunks all documents in the source
    directory, embeds and stores them, then loads the collection so it is
    immediately searchable by query.py.
    """
    collection = create_milvus_collection()

    doc_directory = r"E:\MyNewDoc\合作协议"
    # Bail out early if the source directory is missing.
    if not os.path.exists(doc_directory):
        print(f"文档目录 {doc_directory} 不存在，请创建该目录并添加文档。")
        return

    store_in_milvus(collection, process_documents(doc_directory))

    # Load the collection into memory so searches work right away.
    collection.load()
    print("数据入库完成！现在可以运行 query.py 进行问答了。")


if __name__ == "__main__":
    main()