#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
批量文件夹文本向量化工具
功能：读取文件夹下所有txt文件，向量化后存储到Milvus新集合中
"""

import os
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba
import re
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility
import uuid
from datetime import datetime
from pathlib import Path


def _read_text_with_fallback(txt_file, primary_encoding):
    """Read a text file, retrying with GBK when the primary encoding fails.

    Returns (stripped content, used_gbk flag). Any error from the GBK retry,
    or a non-decoding failure, propagates to the caller.
    """
    try:
        with open(txt_file, 'r', encoding=primary_encoding) as fh:
            return fh.read().strip(), False
    except UnicodeDecodeError:
        # Many Chinese-language text files ship in GBK rather than UTF-8.
        with open(txt_file, 'r', encoding='gbk') as fh:
            return fh.read().strip(), True


def read_folder_txt_files(folder_path, encoding='utf-8'):
    """Read every *.txt file directly under *folder_path*.

    Args:
        folder_path: directory to scan (str or Path).
        encoding: primary encoding to try; GBK is used as a fallback.

    Returns:
        (texts, file_info): parallel lists of non-empty file contents and
        metadata dicts with 'filename', 'filepath' and 'size' keys. Both
        lists are empty when the folder is missing or holds no txt files.
    """
    texts = []
    file_info = []

    folder_path = Path(folder_path)
    if not folder_path.exists():
        print(f"错误：文件夹 {folder_path} 不存在")
        return [], []

    # Sort for a deterministic processing order (glob order is
    # filesystem-dependent and was previously unpredictable).
    txt_files = sorted(folder_path.glob("*.txt"))

    if not txt_files:
        print(f"警告：文件夹 {folder_path} 中没有找到txt文件")
        return [], []

    print(f"找到 {len(txt_files)} 个txt文件")

    for txt_file in txt_files:
        try:
            content, used_gbk = _read_text_with_fallback(txt_file, encoding)
        except Exception as e:
            print(f"❌ 读取文件失败: {txt_file.name} - {e}")
            continue

        if not content:
            # Empty files carry no signal for vectorization; skip them.
            print(f"⚠️  跳过空文件: {txt_file.name}")
            continue

        texts.append(content)
        file_info.append({
            'filename': txt_file.name,
            'filepath': str(txt_file),
            'size': txt_file.stat().st_size
        })
        label = " (GBK)" if used_gbk else ""
        print(f"✅ 读取文件{label}: {txt_file.name} ({len(content)} 字符)")

    print(f"成功读取 {len(texts)} 个有效文件")
    return texts, file_info


def preprocess_text(text):
    """Normalize raw text for TF-IDF.

    Strips punctuation and digits, lowercases, segments Chinese with jieba,
    and collapses runs of whitespace into single spaces.
    """
    # Replace punctuation, then digits, with spaces; lowercase the rest.
    cleaned = re.sub(r'\d+', ' ', re.sub(r'[^\w\s]', ' ', text)).lower()

    # jieba segmentation so the TF-IDF tokenizer sees space-separated words.
    segmented = ' '.join(jieba.cut(cleaned))

    # Squeeze whitespace introduced by the substitutions above.
    return re.sub(r'\s+', ' ', segmented).strip()


def vectorize_texts(texts, max_features=100):
    """Fit a TF-IDF model over *texts* and return dense vectors.

    Args:
        texts: list of raw document strings.
        max_features: vocabulary cap, i.e. the output vector dimension.

    Returns:
        (ndarray of shape (n_docs, n_features), fitted vectorizer),
        or (None, None) when *texts* is empty.
    """
    if not texts:
        print("错误：没有文本需要向量化")
        return None, None

    print(f"开始向量化 {len(texts)} 个文本...")

    # Clean + segment every document before fitting.
    cleaned = list(map(preprocess_text, texts))

    tfidf = TfidfVectorizer(max_features=max_features, ngram_range=(1, 2))
    matrix = tfidf.fit_transform(cleaned)

    print(f"向量化完成，矩阵形状: {matrix.shape}")
    return matrix.toarray(), tfidf


def connect_to_milvus(host="localhost", port="19530"):
    """Open the default pymilvus connection; return True on success."""
    try:
        connections.connect("default", host=host, port=port)
    except Exception as e:
        print(f"❌ 连接Milvus服务器失败: {e}")
        print("请确保Docker Milvus服务正在运行")
        return False
    print(f"✅ 成功连接到Milvus服务器 {host}:{port}")
    return True


def create_milvus_collection(collection_name, vector_dim, overwrite=None):
    """Create a Milvus collection for text vectors, with an IVF_FLAT index.

    Args:
        collection_name: name of the collection to create.
        vector_dim: dimension of the FLOAT_VECTOR field.
        overwrite: policy when an identically-named collection already
            exists — True drops it silently, False aborts, and None (the
            default) asks interactively, preserving the original
            prompt-based behavior so the function also works in
            non-interactive/batch contexts.

    Returns:
        The new Collection, or None when overwriting was declined.
    """
    if utility.has_collection(collection_name):
        if overwrite is None:
            choice = input(f"集合 '{collection_name}' 已存在，是否删除并重新创建？(y/N): ").strip().lower()
            overwrite = (choice == 'y')
        if overwrite:
            utility.drop_collection(collection_name)
            print(f"✅ 删除已存在的集合: {collection_name}")
        else:
            print("❌ 操作取消")
            return None

    # Schema: UUID primary key, file metadata, raw text, vector, timestamp.
    fields = [
        FieldSchema(name="id", dtype=DataType.VARCHAR, max_length=100, is_primary=True),
        FieldSchema(name="filename", dtype=DataType.VARCHAR, max_length=500),
        FieldSchema(name="filepath", dtype=DataType.VARCHAR, max_length=1000),
        FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),  # VARCHAR maximum
        FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=vector_dim),
        FieldSchema(name="timestamp", dtype=DataType.VARCHAR, max_length=50)
    ]

    schema = CollectionSchema(fields, f"批量文件向量集合 - {collection_name}")
    collection = Collection(collection_name, schema)
    print(f"✅ 成功创建集合: {collection_name}")

    # L2 metric with IVF_FLAT keeps search exact within each probed cluster.
    index_params = {
        "metric_type": "L2",
        "index_type": "IVF_FLAT",
        "params": {"nlist": 128}
    }
    collection.create_index("vector", index_params)
    print("✅ 成功创建向量索引")

    return collection


def insert_to_milvus(collection, texts, vectors, file_info):
    """Batch-insert texts, vectors and file metadata into a Milvus collection.

    Args:
        collection: target pymilvus Collection (schema must match the fields
            created by create_milvus_collection).
        texts: list of document strings.
        vectors: 2-D numpy array, one row per document.
        file_info: list of dicts with 'filename' and 'filepath' keys.

    Returns:
        True on success, False on length mismatch or insert failure.
    """
    if not (len(texts) == len(vectors) == len(file_info)):
        print("❌ 数据长度不匹配")
        return False

    ids = [str(uuid.uuid4()) for _ in texts]
    filenames = [info['filename'] for info in file_info]
    filepaths = [info['filepath'] for info in file_info]
    # One shared timestamp for the whole batch; the original re-evaluated
    # datetime.now() per row, so a batch could straddle a second boundary
    # and record mixed timestamps.
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    timestamps = [now] * len(texts)

    # Column-oriented insert payload, in schema field order.
    data = [ids, filenames, filepaths, texts, vectors.tolist(), timestamps]

    try:
        collection.insert(data)
        collection.flush()  # make the batch durable/visible before returning
        print(f"✅ 成功插入 {len(texts)} 条向量数据到Milvus")
        return True
    except Exception as e:
        print(f"❌ 插入数据失败: {e}")
        return False


def export_summary_to_excel(texts, file_info, vectors, output_path):
    """Write a per-file processing summary (metadata, preview, vector) to Excel."""
    rows = []
    for idx, (content, meta) in enumerate(zip(texts, file_info)):
        # Truncate long documents for the preview column.
        preview = content if len(content) <= 100 else content[:100] + '...'
        rows.append({
            '文件名': meta['filename'],
            '文件路径': meta['filepath'],
            '文件大小(字节)': meta['size'],
            '文本长度': len(content),
            '文本预览': preview,
            # Vector serialized as a comma-joined string of 6-decimal floats.
            '向量': ','.join(f'{v:.6f}' for v in vectors[idx]),
        })

    pd.DataFrame(rows).to_excel(output_path, index=False, engine='openpyxl')
    print(f"✅ 处理摘要已导出到: {output_path}")


def process_folder_to_milvus(folder_path, collection_name, output_excel=None, 
                           max_features=100, milvus_host="localhost", milvus_port="19530"):
    """End-to-end pipeline: txt folder -> TF-IDF vectors -> Milvus collection.

    Args:
        folder_path: directory containing the .txt files.
        collection_name: Milvus collection to create/populate.
        output_excel: optional path for an Excel processing summary.
        max_features: TF-IDF vocabulary cap (= vector dimension).
        milvus_host / milvus_port: Milvus server endpoint.

    Returns:
        True when every stage succeeds, False otherwise.
    """
    banner = "=" * 60
    print(banner)
    print("🚀 批量文件夹文本向量化处理")
    print(banner)
    print(f"📁 文件夹路径: {folder_path}")
    print(f"🗃️  集合名称: {collection_name}")
    print(f"🔗 Milvus服务器: {milvus_host}:{milvus_port}")
    print(f"📊 向量维度: {max_features}")
    print(banner)

    # Stage 1: load raw documents.
    texts, file_info = read_folder_txt_files(folder_path)
    if not texts:
        print("❌ 没有读取到有效文本，程序退出")
        return False

    # Stage 2: TF-IDF vectorization.
    vectors, vectorizer = vectorize_texts(texts, max_features)
    if vectors is None:
        print("❌ 向量化失败，程序退出")
        return False

    # Stage 3: optional Excel summary.
    if output_excel:
        export_summary_to_excel(texts, file_info, vectors, output_excel)

    # Stage 4: connect to the database.
    if not connect_to_milvus(milvus_host, milvus_port):
        print("❌ Milvus连接失败，程序退出")
        return False

    # Stage 5: create the target collection (dimension taken from the data).
    collection = create_milvus_collection(collection_name, vectors.shape[1])
    if collection is None:
        print("❌ 集合创建失败，程序退出")
        return False

    # Stage 6: bulk insert.
    if not insert_to_milvus(collection, texts, vectors, file_info):
        print("❌ 数据插入失败")
        return False

    print(banner)
    print("🎉 批量处理完成！")
    print(f"📊 处理统计:")
    print(f"   - 处理文件数: {len(texts)}")
    print(f"   - 向量维度: {vectors.shape[1]}")
    print(f"   - 集合名称: {collection_name}")
    print(f"   - 数据库记录: {len(texts)} 条")
    print(banner)
    return True


def _main():
    """Script entry point: print the configuration and run the pipeline."""
    # --- configuration -------------------------------------------------
    folder_path = "/Users/daijunxiong/Downloads/knowledge_txt"  # folder holding the txt files
    collection_name = "knowledge_vectors"          # target Milvus collection
    output_excel = "知识库处理结果.xlsx"             # optional Excel summary path
    max_features = 100                             # vector dimension

    milvus_host = "localhost"
    milvus_port = "19530"

    print("📋 配置信息:")
    print(f"   - 文件夹路径: {folder_path}")
    print(f"   - 集合名称: {collection_name}")
    print(f"   - 输出Excel: {output_excel}")
    print(f"   - 向量维度: {max_features}")
    print(f"   - Milvus服务器: {milvus_host}:{milvus_port}")
    print()

    # Bail out early with guidance when the input folder is missing.
    if not os.path.exists(folder_path):
        print(f"❌ 文件夹 {folder_path} 不存在")
        print("请创建文件夹并放入txt文件，或修改 folder_path 变量")
        return

    process_folder_to_milvus(
        folder_path=folder_path,
        collection_name=collection_name,
        output_excel=output_excel,
        max_features=max_features,
        milvus_host=milvus_host,
        milvus_port=milvus_port,
    )


if __name__ == "__main__":
    _main()
