#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
逐行文本向量化工具
功能：读取文件夹下所有txt文件，将每一行文本单独向量化并存储到Milvus
"""

import os
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba
import re
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility
import uuid
from datetime import datetime
from pathlib import Path


def _read_text_lines(path, primary_encoding):
    """Read all lines of *path*, falling back to GBK on a decode error.

    Returns:
        (lines, used_gbk): the raw lines and whether the GBK fallback was used.

    Raises:
        Any I/O or decode error from the fallback attempt propagates to the
        caller, which reports it per-file.
    """
    try:
        with open(path, 'r', encoding=primary_encoding) as fh:
            return fh.readlines(), False
    except UnicodeDecodeError:
        # Many Chinese text files are GBK-encoded; retry once with GBK.
        with open(path, 'r', encoding='gbk') as fh:
            return fh.readlines(), True


def read_all_lines_from_folder(folder_path, encoding='utf-8'):
    """Read every non-empty line of every *.txt file in a folder.

    Args:
        folder_path: directory to scan (non-recursive, *.txt only).
        encoding: primary encoding to try first (GBK is the fallback).

    Returns:
        (all_lines, line_info): parallel lists — the stripped line texts and,
        for each, a dict with 'filename', 'filepath', 'line_number' (1-based,
        counting blank lines too) and 'line_length'. Returns ([], []) when the
        folder is missing or contains no txt files.
    """
    all_lines = []
    line_info = []

    folder_path = Path(folder_path)
    if not folder_path.exists():
        print(f"错误：文件夹 {folder_path} 不存在")
        return [], []

    txt_files = list(folder_path.glob("*.txt"))
    if not txt_files:
        print(f"警告：文件夹 {folder_path} 中没有找到txt文件")
        return [], []

    print(f"找到 {len(txt_files)} 个txt文件")
    total_lines = 0

    for txt_file in txt_files:
        try:
            lines, used_gbk = _read_text_lines(txt_file, encoding)
        except Exception as e:
            print(f"❌ 读取文件失败: {txt_file.name} - {e}")
            continue

        file_lines = 0
        for line_num, line in enumerate(lines, 1):
            line_content = line.strip()
            if line_content:  # keep only non-empty lines
                all_lines.append(line_content)
                line_info.append({
                    'filename': txt_file.name,
                    'filepath': str(txt_file),
                    'line_number': line_num,
                    'line_length': len(line_content)
                })
                file_lines += 1

        total_lines += file_lines
        # Same success messages as before, including the "(GBK)" marker.
        label = f"{txt_file.name} (GBK)" if used_gbk else txt_file.name
        print(f"✅ {label}: {file_lines} 行有效文本")

    print(f"总计读取 {total_lines} 行有效文本")
    return all_lines, line_info


def preprocess_text(text):
    """Normalize one line of text for TF-IDF vectorization.

    Strips punctuation/symbols and digits, lowercases, segments Chinese
    with jieba (space-joined tokens), and collapses whitespace runs.
    """
    # Remove punctuation/symbols first, then digits, then lowercase.
    cleaned = re.sub(r'\d+', ' ', re.sub(r'[^\w\s]', ' ', text)).lower()
    # jieba segmentation so the vectorizer can tokenize on spaces.
    tokenized = ' '.join(jieba.cut(cleaned))
    # Collapse consecutive whitespace into single spaces.
    return re.sub(r'\s+', ' ', tokenized).strip()


def vectorize_lines(lines, max_features=100):
    """Vectorize text lines with TF-IDF (unigrams + bigrams).

    Args:
        lines: raw text lines to vectorize.
        max_features: vocabulary size cap (= max vector dimensionality).

    Returns:
        (vectors, vectorizer, valid_indices): dense ndarray of shape
        (n_valid, n_features), the fitted TfidfVectorizer, and the indices
        into `lines` that survived preprocessing (row i of `vectors`
        corresponds to lines[valid_indices[i]]).
        On failure returns (None, None, None).
    """
    if not lines:
        print("错误：没有文本行需要向量化")
        # BUG FIX: this path used to return a 2-tuple while the other
        # failure path returned a 3-tuple; a caller unpacking three values
        # would crash. Both failure paths now return (None, None, None).
        return None, None, None

    print(f"开始向量化 {len(lines)} 行文本...")

    processed_lines = [preprocess_text(line) for line in lines]

    # Drop lines that became empty after preprocessing (e.g. punctuation-only).
    valid_indices = [i for i, text in enumerate(processed_lines) if text.strip()]
    if len(valid_indices) < len(lines):
        print(f"过滤掉 {len(lines) - len(valid_indices)} 行空文本")

    valid_processed_lines = [processed_lines[i] for i in valid_indices]

    if not valid_processed_lines:
        print("错误：没有有效的文本行")
        return None, None, None

    vectorizer = TfidfVectorizer(max_features=max_features, ngram_range=(1, 2))
    vectors = vectorizer.fit_transform(valid_processed_lines)

    print(f"向量化完成，矩阵形状: {vectors.shape}")
    return vectors.toarray(), vectorizer, valid_indices


def connect_to_milvus(host="localhost", port="19530"):
    """Open the default pymilvus connection to a Milvus server.

    Returns True on success, False (after printing the error) on failure.
    """
    try:
        connections.connect("default", host=host, port=port)
    except Exception as e:
        print(f"❌ 连接Milvus服务器失败: {e}")
        return False
    print(f"✅ 成功连接到Milvus服务器 {host}:{port}")
    return True


def create_line_collection(collection_name, vector_dim):
    """Create the Milvus collection used to store per-line vectors.

    If a collection with the same name exists, interactively asks whether to
    drop and recreate it. Returns the new Collection, or None if the user
    declines.
    """
    if utility.has_collection(collection_name):
        answer = input(f"集合 '{collection_name}' 已存在，是否删除并重新创建？(y/N): ").strip().lower()
        if answer != 'y':
            print("❌ 操作取消")
            return None
        utility.drop_collection(collection_name)
        print(f"✅ 删除已存在的集合: {collection_name}")

    # Schema: string primary key, source metadata, the embedding, and a timestamp.
    schema = CollectionSchema(
        [
            FieldSchema(name="id", dtype=DataType.VARCHAR, max_length=100, is_primary=True),
            FieldSchema(name="filename", dtype=DataType.VARCHAR, max_length=500),
            FieldSchema(name="line_number", dtype=DataType.INT64),
            FieldSchema(name="line_text", dtype=DataType.VARCHAR, max_length=5000),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=vector_dim),
            FieldSchema(name="timestamp", dtype=DataType.VARCHAR, max_length=50),
        ],
        f"逐行文本向量集合 - {collection_name}",
    )
    collection = Collection(collection_name, schema)
    print(f"✅ 成功创建集合: {collection_name}")

    # IVF_FLAT index with L2 metric on the embedding field.
    collection.create_index(
        "vector",
        {"metric_type": "L2", "index_type": "IVF_FLAT", "params": {"nlist": 128}},
    )
    print("✅ 成功创建向量索引")

    return collection


def insert_lines_to_milvus(collection, lines, vectors, line_info, valid_indices):
    """Insert one row per vectorized line into the Milvus collection.

    Args:
        collection: target Milvus Collection (schema from create_line_collection).
        lines: all original text lines.
        vectors: dense matrix with one row per valid line (supports .tolist()).
        line_info: per-line metadata dicts ('filename', 'line_number', ...).
        valid_indices: indices into `lines`/`line_info` that were vectorized;
            must align row-for-row with `vectors`.

    Returns:
        True on success, False on a length mismatch or insert failure.
    """
    valid_line_info = [line_info[i] for i in valid_indices]
    valid_lines = [lines[i] for i in valid_indices]

    if len(valid_lines) != len(vectors) or len(valid_lines) != len(valid_line_info):
        print("❌ 数据长度不匹配")
        return False

    # Hoisted out of the per-row loop: calling datetime.now() per row could
    # straddle a second boundary and stamp rows of one batch differently.
    batch_ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    ids = [str(uuid.uuid4()) for _ in valid_lines]
    filenames = [info['filename'] for info in valid_line_info]
    line_numbers = [info['line_number'] for info in valid_line_info]
    timestamps = [batch_ts] * len(valid_lines)
    # Truncate to the schema's VARCHAR max_length (5000 chars); a no-op for
    # shorter lines, so a plain slice replaces the old if/else loop.
    processed_lines = [line[:5000] for line in valid_lines]

    # Column order must match the collection schema.
    data = [ids, filenames, line_numbers, processed_lines, vectors.tolist(), timestamps]

    try:
        collection.insert(data)
        collection.flush()
    except Exception as e:
        print(f"❌ 插入数据失败: {e}")
        return False
    print(f"✅ 成功插入 {len(valid_lines)} 行向量数据到Milvus")
    return True


def export_line_summary_to_excel(lines, line_info, vectors, valid_indices, output_path):
    """Export a per-line processing summary (metadata + vector) to Excel.

    Row i of `vectors` corresponds to lines[valid_indices[i]]. Text content
    is previewed at 200 characters; vectors are serialized as comma-joined
    six-decimal floats. Requires the openpyxl engine.
    """
    rows = []
    for row_idx, src_idx in enumerate(valid_indices):
        line = lines[src_idx]
        info = line_info[src_idx]
        vector_str = ','.join(f'{v:.6f}' for v in vectors[row_idx])
        preview = line[:200] + '...' if len(line) > 200 else line
        rows.append({
            '文件名': info['filename'],
            '行号': info['line_number'],
            '文本长度': len(line),
            '文本内容': preview,
            '向量': vector_str,
        })

    pd.DataFrame(rows).to_excel(output_path, index=False, engine='openpyxl')
    print(f"✅ 行处理摘要已导出到: {output_path}")


def process_lines_to_milvus(folder_path, collection_name, output_excel=None,
                           max_features=100, milvus_host="localhost", milvus_port="19530"):
    """Full pipeline: read folder -> vectorize each line -> store in Milvus.

    Args:
        folder_path: directory containing the .txt files to ingest.
        collection_name: Milvus collection to create/populate.
        output_excel: optional path for an Excel summary of the run.
        max_features: TF-IDF vocabulary cap (vector dimensionality).
        milvus_host / milvus_port: Milvus server address.

    Returns:
        True if every step succeeded, False otherwise.
    """
    banner = "=" * 60
    print(banner)
    print("🚀 逐行文本向量化处理")
    print(banner)
    print(f"📁 文件夹路径: {folder_path}")
    print(f"🗃️  集合名称: {collection_name}")
    print(f"🔗 Milvus服务器: {milvus_host}:{milvus_port}")
    print(f"📊 向量维度: {max_features}")
    print(banner)

    # Step 1: collect every non-empty line from the folder.
    lines, line_info = read_all_lines_from_folder(folder_path)
    if not lines:
        print("❌ 没有读取到有效文本行，程序退出")
        return False

    # Step 2: TF-IDF vectorization. Guard via result[0] before unpacking,
    # since failure is signalled by a None first element.
    result = vectorize_lines(lines, max_features)
    if result[0] is None:
        print("❌ 向量化失败，程序退出")
        return False
    vectors, vectorizer, valid_indices = result

    # Step 3 (optional): Excel summary of what was vectorized.
    if output_excel:
        export_line_summary_to_excel(lines, line_info, vectors, valid_indices, output_excel)

    # Step 4: connect to Milvus.
    if not connect_to_milvus(milvus_host, milvus_port):
        print("❌ Milvus连接失败，程序退出")
        return False

    # Step 5: create the target collection (may prompt the user).
    collection = create_line_collection(collection_name, vectors.shape[1])
    if collection is None:
        print("❌ 集合创建失败，程序退出")
        return False

    # Step 6: batch-insert the rows.
    if not insert_lines_to_milvus(collection, lines, vectors, line_info, valid_indices):
        print("❌ 数据插入失败")
        return False

    file_count = len({info['filename'] for info in line_info})
    print(banner)
    print("🎉 逐行处理完成！")
    print("📊 处理统计:")
    print(f"   - 处理文件数: {file_count}")
    print(f"   - 总文本行数: {len(lines)}")
    print(f"   - 有效向量数: {len(vectors)}")
    print(f"   - 向量维度: {vectors.shape[1]}")
    print(f"   - 集合名称: {collection_name}")
    print(banner)
    return True


if __name__ == "__main__":
    # --- user configuration -------------------------------------------------
    folder_path = "/Users/daijunxiong/Downloads/knowledge_txt"  # folder with the .txt files
    collection_name = "knowledge_lines"                         # target Milvus collection
    output_excel = "逐行处理结果.xlsx"                            # optional Excel summary output
    max_features = 100                                          # TF-IDF vector dimensionality
    milvus_host = "localhost"
    milvus_port = "19530"

    print("📋 配置信息:")
    print(f"   - 文件夹路径: {folder_path}")
    print(f"   - 集合名称: {collection_name}")
    print(f"   - 输出Excel: {output_excel}")
    print(f"   - 向量维度: {max_features}")
    print(f"   - Milvus服务器: {milvus_host}:{milvus_port}")
    print()

    # Run the pipeline only when the input folder actually exists.
    if os.path.exists(folder_path):
        success = process_lines_to_milvus(
            folder_path=folder_path,
            collection_name=collection_name,
            output_excel=output_excel,
            max_features=max_features,
            milvus_host=milvus_host,
            milvus_port=milvus_port,
        )
    else:
        print(f"❌ 文件夹 {folder_path} 不存在")