#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
增强版向量化工具
功能：将清洗后的决策树数据向量化存储到Milvus，提高搜索命中率
"""

import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba
import re
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility
import uuid
from datetime import datetime
from data_cleaner import DataCleaner


class EnhancedVectorizer:
    """Clean decision-tree records, TF-IDF-vectorize them, and store them
    in a Milvus collection to improve search hit rates.
    """

    def __init__(self, collection_name="enhanced_knowledge", max_features=200):
        """
        Args:
            collection_name: Name of the Milvus collection to create/use.
            max_features: Cap on the TF-IDF vocabulary size; this also bounds
                the dimensionality of the stored vectors.
        """
        self.collection_name = collection_name
        self.max_features = max_features
        self.vectorizer = None  # fitted TfidfVectorizer, set by vectorize_enhanced_data()
        self.collection = None  # Milvus Collection handle, set by create_enhanced_collection()

    def connect_to_milvus(self, host="localhost", port="19530"):
        """Open the default Milvus connection.

        Args:
            host: Milvus server host.
            port: Milvus server port (string, per pymilvus convention).

        Returns:
            True on success, False on failure (the error is printed).
        """
        try:
            connections.connect("default", host=host, port=port)
            print(f"✅ 成功连接到Milvus服务器 {host}:{port}")
            return True
        except Exception as e:
            print(f"❌ 连接失败: {e}")
            return False

    def create_enhanced_collection(self, vector_dim):
        """Create the enhanced collection and its vector index in Milvus.

        If a collection with the same name already exists, the user is asked
        interactively whether to drop and recreate it.

        Args:
            vector_dim: Dimensionality of the FLOAT_VECTOR field; must match
                the width of the vectors that will be inserted.

        Returns:
            The created Collection (also stored on self.collection), or
            None if the user cancelled.
        """
        if utility.has_collection(self.collection_name):
            choice = input(f"集合 '{self.collection_name}' 已存在，是否删除并重新创建？(y/N): ").strip().lower()
            if choice == 'y':
                utility.drop_collection(self.collection_name)
                print(f"✅ 删除已存在的集合: {self.collection_name}")
            else:
                print("❌ 操作取消")
                return None

        # Enhanced schema layout: `id` is the primary key, `vector` holds
        # the TF-IDF embedding, the rest are searchable metadata fields.
        fields = [
            FieldSchema(name="id", dtype=DataType.VARCHAR, max_length=100, is_primary=True),
            FieldSchema(name="original_text", dtype=DataType.VARCHAR, max_length=2000),
            FieldSchema(name="search_text", dtype=DataType.VARCHAR, max_length=2000),
            FieldSchema(name="product_code", dtype=DataType.VARCHAR, max_length=50),
            FieldSchema(name="cargo_number", dtype=DataType.VARCHAR, max_length=50),
            FieldSchema(name="problem_category", dtype=DataType.VARCHAR, max_length=200),
            FieldSchema(name="problem_detail", dtype=DataType.VARCHAR, max_length=500),
            FieldSchema(name="condition", dtype=DataType.VARCHAR, max_length=500),
            FieldSchema(name="solution", dtype=DataType.VARCHAR, max_length=1000),
            FieldSchema(name="variant_type", dtype=DataType.VARCHAR, max_length=50),
            FieldSchema(name="keywords", dtype=DataType.VARCHAR, max_length=500),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=vector_dim),
            FieldSchema(name="timestamp", dtype=DataType.VARCHAR, max_length=50)
        ]

        schema = CollectionSchema(fields, f"增强版知识库 - {self.collection_name}")
        self.collection = Collection(self.collection_name, schema)
        print(f"✅ 成功创建集合: {self.collection_name}")

        # IVF_FLAT with an L2 metric; nlist=128 is a reasonable default for
        # small-to-medium collections.
        index_params = {
            "metric_type": "L2",
            "index_type": "IVF_FLAT",
            "params": {"nlist": 128}
        }
        self.collection.create_index("vector", index_params)
        print("✅ 成功创建向量索引")

        return self.collection

    def vectorize_enhanced_data(self, enhanced_data):
        """Fit a TF-IDF model on the records' search texts and embed them.

        The fitted vectorizer is kept on self.vectorizer so queries can be
        embedded with the same vocabulary later.

        Args:
            enhanced_data: List of record dicts, each with a 'search_text' key.

        Returns:
            Dense numpy array of shape (len(enhanced_data), n_features).
        """
        # Normalize each search text: drop punctuation, collapse whitespace.
        # \w matches Unicode word characters, so CJK text survives intact.
        processed_texts = []
        for record in enhanced_data:
            text = re.sub(r'[^\w\s]', ' ', record['search_text'])
            processed_texts.append(re.sub(r'\s+', ' ', text).strip())

        # Unigrams + bigrams; min_df/max_df prune extremely rare and
        # near-ubiquitous terms.
        self.vectorizer = TfidfVectorizer(
            max_features=self.max_features,
            ngram_range=(1, 2),
            min_df=1,   # minimum document frequency
            max_df=0.95  # maximum document frequency
        )

        vectors = self.vectorizer.fit_transform(processed_texts)
        print(f"✅ 向量化完成，矩阵形状: {vectors.shape}")

        return vectors.toarray()

    def insert_enhanced_data(self, enhanced_data, vectors):
        """Insert enhanced records and their vectors into the collection.

        Args:
            enhanced_data: List of record dicts whose keys match the schema
                created by create_enhanced_collection.
            vectors: Dense vector matrix aligned row-for-row with enhanced_data.

        Returns:
            True on success, False on length mismatch or insert failure.
        """
        if len(enhanced_data) != len(vectors):
            print("❌ 数据长度不匹配")
            return False

        def column(key):
            # Milvus column-based insert: one list per schema field.
            return [record[key] for record in enhanced_data]

        # Single batch timestamp so every row of one insert carries the same
        # value (calling datetime.now() per record could straddle a second
        # boundary and produce inconsistent timestamps).
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        # Column order must match the field order of the collection schema.
        data = [
            column('id'), column('original_text'), column('search_text'),
            column('product_code'), column('cargo_number'),
            column('problem_category'), column('problem_detail'),
            column('condition'), column('solution'),
            column('variant_type'), column('keywords'),
            vectors.tolist(), [timestamp] * len(enhanced_data)
        ]

        try:
            self.collection.insert(data)
            self.collection.flush()  # make the inserted rows durable/visible
            print(f"✅ 成功插入 {len(enhanced_data)} 条增强记录到Milvus")
            return True
        except Exception as e:
            print(f"❌ 插入数据失败: {e}")
            return False

    def process_raw_data_to_milvus(self, raw_lines, output_csv=None):
        """End-to-end pipeline: raw lines -> clean/enhance -> vectorize -> store.

        Args:
            raw_lines: Iterable of raw decision-tree text lines.
            output_csv: Optional path; when given, the enhanced records are
                also exported to CSV.

        Returns:
            True if every stage succeeded, False otherwise.
        """
        print("=" * 60)
        print("🚀 增强版数据处理流程")
        print("=" * 60)

        # 1. Clean and enhance the raw data.
        print("🧹 数据清洗和增强...")
        cleaner = DataCleaner()
        enhanced_data = cleaner.clean_and_enhance_data(raw_lines)

        if not enhanced_data:
            print("❌ 没有有效的增强数据")
            return False

        # Count distinct source lines once; several enhanced variants may
        # share the same original_text.
        original_count = len(set(d['original_text'] for d in enhanced_data))
        print(f"✅ 原始记录: {original_count}")
        print(f"✅ 增强记录: {len(enhanced_data)}")

        # 2. Optional CSV export.
        if output_csv:
            cleaner.export_to_csv(enhanced_data, output_csv)

        # 3. Vectorize the enhanced records.
        print("\n🔢 向量化处理...")
        vectors = self.vectorize_enhanced_data(enhanced_data)

        # 4. Connect to Milvus.
        print("\n🔗 连接Milvus...")
        if not self.connect_to_milvus():
            print("❌ Milvus连接失败")
            return False

        # 5. Create the collection sized to the vector width.
        print("\n🗃️  创建集合...")
        collection = self.create_enhanced_collection(vectors.shape[1])
        if collection is None:
            print("❌ 集合创建失败")
            return False

        # 6. Insert the records.
        print("\n💾 插入数据...")
        success = self.insert_enhanced_data(enhanced_data, vectors)

        if not success:
            print("❌ 数据插入失败")
            return False

        print("\n" + "=" * 60)
        print("🎉 增强版处理完成！")
        print(f"📊 处理统计:")
        print(f"   - 原始记录数: {original_count}")
        print(f"   - 增强记录数: {len(enhanced_data)}")
        print(f"   - 向量维度: {vectors.shape[1]}")
        print(f"   - 集合名称: {self.collection_name}")

        # Report the distribution of variant types among the enhanced records.
        variant_counts = {}
        for record in enhanced_data:
            vtype = record['variant_type']
            variant_counts[vtype] = variant_counts.get(vtype, 0) + 1

        print(f"\n📈 变体类型分布:")
        for vtype, count in variant_counts.items():
            print(f"   - {vtype}: {count}")

        print("=" * 60)
        return True

def demo_enhanced_processing():
    """Run the enhanced pipeline end-to-end against a small sample data set."""
    # Sample decision-tree lines (PK511 troubleshooting tree).
    sample_data = [
        "PK511 -> 无产物或产物量很少 -> 模板 -> 是否鉴定过模板质量 -> 是 -> 质量较好，则没有问题 [货号:PK511]",
        "PK511 -> 无产物或产物量很少 -> 模板 -> 是否鉴定过模板质量 -> 是 -> 模板降解，则可能扩增不出条带 -> 建议重新制备模板 [货号:PK511]",
        "PK511 -> 无产物或产物量很少 -> 模板 -> 是否鉴定过模板质量 -> 否 -> 建议鉴定模板质量 -> cDNA不可直接跑胶，可以跑胶鉴定RNA的质量 [货号:PK511]",
        "PK511 -> 无产物或产物量很少 -> 模板 -> 是否鉴定过模板质量 -> 否 -> 建议鉴定模板质量 -> 质粒/gDNA可以跑胶鉴定，上样量200-400ng [货号:PK511]",
        "PK511 -> 无产物或产物量很少 -> 模板 -> 50 μl投入量 -> 基因组DNA -> ＞500ng，则可能抑制PCR反应 -> 建议降低投入量 [货号:PK511]",
        "PK511 -> 无产物或产物量很少 -> 模板 -> 50 μl投入量 -> 基因组DNA -> 5-500 ng -> 没有问题 [货号:PK511]"
    ]

    # Build the demo vectorizer and push the sample data through the
    # clean -> vectorize -> store pipeline, exporting a CSV along the way.
    demo_vectorizer = EnhancedVectorizer(
        collection_name="demo_enhanced_knowledge",
        max_features=100,
    )
    if demo_vectorizer.process_raw_data_to_milvus(
        raw_lines=sample_data,
        output_csv="demo_enhanced_data.csv",
    ):
        print("\n🎯 演示完成！您可以使用增强版搜索工具进行测试。")

if __name__ == "__main__":
    demo_enhanced_processing()
