#!/bin/bash
# Driver: runs the complete Milvus performance test suite inside a Docker
# container. Expects a Milvus server reachable on 127.0.0.1:19530.

echo "Running complete Milvus performance test in Docker container..."

# Private temp dir shared with the container for the generated test script.
# mktemp -d gives an unpredictable name (safer in /tmp than the old $$-based
# path) and creates the directory atomically.
TEMP_DIR=$(mktemp -d /tmp/milvus_complete_test.XXXXXX) || exit 1

# Write the full test script. The quoted 'EOF' delimiter disables shell
# expansion, so the Python body below is copied verbatim.
cat > "$TEMP_DIR/milvus_complete_test.py" << 'EOF'
import time
import numpy as np
from pymilvus import connections, FieldSchema, CollectionSchema, DataType, Collection, utility
import random
import threading
from datetime import datetime

def test_milvus_complete():
    """Run the full Milvus benchmark sequence against 127.0.0.1:19530.

    Returns:
        (results, test_log) where `results` maps test names to wall-clock
        durations in seconds (entries containing "rate" are vectors/second),
        and `test_log` is the ordered list of log lines produced.
    """
    results = {}
    test_log = []
    
    def log(msg):
        # Echo to stdout and keep a copy so the caller can persist the log.
        print(msg)
        test_log.append(msg)
    
    # Connect to Milvus; without a connection none of the tests can run.
    log("Connecting to Milvus...")
    try:
        connections.connect("default", host="127.0.0.1", port="19530")
        log("Great! Connected to Milvus successfully")
    except Exception as e:
        log(f"Error: Failed to connect to Milvus: {e}")
        return results, test_log
    
    # Test 1: create the collection and its vector index.
    log("1. Creating collection and indexes...")
    start_time = time.time()
    
    try:
        # Field definitions: auto-generated INT64 primary key, a 128-dim
        # float vector, and three scalar payload fields.
        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=128),
            FieldSchema(name="category", dtype=DataType.INT64),
            FieldSchema(name="timestamp", dtype=DataType.INT64),
            FieldSchema(name="metadata", dtype=DataType.VARCHAR, max_length=200),
        ]
        
        # Build the collection schema.
        schema = CollectionSchema(fields, "Complete Milvus performance test collection")
        
        # Create the collection, dropping any stale copy from a prior run.
        collection_name = "complete_perf_test"
        if utility.has_collection(collection_name):
            utility.drop_collection(collection_name)
        
        collection = Collection(name=collection_name, schema=schema)
        
        # Create the vector index (IVF_FLAT, L2 distance).
        vector_index_params = {
            "index_type": "IVF_FLAT",
            "metric_type": "L2",
            "params": {"nlist": 128}
        }
        collection.create_index("vector", vector_index_params)
        
        end_time = time.time()
        results["collection_creation"] = end_time - start_time
        log(f"Great! Collection creation time: {results['collection_creation']:.4f} seconds")
    except Exception as e:
        log(f"Error: Collection creation failed: {e}")
        return results, test_log
    
    # Test 2: batch insertion throughput.
    log("2. Testing batch insertion performance...")
    start_time = time.time()
    
    try:
        # Generate random test data in batches.
        num_vectors = 5000  # 5,000 records total
        batch_size = 1000
        total_inserted = 0
        
        for batch in range(0, num_vectors, batch_size):
            current_batch_size = min(batch_size, num_vectors - batch)
            vectors = [[random.random() for _ in range(128)] for _ in range(current_batch_size)]
            categories = [random.randint(1, 20) for _ in range(current_batch_size)]
            timestamps = [int(time.time()) + i for i in range(current_batch_size)]
            metadata = [f"item_{batch + i}" for i in range(current_batch_size)]
            
            # Column-ordered entities; order must match the schema's
            # non-primary fields (vector, category, timestamp, metadata).
            entities = [vectors, categories, timestamps, metadata]
            
            # Insert the batch.
            insert_result = collection.insert(entities)
            total_inserted += current_batch_size
        
        collection.flush()
        
        end_time = time.time()
        results["batch_insertion"] = end_time - start_time
        results["insertion_rate"] = total_inserted / results["batch_insertion"]
        log(f"Great! Batch insertion time for {total_inserted} vectors: {results['batch_insertion']:.4f} seconds")
        log(f"Insertion rate: {results['insertion_rate']:.2f} vectors/second")
    except Exception as e:
        log(f"Error: Batch insertion failed: {e}")
        return results, test_log
    
    # Test 3: plain (unfiltered) ANN search latency.
    log("3. Testing exact search performance...")
    start_time = time.time()
    
    try:
        collection.load()
        
        # Five random query vectors, top-10 neighbors each.
        search_vectors = [[random.random() for _ in range(128)] for _ in range(5)]
        search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
        
        results_query = collection.search(
            data=search_vectors,
            anns_field="vector",
            param=search_params,
            limit=10,
            output_fields=["category", "metadata"]
        )
        
        end_time = time.time()
        results["exact_search"] = end_time - start_time
        log(f"Great! Exact search time for 5 queries: {results['exact_search']:.4f} seconds")
    except Exception as e:
        log(f"Error: Exact search test failed: {e}")
    
    # Test 4: ANN search with a scalar filter.
    log("4. Testing filtered search performance...")
    start_time = time.time()
    
    try:
        search_vectors = [[random.random() for _ in range(128)]]
        search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
        
        # Search restricted to a single category.
        results_filtered = collection.search(
            data=search_vectors,
            anns_field="vector",
            param=search_params,
            limit=5,
            expr="category == 1",
            output_fields=["category", "metadata"]
        )
        
        end_time = time.time()
        results["filtered_search"] = end_time - start_time
        log(f"Great! Filtered search time: {results['filtered_search']:.4f} seconds")
    except Exception as e:
        log(f"Error: Filtered search test failed: {e}")
    
    # Test 5: ANN search with a compound (multi-condition) filter.
    log("5. Testing complex filtered search...")
    start_time = time.time()
    
    try:
        search_vectors = [[random.random() for _ in range(128)]]
        search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
        
        # Range filter combining two conditions.
        results_complex = collection.search(
            data=search_vectors,
            anns_field="vector",
            param=search_params,
            limit=10,
            expr="category > 5 and category < 15",
            output_fields=["category", "metadata"]
        )
        
        end_time = time.time()
        results["complex_search"] = end_time - start_time
        log(f"Great! Complex search time: {results['complex_search']:.4f} seconds")
    except Exception as e:
        log(f"Error: Complex search test failed: {e}")
    
    # Test 6: aggregation-style workload.
    log("6. Testing aggregation queries...")
    start_time = time.time()
    
    try:
        # Pull scalar rows via query(); aggregation itself happens client-side.
        query_result = collection.query(
            expr="category >= 1",
            output_fields=["category"],
            limit=1000
        )
        
        # Tally the category distribution manually (simulated aggregation).
        category_counts = {}
        for item in query_result:
            category = item['category']
            category_counts[category] = category_counts.get(category, 0) + 1
        
        end_time = time.time()
        results["aggregation"] = end_time - start_time
        log(f"Great! Aggregation query time: {results['aggregation']:.4f} seconds")
        log(f"Category distribution sample: {dict(list(category_counts.items())[:5])}")
    except Exception as e:
        log(f"Error: Aggregation test failed: {e}")
    
    # Test 7: concurrent query throughput.
    log("7. Testing concurrent query performance...")
    start_time = time.time()
    
    try:
        def concurrent_search(thread_id, results_list):
            # Worker: one single-vector search; stores the hit count (int)
            # on success or an error string on failure into its slot.
            try:
                search_vector = [[random.random() for _ in range(128)]]
                search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
                
                result = collection.search(
                    data=search_vector,
                    anns_field="vector",
                    param=search_params,
                    limit=5
                )
                results_list[thread_id] = len(result[0])
            except Exception as e:
                results_list[thread_id] = f"Error: {str(e)}"
        
        # Fan out concurrent search threads, one slot per thread.
        num_concurrent = 10
        threads = []
        thread_results = [None] * num_concurrent
        
        for i in range(num_concurrent):
            thread = threading.Thread(target=concurrent_search, args=(i, thread_results))
            threads.append(thread)
            thread.start()
        
        for thread in threads:
            thread.join()
        
        end_time = time.time()
        results["concurrent_queries"] = end_time - start_time
        
        # int slot == success (error paths store a string instead).
        successful_searches = sum(1 for r in thread_results if isinstance(r, int))
        log(f"Great! Concurrent search time for {num_concurrent} queries: {results['concurrent_queries']:.4f} seconds")
        log(f"Successful searches: {successful_searches}/{num_concurrent}")
    except Exception as e:
        log(f"Error: Concurrent query test failed: {e}")
    
    # Test 8: collection statistics and basic system info.
    log("8. Testing data statistics and system info...")
    start_time = time.time()
    
    try:
        # Entity count for the collection.
        stats = collection.num_entities
        
        # Partition list.
        partitions = collection.partitions
        
        # Summarize into a simple dict for the log.
        system_info = {
            "total_entities": stats,
            "partitions_count": len(partitions),
            "collection_name": collection_name
        }
        
        end_time = time.time()
        results["statistics"] = end_time - start_time
        log(f"Great! Statistics collection time: {results['statistics']:.4f} seconds")
        log(f"System info: {system_info}")
    except Exception as e:
        log(f"Error: Statistics test failed: {e}")
    
    # Test 9: update-equivalent operations. Vector databases typically lack
    # in-place updates, so this measures delete + re-insert instead.
    log("9. Testing update-equivalent operations...")
    start_time = time.time()
    
    try:
        # Fetch some record ids to "update".
        query_result = collection.query(expr="category == 1", limit=10, output_fields=["id"])
        
        if query_result:
            # Simulated update: delete the old rows, insert replacements.
            ids_to_delete = [item['id'] for item in query_result]
            
            # Delete by primary key. NOTE(review): relies on Python's list
            # repr producing a valid Milvus "in [...]" expression.
            collection.delete(expr=f"id in {ids_to_delete}")
            
            # Insert replacement rows.
            new_vectors = [[random.random() for _ in range(128)] for _ in range(len(ids_to_delete))]
            new_categories = [2] * len(ids_to_delete)  # "updated" rows get category 2
            new_timestamps = [int(time.time())] * len(ids_to_delete)
            new_metadata = [f"updated_{i}" for i in range(len(ids_to_delete))]
            
            new_entities = [new_vectors, new_categories, new_timestamps, new_metadata]
            collection.insert(new_entities)
            collection.flush()
            
            end_time = time.time()
            results["update_operations"] = end_time - start_time
            log(f"Great! Update-equivalent operations time: {results['update_operations']:.4f} seconds")
        else:
            log("No records found for update test")
            results["update_operations"] = 0
    except Exception as e:
        log(f"Error: Update operations test failed: {e}")
    
    # Cleanup: drop the test collection.
    try:
        utility.drop_collection(collection_name)
        log("Great! Cleanup completed")
    except Exception as e:
        log(f"Error: Cleanup failed: {e}")
    
    return results, test_log

if __name__ == "__main__":
    import os

    # Must match num_concurrent inside test_milvus_complete(); used to turn
    # the concurrent-search wall time into queries/second.
    NUM_CONCURRENT = 10
    # Report destination inside the container's /results mount.
    RESULTS_PATH = "/results/milvus_complete_results.txt"

    print("Starting complete Milvus performance test...")
    start_total = time.time()

    results, log_messages = test_milvus_complete()

    end_total = time.time()
    total_time = end_total - start_total

    # --- Console report --------------------------------------------------
    print("\n" + "=" * 60)
    print("COMPLETE MILVUS PERFORMANCE TEST RESULTS")
    print("=" * 60)

    print(f"Total test execution time: {total_time:.2f} seconds")
    print("\nPerformance Results:")
    print("-" * 40)

    for test_name, duration in results.items():
        # Entries containing "rate" are throughputs; the rest are durations.
        if "rate" in test_name:
            print(f"{test_name}: {duration:.2f} vectors/second")
        else:
            print(f"{test_name}: {duration:.4f} seconds")

    if "batch_insertion" in results and "insertion_rate" in results:
        print("\nOverall Performance Summary:")
        print(f"- Data ingestion rate: {results['insertion_rate']:.2f} vectors/second")

    if "concurrent_queries" in results:
        print(f"- Concurrent query throughput: {NUM_CONCURRENT / results['concurrent_queries']:.2f} queries/second")

    print("\n" + "=" * 60)
    print("DETAILED TEST LOG")
    print("=" * 60)
    for msg in log_messages:
        print(msg)

    # --- Persist report --------------------------------------------------
    # Create the output directory if the /results mount is absent so a
    # completed (and expensive) benchmark run is never lost to an IOError.
    os.makedirs(os.path.dirname(RESULTS_PATH), exist_ok=True)
    with open(RESULTS_PATH, "w") as f:
        f.write("Complete Milvus Performance Test Results\n")
        f.write("========================================\n\n")
        f.write(f"Test completed at: {datetime.now()}\n")
        f.write(f"Total execution time: {total_time:.2f} seconds\n\n")

        f.write("Performance Results:\n")
        f.write("-" * 40 + "\n")
        for test_name, duration in results.items():
            if "rate" in test_name:
                f.write(f"{test_name}: {duration:.2f} vectors/second\n")
            else:
                f.write(f"{test_name}: {duration:.4f} seconds\n")

        f.write("\nOverall Performance Summary:\n")
        f.write("-" * 40 + "\n")
        if "batch_insertion" in results and "insertion_rate" in results:
            f.write(f"- Data ingestion rate: {results['insertion_rate']:.2f} vectors/second\n")

        if "concurrent_queries" in results:
            f.write(f"- Concurrent query throughput: {NUM_CONCURRENT / results['concurrent_queries']:.2f} queries/second\n")

        f.write("\nDetailed Test Log:\n")
        f.write("-" * 40 + "\n")
        for msg in log_messages:
            f.write(f"{msg}\n")
EOF

# Run the test in a throwaway container. --network host lets the container
# reach Milvus on 127.0.0.1:19530; /scripts carries the generated test in,
# /results carries the report back out to the host.
echo "Starting complete Docker-based Milvus test..."
docker run --rm \
    --network host \
    -v "$TEMP_DIR":/scripts \
    -v /home/data/database/test_results:/results \
    python:3.9-slim \
    bash -c "
        echo 'Installing dependencies for complete test...' && \
        pip install -i https://pypi.tuna.tsinghua.edu.cn/simple marshmallow==3.19.0 > /dev/null 2>&1 && \
        pip install -i https://pypi.tuna.tsinghua.edu.cn/simple environs==9.5.0 > /dev/null 2>&1 && \
        pip install -i https://pypi.tuna.tsinghua.edu.cn/simple pymilvus==2.3.4 > /dev/null 2>&1 && \
        echo 'Running complete performance test...' && \
        python /scripts/milvus_complete_test.py
    "
# NOTE(review): pip output (including errors) is deliberately discarded; an
# install failure surfaces only as an ImportError from the test script.

# Report success or failure based on whether the container wrote its
# results file to the host-mounted directory.
RESULTS_FILE="/home/data/database/test_results/milvus_complete_results.txt"
if [[ -f "$RESULTS_FILE" ]]; then
    echo ""
    echo "Great! Complete Milvus test completed successfully!"
    echo "Results saved to: $RESULTS_FILE"
    echo ""
    echo "Quick summary:"
    echo "=============="
    # Show the performance section (first ~20 lines after its header).
    grep -A 20 "Performance Results:" "$RESULTS_FILE" | head -25
else
    echo "Error: Complete Docker test failed"
fi

# Remove the temp dir holding the generated test script.
cleanup() {
  # Guard: never run rm -rf with an empty/unset path.
  [ -n "${TEMP_DIR:-}" ] && rm -rf -- "$TEMP_DIR"
}
cleanup

echo ""
echo "Complete Milvus performance test finished!"