#!/usr/bin/env python
# -*- coding: utf-8 -*-

import argparse
import time
import glob
import yaml
import numpy
import multiprocessing as mp
from functools import partial
from ann_datasets import DATASETS, get_dataset
from distance import metrics
from ann_results import store_results
from definitions import instantiate_algorithm, get_definitions


def calculate_distances(query_vector, candidates, train_data_subset, distance):
    """Compute the distance between a query vector and each candidate.

    Args:
        query_vector: the query vector.
        candidates: candidate row indices, aligned with ``train_data_subset``.
        train_data_subset: pre-fetched training vectors, one per candidate.
        distance: key into the ``metrics`` registry selecting the metric.

    Returns:
        A list of ``(candidate_index, distance)`` tuples.
    """
    metric = metrics[distance]
    return [
        (int(cand), float(metric.distance(query_vector, train_data_subset[pos])))
        for pos, cand in enumerate(candidates)
    ]


# Kept at module level (not nested) so it can be pickled for multiprocessing.
def single_query(algo, v, count):
    """Run one query and return ``(elapsed_seconds, raw_candidates)``.

    Distances are deliberately not computed here; the caller resolves the
    returned candidate indices against the training data later.
    """
    t0 = time.time()
    result = algo.query(v, count)
    elapsed = time.time() - t0
    return elapsed, result


# Worker entry point: builds its own algorithm instance so no h5py / DB
# handles need to be pickled across process boundaries.
def process_queries(idx, definition, X_subset, count, max_time, query_arguments=None, tablename=None):
    """Create a per-process algorithm instance and query it for ``max_time`` seconds.

    Args:
        idx: worker index, used only in log messages.
        definition: algorithm definition used to instantiate a fresh algorithm.
        X_subset: query vectors, cycled round-robin.
        count: number of nearest neighbours requested per query.
        max_time: wall-clock budget in seconds.
        query_arguments: optional positional query arguments.
        tablename: optional table name forwarded to the algorithm.

    Returns:
        List of ``(query_time, (candidates, vector_index))`` tuples.
    """
    algo = instantiate_algorithm(definition)
    if hasattr(algo, "tablename"):
        algo.tablename = tablename

    if hasattr(algo, "reset_data_count"):
        algo.reset_data_count()

    # Apply per-group query arguments, if any.
    if query_arguments:
        algo.set_query_arguments(*query_arguments)

    # Milvus-style algorithms must load their collection in each process.
    if hasattr(algo, "load_collection"):
        try:
            print(f"[进程{idx}] 加载collection {algo.collection_name}...")
            algo.load_collection()
        except Exception as e:
            print(f"[进程{idx}] 加载collection时出错: {e}")
            # Fall back to constructing the collection object without loading it.
            if hasattr(algo, "collection") and algo.collection is None and hasattr(algo, "collection_name"):
                from pymilvus import Collection
                try:
                    print(f"[进程{idx}] 尝试初始化collection对象...")
                    # Bug fix: the pymilvus keyword is `consistency_level`
                    # (the old `consistence_level` was silently swallowed by
                    # **kwargs) and the level name is "Strong", not "STRONG".
                    algo.collection = Collection(
                        algo.collection_name,
                        consistency_level="Strong"
                    )
                except Exception as e2:
                    print(f"[进程{idx}] 初始化collection对象失败: {e2}")

    query_results = []
    deadline = time.time() + max_time
    query_no = 0

    while time.time() < deadline:
        # Use a dedicated name for the vector index; do not shadow `idx`
        # (the worker id used in the log messages above).
        vec_idx = query_no % len(X_subset)
        query_time, candidates = single_query(algo, X_subset[vec_idx], count)
        query_results.append((query_time, (candidates, vec_idx)))
        query_no += 1

    return query_results


def run_individual_query(algo, X_test, distance, count, reads=1, duration=0, definition=None, query_arguments=None,
                         tablename=None):
    """Execute timed queries for ``duration`` seconds and summarize the results.

    With ``reads == 1`` the given ``algo`` instance is queried directly; with
    ``reads > 1`` a spawn-based process pool runs ``process_queries`` in
    ``reads`` parallel workers, each with its own algorithm instance.

    Returns:
        ``(attrs, all_results)`` where ``attrs`` is a metrics dict and
        ``all_results`` is a list of ``(query_time, (candidates, vector_index))``
        tuples. Distances are intentionally NOT computed here; the caller
        computes them for the best run only.

    Raises:
        RuntimeError: if no query completed within the configured duration.
    """
    # Duration mode: fall back to 60 seconds when not configured.
    duration = duration if duration > 0 else 60
    print(f"Running test for {duration} seconds...")

    all_results = []

    if reads > 1:
        # Spawn avoids inheriting unpicklable handles (h5py, DB connections).
        if mp.get_start_method(allow_none=True) != 'spawn':
            mp.set_start_method('spawn', force=True)

        ctx = mp.get_context('spawn')
        pool = ctx.Pool(processes=reads)
        try:
            # All workers run concurrently for the same wall-clock budget.
            results_list = pool.map(
                partial(process_queries,
                        definition=definition,
                        X_subset=X_test,
                        count=count,
                        max_time=duration,
                        query_arguments=query_arguments,
                        tablename=tablename),
                range(reads)
            )
        finally:
            # Ensure the pool is torn down even if a worker raised.
            pool.close()
            pool.join()

        # Worker results are already in the final shape; just concatenate.
        for proc_results in results_list:
            all_results.extend(proc_results)
    else:
        # Single-process mode: reuse the caller's algorithm instance.
        end_time = time.time() + duration
        query_no = 0
        num_vectors = len(X_test)

        while time.time() < end_time:
            vec_idx = query_no % num_vectors
            query_time, candidates = single_query(algo, X_test[vec_idx], count)
            all_results.append((query_time, (candidates, vec_idx)))
            query_no += 1

    queries_count = len(all_results)
    print(f"实际执行的查询数: {queries_count}")
    if queries_count == 0:
        # Guard against the divisions by zero below.
        raise RuntimeError("no queries completed within the configured duration")

    # Per-query latencies. (Avoid `sum(time for time, _ in ...)`, which
    # shadowed the `time` module name inside the generator.)
    latencies = [t for t, _ in all_results]
    total_query_time = sum(latencies)

    avg_query_time = total_query_time / queries_count
    print(f"平均查询时间: {avg_query_time:.6f} 秒")

    best_query_time = min(latencies)
    print(f"最小查询时间: {best_query_time:.6f} 秒")

    max_query_time = max(latencies)
    print(f"最大查询时间: {max_query_time:.6f} 秒")

    # Effective per-query time, accounting for `reads` concurrent workers.
    best_search_time = total_query_time / queries_count / reads

    # Average number of candidates returned per query.
    total_candidates = sum(len(candidates) for _, (candidates, _) in all_results)
    avg_candidates = total_candidates / queries_count

    attrs = {
        "name": str(algo),
        "best_search_time": best_search_time,
        "avg_query_time": avg_query_time,
        "min_query_time": best_query_time,
        "max_query_time": max_query_time,
        "queries_count": queries_count,
        "candidates": avg_candidates,
        "distance": distance,
        "count": int(count),
        "reads": reads,
        "duration": duration,
        "total_queries": queries_count
    }

    # Merge any algorithm-specific metrics (e.g. server-side counters).
    attrs.update(algo.get_additional())
    return (attrs, all_results)


def load_and_transform_dataset(dataset_name):
    """Open the dataset and return (HDF5 dataset, test vectors, distance name).

    Only the test split is materialized in memory (it is small); the training
    split stays on disk and is read lazily through the HDF5 handle.
    """
    hdf5_file, _ = get_dataset(dataset_name)
    test_vectors = numpy.array(hdf5_file["test"])
    return hdf5_file, test_vectors, hdf5_file.attrs["distance"]


def copy_and_create_index(algo, dataset, reuse, index_conflict_strategy='drop'):
    """Build the ANN index, optionally reusing existing data and/or index.

    Args:
        algo: algorithm instance.
        dataset: dataset to load.
        reuse: when True, try to keep existing data/index instead of rebuilding.
        index_conflict_strategy: reserved; currently unused.

    Returns:
        ``(copy_time, create_index_time, build_time, index_size, table_size)``.
    """
    copy_time = 0
    create_index_time = 0

    if reuse:
        print("\n用户指定--reuse参数，尝试重用现有数据和索引")
        # Data and index reuse are decided independently.
        data_ok = algo.has_same_data(dataset)
        index_ok = algo.has_same_index()

        if data_ok and index_ok:
            # Nothing to rebuild: report sizes and bail out early.
            print("索引和数据都存在，且匹配，不需要重新复制和创建索引")
            index_size = algo.get_memory_usage()
            table_size = algo.get_table_usage()
            print(f"索引大小: {index_size/1024:.2f} MB")
            print(f"表大小: {table_size/1024:.2f} MB")
            return 0, 0, 0, index_size, table_size

        if not data_ok:
            # Data cannot be reused: drop everything and re-copy.
            print("数据检查发现数据不能重用，需要重新复制数据")
            algo.drop()
            started = time.time()
            algo.copy(dataset)
            copy_time = time.time() - started
            print(f"重新复制数据，耗时: {copy_time:.2f}秒")
            index_ok = False  # fresh data always requires a fresh index
        else:
            # Data reusable but the index does not match.
            print("数据可以重用，但索引不匹配")

        if not index_ok:
            print("需要重新创建索引")
            started = time.time()
            algo.create_index()
            create_index_time = time.time() - started
            print(f"重新创建索引，耗时: {create_index_time:.2f}秒")
    else:
        # No reuse requested: rebuild data and index from scratch.
        print("未指定--reuse参数，需要重新复制数据和创建索引")
        algo.drop()
        started = time.time()
        algo.copy(dataset)
        copy_time = time.time() - started
        print(f"复制数据，耗时: {copy_time:.2f}秒")

        started = time.time()
        algo.create_index()
        create_index_time = time.time() - started
        print(f"创建索引，耗时: {create_index_time:.2f}秒")

    build_time = copy_time + create_index_time
    print(f"总构建时间: {build_time:.2f}秒")

    index_size = algo.get_memory_usage()
    table_size = algo.get_table_usage()
    print(f"索引大小: {index_size/1024:.2f} MB")
    print(f"表大小: {table_size/1024:.2f} MB")

    return copy_time, create_index_time, build_time, index_size, table_size


def _fetch_train_vectors(train_data, sorted_indices):
    """Fetch the given sorted, unique row indices from the HDF5 train set.

    Returns a dict mapping row index -> vector. Small requests use a single
    fancy-index read; large ones are split into batches submitted to a thread
    pool (the actual reads are serialized with a lock because h5py handles
    are not thread-safe — the threads mainly overlap Python-side dict work).
    """
    total = len(sorted_indices)
    if total < 10000:
        print(f"小数据量({total})，使用直接批量获取")
        rows = train_data[sorted_indices]
        return {idx: row for idx, row in zip(sorted_indices, rows)}

    import threading
    import concurrent.futures
    import os

    max_workers = min(8, os.cpu_count())
    print(f"大数据量({total})，使用{max_workers}线程优化")

    lock = threading.Lock()

    def read_batch_safe(batch_indices):
        # Serialize reads: h5py objects are not thread-safe.
        with lock:
            batch = train_data[batch_indices]
            return {idx: row for idx, row in zip(batch_indices, batch)}

    batch_size = max(1000, total // (max_workers * 2))
    batches = [sorted_indices[i:i + batch_size]
               for i in range(0, total, batch_size)]

    fetched = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(read_batch_safe, batch) for batch in batches]
        for future in concurrent.futures.as_completed(futures):
            fetched.update(future.result())
    return fetched


def run(definition, dataset_name, count, runs, parallel_workers, reuse, reads=1, duration=0,
        drop_after_test=False, results_dir=None, tablename=None):
    """Run the algorithm benchmarking.

    Builds (or reuses) the index, executes ``runs`` rounds per query-argument
    group, keeps the round with the highest QPS, computes true distances for
    that round's candidates, and stores the results on disk.
    """
    # Open the HDF5 dataset; only the test split is loaded into memory.
    dataset, X_test, distance = load_and_transform_dataset(dataset_name)

    algo = instantiate_algorithm(definition)
    if hasattr(algo, "parallel_workers"):
        algo.parallel_workers = parallel_workers

    if hasattr(algo, "tablename"):
        algo.tablename = tablename

    if hasattr(algo, "reset_data_count"):
        algo.reset_data_count()

    try:
        copy_time, create_index_time, build_time, index_size, table_size = copy_and_create_index(
            algo, dataset, reuse
        )

        # Always run at least one (possibly empty) query-argument group.
        query_argument_groups = definition.query_argument_groups or [[]]

        for pos, query_arguments in enumerate(query_argument_groups, 1):
            print(f"\n运行第 {pos}/{len(query_argument_groups)}个参数组: {query_arguments}")
            if query_arguments:
                algo.set_query_arguments(*query_arguments)

            # One entry per benchmark round: (round_idx, qps, descriptor, results).
            run_results = []

            print(f"执行 {runs} 次运行，记录性能指标...")
            # NOTE: the loop variable must not be named `run` — that would
            # shadow this function's own name.
            for run_idx in range(runs):
                print(f"运行第 {run_idx + 1} 次, 总共 {runs} 次")
                # Raw results only; distances are computed for the best round.
                descriptor, results = run_individual_query(
                    algo, X_test, distance, count, reads, duration,
                    definition=definition, query_arguments=query_arguments, tablename=tablename
                )
                qps = 1.0 / descriptor["best_search_time"]
                run_results.append((run_idx, qps, descriptor, results))
                print(f"第 {run_idx + 1} 次运行的QPS: {qps:.4f}")

            # Keep only the round with the highest QPS.
            best_run, best_qps, best_descriptor, best_results = max(run_results, key=lambda x: x[1])
            print(f"\n最高QPS的运行是第 {best_run + 1} 次，QPS = {best_qps:.4f}")

            print("计算最佳运行结果的距离...")
            train_data = dataset["train"]
            processed_results = []

            # Flatten all candidate indices; remember each query's slice
            # (start offset, length, query-vector index).
            all_candidates = []
            candidate_mapping = []

            for _, (candidates, idx) in best_results:
                start_idx = len(all_candidates)
                all_candidates.extend(candidates)
                candidate_mapping.append((start_idx, len(candidates), idx))

            # h5py fancy indexing requires sorted, unique indices.
            unique_sorted_candidates = sorted(set(all_candidates))

            print(f"多线程优化批量获取{len(unique_sorted_candidates)}个训练数据点...")
            t0 = time.time()
            candidates_data_map = _fetch_train_vectors(train_data, unique_sorted_candidates)
            print(f"批量获取原始数据耗时: {time.time() - t0:.4f}秒")
            t0 = time.time()

            # Compute true distances per query. Iterate the mapping in
            # lockstep instead of `pop(0)` (which is O(n) per call).
            for (query_time, _), (start_idx, length, idx) in zip(best_results, candidate_mapping):
                query_vector = X_test[idx]
                candidates_indices = all_candidates[start_idx:start_idx + length]

                # Resolve each candidate index to its training vector.
                train_subset = [candidates_data_map[cand_idx] for cand_idx in candidates_indices]

                candidates_with_distance = calculate_distances(
                    query_vector, candidates_indices, train_subset, distance
                )
                processed_results.append((query_time, (candidates_with_distance, idx)))
            print(f"计算距离耗时: {time.time() - t0:.4f}秒")

            # Store the best round's descriptor with distances filled in.
            descriptor, results = best_descriptor, processed_results

            descriptor.update({
                "copy_time": copy_time,
                "create_index_time": create_index_time,
                "build_time": build_time,
                "index_size": index_size,
                "table_size": table_size,
                "algo": definition.algorithm,
                "dataset": dataset_name
            })

            store_results(
                dataset_name=dataset_name,
                count=count,
                definition=definition,
                query_arguments=query_arguments,
                attrs=descriptor,
                results=results,
                results_dir=results_dir
            )
    finally:
        try:
            if drop_after_test:
                algo.drop()
                print(f"清理 {definition.algorithm}")
        except Exception as e:
            print(f"清理 {definition.algorithm} 时出错: {e}")


def load_all_algorithms():
    """Load all algorithm configurations from config.yml files."""
    enabled = []
    for path in glob.glob("algorithms/*/config.yml"):
        with open(path) as fh:
            try:
                cfg = yaml.safe_load(fh)
                entries = cfg.get("float", {}).get("any", []) if cfg else []
                # Keep every entry that is not explicitly disabled.
                enabled.extend(a for a in entries if not a.get("disabled", False))
            except Exception as e:
                print(f"加载 {path} 时出错: {e}")
    return enabled


def parse_args():
    """Parse command line arguments for the benchmark runner."""
    p = argparse.ArgumentParser(description='Run ANN benchmarks')
    p.add_argument('--dataset', choices=DATASETS.keys(), required=True,
                   help='数据集名称')
    p.add_argument('--count', type=int, default=10,
                   help='返回的最近邻居数量')
    p.add_argument('--runs', type=int, default=1,
                   help='运行测试的次数, 默认1次，因为有些数据库有缓存优化，多次运行结果会不一样。当runs>1时，只保留QPS最高的运行结果。')
    p.add_argument('--duration', type=int, default=30,
                   help='运行测试的时间')
    p.add_argument('--parallel_workers', type=int, default=8,
                   help='并行构建索引的线程数')
    p.add_argument("--algorithms", metavar="NAME", default=None,
                   help="只运行指定的算法，多个算法用英文逗号分隔")
    p.add_argument("--tablename", metavar="NAME", default="items",
                   help="表名")
    p.add_argument("--reuse", action='store_true', default=False,
                   help="是否尝试重用现有的数据和索引，默认False表示重新复制数据和创建索引")
    p.add_argument("--drop_after_test", action='store_true', default=False,
                   help="运行测试后删除表")
    p.add_argument('--reads', type=int, default=1,
                   help='请求的并发数')
    p.add_argument("--results_dir", type=str, default="results",
                   help="结果保存目录")
    return p.parse_args()


def main():
    """Main entry point to run all enabled algorithms."""
    args = parse_args()

    print(f"\n处理数据集: {args.dataset}")

    # Dataset metadata decides which algorithm definitions are applicable.
    dataset, dimension = get_dataset(args.dataset)
    distance = dataset.attrs["distance"]
    point_type = dataset.attrs.get("point_type", "float")

    definitions = get_definitions(
        dimension=dimension,
        point_type=point_type,
        distance_metric=distance,
        count=args.count
    )
    print(f"发现 {len(definitions)} 个算法定义")

    # Optionally restrict to a user-provided comma-separated algorithm list.
    if args.algorithms:
        print(f"运行用户指定算法: {args.algorithms}")
        available = {d.algorithm for d in definitions}
        requested = set(args.algorithms.split(","))
        unknown = requested - available
        if unknown:
            # Fail fast with the list of valid choices.
            print(f"错误: 以下算法不存在: {', '.join(unknown)}")
            print(f"可用算法: {', '.join(available)}")
            return
        definitions = [d for d in definitions if d.algorithm in requested]

    print(f"需要运行 {len(definitions)} 个算法:")
    for idx, definition in enumerate(definitions):
        print(
            f"\n运行第{idx + 1}个算法: {definition.algorithm}, 参数: {definition.arguments}, 查询参数: {definition.query_argument_groups}")
        try:
            run(definition,
                dataset_name=args.dataset,
                count=args.count,
                runs=args.runs,
                parallel_workers=args.parallel_workers,
                tablename=args.tablename,
                reuse=args.reuse,
                reads=args.reads,
                duration=args.duration,
                drop_after_test=args.drop_after_test,
                results_dir=args.results_dir)
        except Exception as e:
            # One failing algorithm must not abort the remaining ones.
            print(f"运行 {definition.algorithm} 时出错: {e}")

# Script entry point: parse CLI args and run the benchmark suite.
if __name__ == "__main__":
    main()
