import random
import time
import os
import pickle

import numpy as np
import tensorflow as tf

from src.evaluator import has_precision_problem_occurred
from src.generator_utils import get_random_seed_tensor, exec_method_by_index

# Pin GPU selection before TensorFlow initializes CUDA: order devices by PCI
# bus ID and expose only device 1 to this process.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Base directory for saving tensors
SAVE_BASE_DIR = "problem_tensors"


def save_tensor(tensor, operator, pattern, framework, method="random", index=None, counter=0):
    """Persist a tensor that triggered a precision problem as a .npy file.

    Files are written under ``problem_tensors/{operator}/{framework}/`` and
    named from the pattern, search method, running counter and a timestamp so
    repeated runs never collide.

    Args:
        tensor: The offending tensor (tf.Tensor or anything np.array accepts).
        operator: Operator name, used as a directory component.
        pattern: Detection pattern, used as a filename prefix.
        framework: Framework name, used as a directory component.
        method: Search method tag ('random' or 'weighted').
        index: Mutation index, embedded in the name for non-random methods.
        counter: Running counter embedded in the name (zero-padded to 5).

    Returns:
        The path of the written .npy file.
    """
    target_dir = os.path.join(SAVE_BASE_DIR, operator, framework)
    os.makedirs(target_dir, exist_ok=True)

    stamp = time.strftime("%Y%m%d_%H%M%S")
    if method == "random":
        name = f"{pattern}_{method}_{counter:05d}_{stamp}.npy"
    else:
        name = f"{pattern}_{method}_idx{index}_{counter:05d}_{stamp}.npy"
    destination = os.path.join(target_dir, name)

    # tf tensors expose .numpy(); anything else goes through np.array.
    as_numpy = tensor.numpy() if isinstance(tensor, tf.Tensor) else np.array(tensor)
    np.save(destination, as_numpy)
    return destination


def sample(p):
    """Draw an index from ``p`` with probability proportional to its weight.

    Args:
        p: Sequence of non-negative weights (not necessarily normalized).

    Returns:
        An index ``i`` in ``range(len(p))``, drawn with probability
        ``p[i] / sum(p)``.  Zero-weight entries are never returned.

    Raises:
        ValueError: If ``p`` is empty or all weights are zero.  (The previous
            implementation span forever in that case: ``random.uniform(0, 0)``
            always returns 0, so the rejection loop never terminated.)
    """
    weights = [w * 100 for w in p]
    total = sum(weights)
    if total <= 0:
        raise ValueError("sample() requires at least one positive weight")

    # Reject 0 so entries with zero weight stay strictly unreachable.
    threshold = random.uniform(0, total)
    while threshold == 0:
        threshold = random.uniform(0, total)

    cumulative = 0
    for i, w in enumerate(weights):
        cumulative += w
        if cumulative >= threshold:
            return i
    # Floating-point rounding can leave ``cumulative`` a hair below ``total``;
    # fall back to the last index instead of implicitly returning None.
    return len(p) - 1


def sample_mutated(p, k=3):
    """Weighted-sample an index restricted to the top-``k`` entries of ``p``.

    First selects the ``k`` highest-weight indices (ties broken toward the
    lower index, matching the original strict ``>`` scan), then draws one of
    them with probability proportional to its weight.

    Args:
        p: Sequence of non-negative weights.
        k: Number of top entries to sample among; clamped to ``len(p)``.
           (The previous implementation appended ``-1`` placeholder indices
           when ``k > len(p)`` and could return ``-1``.)

    Returns:
        One of the top-``k`` indices of ``p``.

    Raises:
        ValueError: If ``p`` is empty or the top-``k`` weights are all zero.
            (The previous implementation looped forever in that case.)
    """
    if not p:
        raise ValueError("sample_mutated() requires a non-empty weight list")
    k = min(k, len(p))

    # Stable descending sort keeps lower indices first on ties, matching the
    # original repeated-max scan with strict '>' comparison.
    selected = sorted(range(len(p)), key=lambda i: p[i], reverse=True)[:k]

    weights = [p[i] * 100 for i in selected]
    total = sum(weights)
    if total <= 0:
        raise ValueError("sample_mutated() requires a positive weight among the top-k entries")

    # Reject 0 so zero-weight entries stay strictly unreachable.
    threshold = random.uniform(0, total)
    while threshold == 0:
        threshold = random.uniform(0, total)

    cumulative = 0
    for idx, w in zip(selected, weights):
        cumulative += w
        if cumulative >= threshold:
            return idx
    # Guard against floating-point shortfall at the very end.
    return selected[-1]


def init_p(iteration_times, b, r, operator, pattern, shape):
    """Estimate per-mutation hit probabilities for each framework.

    Runs ``iteration_times`` rounds; each round draws a fresh seed tensor,
    applies all 42 mutation types to it, and records which of the three
    frameworks (TF, PyTorch, MNN) showed a precision problem.

    Args:
        iteration_times: Number of sampling rounds.
        b, r: Threshold parameters forwarded to the evaluator.
        operator: Operator under test.
        pattern: Detection pattern forwarded to the evaluator.
        shape: Shape of the generated seed tensors.

    Returns:
        Three lists of length 42 with the empirical hit rate per mutation
        type, for TF, PyTorch and MNN respectively.
    """
    num_mutations = 42
    hits_tf = [0] * num_mutations
    hits_torch = [0] * num_mutations
    hits_mnn = [0] * num_mutations

    for round_no in range(iteration_times):
        print('iter {0}: sampling...'.format(round_no))
        # Fresh seed tensor for this round.
        seed = get_random_seed_tensor(shape)
        for mutation in range(num_mutations):
            mutated = exec_method_by_index(seed, mutation)
            # A str result signals overflow in the mutation -- treated as an
            # inefficient mutation, i.e. an automatic miss.
            if isinstance(mutated, str):
                continue
            occurred = has_precision_problem_occurred(mutated, operator, pattern, b, r)
            if occurred[0]:
                hits_tf[mutation] += 1
            if occurred[1]:
                hits_torch[mutation] += 1
            if occurred[2]:
                hits_mnn[mutation] += 1

    return (
        [h / iteration_times for h in hits_tf],
        [h / iteration_times for h in hits_torch],
        [h / iteration_times for h in hits_mnn],
    )


def run_process(operator, pattern, b, r, shape):
    """Compare random search against probability-weighted mutation search.

    First estimates per-mutation hit probabilities with ``init_p`` (500
    rounds).  Then runs two search phases, each until 100 precision-problem
    cases have been collected per framework (TF, PyTorch, MNN):

    * random: test freshly generated seed tensors directly;
    * weighted: mutate each seed with the framework's top-ranked mutation
      (``sample_mutated(p, 1)``) before testing.

    Every triggering tensor is persisted via ``save_tensor``.

    Args:
        operator: Operator under test (e.g. 'batch_normalization').
        pattern: Detection pattern forwarded to the evaluator (e.g. 'MRE').
        b: Threshold parameter b for the evaluator.
        r: Threshold parameter r for the evaluator.
        shape: Shape of the generated seed tensors.

    Returns:
        Dict with per-phase wall-clock times, iteration counts and
        saved-tensor counts.
    """
    print(f"\n{'=' * 80}")
    print(f"Running: operator={operator}, pattern={pattern}, shape={shape}")
    print(f"{'=' * 80}\n")

    p_matrix_tf, p_matrix_torch, p_matrix_mnn = init_p(500, b, r, operator, pattern, shape)
    print("TensorFlow probabilities:", p_matrix_tf)
    print("PyTorch probabilities:", p_matrix_torch)
    print("MNN probabilities:", p_matrix_mnn)

    # ---- Phase 1: random baseline, counters per framework ----
    success_case_random_torch = 0
    success_case_random_tf = 0
    success_case_random_mnn = 0

    counter_random_tf = 0
    counter_random_torch = 0
    counter_random_mnn = 0

    t = 0

    time_random = time.time()
    while success_case_random_torch < 100 or success_case_random_tf < 100 or success_case_random_mnn < 100:
        if t % 100 == 0:
            print('{0} case. (TF:{1}, Torch:{2}, MNN:{3})'.format(
                t + 1, success_case_random_tf, success_case_random_torch, success_case_random_mnn))

        # NOTE(review): each framework gets its own fresh seed tensor, and
        # hits keep being counted/saved even after a framework passes 100 —
        # preserved as-is, since the timing comparison depends on it.
        seed_tensor = get_random_seed_tensor(shape)
        res = has_precision_problem_occurred(seed_tensor, operator, pattern, b, r)
        if res[0]:
            success_case_random_tf += 1
            save_tensor(seed_tensor, operator, pattern, "tensorflow", method="random", counter=counter_random_tf)
            counter_random_tf += 1

        seed_tensor = get_random_seed_tensor(shape)
        res = has_precision_problem_occurred(seed_tensor, operator, pattern, b, r)
        if res[1]:
            success_case_random_torch += 1
            save_tensor(seed_tensor, operator, pattern, "pytorch", method="random", counter=counter_random_torch)
            counter_random_torch += 1

        seed_tensor = get_random_seed_tensor(shape)
        res = has_precision_problem_occurred(seed_tensor, operator, pattern, b, r)
        if res[2]:
            success_case_random_mnn += 1
            save_tensor(seed_tensor, operator, pattern, "mnn", method="random", counter=counter_random_mnn)
            counter_random_mnn += 1

        t += 1

    time_random = time.time() - time_random
    # BUG FIX: remember the random-phase iteration count before ``t`` is
    # reset for the weighted phase (the old code reported the weighted-phase
    # count under 'iterations_random').
    iterations_random = t

    print('random: {0}'.format(time_random))
    print(f'Total iterations: {iterations_random}')
    print(f'Saved tensors - TF: {counter_random_tf}, Torch: {counter_random_torch}, MNN: {counter_random_mnn}')

    # ---- Phase 2: weighted mutation search, counters per framework ----
    success_case_tf_better = 0
    success_case_torch_better = 0
    success_case_mnn_better = 0

    counter_weighted_tf = 0
    counter_weighted_torch = 0
    counter_weighted_mnn = 0

    t = 0

    time_weighted = time.time()
    while success_case_tf_better < 100 or success_case_mnn_better < 100 or success_case_torch_better < 100:
        if t % 100 == 0:
            print('{0} case. (TF:{1}, Torch:{2}, MNN:{3})'.format(
                t + 1, success_case_tf_better, success_case_torch_better, success_case_mnn_better))

        seed_tensor = get_random_seed_tensor(shape)

        if success_case_tf_better < 100:
            index_sampled_tf_b = sample_mutated(p_matrix_tf, 1)
            final_tensor_tf_b = exec_method_by_index(seed_tensor, index_sampled_tf_b)
            # A str result signals overflow in the mutation (same convention
            # as init_p) -- skip it instead of feeding it to the evaluator.
            if not isinstance(final_tensor_tf_b, str) and \
                    has_precision_problem_occurred(final_tensor_tf_b, operator, pattern, b, r)[0]:
                success_case_tf_better += 1
                save_tensor(final_tensor_tf_b, operator, pattern, "tensorflow",
                            method="weighted", index=index_sampled_tf_b, counter=counter_weighted_tf)
                counter_weighted_tf += 1

        if success_case_torch_better < 100:
            index_sampled_torch_b = sample_mutated(p_matrix_torch, 1)
            final_tensor_torch_b = exec_method_by_index(seed_tensor, index_sampled_torch_b)
            if not isinstance(final_tensor_torch_b, str) and \
                    has_precision_problem_occurred(final_tensor_torch_b, operator, pattern, b, r)[1]:
                success_case_torch_better += 1
                save_tensor(final_tensor_torch_b, operator, pattern, "pytorch",
                            method="weighted", index=index_sampled_torch_b, counter=counter_weighted_torch)
                counter_weighted_torch += 1

        if success_case_mnn_better < 100:
            index_sampled_mnn_b = sample_mutated(p_matrix_mnn, 1)
            final_tensor_mnn_b = exec_method_by_index(seed_tensor, index_sampled_mnn_b)
            if not isinstance(final_tensor_mnn_b, str) and \
                    has_precision_problem_occurred(final_tensor_mnn_b, operator, pattern, b, r)[2]:
                success_case_mnn_better += 1
                save_tensor(final_tensor_mnn_b, operator, pattern, "mnn",
                            method="weighted", index=index_sampled_mnn_b, counter=counter_weighted_mnn)
                counter_weighted_mnn += 1

        t += 1

    time_weighted = time.time() - time_weighted

    print('weighted: {0}'.format(time_weighted))
    print(f'Total iterations: {t}')
    print(f'Saved tensors - TF: {counter_weighted_tf}, Torch: {counter_weighted_torch}, MNN: {counter_weighted_mnn}')

    return {
        'operator': operator,
        'pattern': pattern,
        'time_random': time_random,
        'time_weighted': time_weighted,
        'iterations_random': iterations_random,
        # New, backward-compatible key: weighted-phase iteration count.
        'iterations_weighted': t,
        'tensors_saved_random': {'tf': counter_random_tf, 'torch': counter_random_torch, 'mnn': counter_random_mnn},
        'tensors_saved_weighted': {'tf': counter_weighted_tf, 'torch': counter_weighted_torch,
                                   'mnn': counter_weighted_mnn}
    }


def batch_run_operators(operator_configs, pattern='MRE', b=0.015, r=0.005):
    """Run ``run_process`` over several operator configurations and summarize.

    Args:
        operator_configs: List of dicts, each with at least 'operator' and
            'shape' keys.
        pattern: Detection pattern forwarded to run_process (default 'MRE').
        b: Threshold parameter b.
        r: Threshold parameter r.

    Returns:
        List of per-operator result dicts; failed runs carry an 'error' key.
    """
    divider = '=' * 80
    total = len(operator_configs)
    results = []

    print(f"\n{divider}")
    print(f"Starting batch processing of {total} operators")
    print(f"Pattern: {pattern}, b={b}, r={r}")
    print(f"Tensors will be saved to: {SAVE_BASE_DIR}/{{operator}}/{{framework}}/")
    print(f"{divider}\n")

    for position, config in enumerate(operator_configs, start=1):
        print(f"\n[{position}/{total}] Processing configuration: {config}")

        operator = config['operator']
        shape = config['shape']

        try:
            outcome = run_process(operator, pattern, b, r, shape)
            results.append(outcome)

            saved_rand = outcome['tensors_saved_random']
            saved_weight = outcome['tensors_saved_weighted']
            print(f"\n✓ Completed {operator}")
            print(f"  Random time: {outcome['time_random']:.2f}s")
            print(f"  Weighted time: {outcome['time_weighted']:.2f}s")
            print(f"  Tensors saved (random): TF={saved_rand['tf']}, "
                  f"Torch={saved_rand['torch']}, MNN={saved_rand['mnn']}")
            print(f"  Tensors saved (weighted): TF={saved_weight['tf']}, "
                  f"Torch={saved_weight['torch']}, MNN={saved_weight['mnn']}")

        except Exception as e:
            # Keep the batch going; record the failure for the summary.
            print(f"\n✗ Error processing {operator}: {str(e)}")
            import traceback
            traceback.print_exc()
            results.append({
                'operator': operator,
                'error': str(e)
            })

    # Per-operator one-line summary.
    print(f"\n{divider}")
    print("BATCH PROCESSING SUMMARY")
    print(f"{divider}")
    for entry in results:
        if 'error' in entry:
            print(f"{entry['operator']}: ERROR - {entry['error']}")
        else:
            t_rand = entry['time_random']
            t_weight = entry['time_weighted']
            speedup = t_rand / t_weight if t_weight > 0 else 0
            tensor_total = (sum(entry['tensors_saved_random'].values())
                            + sum(entry['tensors_saved_weighted'].values()))
            print(
                f"{entry['operator']}: Random={t_rand:.2f}s, Weighted={t_weight:.2f}s, "
                f"Speedup={speedup:.2f}x, Total tensors saved={tensor_total}")

    # Persist the summary for later inspection.
    summary_dir = os.path.join(SAVE_BASE_DIR, "summaries")
    os.makedirs(summary_dir, exist_ok=True)
    summary_file = os.path.join(summary_dir, f"batch_summary_{time.strftime('%Y%m%d_%H%M%S')}.pkl")
    with open(summary_file, 'wb') as f:
        pickle.dump(results, f)
    print(f"\nSummary saved to {summary_file}")

    return results


if __name__ == '__main__':
    # Operator configuration list -- uncomment the entries to include in the
    # batch run; each entry needs 'operator' and 'shape' (plus optional
    # 'variable_shape' for operators with weights).
    OPERATOR_CONFIGS = [
        # Pooling operations
        # {'operator': 'avg_pool', 'shape': (4, 3, 14, 14)},
        # {'operator': 'max_pool', 'shape': (4, 3, 14, 14)},

        # Convolution-related
        # {'operator': 'conv2d', 'shape': (4, 3, 14, 14), 'variable_shape': (3, 3, 3, 32)},
        # {'operator': 'bias_add', 'shape': (4, 32, 12, 12), 'variable_shape': (32,)},

        # Fully-connected layer
        # {'operator': 'dense', 'shape': (4, 16)},

        # Reduction operations
        # {'operator': 'reduce_mean', 'shape': (4, 3, 14, 14)},
        # {'operator': 'reduce_max', 'shape': (4, 3, 14, 14)},

        # Activation functions
        # {'operator': 'softmax', 'shape': (4, 10)},
        # {'operator': 'relu', 'shape': (4, 64)},
        # {'operator': 'sigmoid', 'shape': (4, 1)},
        # {'operator': 'tanh', 'shape': (4, 16)},

        # Batch normalization
        {'operator': 'batch_normalization', 'shape': (1, 1, 14, 14)}
    ]

    # Detection pattern and evaluator thresholds for this run.
    pattern = 'MRE'
    b = 0.0003
    r = 0.0001

    # Run batch processing
    batch_run_operators(OPERATOR_CONFIGS, pattern, b, r)
