import time

import numpy as np
from numba import get_num_threads, get_thread_id, njit, prange
from sklearn.metrics import confusion_matrix


# NOTE: njit already defaults to nopython=True.
# BROKEN ON PURPOSE — kept only as a demonstration of a data race: multiple
# threads perform the non-atomic read-modify-write on the same conf_matrix
# cell concurrently, so increments can be lost and the counts come out wrong.
# This function is not called by the benchmark below; use the *_fixed variant.
@njit(parallel=True, nogil=False)
def compute_confusion_matrix_numba_parallel(y_true, y_pred):
    """UNSAFE parallel 2x2 confusion matrix (races on conf_matrix writes)."""
    num_classes = 2  # assumes binary classification (labels 0/1)
    conf_matrix = np.zeros((num_classes, num_classes), dtype=np.int64)
    for i in prange(len(y_true)):  # prange lets numba parallelize the loop
        # Racy: `+= 1` is a read-modify-write that is not atomic across threads.
        conf_matrix[y_true[i], y_pred[i]] += 1
    return conf_matrix


@njit(parallel=True, nogil=True)
def compute_confusion_matrix_numba_parallel_fixed(y_true, y_pred):
    """Race-free parallel 2x2 confusion matrix.

    Each worker thread accumulates into its own private matrix (selected by
    its thread id), then the per-thread matrices are summed serially, so no
    two threads ever write to the same memory location.
    """
    num_classes = 2  # assumes binary classification (labels 0/1)
    n_threads = get_num_threads()  # number of numba worker threads
    thread_local_matrices = np.zeros((n_threads, num_classes, num_classes),
                                     dtype=np.int64)

    # Each iteration writes only into its own thread's slice -> no data race.
    for i in prange(len(y_true)):
        thread_id = get_thread_id()  # id of the thread running this iteration
        thread_local_matrices[thread_id, y_true[i], y_pred[i]] += 1

    # Serial reduction: merge the per-thread partial matrices into one result.
    conf_matrix = np.zeros((num_classes, num_classes), dtype=np.int64)
    for local_matrix in thread_local_matrices:
        conf_matrix += local_matrix

    return conf_matrix


@njit
def compute_confusion_matrix_numba(y_true, y_pred):
    """Single-threaded JIT-compiled 2x2 confusion matrix.

    Counts pairs of binary labels (0/1) and returns
    ``np.array([[tn, fp], [fn, tp]])``; pairs with any other label value
    are silently ignored, matching the original's elif chain.
    """
    true_neg = 0
    false_pos = 0
    false_neg = 0
    true_pos = 0
    for actual, predicted in zip(y_true, y_pred):
        if actual == 1 and predicted == 1:
            true_pos += 1
        elif actual == 0 and predicted == 0:
            true_neg += 1
        elif actual == 0 and predicted == 1:
            false_pos += 1
        elif actual == 1 and predicted == 0:
            false_neg += 1
    return np.array([[true_neg, false_pos], [false_neg, true_pos]])


def compute_confusion_matrix(y_true, y_pred):
    """Vectorized 2x2 confusion matrix for binary labels.

    Builds boolean masks once per class and counts their intersections,
    returning ``np.array([[tn, fp], [fn, tp]])``.
    """
    actual_pos = y_true == 1
    actual_neg = y_true == 0
    pred_pos = y_pred == 1
    pred_neg = y_pred == 0
    true_pos = np.sum(actual_pos & pred_pos)
    true_neg = np.sum(actual_neg & pred_neg)
    false_pos = np.sum(actual_neg & pred_pos)
    false_neg = np.sum(actual_pos & pred_neg)
    return np.array([[true_neg, false_pos], [false_neg, true_pos]])


# Example data: one million random binary labels and predictions.
y_true = np.random.randint(0, 2, size=1000000)
y_pred = np.random.randint(0, 2, size=1000000)


def _benchmark(label, fn, *args):
    """Time a single call of ``fn(*args)`` and print '<label>: <secs> 秒 <result>'.

    Uses time.perf_counter() — the monotonic high-resolution clock intended
    for benchmarking — instead of time.time(), which is wall-clock time and
    can be adjusted mid-measurement.
    """
    start = time.perf_counter()
    result = fn(*args)
    elapsed = time.perf_counter() - start
    print(f'{label}: {elapsed:.6f} 秒', result)
    return result


conf_matrix = _benchmark('numpy时长', compute_confusion_matrix, y_true, y_pred)

# Warm-up call on a tiny input so numba's one-off JIT compilation cost
# is excluded from the measured time.
compute_confusion_matrix_numba(
    np.random.randint(0, 2, size=1), np.random.randint(0, 2, size=1))
conf_matrix = _benchmark('numba时长', compute_confusion_matrix_numba,
                         y_true, y_pred)

conf_matrix = _benchmark('sklearn时长', confusion_matrix, y_true, y_pred)

# Why scikit-learn's built-in confusion_matrix is comparatively slow:
# - General-purpose design: it performs extensive input checking, validation
#   and preprocessing, and supports multi-class and non-contiguous labels.
# - Parts of its label handling are not fully vectorized, so the Python
#   interpreter is involved per step.
# - Compatibility overhead: it accepts pandas DataFrames, NumPy arrays and
#   other containers, which adds dispatch/conversion cost.

# Warm-up compile for the parallel variant as well.
compute_confusion_matrix_numba_parallel_fixed(
    np.random.randint(0, 2, size=1), np.random.randint(0, 2, size=1))
conf_matrix = _benchmark('numba并发时长',
                         compute_confusion_matrix_numba_parallel_fixed,
                         y_true, y_pred)
