# coding: utf-8

import numba
from commpy.channelcoding.ldpc import get_ldpc_code_params, triang_ldpc_systematic_encode
import torch
import numpy as np
from scipy.sparse import coo_matrix
import warnings

#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
#       Torch version for LDPC encoding, could accelerate by GPUs
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#

__all__ = [
    "triang_ldpc_systematic_encode_torch", "torch_ldpc_bp_decode"
]

# Module-level constants
_llr_max = 500.0  # Clipping threshold for LLR magnitudes (keeps tanh/atanh numerically stable)


def triang_ldpc_systematic_encode_torch(message_bits, ldpc_code_params, pad=True):
    """Encode message bits with a systematic triangular LDPC code (PyTorch).

    GPU-friendly counterpart of commpy's ``triang_ldpc_systematic_encode``.

    Parameters
    ----------
    message_bits : torch.Tensor
        1-D tensor of 0/1 message bits (any integer or float dtype).
    ldpc_code_params : dict
        Must contain 'generator_matrix' as a scipy sparse matrix of shape
        (n_parity, block_length).
    pad : bool, optional
        If True (default), zero-pad the message up to a multiple of the
        block length; otherwise raise ValueError on a length mismatch.

    Returns
    -------
    torch.Tensor
        int8 tensor of shape (block_length + n_parity, num_blocks): the
        systematic bits stacked on top of the parity bits, one codeword
        per column.
    """
    device = message_bits.device
    # Densify once and keep the matmul operands in float32: torch.matmul does
    # not promote mixed int/float operands, so an integer generator matrix
    # multiplied with a float message tensor would raise.
    generator_matrix = torch.tensor(
        ldpc_code_params['generator_matrix'].toarray(),
        dtype=torch.float32, device=device)
    block_length = generator_matrix.shape[1]
    modulo = message_bits.shape[0] % block_length

    if modulo:
        if pad:
            pad_len = block_length - modulo
            padding = torch.zeros(pad_len, device=device, dtype=message_bits.dtype)
            message_bits = torch.cat((message_bits, padding), dim=0)
        else:
            raise ValueError('Padding is disabled but message length is not a multiple of block length.')
    num_blocks = message_bits.shape[0] // block_length

    # One codeword per column: (block_length, num_blocks). Cast to float32 so
    # both the matmul and the final concatenation see a single dtype.
    message_bits_matrix = message_bits.view(num_blocks, block_length).t().to(torch.float32)

    # parity = G @ message (mod 2)
    parity_part = (generator_matrix @ message_bits_matrix) % 2

    # Systematic part on top of the parity part, cast back to bits.
    coded_message = torch.cat((message_bits_matrix, parity_part), dim=0).contiguous().to(torch.int8)

    return coded_message


def torch_ldpc_bp_decode(llr_vec, ldpc_code_params, decoder_algorithm, n_iters, llr_clip=500.0):
    """
    PyTorch-accelerated LDPC belief-propagation decoder.

    Parameters:
        llr_vec: input LLR tensor, 1-D (one or more concatenated codewords)
            or 2-D with shape (batch_size, n_vnodes). Positive LLR <=> bit 0.
        ldpc_code_params: dict with the LDPC code parameters:
            - parity_check_matrix: parity-check matrix (scipy sparse)
            - n_vnodes: number of variable nodes (codeword length)
        decoder_algorithm: 'SPA' (sum-product) or 'MSA' (min-sum)
        n_iters: maximum number of BP iterations
        llr_clip: clipping threshold for LLR magnitudes (default 500.0, the
            same value as the module-level ``_llr_max``)

    Returns:
        dec_word: hard-decision bits (torch.int8) of shape
            (n_vnodes, batch_size), squeezed to 1-D when batch_size == 1
        out_llrs: a-posteriori LLRs with the same shape as dec_word
    """
    device = llr_vec.device
    n_vnodes = ldpc_code_params['n_vnodes']

    # BP math (tanh/atanh) needs a floating dtype; promote integer inputs.
    if not llr_vec.is_floating_point():
        llr_vec = llr_vec.float()

    # Reshape the input LLRs to (batch_size, n_vnodes).
    if llr_vec.dim() == 1:
        batch_size = llr_vec.size(0) // n_vnodes
        llr_vec = llr_vec.view(batch_size, n_vnodes)
    else:
        batch_size, n_vnodes_ = llr_vec.shape
        assert n_vnodes_ == n_vnodes, "LLR向量维度与参数不符"

    # All working tensors below follow the LLR dtype; mixing float32 buffers
    # with float64 inputs made scatter_reduce_ raise after the first iteration.
    dtype = llr_vec.dtype

    # Clip LLR magnitudes to avoid numerical overflow.
    llr_vec = torch.clamp(llr_vec, -llr_clip, llr_clip)

    # Sparse COO view of the parity-check matrix: one entry per Tanner-graph edge.
    H_sparse = ldpc_code_params['parity_check_matrix'].astype(float).tocoo()
    n_rows = H_sparse.shape[0]

    # Edge endpoints: row_indices -> check node, col_indices -> variable node.
    row_indices = torch.tensor(H_sparse.row, dtype=torch.long, device=device)
    col_indices = torch.tensor(H_sparse.col, dtype=torch.long, device=device)

    # Initial state: hard decision straight from the channel LLRs.
    out_llrs = llr_vec.clone()
    dec_word = (llr_vec < 0).to(torch.int8)

    # Initialize edge messages (variable -> check) with the channel LLR of
    # each edge's variable node. A single gather (which copies) replaces the
    # original per-edge Python loop.
    edge_messages = llr_vec[:, col_indices]

    # Main iteration loop.
    for iter_cnt in range(n_iters):
        # Early stopping: all syndromes zero <=> every word is a valid codeword.
        edge_bits = dec_word[:, col_indices]
        check_sum = torch.zeros(batch_size, n_rows, dtype=dtype, device=device)
        check_sum.scatter_add_(1, row_indices.expand(batch_size, -1), edge_bits.type(check_sum.dtype))
        syndrome = check_sum % 2
        if (syndrome == 0).all():
            break

        # -------- Check-node update --------
        if decoder_algorithm == 'SPA':
            # Sum-product update:
            #   m_out = 2 * atanh( prod_{other edges in row} tanh(m_in / 2) )
            tanh_msg = torch.tanh(0.5 * edge_messages)

            # Separate signs and magnitudes; clamp magnitudes away from zero
            # so the exclusion division below cannot blow up.
            sign_tanh = torch.sign(tanh_msg)
            abs_tanh = torch.clamp(torch.abs(tanh_msg), 1e-12, 1.0)

            # Per-row products over all edges of each row.
            abs_prod = torch.ones(batch_size, n_rows, dtype=dtype, device=device)
            sign_prod = torch.ones(batch_size, n_rows, dtype=dtype, device=device)
            abs_prod.scatter_reduce_(1, row_indices.expand(batch_size, -1),
                                     abs_tanh, 'prod', include_self=False)
            sign_prod.scatter_reduce_(1, row_indices.expand(batch_size, -1),
                                      sign_tanh, 'prod', include_self=False)

            # Exclude each edge's own contribution (divide out the magnitude,
            # multiply out the sign since signs are in {-1, 0, 1}).
            abs_others = abs_prod[:, row_indices] / abs_tanh
            sign_others = sign_prod[:, row_indices] * sign_tanh

            # atanh with its argument clamped strictly inside (-1, 1).
            msg_out = 2.0 * torch.atanh(torch.clamp(sign_others * abs_others, -1 + 1e-10, 1 - 1e-10))
            edge_messages = torch.clamp(msg_out, -llr_clip, llr_clip)

        elif decoder_algorithm == 'MSA':
            # Min-sum update, processed one check node (row) at a time.
            # NOTE(review): assumes every non-empty row has degree >= 2
            # (second_min_val indexing) — holds for any real LDPC code.
            for row in range(n_rows):
                edge_mask = (row_indices == row)
                if not edge_mask.any():
                    continue

                row_messages = edge_messages[:, edge_mask]

                sign_msg = torch.sign(row_messages)
                abs_msg = torch.abs(row_messages)

                # Product of all signs in the row.
                sign_prod = torch.prod(sign_msg, dim=1)

                # Smallest and second-smallest magnitudes in the row.
                sorted_abs, _ = torch.sort(abs_msg, dim=1)
                min_val = sorted_abs[:, 0]
                second_min_val = sorted_abs[:, 1]

                # How many edges attain the minimum (ties keep the minimum).
                min_count = (abs_msg == min_val.unsqueeze(1)).sum(dim=1)

                # Output magnitude excludes the edge itself: the unique
                # minimum edge receives the second minimum, all others the minimum.
                output_abs = min_val.unsqueeze(1).expand(-1, sorted_abs.size(1)).clone()
                condition = (abs_msg == min_val.unsqueeze(1)) & (min_count == 1).unsqueeze(1)
                output_abs[condition] = second_min_val.unsqueeze(1).expand(-1, sorted_abs.size(1))[condition]

                # Output sign excludes the edge itself: total sign times own
                # sign (equivalent to division for values in {-1, 0, 1}).
                output_sign = sign_prod.unsqueeze(1) * sign_msg

                edge_messages[:, edge_mask] = output_sign * output_abs
        else:
            raise ValueError("不支持的译码算法. 请选择 'SPA' 或 'MSA'")

        # -------- Variable-node update --------
        # Sum of all incoming check messages per variable node.
        total_sum = torch.zeros(batch_size, n_vnodes, dtype=dtype, device=device)
        total_sum.scatter_add_(1, col_indices.expand(batch_size, -1), edge_messages.to(dtype=total_sum.dtype))

        # New edge messages: channel LLR plus the sum of all OTHER check messages.
        vnode_llrs = llr_vec.gather(1, col_indices.expand(batch_size, -1))
        vnode_sums = total_sum.gather(1, col_indices.expand(batch_size, -1))
        edge_messages = vnode_llrs + (vnode_sums - edge_messages)

        # A-posteriori LLRs and hard decision.
        out_llrs = llr_vec + total_sum
        dec_word = (out_llrs < 0).to(torch.int8)

    # Match commpy's output layout: codeword bits along dim 0.
    out_llrs = out_llrs.t().contiguous()
    dec_word = dec_word.t().contiguous()

    if batch_size == 1:
        out_llrs = out_llrs.squeeze(1)
        dec_word = dec_word.squeeze(1)

    return dec_word, out_llrs


#  TODO: version 2


# TODO:
def plot_ldpc_compare(param: dict, code_lengths: list):
    """Benchmark commpy vs. torch LDPC encode/decode and save a comparison plot.

    Parameters:
        param: LDPC code parameters (from get_ldpc_code_params with
            compute_matrix=True).
        code_lengths: list of message lengths (in bits) to benchmark; each
            should be a multiple of the code's block length.

    Requires a CUDA device (torch tensors are created on "cuda").
    """
    # Local imports: `time` and `ldpc_bp_decode` were previously only
    # imported inside the __main__ guard, so calling this function from an
    # importing module raised NameError.
    import time
    import matplotlib.pyplot as plt
    from commpy.channelcoding.ldpc import ldpc_bp_decode

    traditional_ldpc_encode = []
    torch_ldpc_encode = []
    traditional_ldpc_decode = []
    torch_ldpc_decode = []
    for code_length in code_lengths:
        bits = np.random.randint(0, 2, code_length)

        # Reference (CPU) encoder.
        t1 = time.time()
        ldpc_encode_bits = triang_ldpc_systematic_encode(bits, param)
        t2 = time.time()
        traditional_ldpc_encode.append(t2 - t1)

        # Torch encoder (timed region includes the host-to-device transfer,
        # as in the original benchmark). CUDA kernels launch asynchronously,
        # so synchronize before reading the clock.
        bits1 = torch.tensor(bits, device="cuda")
        ldpc_encode_bits1 = triang_ldpc_systematic_encode_torch(bits1, param)
        torch.cuda.synchronize()
        t3 = time.time()
        torch_ldpc_encode.append(t3 - t2)

        # BPSK mapping (bit 0 -> +1, bit 1 -> -1) as noiseless decoder input.
        ldpc_encoded_bits = 1 - 2 * ldpc_encode_bits

        # Reference (CPU) decoder.
        t4 = time.time()
        rec, _ = ldpc_bp_decode(ldpc_encoded_bits.reshape(-1, order='F').astype(float), param, "MSA", 10)
        t5 = time.time()
        traditional_ldpc_decode.append(t5 - t4)

        # Torch decoder (again including the transfer, with a final sync).
        ldpc_encoded_bits1 = torch.tensor(ldpc_encoded_bits.reshape(-1, order='F').astype(float), device="cuda")
        rec1, _ = torch_ldpc_bp_decode(ldpc_encoded_bits1, param, "MSA", 10)
        torch.cuda.synchronize()
        t6 = time.time()
        torch_ldpc_decode.append(t6 - t5)

    code_len_m = [x / 1e6 for x in code_lengths]
    fig, ax = plt.subplots(1, 2, figsize=(8, 4))
    # Left panel: encoding time.
    ax[0].plot(code_len_m, traditional_ldpc_encode, 'b-^', label='commpy LDPC encode')
    ax[0].plot(code_len_m, torch_ldpc_encode, 'r--*', label='torch LDPC encode')
    ax[0].set_xlabel("Code Length (million bits)")
    ax[0].set_ylabel("Time (s)")
    ax[0].set_title("LDPC Encode")
    ax[0].legend()
    # Right panel: decoding time.
    ax[1].plot(code_len_m, traditional_ldpc_decode, 'b-^', label='commpy LDPC decode')
    ax[1].plot(code_len_m, torch_ldpc_decode, 'r--*', label='torch LDPC decode')
    ax[1].set_xlabel("Code Length (million bits)")
    ax[1].set_ylabel("Time (s)")
    ax[1].set_title("LDPC Decode")
    ax[1].legend()
    plt.tight_layout()
    # plt.show()
    # NOTE(review): machine-specific absolute output path — adjust before running.
    plt.savefig(r"F:\PCL\month\2025\25-07\ldpc_encode_decode.png", dpi=600)


if __name__ == "__main__":
    # Smoke test / micro-benchmark: compare commpy's reference LDPC
    # encoder/decoder against the torch implementations above.
    # Requires a CUDA device, the `channel` module, and the hard-coded
    # LDPC parameter file below.
    import numpy as np
    import time
    import commpy
    from channel import *
    from commpy.channelcoding.ldpc import get_ldpc_code_params, ldpc_bp_decode, triang_ldpc_systematic_encode
    import os
    torch.manual_seed(0)
    np.random.seed(0)
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    # NOTE(review): machine-specific absolute path — adjust before running.
    param = get_ldpc_code_params(r"F:\research-code\2025\H256+LDPC\channel_codec\1440.720.txt", compute_matrix=True)
    bits = np.random.randint(0, 2, 1440*10)
    t_2 = time.time()
    ldpc_encoded_bits = triang_ldpc_systematic_encode(bits, param)
    t_1 = time.time()
    bits1 = torch.tensor(bits, device="cuda")
    ldpc_encoded_bits1 = triang_ldpc_systematic_encode_torch(bits1, param)
    t0 = time.time()
    # Verify the torch encoder matches the commpy reference.
    print("encode equal? : ", np.allclose(ldpc_encoded_bits, ldpc_encoded_bits1.detach().cpu().numpy()))
    print(f"Commpy ldpc coding consuming: {t_1-t_2} secs.\nTorch ldpc coding consuming: {t0-t_1} secs.")
    # BPSK mapping: bit 0 -> +1, bit 1 -> -1.
    ldpc_encoded_bits = 1 - 2 * ldpc_encoded_bits
    # TODO: add channel noise
    snr = 5
    # # ldpc_encoded_bits = awgn_torch(ldpc_encoded_bits, snr)
    ldpc_encoded_bits = commpy.awgn(ldpc_encoded_bits.reshape(-1), snr)
    #-----------------------------------------------------------------------------   -----------------#
    t1 = time.time()
    rec, _ = ldpc_bp_decode(ldpc_encoded_bits.reshape(-1, order='F').astype(float), param, "MSA", 10)
    t2 = time.time()
    print(f"Commpy ldpc decoding consuming: {t2 - t1} secs.")
    ldpc_encoded_bits = torch.tensor(ldpc_encoded_bits.reshape(-1, order='F').astype(float), device="cuda")
    rec1, _ = torch_ldpc_bp_decode(ldpc_encoded_bits, param, "MSA", 10)
    t3 = time.time()
    print(f"Torch ldpc decoding consuming: {t3 - t2} secs.")
    # Verify the torch decoder matches the commpy reference.
    print("decode equal? : ", np.allclose(rec, rec1.detach().cpu().numpy()))

    # codelengths = [1440*144, 1440*1440, 1440*14400]
    # plot_ldpc_compare(param, codelengths)
    pass
