# coding: utf-8


import numpy as np
import numba
import torch
from torch import nn
from sympy.combinatorics.graycode import GrayCode
from commpy.utilities import signal_power
import commpy


def bitarray2dec_torch(bits, num_bits_symbol):
    """Convert groups of bits (MSB first) to integer symbol indices.

    Parameters
    ----------
    bits : torch.Tensor
        Tensor whose last dimension holds ``num_bits_symbol`` bits
        (0/1 values, any integer or float dtype).
    num_bits_symbol : int
        Number of bits per symbol (size of the last dimension).

    Returns
    -------
    torch.Tensor
        int64 symbol indices, one per bit group.

    Notes
    -----
    Uses integer arithmetic instead of the previous float32 matmul:
    float32 only represents integers exactly up to 2**24, so wide
    symbols would silently lose low-order bits. Callers that applied
    ``.long()`` to the old float result keep working unchanged.
    """
    weights = 2 ** torch.arange(num_bits_symbol - 1, -1, -1,
                                device=bits.device, dtype=torch.int64)
    return (bits.long() * weights).sum(dim=-1)


def dec2bitarray_torch(indices, num_bits_symbol):
    """Expand integer symbol indices into a flat, MSB-first float bit tensor.

    Parameters
    ----------
    indices : torch.Tensor
        Integer symbol indices (any shape).
    num_bits_symbol : int
        Number of bits emitted per index.

    Returns
    -------
    torch.Tensor
        1-D float tensor of 0.0/1.0 values, ``num_bits_symbol`` entries
        per input index, most significant bit first.
    """
    shifts = torch.arange(num_bits_symbol - 1, -1, -1, device=indices.device)
    # Shift each index right by every bit position at once, then keep the LSB.
    planes = indices.unsqueeze(-1) >> shifts
    return (planes & 1).float().reshape(-1)


class Modem:
    """Torch-based modulator/demodulator over an arbitrary complex constellation.

    Symbol index ``i`` maps to ``constellation[i]``; bit groups are read
    MSB first. With ``reorder_as_gray=True`` the constellation is permuted
    so that adjacent symbol indices follow a reflected-binary Gray code,
    matching commpy's ``Modem`` behavior.
    """

    def __init__(self, constellation, device, reorder_as_gray=True):
        """
        Parameters
        ----------
        constellation : sequence of complex
            Constellation points; length must be a power of 2.
        device : str or torch.device
            Device the constellation tensor is stored on.
        reorder_as_gray : bool
            Permute the points into Gray-code order (default True).
        """
        self.device = device
        if reorder_as_gray:
            n = len(constellation)
            # Reflected-binary Gray code: the i-th codeword equals i ^ (i >> 1).
            # This replaces sympy's GrayCode generator with its closed form.
            gray = np.array([i ^ (i >> 1) for i in range(n)])
            # argsort inverts the permutation so index i holds the point whose
            # Gray codeword decodes to i.
            self.constellation = np.asarray(constellation)[gray.argsort()]
        else:
            self.constellation = constellation

    @property
    def constellation(self):
        """Constellation as a complex torch tensor on ``self.device``."""
        return self._constellation

    @constellation.setter
    def constellation(self, value):
        # Validate: the number of points must be a power of two so that each
        # symbol carries an integer number of bits.
        num_bits_symbol = np.log2(len(value))
        if num_bits_symbol != int(num_bits_symbol):
            raise ValueError('Constellation length must be a power of 2.')

        # Store the constellation as a tensor on the configured device.
        self._constellation = torch.tensor(value, device=self.device)

        # Average symbol energy E[|s|^2] (equivalent to commpy's signal_power).
        self.Es = np.mean(np.abs(np.asarray(value)) ** 2)
        self.m = len(self._constellation.reshape(-1))
        self.num_bits_symbol = int(num_bits_symbol)

    def modulate(self, input_bits):
        """Map a 1-D bit tensor to complex baseband symbols.

        Parameters
        ----------
        input_bits : torch.Tensor
            1-D tensor of 0/1 values; its length must be a multiple of
            ``num_bits_symbol`` (``view`` raises otherwise).

        Returns
        -------
        torch.Tensor
            1-D complex tensor with one symbol per bit group.
        """
        k = self.num_bits_symbol
        num_symbols = input_bits.shape[0] // k
        bits_grouped = input_bits.view(num_symbols, k)
        # MSB-first binary weights; integer arithmetic keeps indices exact.
        weights = 2 ** torch.arange(k - 1, -1, -1,
                                    device=input_bits.device, dtype=torch.int64)
        indices = (bits_grouped.long() * weights).sum(dim=1)
        # Table lookup: one constellation point per symbol index.
        return self._constellation[indices]

    def demodulate(self, input_symbols, demod_type, noise_var=0.0):
        """Demodulate received symbols to hard bits or soft LLRs.

        Parameters
        ----------
        input_symbols : torch.Tensor
            1-D complex tensor of received symbols.
        demod_type : str
            ``"hard"`` for minimum-distance bit decisions, ``"soft"`` for
            max-log-free exact LLRs via logsumexp.
        noise_var : float
            Noise variance used by the soft demodulator; must be positive
            for ``"soft"``.

        Returns
        -------
        torch.Tensor
            1-D float tensor: 0.0/1.0 bits for ``"hard"``, LLRs for
            ``"soft"`` (positive LLR favors bit = 1), MSB first per symbol.

        Raises
        ------
        ValueError
            If ``demod_type`` is unknown, or ``noise_var`` is not positive
            for soft demodulation.
        """
        M = self._constellation.shape[0]
        device = input_symbols.device
        k = self.num_bits_symbol

        # Squared Euclidean distance from every received symbol to every
        # constellation point — shared by both branches (hoisted out of the
        # per-bit loop, where it was previously recomputed k times).
        distances = torch.abs(input_symbols.unsqueeze(-1)
                              - self._constellation.unsqueeze(0)) ** 2

        if demod_type == "hard":
            index_list = distances.argmin(dim=1)
            # Unpack the winning indices into MSB-first bits.
            shifts = torch.arange(k - 1, -1, -1, device=device)
            demod_bits = ((index_list.unsqueeze(-1) >> shifts) & 1).float().view(-1)
        elif demod_type == "soft":
            if noise_var <= 0:
                # Previously this divided by zero and emitted NaN LLRs.
                raise ValueError("noise_var must be positive for soft demodulation.")
            llrs = []
            for bit_index in range(k):
                # Constellation indices whose bit `bit_index` (from the LSB) is 1.
                bit_mask = ((torch.arange(M, device=device) >> bit_index) & 1).bool()
                # LLR = log sum exp(-d/var) over bit=1 points minus bit=0 points.
                llr_num = torch.logsumexp(-distances[:, bit_mask] / noise_var, dim=1)
                llr_den = torch.logsumexp(-distances[:, ~bit_mask] / noise_var, dim=1)
                llrs.append(llr_num - llr_den)
            # bit_index counted from the LSB; reverse so the MSB comes first,
            # matching the hard-decision bit order.
            demod_bits = torch.stack(llrs[::-1], dim=1).view(-1)
        else:
            raise ValueError("demod type must be 'hard' or 'soft'")
        return demod_bits


class QAMModem(Modem):
    """Square Gray-coded M-QAM modem built on the torch ``Modem`` base class."""

    def __init__(self, m, device="cuda"):
        """
        Parameters
        ----------
        m : int
            Constellation size; must be an even power of 2 (4, 16, 64, ...).
        device : str or torch.device
            Device the constellation lives on (default ``"cuda"``).

        Raises
        ------
        ValueError
            If ``m`` is not a perfect square (only square QAM is supported).
        """
        # Build the grid with plain numpy: the previous torch version created
        # tensors on `device` only to copy them back via .cpu().numpy(), and
        # passed 0-dim tensors as torch.arange bounds.
        num_symb_pam = int(round(np.sqrt(m)))
        if num_symb_pam * num_symb_pam != m:
            raise ValueError('m must lead to a square QAM.')
        # 1-D PAM amplitude levels: -n+1, -n+3, ..., n-1.
        pam = np.arange(-num_symb_pam + 1, num_symb_pam, 2)
        # Imaginary part zig-zags up then down along successive columns;
        # real part repeats each PAM level once per column.
        imag = np.tile(np.hstack((pam, pam[::-1])), num_symb_pam // 2)
        real = np.repeat(pam, num_symb_pam)
        super().__init__(real + 1j * imag, device)


def plot_qam_compare(code_lengths: list,
                     save_path=r"F:\PCL\month\2025\25-07\qam_encode_decode.png"):
    """Benchmark commpy vs torch 16-QAM (de)modulation and save a comparison plot.

    Parameters
    ----------
    code_lengths : list
        Bit-stream lengths (numbers of random bits) to benchmark.
    save_path : str
        Destination of the saved figure; the default preserves the path
        previously hard-coded in the body.
    """
    # Local imports so the function works even when the __main__ block
    # (which used to provide `time` and `plt` as globals) never ran.
    import time
    import matplotlib.pyplot as plt

    commpy_qam_encode, commpy_qam_decode = [], []
    torch_qam_encode, torch_qam_decode = [], []
    commpy_qam = commpy.QAMModem(16)
    torch_qam = QAMModem(16, device="cuda")
    for code_length in code_lengths:
        bits = np.random.randint(0, 2, code_length)

        t1 = time.time()
        modu1 = commpy_qam.modulate(bits)
        t2 = time.time()
        commpy_qam_encode.append(t2 - t1)

        bits_gpu = torch.tensor(bits, device="cuda")
        t3 = time.time()
        modu2 = torch_qam.modulate(bits_gpu)
        t4 = time.time()
        torch_qam_encode.append(t4 - t3)

        t5 = time.time()
        # Fixed: previously called the undefined global `qam1`.
        commpy_qam.demodulate(modu1, demod_type="hard")
        t6 = time.time()
        commpy_qam_decode.append(t6 - t5)

        # Fixed: previously called the undefined global `qam`.
        # NOTE(review): CUDA kernels launch asynchronously, so wall-clock
        # timing may undercount GPU work without torch.cuda.synchronize()
        # before reading the clock — confirm whether that is intended.
        torch_qam.demodulate(modu2, demod_type="hard")
        t7 = time.time()
        torch_qam_decode.append(t7 - t6)

    code_len_m = [x / 1e6 for x in code_lengths]
    _, ax = plt.subplots(1, 2, figsize=(8, 4))
    # Left panel: modulation (encode) timings.
    ax[0].plot(code_len_m, commpy_qam_encode, 'b-^', label='commpy QAM encode')
    ax[0].plot(code_len_m, torch_qam_encode, 'r--*', label='torch QAM encode')
    ax[0].set_xlabel("Code Length (million bits)")
    ax[0].set_ylabel("Time (s)")
    ax[0].set_title("QAM Encode")
    ax[0].legend()
    # Right panel: demodulation (decode) timings.
    ax[1].plot(code_len_m, commpy_qam_decode, 'b-^', label='commpy QAM decode')
    ax[1].plot(code_len_m, torch_qam_decode, 'r--*', label='torch QAM decode')
    ax[1].set_xlabel("Code Length (million bits)")
    ax[1].set_ylabel("Time (s)")
    ax[1].set_title("QAM Decode")
    ax[1].legend()
    plt.tight_layout()
    plt.savefig(save_path, dpi=600)


if __name__ == '__main__':
    import time
    import matplotlib.pyplot as plt
    import os

    # Work around duplicate OpenMP runtimes loaded by torch + matplotlib
    # on Windows (otherwise Intel MKL aborts the process).
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

    # Fall back to CPU so the sanity checks still run without a GPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    qam1 = commpy.QAMModem(16)
    qam = QAMModem(16, device=device)
    # The torch constellation must reproduce commpy's Gray-coded layout exactly.
    print(np.allclose(qam1.constellation, qam.constellation.detach().cpu().numpy()))

    bits = np.random.randint(0, 2, 10_000_000)
    t1 = time.time()
    modu1 = qam1.modulate(bits)
    demodu1 = qam1.demodulate(modu1, demod_type="hard")
    t2 = time.time()

    bits1 = torch.tensor(bits, dtype=torch.int64, device=device)
    modu2 = qam.modulate(bits1)
    demodu2 = qam.demodulate(modu2, demod_type="hard")
    t3 = time.time()

    # Round-trip equivalence between the two implementations.
    print(np.allclose(modu1, modu2.detach().cpu().numpy()))
    print(np.allclose(demodu1, demodu2.detach().cpu().numpy()))
    print(f"Commpy consuming: {t2 - t1} secs.")
    print(f"Torch consuming: {t3 - t2} secs.")

    code_lengths = [100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000]
    plot_qam_compare(code_lengths)
