
"""
    hMultiUserSelectionPUSCH(csi, tddPattern, carrier, PUSCHs, bsAntSize)

Perform multi-user selection for PUSCH scheduling.

For a full uplink ("U") slot, select one UE per PRB using a
proportional-fair (PF) metric computed from SRS-derived CSI, then update
each scheduled UE's PUSCH configuration in place (symbol allocation, PRB
set, number of layers, DM-RS port set and scrambling identities). For
non-uplink slot types ("D", "S", "SC") nothing is scheduled.

# Arguments
- `csi`: per-UE Dicts holding `"H"` (CRB×1×P×R channel estimates, NaN
  where no SRS measurement exists) and `"nVarPUSCH"` noise estimates.
- `tddPattern`: cyclic pattern of slot-type strings indexed by `:NSlot`.
- `carrier`: Dict with `:NSlot`, `:SymbolsPerSlot`, `:NSizeGrid`,
  `:NStartGrid`.
- `PUSCHs`: per-UE Dicts with a `"Config"` sub-Dict (mutated in place).
- `bsAntSize`: BS antenna-panel dimensions; `prod(bsAntSize)` antennas.

# Returns
`(active, PUSCHs)`: `active` is a 1×K `Matrix{Int64}` of scheduled UE
indices (1×0 for non-uplink slots), `PUSCHs` is the mutated input.
"""
function hMultiUserSelectionPUSCH(csi, tddPattern, carrier, PUSCHs, bsAntSize)
    # Slot type for the current slot; only "U" slots carry uplink symbols
    slotType = tddPattern[mod(carrier[:NSlot], length(tddPattern)) + 1]
    nSymbolsUL = slotType == "U" ? carrier[:SymbolsPerSlot] : 0

    # Nothing to schedule in downlink / special / SC slots
    if nSymbolsUL == 0
        return zeros(Int, 1, 0), PUSCHs
    end

    # Number of BS antennas
    nTxAnts = prod(bsAntSize)

    # Replace NaNs in CSI with nearest non-NaN values, to provide CSI in
    # RBs outside of SRS bandwidth
    numUEs = length(PUSCHs)
    for ue in 1:numUEs
        # NaN pattern taken from the first antenna pair — assumes all
        # antenna pairs share the same SRS coverage (TODO confirm)
        H2D = csi[ue]["H"][:, :, 1, 1]
        nanIdx = findall(isnan.(H2D))
        nonNanIdx = findall(.!isnan.(H2D))
        # Guard: nothing to patch (also avoids argmin on an empty list
        # when a UE has complete CSI)
        isempty(nanIdx) && continue

        # Convert CartesianIndex{2} to linear indices so "nearest" can be
        # computed as a simple integer distance
        lin = LinearIndices(H2D)
        nanLin = lin[nanIdx]
        nonNanLin = lin[nonNanIdx]
        replaceIdx = [nonNanLin[argmin(abs.(nonNanLin .- x))] for x in nanLin]

        csi[ue]["nVarPUSCH"][nanIdx, :, :] = csi[ue]["nVarPUSCH"][replaceIdx, :, :]
        csi[ue]["H"][nanIdx, :, :, :] = csi[ue]["H"][replaceIdx, :, :, :]
    end

    numLayers = [pusch["Config"]["NumLayers"] for pusch in PUSCHs]
    totLayers = sum(numLayers)

    NPRB = carrier[:NSizeGrid]
    allUEs = fill(NaN, NPRB, 1)       # NaN = PRB not allocated to any UE

    # Small positive start value avoids division by zero in the PF metric
    servedThroughput = fill(1e-6, numUEs)
    # 0 = no UE selected yet (the original used the type object `Int` as a
    # sentinel and a spurious `global` declaration, which crashed with a
    # MethodError if the first PRB produced only NaN metrics)
    selectedUE = 0

    # For each PRG
    for PRB in 1:NPRB
        # Create channel estimates 'H' and noise estimates 'nVar' across
        # all layers, and create array 'ueLayers' which indicates which
        # layers of the overall estimates correspond to each UE
        H = zeros(ComplexF64, totLayers, nTxAnts)
        nVar = zeros(ComplexF64, totLayers, 1)
        ueLayers = Vector{Vector{Int}}(undef, numUEs)
        layer = 1
        for ue in 1:numUEs
            firstLayer = layer
            for nu in 1:numLayers[ue]
                H[layer, :] = csi[ue]["H"][PRB + carrier[:NStartGrid], 1, :, nu]
                nVar[layer] = csi[ue]["nVarPUSCH"][PRB + carrier[:NStartGrid], 1, nu]
                layer += 1
            end
            ueLayers[ue] = collect(firstLayer:(layer - 1))
        end

        # Per-layer signal power: diagonal of the Gram matrix H * H'
        HH = H * H'
        signalPowers = [HH[i, i] for i in 1:minimum(size(HH))]

        # Compute total signal power, noise power, and SNR for each UE
        uePowers = [sum(signalPowers[ueLayers[ue]]) for ue in 1:numUEs]
        ueNoise = [sum(nVar[ueLayers[ue]]) for ue in 1:numUEs]
        ueSNR = [complex_divide(uePowers[ue], ueNoise[ue]) for ue in 1:numUEs]

        ueCapacity = capacity(ueSNR)
        metricPF = ueCapacity ./ servedThroughput

        # Pick the UE with the largest PF metric, skipping NaN entries
        maxPF = -Inf
        maxIdx = 0
        for ue in 1:numUEs
            m = abs(metricPF[ue])
            if !isnan(m) && m > maxPF
                maxPF = m
                maxIdx = ue
            end
        end
        # If every metric was NaN, keep the UE selected for a previous PRB
        if maxIdx != 0
            selectedUE = maxIdx
        end

        # Leave the PRB unallocated if no UE has ever been selectable
        if selectedUE != 0
            servedThroughput[selectedUE] += ueCapacity[selectedUE]
            allUEs[PRB] = selectedUE
        end
    end

    # DM-RS port assignment order; ports are reused cyclically across UEs
    p = [0, 1, 4, 5, 2, 3, 6, 7]
    # Port allocation per UE
    ports = Vector{Vector{Int}}(undef, numUEs)
    # Scrambling identity per UE
    ID = zeros(1, numUEs)
    # Running port index
    c = 0

    # Assign ports and scrambling identities to each UE
    for ue in 1:numUEs
        # Number of layers required by this UE
        nu = numLayers[ue]

        # Assign `nu` ports, wrapping around the available port list
        ports[ue] = [p[mod(c + i, length(p)) + 1] for i in 0:(nu - 1)]

        # Scrambling identity grows with each full reuse of the port list
        ID[ue] = floor(c / length(p))

        # Advance so the next UE gets different ports
        c += nu
    end

    # Establish which UEs are scheduled
    active = convert(Matrix{Int64}, reshape(unique(allUEs[.!isnan.(allUEs[:])]), 1, :))

    # For each scheduled UE
    for ue in active
        # Configure the symbol allocation according to the TDD configuration
        PUSCHs[ue]["Config"]["SymbolAllocation"] = [0, nSymbolsUL]

        # Configure the set of PRBs allocated to this UE (0-based)
        hits = findall(allUEs .== ue)
        prg = [ci[1] for ci in hits]
        PUSCHs[ue]["Config"]["PRBSet"] = sort(unique(prg)) .- 1

        # Configure the number of layers
        nu = numLayers[ue]
        PUSCHs[ue]["Config"]["NumLayers"] = nu

        # Configure DM-RS port set
        PUSCHs[ue]["Config"]["DMRS"]["DMRSPortSet"] = ports[ue]

        # Configure scrambling identities if required
        if ID[ue] != 0
            PUSCHs[ue]["Config"]["DMRS"]["NIDNSCID"] = ID[ue]
            PUSCHs[ue]["Config"]["DMRS"]["NSCID"] = 1
        end
    end

    return active, PUSCHs
end

# Capacity per the Shannon–Hartley theorem. The bandwidth B is omitted
# because it is identical in every context where capacities are compared.
capacity(SINR) = log2.(1 .+ SINR)

"""
    complex_divide(z1, z2)

Divide two complex numbers, mapping division of a nonzero numerator by
exactly `0 + 0im` onto directional infinities instead of the NaNs that
standard complex division would produce:

- purely real numerator      → `Inf + 0im`
- purely imaginary numerator → `0 + Inf*im`
- general complex numerator  → `Inf + Inf*im`

`(0 + 0im) / (0 + 0im)` and all nonzero denominators use `z1 / z2`.
"""
function complex_divide(z1::Complex{T}, z2::Complex{T}) where T
    if real(z2) == 0 && imag(z2) == 0
        hasRe = real(z1) != 0
        hasIm = imag(z1) != 0
        if hasRe && !hasIm
            return Inf + 0im          # purely real numerator
        elseif !hasRe && hasIm
            return 0 + Inf*im         # purely imaginary numerator
        elseif hasRe && hasIm
            return Inf + Inf*im       # general complex numerator
        end
        # 0/0 falls through to the standard division below
    end
    # Standard complex division
    return z1 / z2
end
############################################################################################################
# ---- Scenario configuration ----
numUEs = 5;  
bsAntSize = [16 8 2]   # BS antenna panel dimensions (prod = 256 antennas)
tddPattern = ["SC" "D" "D" "S" "U" "SC" "D" "D" "S" "U"]   # slot types cycled by NSlot
numLayers = [1,4,4,2,4]   # PUSCH layers per UE
# Broadcasting the 5-element column against the 1×3 row [4 2 1] yields a
# 5×3 matrix of per-UE antenna-panel dimensions (each entry is 1 or 2).
ueAntSizes = 1 .+ (numLayers .> [4 2 1])

"""
    setupCSI(carrier, bsAntSize, ueAntSizes)

Set up the per-UE record of CSI obtained via SRS.

# Arguments
- `carrier`: Dict with `:NSizeGrid` and `:NStartGrid` (their sum is the
  number of CRBs covered, `NCRB`).
- `bsAntSize`: BS antenna-panel dimensions; `prod(bsAntSize)` is the
  number of BS antennas `P`.
- `ueAntSizes`: one row per UE; the row product is that UE's antenna
  count `R`.

# Returns
A `Vector{Dict{String, Any}}`, one Dict per UE, with fields:
- `"H"`         : NCRB×1×P×R channel estimates, initialised to NaN
                  (NaN marks CRBs with no SRS measurement yet)
- `"nVar"`      : NCRB×1×R noise-variance estimates
- `"nVarPUSCH"` : NCRB×1×R noise-variance estimates for PUSCH
- `"NSlot"`     : NCRB×1 slot of each estimate (NaN = none yet)
"""
function setupCSI(carrier, bsAntSize, ueAntSizes)
    NCRB = carrier[:NSizeGrid] + carrier[:NStartGrid]
    numUEs = size(ueAntSizes, 1)   # one row of ueAntSizes per UE

    csi = Vector{Dict{String, Any}}(undef, numUEs)
    P = prod(bsAntSize)            # number of BS antennas

    for ue in 1:numUEs
        R = prod(ueAntSizes[ue, :])   # number of antennas for this UE

        # Construct the declared element type directly (the original built
        # Dict{Any, Any} and relied on implicit conversion on assignment)
        csi[ue] = Dict{String, Any}(
            "H" => fill(NaN, NCRB, 1, P, R),
            "nVar" => zeros(Int, NCRB, 1, R),
            "nVarPUSCH" => zeros(Int, NCRB, 1, R),
            "NSlot" => fill(NaN, NCRB, 1)
        )
    end

    return csi
end

# Carrier configuration (30 kHz subcarrier spacing numerology)
carrier=Dict{Any, Any}(
    :NCellID => 1,                      # physical-layer cell identity
    :SubcarrierSpacing => 30,           # kHz
    :CyclicPrefix => "normal",
    :NSizeGrid => 51,                   # carrier grid size in RBs
    :NStartGrid => 0,                   # CRB offset of the grid
    :NSlot => 4,                        # current slot number
    :NFrame => 0,
    :IntraCellGuardBands => [0.0 0.0],

    :SymbolsPerSlot => 14,
    :SlotsPerSubframe => 2,
    :SlotsPerFrame => 20
)
using MAT
csi = setupCSI(carrier, bsAntSize, ueAntSizes)
# Load the MATLAB reference CSI and copy it into the per-UE records.
# NOTE(review): hard-coded absolute Windows path — consider a relative
# path or a command-line argument.
data_file = matread("E:\\学位打工仔\\julia转写\\5G\\code\\hMultiUserSelectionPUSCHData.mat")
csii = data_file["csi"]
for ue in 1:numUEs
    csi[ue]["H"] = csii["H"][ue]
    # NOTE(review): the original assigned csii["NSlot"][1] to every UE,
    # while "H" is indexed per UE — possibly a copy-paste slip. Behaviour
    # preserved here; confirm whether csii["NSlot"][ue] was intended.
    csi[ue]["NSlot"] = csii["NSlot"][1]
end
# Define the PUSCHs data: one configuration Dict per UE
PUSCHs = [
    Dict(
        "Config" => Dict(
            "NID" => [],
            "NSizeBWP" => [],
            "NStartBWP" => [],
            "Modulation" => "64QAM",
            "TransformPrecoding" => 0,
            "TransmissionScheme" => "nonCodebook",
            "NumAntennaPorts" => 1,
            "TPMI" => 0,
            "CodebookType" => "codebook1_n...",
            "BetaOffsetACK" => 20,
            "BetaOffsetCSI1" => 6.2500,
            "BetaOffsetCSI2" => 6.2500,
            "UCIScaling" => 1,
            "NRAPID" => [],
            "DMRS" => Dict(
                "DMRSConfigurationType" => 1,
                "GroupHopping" => 0,
                "SequenceHopping" => 0,
                "NRSID" => 0,
                "NumCDMGroupsWithoutData" => 2,
                "DMRSUplinkR16" => 0,
                "DMRSUplinkTransformPrecodingR16" => 0,
                "DMRSTypeAPosition" => 2,
                "DMRSAdditionalPosition" => 1,
                "DMRSLength" => 2,
                "CustomSymbolSet" => 1,
                "DMRSPortSet" => 1,
                "NDNSCID" => 1,
                "NSCID" => 0,
                "DMRSEnhancedR18" => 0,
                "CDMGroups" => 0,
                "DeltaShifts" => 0,
                "FrequencyWeights" => [1, 1],
                "TimeWeights" => [1, 1],
                "DMRSSubcarrierLocations" => [0, 2, 4, 6, 8, 10],
                "CDMLengths" => [1, 1]
            ),
            "PTRS" => Dict(
                "NumPTRSamples" => 2,
                "NumPTRSGroups" => 2,
                "PTRSPortSet" => [],
                "NID" => [],
                "TimeDensity" => 1,
                "FrequencyDensity" => 2,
                "REOffset" => "00"
            ),
            "NumLayers" => 1,
            "MappingType" => "A",
            "SymbolAllocation" => [0, 14],
            # Assumed PRBSet of 1:52 — overwritten by the scheduler for
            # every scheduled UE (NOTE(review): PRB numbering elsewhere in
            # this file is 0-based; confirm this placeholder is intended)
            "PRBSet" => collect(1:52),
            "RNTI" => 1,
            "EnablePTRS" => 0,
            "NumCodewords" => 1,
            "FrequencyHopping" => "neither",
            "SecondHopStartPRB" => 1,
            "Interlacing" => 0,
            "InterlaceIndex" => 0,
            "RBSetIndex" => 0
        ),
        "Extension" => Dict(
            "TargetCodeRate" => 0.7539,
            "XOverhead" => 0
        )
    ) for _ in 1:5  # create 5 identical Dicts
]
# Apply the per-UE layer counts defined above
for i in 1:5
    PUSCHs[i]["Config"]["NumLayers"]=numLayers[i]
end
# Run the multi-user selection. The original invoked this via Debugger.jl's
# `@run` macro, which fails with an UndefVarError here because the file
# never does `using Debugger`; a plain call is the intended behaviour.
active, PUSCHss = hMultiUserSelectionPUSCH(csi, tddPattern, carrier, PUSCHs, bsAntSize)

