#!/usr/bin/env python3
#-*- coding: utf-8 -*-
import argparse
from collections import OrderedDict
from omegaconf import OmegaConf
import os
import time
from math import pi, sqrt

import torch

import sys
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_dir)
from transcoder.utils import distributed as dist_utils
from transcoder.utils import config as config_utils
from transcoder.data.comm_channel import SV_Channel
from transcoder.losses.nmse import NMSE
from transcoder.utils.meters import AverageMeter, ProgressMeter
from transcoder.utils.misc import check_loss_nan, get_metrics_dict, get_grad_norm, tensor2image

import wandb

# Training launch example:
# torchrun --nproc_per_node=1 --nnodes=1 --node_rank=0 scripts/main_comm_estimator.py --config configs/estimator/sv_channel_ce_lamp_gmmv_pilot32_snr10_path2.yaml --output-dir ./outputs/comm_estimator/sv_channel_ce_lamp_gmmv_pilot32_snr10_path2 

def get_args_parser():
    """Build the CLI argument parser for the channel-estimator trainer.

    Returned without ``-h`` support (``add_help=False``) so it can be used
    as a parent parser by the final entry-point parser.
    """
    p = argparse.ArgumentParser(description='Channel Estimator', add_help=False)

    # experiment configuration / checkpointing
    p.add_argument('--config', type=str, help='config')
    p.add_argument('--output-dir', default='./', help='Output directory')
    p.add_argument('--resume', default=None, type=str, help='checkpoint to resume')
    p.add_argument('--eval-freq', default=200, type=int, help='evaluation frequency')
    p.add_argument('--save-freq', default=200, type=int, help='save frequency')

    # system
    p.add_argument('--start-iter', default=0, type=int, help='starting iteration')
    p.add_argument('--print-freq', default=50, type=int, help='print frequency')
    p.add_argument('--vis-freq', default=0, type=int, help='visualization frequency')
    p.add_argument('-j', '--workers', default=8, type=int, metavar='N',
                   help='number of data loading workers per process')
    p.add_argument('--evaluate', action='store_true', help='eval only')
    p.add_argument('--world-size', default=1, type=int,
                   help='number of nodes for distributed training')
    p.add_argument('--rank', default=0, type=int,
                   help='node rank for distributed training')
    # underscore form kept for torchrun compatibility
    p.add_argument('--local_rank', type=int, default=0)
    p.add_argument('--dist-url', default='env://', type=str,
                   help='url used to set up distributed training')
    p.add_argument('--dist-backend', default='nccl', type=str)
    p.add_argument('--seed', default=0, type=int)
    p.add_argument('--gpu', default=0, type=int, help='GPU id to use.')
    p.add_argument('--no-wandb', action='store_true', help='disable wandb')
    return p

def _simulate_observations(batch, Nc, Nt, N, L, sigma, factor, S_analog, S_digital, A_t):
    """Draw one random channel batch plus its noisy pilot observations.

    Shared by the training and evaluation paths of ``main`` (this code was
    previously duplicated inline in both places). RNG call order (channel
    first, then one noise draw per pilot slot) matches the original code.

    Args:
        batch: number of channel realizations to draw.
        Nc, Nt: subcarrier / transmit-antenna counts.
        N: number of propagation paths passed to ``SV_Channel``.
        L: number of pilot slots.
        sigma: noise power (1 / SNR).
        factor: normalization factor applied to Y and Theta.
        S_analog, S_digital: current analog / digital pilot tensors.
        A_t: DFT dictionary used to sparsify the channel.

    Returns:
        (H, Y, Theta): ground-truth channel from ``SV_Channel``,
        normalized observations of shape (batch, Nc, L), and the
        normalized sensing matrix ``Phi @ A_t``.
    """
    H = SV_Channel(batch, Nc, N, [8, 8], 1)  # project-supplied channel generator
    Y = torch.zeros(H.shape[0], Nc, L).cuda() + 0j
    Phi = torch.zeros(Nc, L, Nt).cuda() + 0j
    for l in range(L):
        # Effective pilot of slot l: analog phase shifts applied to the
        # digital pilot, reshaped to [Nc, Nt, 1].
        s = (torch.exp(1j * S_analog[l, :, :, :]) @ S_digital[l, :, :, :]).reshape(Nc, Nt, 1)
        # Cap the pilot energy at Nc (note |s * s| == |s|**2 for complex s).
        s_sigma = torch.sqrt(torch.sum(torch.abs(s * s)))
        s = s / s_sigma.reshape(1, 1, 1) * torch.min(s_sigma, torch.tensor(sqrt(Nc))).reshape(1, 1, 1)
        # Circularly-symmetric complex AWGN with total power sigma.
        n = (torch.randn(H.shape[0], Nc) + 1j * torch.randn(H.shape[0], Nc)).cuda() * sqrt(sigma / 2)
        a = s.permute(0, 2, 1)
        Y[:, :, l] = (a @ H.reshape(-1, Nc, Nt, 1)).reshape(-1, Nc) + n  # received pilot signal
        Phi[:, l, :] = a[:, 0, :]  # sensing-matrix row for slot l
    Theta = (Phi @ A_t) / sqrt(factor)  # normalized sensing matrix
    return H, Y / sqrt(factor), Theta


def main(args):
    """Train a GMMV-LAMP channel estimator stage by stage.

    Outer loop index ``T`` grows the network: ``T == 0`` trains the
    single-layer model; each later ``T`` wraps the best checkpoint of the
    previous stage inside the multi-layer model and continues training,
    reloading the pilot tensors that were saved alongside that checkpoint.
    """
    dist_utils.init_distributed_mode(args)
    dist_utils.random_seed(args.seed, dist_utils.get_rank())

    config = OmegaConf.load(args.config)
    print(config)
    print(args)

    # Optional experiment tracking (main process only).
    if not args.no_wandb and config.get("wandb", None) is not None and dist_utils.is_main_process():
        run_name = f"{config.wandb.get('run', 'anonymous')}-{time.strftime('%Y-%m-%d-%H-%M-%S')}"
        try:
            wandb.init(
                # set the wandb project where this run will be logged
                project=config.wandb.get("project", "videocoding"),
                name=run_name,
                # track hyperparameters and run metadata
                config=OmegaConf.to_container(config, resolve=True),
                # save python code files
                settings=wandb.Settings(code_dir=".")
            )
            with_wandb = True
        except Exception:
            # Was a bare ``except:``, which would also have swallowed
            # KeyboardInterrupt / SystemExit.
            print("Failed to initialize wandb")
            with_wandb = False
    else:
        print("Not using wandb; set wandb in the config to use.")
        with_wandb = False
    # NOTE(review): with_wandb is never consulted below — metrics are not
    # actually logged to wandb in this script.

    os.makedirs(config.model.save_path, exist_ok=True)

    # Communication-system parameters.
    Nc = config.params.Nc          # number of subcarriers
    Nt = config.params.Nt          # number of transmit antennas
    Nr = config.params.Nr          # number of receive antennas (unused here)
    SNR_dB = config.params.SNR_dB  # signal-to-noise ratio in dB
    SNR = 10 ** (SNR_dB / 10)      # linear SNR
    K = config.params.K            # number of users
    sigma = 1 / SNR                # noise power
    pho = config.params.pho        # sparsity ratio
    L = config.params.pilots       # number of pilot slots
    N = config.params.paths        # number of propagation paths

    factor = 100 * L / 32  # normalization factor

    # Build the 2-D DFT dictionary A_t used to sparsify the channel.
    # NOTE(review): the kron of two length-8 rows is reshaped to Nt, which
    # only works when Nt == 64 — confirm against the configs in use.
    N_2 = int(pho * 8)  # per-dimension dictionary size (sparsity control)
    A = torch.zeros(Nt, N_2 * N_2).cuda() + 0j
    for i in range(N_2):
        for j in range(N_2):
            a_h = torch.arange(0, 8).reshape(1, 8).cuda() / 8 / pho * i * 2 * pi
            a_v = torch.arange(0, 8).reshape(1, 8).cuda() / 8 / pho * j * 2 * pi
            a = torch.kron(a_h, a_v).reshape(Nt)  # 2-D steering phases via Kronecker product
            A[:, i * N_2 + j] = torch.exp(1j * a)
    A_t = A  # DFT dictionary

    # Initialize pilot signals.
    S_digital = torch.randn(L, Nc, K, 1).cuda() + 1j * torch.randn(L, Nc, K, 1).cuda()  # digital pilots
    S_analog = torch.rand(L, 1, Nt, 4).cuda() * 2 * pi  # analog pilot phases

    # Initial sensing matrix for model construction (no channel / noise
    # involved here, so the helper above is deliberately not used).
    Phi = torch.zeros(Nc, L, Nt).cuda() + 0j
    for l in range(L):
        s = (torch.exp(1j * S_analog[l, :, :, :]) @ S_digital[l, :, :, :]).reshape(Nc, Nt, 1)  # pilot [Nc, Nt, 1]
        s_sigma = torch.sqrt(torch.sum(torch.abs(s * s)))
        s = s / s_sigma.reshape(1, 1, 1) * torch.min(s_sigma, torch.tensor(sqrt(Nc))).reshape(1, 1, 1)
        Phi[:, l, :] = s[:, :, 0]  # fill sensing matrix
    Theta = Phi @ A_t
    Theta = Theta / sqrt(factor)  # normalized sensing matrix

    # ``iter`` previously shadowed the builtin; renamed.
    global_iter = args.start_iter
    batch_time = AverageMeter('Time', ':6.2f')
    model_time = AverageMeter('Model', ':6.2f')
    mem = AverageMeter('Mem', ':6.1f')
    grad_norm = AverageMeter('GradNorm', ':6.2f')
    metric_names = [
        'train/total_loss', 'train/nmse_loss',
    ]
    metrics = OrderedDict([(name, AverageMeter(name, ':.2e')) for name in metric_names])
    progress = ProgressMeter(
        config.train.max_iter,
        [batch_time, model_time, mem, grad_norm, *metrics.values()],
        prefix="iter: ",
    )

    for T in range(args.start_iter, config.train.max_iter):
        end = time.time()
        if T == 0:
            # First stage: single-layer model built from the initial pilots.
            config.model.single.params.Theta = {
                "real": Theta.real.tolist(),
                "imag": Theta.imag.tolist()
            }
            config.model.single.params.N = Nt * pho * pho
            model = config_utils.instantiate_from_config(config.model.single)
            model.cuda(args.gpu)
        else:
            # Later stages: wrap the best checkpoint of the previous stage
            # and restore the pilot tensors saved together with it.
            model_1 = os.path.join(config.model.save_path, f"GMMV_LAMP{T}_{L}pilots_{N}paths_{SNR_dB}SNR.pth")
            config.model.multi.params.Theta = {
                "real": Theta.real.tolist(),
                "imag": Theta.imag.tolist()
            }
            config.model.multi.params.N = Nt * pho * pho
            config.model.multi.params.net = model_1
            model = config_utils.instantiate_from_config(config.model.multi)
            S_digital = torch.load(os.path.join(config.model.save_path, f"S_digital{L}pilots_{N}paths_{SNR_dB}SNR.pth"))
            S_analog = torch.load(os.path.join(config.model.save_path, f"S_analog{L}pilots_{N}paths_{SNR_dB}SNR.pth"))
            model.cuda(args.gpu)

        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)  # Adam optimizer
        best_nmse = 2  # loose upper bound; any real NMSE below this is saved

        for step in range(config.train.Step):
            tic = time.time()
            model.train()
            # Fresh channel batch + noisy observations for this step.
            H, Y, Theta = _simulate_observations(
                config.train.batch, Nc, Nt, N, L, sigma, factor, S_analog, S_digital, A_t)

            # Forward pass: estimate the sparse channel, then project back
            # to the antenna domain through the dictionary A_t.
            H_t_hat, V = model(Y, Theta)
            H_t_hat = H_t_hat.permute(0, 2, 1).reshape(Y.shape[0], Nc, Nt * pho * pho, 1)
            H_hat = (A_t @ H_t_hat).reshape(-1, Nc, Nt)
            loss = NMSE(H_hat, H)  # NMSE training loss
            check_loss_nan(loss)

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            model_time.update(time.time() - tic)
            batch_time.update(time.time() - end)
            grad_norm.update(get_grad_norm(model))
            metrics['train/total_loss'].update(loss.item())
            metrics['train/nmse_loss'].update(loss.item())
            end = time.time()
            mem.update(torch.cuda.max_memory_allocated())

            if step % args.print_freq == 0:
                progress.display(global_iter)

            if step % args.eval_freq == 0:
                model.eval()
                with torch.inference_mode():
                    # Fresh evaluation batch, same generation procedure.
                    H, Y, Theta = _simulate_observations(
                        config.train.batch, Nc, Nt, N, L, sigma, factor, S_analog, S_digital, A_t)
                    H_t_hat, V = model(Y, Theta)
                    H_t_hat = H_t_hat.permute(0, 2, 1).reshape(Y.shape[0], Nc, Nt * pho * pho, 1)
                    H_hat = (A_t @ H_t_hat).reshape(-1, Nc, Nt)
                    loss = NMSE(H_hat, H)

                    # Keep the best model of this stage; pilots are saved
                    # with it so stage T+1 reloads a consistent pair.
                    if loss < best_nmse:
                        best_nmse = loss
                        torch.save(model, os.path.join(config.model.save_path, f"GMMV_LAMP{T+1}_{L}pilots_{N}paths_{SNR_dB}SNR.pth"))
                        torch.save(S_digital, os.path.join(config.model.save_path, f"S_digital{L}pilots_{N}paths_{SNR_dB}SNR.pth"))
                        torch.save(S_analog, os.path.join(config.model.save_path, f"S_analog{L}pilots_{N}paths_{SNR_dB}SNR.pth"))
                        print('Model saved!')
        global_iter += 1
    progress.synchronize()

if __name__ == '__main__':
    # Compose the final parser (adds -h/--help on top of get_args_parser).
    cli_parser = argparse.ArgumentParser(
        description='Channel Estimator', parents=[get_args_parser()])
    cli_args = cli_parser.parse_args()
    os.makedirs(cli_args.output_dir, exist_ok=True)
    main(cli_args)
