#!/usr/bin/env python
# coding: utf-8

# In[1]:


import math

import numpy as np
import pandas as pd
from scipy.io import loadmat

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from torch.utils.data import TensorDataset, random_split


# # Load the dataset

# In[2]:


# Load the MATLAB dataset. NOTE(review): the path is relative to the
# notebook's working directory — TODO confirm it exists on this machine.
data = loadmat('../autodl-tmp/NetworkData.mat')
print(type(data))
print(data.keys())

# Cast to complex64 and insert a singleton channel axis.
# (assumes the .mat arrays are 3-D, sample-major — TODO confirm)
feature = torch.from_numpy(data['feature']).to(torch.complex64).unsqueeze(1)
labels = torch.from_numpy(data['Label']).to(torch.complex64).unsqueeze(1)
# Normalization: min-max normalize (computed along dim=2)
def min_max_normalize(tensor):
    """Min-max normalize *tensor* along dim=2 (per-slice over that axis).

    Args:
        tensor: real tensor of rank >= 3; min/max are reduced over dim=2
            with keepdim so broadcasting restores the original shape.

    Returns:
        Tensor of the same shape, scaled into [0, 1] along dim=2.

    Bug fix: when a slice is constant (max == min) the original divided by
    zero and produced NaN/Inf; such slices now map to 0.
    """
    min_val = tensor.min(dim=2, keepdim=True).values
    max_val = tensor.max(dim=2, keepdim=True).values
    span = max_val - min_val
    # Replace zero spans with 1 so constant slices normalize to 0, not NaN.
    span = torch.where(span == 0, torch.ones_like(span), span)
    return (tensor - min_val) / span

# Normalize real and imaginary parts separately (the network operates on
# real-valued tensors).
feature_real = min_max_normalize(feature.real)
feature_imag = min_max_normalize(feature.imag)
labels_real = min_max_normalize(labels.real)
labels_imag = min_max_normalize(labels.imag)

# Stack real/imag as two channels: (N, 2, H, W).
feature = torch.cat([feature_real, feature_imag], dim=1)
labels = torch.cat([labels_real, labels_imag], dim=1)

print(feature.shape)
print(torch.max(feature))
print(torch.min(feature))

dataset = TensorDataset(feature, labels)
len_dataset = len(dataset)
# 80/20 train/validation split; sizes must be ints for random_split.
train_size = int(0.8 * len_dataset)
valid_size = len_dataset - train_size
train_data, valid_data = random_split(dataset, [train_size, valid_size])


# # Define the network

# In[3]:


# Encoder class
class Encoder(nn.Module):
    """Two stride-2 convolutions (each followed by ReLU) that compress a
    (B, 2, H, W) input down to (B, C_, H/4, W/4)."""

    def __init__(self, C_):
        super(Encoder, self).__init__()
        self.conv1 = nn.Conv2d(2, 16, kernel_size=4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, C_, kernel_size=4, stride=2, padding=1)

    def forward(self, X):
        out = torch.relu(self.conv1(X))
        out = self.conv2(out)
        return torch.relu(out)

# Quantization / codeword-conversion class
class ReshapeAndQuant(nn.Module):
    """Flatten the encoder output and uniformly quantize it to q-bit codewords.

    The companion ``DequanAndReshape`` reconstructs values as ``mu * v_bit``,
    so this module must emit integer *indices* ``floor(v / mu)``. The
    original returned ``round(mu * floor(v / mu))`` — values already scaled
    by ``mu`` — which the dequantizer then scaled by ``mu`` again, distorting
    the reconstruction.

    NOTE(review): the range Z is taken from the global min/max of the current
    batch, and reconstruction ignores the minimum offset (encoder outputs are
    ReLU'd, hence >= 0) — TODO confirm this matches the intended codec.
    """
    def __init__(self, q_):
        super(ReshapeAndQuant, self).__init__()
        self.q = q_  # number of quantization bits

    def forward(self, X, v):
        # `v` is unused; kept for backward compatibility (JDCNet passes the
        # network input as a second argument, which was always discarded).
        v = X.view(X.size(0), -1)            # flatten per sample
        Z = torch.max(v) - torch.min(v)      # dynamic range of this batch
        mu = Z / (2 ** self.q)               # quantization step size
        if mu == 0:
            # Degenerate constant input: every codeword is zero.
            return torch.zeros_like(v)
        # Integer codeword indices, clamped to the representable range.
        v_bit = torch.clamp(torch.floor(v / mu), 0, 2 ** self.q - 1)
        return v_bit

# Dequantization and reshape class
class DequanAndReshape(nn.Module):
    """Invert ReshapeAndQuant: rescale q-bit codewords by the step size and
    reshape the flat vector back to a (B, C_, M_/4, (N_/4)/N0_) feature map.

    Bug fix: the reshape previously referenced the module-level globals
    ``N`` and ``N0`` instead of the ``N_``/``N0_`` arguments, silently
    ignoring the caller's dimensions (and raising NameError if the globals
    were absent).
    """
    def __init__(self, q_):
        super(DequanAndReshape, self).__init__()
        self.q = q_  # number of quantization bits

    def forward(self, v_bit, C_, M_, N_, N0_, Z_):
        mu = Z_ / (2 ** self.q)   # quantization step size
        v_dequant = mu * v_bit    # reconstruct real values from codewords
        batch_size_ = v_bit.size(0)
        # Size the feature map from the arguments, not module globals.
        v_reshape = v_dequant.view((batch_size_, C_, M_ // 4, N_ // 4 // N0_)).clone()
        return v_reshape

# Residual block class
class Residual(nn.Module):
    """Two 3x3 stride-1 convolutions, each followed by ReLU, with an
    identity skip connection added after the second activation."""

    def __init__(self, input_channels, num_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, X):
        hidden = F.relu(self.conv1(X))
        hidden = F.relu(self.conv2(hidden))
        # Out-of-place add: avoids in-place mutation of X under autograd.
        return hidden + X
# Build a stack of residual blocks
def resnet_block(input_channels, num_channels, num_residuals):
    """Return a list of ``num_residuals`` Residual blocks.

    Bug fix: ``input_channels`` was previously ignored — every block was
    built as ``Residual(num_channels, num_channels)``. The first block now
    maps ``input_channels -> num_channels`` as the parameter name implies.
    All call sites in this file pass input_channels == num_channels, so
    their behavior is unchanged.

    NOTE(review): Residual has no projection on the skip path, so the add
    still requires input_channels == num_channels at forward time.
    """
    blk = []
    for i in range(num_residuals):
        if i == 0:
            blk.append(Residual(input_channels, num_channels))
        else:
            blk.append(Residual(num_channels, num_channels))
    return blk

# Decoder class
class Decoder(nn.Module):
    """Reconstruct a (B, 2, H, W) map from the dequantized code: two
    stride-2 transposed convs with ReLU, B_ residual blocks, a transposed
    conv that widens the last axis by N0_, and a final 3x3 conv down to
    2 channels."""

    def __init__(self, C_, B_, N0_):
        super(Decoder, self).__init__()
        self.tconv1 = nn.ConvTranspose2d(C_, 16, kernel_size=4, stride=2, padding=1)
        self.tconv2 = nn.ConvTranspose2d(16, 16, kernel_size=4, stride=2, padding=1)
        self.resblocks = nn.Sequential(*resnet_block(16, 16, B_))
        self.upsampling = nn.ConvTranspose2d(16, 16, kernel_size=3, stride=(1, N0_), padding=1, output_padding=(0, N0_-1))
        self.final_conv = nn.Conv2d(16, 2, kernel_size=3, stride=1, padding=1)

    def forward(self, X):
        out = F.relu(self.tconv1(X))    # first 2x spatial upsampling
        out = F.relu(self.tconv2(out))  # second 2x spatial upsampling
        out = self.resblocks(out)       # B_ residual refinement blocks
        out = self.upsampling(out)      # widen the last axis by N0_
        return self.final_conv(out)     # project back to real/imag channels

# Composite network class
class JDCNet(nn.Module):
    """End-to-end pipeline: encode -> quantize -> dequantize -> decode."""

    def __init__(self, encoder, reshape_and_quant,
                 dequan_and_reshape, decoder, C_, M_, N_, N0_):
        super(JDCNet, self).__init__()
        self.encoder = encoder
        self.reshape_and_quant = reshape_and_quant
        self.dequan_and_reshape = dequan_and_reshape
        self.decoder = decoder
        self.C = C_
        self.M = M_
        self.N = N_
        self.N0 = N0_

    def forward(self, X):
        # Compress the input into a low-dimensional feature map.
        code = self.encoder(X)
        # Quantize the flattened code (the quantizer ignores its 2nd arg).
        bits = self.reshape_and_quant(code, X)
        # Stand-in for the receiver recovering the range Z; in a real system
        # the BS would obtain Z through another mechanism after transmission.
        span = torch.max(code) - torch.min(code)
        restored = self.dequan_and_reshape(bits, self.C, self.M, self.N, self.N0, span)
        # Reconstruct the full channel estimate.
        return self.decoder(restored)


# # Define the loss functions

# In[4]:


import torch.linalg as LA

# Mean-squared-error loss over complex channel matrices
def mse_loss(predictions, targets):
    """Sum of squared Frobenius-norm errors between complex channel matrices.

    Args:
        predictions, targets: real tensors of shape (B, 2, H, W); channel 0
            holds the real part, channel 1 the imaginary part.

    Returns:
        0-dim tensor: sum over the batch of ||H_hat_i - H_i||_F^2.

    Bug fix: the original used ``torch.linalg.norm(..., ord=2)`` on 2-D
    matrices, which computes the *spectral* norm (largest singular value),
    not the Frobenius norm an MSE requires; it also ran a per-sample SVD.
    """
    H = torch.complex(targets[:, 0], targets[:, 1])
    H_hat = torch.complex(predictions[:, 0], predictions[:, 1])
    # sum_i ||H_hat_i - H_i||_F^2 == sum of |difference|^2 over all entries.
    return torch.sum(torch.abs(H_hat - H) ** 2)
    
def nmse_metric(predictions, targets):
    """NMSE in dB: 10*log10( E[||H_hat - H||_F^2] / E[||H||_F^2] ).

    Args:
        predictions, targets: real tensors of shape (B, 2, H, W); channel 0
            holds the real part, channel 1 the imaginary part.

    Returns:
        0-dim tensor with the batch NMSE in decibels.

    Bug fix: as in ``mse_loss``, ``ord=2`` computed the spectral norm; the
    NMSE definition uses the Frobenius norm. Also vectorized over the batch.
    """
    H = torch.complex(targets[:, 0], targets[:, 1])
    H_hat = torch.complex(predictions[:, 0], predictions[:, 1])
    # Per-sample squared Frobenius norms, then batch means.
    mse = torch.mean(torch.sum(torch.abs(H_hat - H) ** 2, dim=(1, 2)))
    var = torch.mean(torch.sum(torch.abs(H) ** 2, dim=(1, 2)))
    return 10 * torch.log10(mse / var)


# # Training and visualization

# In[5]:


import matplotlib.pyplot as plt
import matplotlib.animation as animation
import torch

class Animator:
    """For plotting data in animation.

    Maintains per-series x/y histories and redraws a single axes each time
    :meth:`add` is called; used here to live-plot train/valid loss.
    """
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        if legend is None:
            legend = []
        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
        # Normalize to a list so self.axes[0] also works for the 1x1 case.
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Captured as a lambda so the same configuration can be re-applied
        # after every redraw (plotting clears/re-creates the lines).
        self.config_axes = lambda: self.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts
        self.lines = []
        self.legend = legend

    def set_axes(self, axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
        """Apply labels, scales, limits, and legend to *axes*."""
        axes.set_xlabel(xlabel)
        axes.set_ylabel(ylabel)
        axes.set_xscale(xscale)
        axes.set_yscale(yscale)
        if xlim:
            axes.set_xlim(xlim)
        if ylim:
            axes.set_ylim(ylim)
        if legend:
            axes.legend(legend)

    def add(self, x, *ys):
        """Add new data points to the plot.

        Each positional *ys* value becomes one series; the series count is
        fixed by the first call (histories are allocated lazily).
        NOTE(review): a None x or y silently skips that series for this
        step, which can leave series histories of unequal length.
        """
        n = len(ys)
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, y in enumerate(ys):
            if x is not None and y is not None:
                self.X[i].append(x)
                self.Y[i].append(y)
        self.update_plot()

    def update_plot(self):
        """Update the plot with new data.

        Clears the previous line artists, replots every series in full
        (zip truncates to len(self.fmts) series), re-applies the axes
        configuration, and forces a canvas redraw.
        """
        for line in self.lines:
            line.set_data([], [])
        self.lines = []
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            line, = self.axes[0].plot(x, y, fmt)
            self.lines.append(line)
        self.config_axes()
        self.fig.canvas.draw()  # Force the figure to update


# In[6]:


class CosineAnnealingWarmUpRestarts(torch.optim.lr_scheduler._LRScheduler):
    """Linear warm-up followed by a single cosine decay.

    Schedule:
      * epoch < T_w : LR ramps linearly from eta_min to eta_max;
      * epoch >= T_w: LR follows a half-cosine from eta_max down to eta_min,
        reaching eta_min at epoch T_0.

    NOTE(review): despite the name, this scheduler never *restarts* — the
    epoch counter is not wrapped modulo T_0, so past T_0 the cosine keeps
    going. Training here runs far fewer epochs than T_0, so it never shows;
    confirm whether restarts were actually intended.

    Fixes: the cosine branch previously returned a 0-dim torch tensor as
    the learning rate (via a needless torch.cos round-trip) while the
    warm-up branch returned a float — both branches now return plain
    Python floats; the cosine denominator is also guarded against
    T_0 == T_w.
    """
    def __init__(self, optimizer, T_0, T_w, eta_max, eta_min=5e-5, last_epoch=-1):
        self.T_0 = T_0          # epoch at which the cosine reaches eta_min
        self.T_w = T_w          # number of linear warm-up epochs
        self.eta_max = eta_max  # peak learning rate (end of warm-up)
        self.eta_min = eta_min  # floor learning rate
        super(CosineAnnealingWarmUpRestarts, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        epoch = self.last_epoch
        if epoch < self.T_w:
            # Warm-up: linear ramp eta_min -> eta_max over T_w epochs.
            eta_t = self.eta_min + (self.eta_max - self.eta_min) * epoch / self.T_w
        else:
            # Cosine decay from eta_max (epoch == T_w) to eta_min (epoch == T_0).
            span = max(self.T_0 - self.T_w, 1)  # guard division when T_0 == T_w
            progress = (epoch - self.T_w) / span
            cos_out = math.cos(math.pi * progress) + 1
            eta_t = self.eta_min + (self.eta_max - self.eta_min) / 2 * cos_out
        # Same LR for every parameter group.
        return [eta_t for _ in self.optimizer.param_groups]


def _average_nmse(net, loader, device):
    """Average NMSE (dB) of *net* over all batches in *loader* (eval mode)."""
    net.eval()
    total = 0.0
    with torch.no_grad():
        for X, y in loader:
            X, y = X.to(device), y.to(device)
            total += nmse_metric(net(X), y).item()
    return total / len(loader)


def train(net, train_loader, valid_loader, num_epochs, learning_rate, weight_decay, batch_size, device):
    """Train *net* with Adam and a warm-up + cosine LR schedule.

    Args:
        net: model to train (moved to *device*).
        train_loader, valid_loader: pre-batched DataLoaders.
        num_epochs: number of training epochs.
        learning_rate: Adam initial LR and the schedule's peak LR.
        weight_decay: Adam weight decay.
        batch_size: unused (loaders are already batched); kept for
            signature compatibility with the call site.
        device: torch device to train on.

    Returns:
        (train_ls, valid_ls): per-epoch average train/valid losses.

    Fixes vs. original: the schedule's peak LR was hard-coded to 2e-3,
    ignoring *learning_rate* (the call site passes 2e-3, so behavior is
    unchanged); the final-NMSE block was guarded by a post-loop
    `if epoch == num_epochs - 1`, which raises NameError for
    num_epochs == 0; the LR print used the deprecated get_lr().
    """
    net.to(device)
    train_ls, valid_ls = [], []

    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, weight_decay=weight_decay)
    # Warm-up/cosine schedule; peak LR now follows the learning_rate argument.
    scheduler = CosineAnnealingWarmUpRestarts(optimizer, T_0=200, T_w=30, eta_max=learning_rate, eta_min=5e-5)

    # Live loss plot, updated once per epoch.
    animator = Animator(xlabel='Epoch', ylabel='Loss', legend=['Train Loss', 'Valid Loss'])

    for epoch in range(num_epochs):
        # --- training pass ---
        net.train()
        running_loss = 0.0
        for X, y in train_loader:
            X, y = X.to(device), y.to(device)
            optimizer.zero_grad()
            loss = mse_loss(net(X), y)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        train_ls.append(running_loss / len(train_loader))

        # --- validation pass ---
        net.eval()
        valid_loss = 0.0
        with torch.no_grad():
            for X_val, y_val in valid_loader:
                X_val, y_val = X_val.to(device), y_val.to(device)
                valid_loss += mse_loss(net(X_val), y_val).item()
        valid_ls.append(valid_loss / len(valid_loader))

        # Update the live plot, then advance the LR schedule.
        animator.add(epoch + 1, train_ls[-1], valid_ls[-1])
        scheduler.step()

        # get_last_lr() reports the LR set by the most recent step();
        # calling get_lr() directly is deprecated API usage.
        print(f"Epoch [{epoch + 1}/{num_epochs}], train loss: {train_ls[-1]:.4f}, valid loss: {valid_ls[-1]:.4f}, LR: {scheduler.get_last_lr()[0]:.8f}")

    # Final NMSE report (skipped when no training happened).
    if num_epochs > 0:
        avg_train_nmse = _average_nmse(net, train_loader, device)
        print(f"Final Train NMSE: {avg_train_nmse:.4f}")
        avg_valid_nmse = _average_nmse(net, valid_loader, device)
        print(f"Final Valid NMSE: {avg_valid_nmse:.4f}")

    return train_ls, valid_ls


# In[7]:


# Hyperparameters
C = 8
M = 16
N = 64
N0 = 2  # 2 or 4
B = 4
q = 8
batch_size = 256
num_workers = 16
epochs = 20
lr = 0.002
weight_decay = 0
# 超参数设置
eta_max = 2e-3
eta_min = 5e-5
T_w = 30  # 预热周期
T_0 = 200  # 最大调整周期


train_loader = torch.utils.data.DataLoader(train_data, 
                batch_size=batch_size, shuffle=True, 
                num_workers=num_workers, pin_memory=True)
valid_loader = torch.utils.data.DataLoader(valid_data, 
                batch_size=batch_size, shuffle=False, 
                num_workers=num_workers, pin_memory=True)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Instantiate the network
encoder = Encoder(C_=C)
reshape_and_quant = ReshapeAndQuant(q_=q)
dequan_and_reshape = DequanAndReshape(q_=q)
decoder = Decoder(C_=C, B_=B, N0_=N0)
net = JDCNet(encoder, reshape_and_quant, dequan_and_reshape, decoder,
            C_=C, M_=M, N_=N, N0_=N0)

# 在模型实例化后使用 DataParallel，实现数据并行
net = torch.nn.DataParallel(net)

net.to(device)

train_net = train(net, train_loader, valid_loader, num_epochs=epochs, learning_rate=eta_max, 
      weight_decay=0, batch_size=256, device=device)


# In[ ]:




