import torch
import torch.nn as nn
import torch.optim as optim
import scipy.io
import numpy as np
import os
import matplotlib.pyplot as plt

#############################
# Data preprocessing section (author's note: this part cannot be run as-is)
#############################
# Hyper-parameters
m = 50  # depth (number of time points)
k = 481  # dimension of the abundance matrix (number of dictionary atoms)
K_nonzero = 20  # sparsity requirement  # NOTE(review): unused in this chunk
lambda_sparsity = 0.1  # sparsity regularization coefficient  # NOTE(review): unused in this chunk
epochs = 1000
learning_rate = 0.001

data_folder1 = "dync"
file_prefix = "spatial_correlation_"
file_extension = ".mat"

# Input matrix: one 8x8 complex spatial-correlation slice per time point
result_matrix = np.zeros((8, 8, m), dtype=np.complex64)

# Populate result_matrix from the per-time-point .mat files in data_folder1.
# Files that are missing, lack the "p" variable, or have the wrong element
# count are skipped with a diagnostic message.
for t in range(1, m + 1):
    fname2 = os.path.join(data_folder1, f"{file_prefix}{t}{file_extension}")
    if not os.path.exists(fname2):
        print(f"File not found: {fname2}")
        continue
    mat2 = scipy.io.loadmat(fname2)
    if "p" not in mat2:
        print(f"'p' not found in {fname2}")
        continue
    Y = mat2["p"]
    if Y.size != 64:
        print(f"Shape of 'p' in {fname2} is not compatible for reshaping to 8x8.")
        continue
    result_matrix[:, :, t - 1] = Y.reshape(8, 8)

# Split the complex correlation data into interleaved real/imag rows:
# (8, 8, m) complex -> (1, 1, m, 16, 8) real tensor, with real parts on the
# even rows and imaginary parts on the odd rows of dim 3.
Y_real = torch.from_numpy(np.real(result_matrix)).float().cuda()
Y_imag = torch.from_numpy(np.imag(result_matrix)).float().cuda()

Y_combined = torch.zeros((1, 1, m, 16, 8), dtype=torch.float32).cuda()
# Vectorized equivalent of assigning slice i to Y_real[:, :, i] for each i.
Y_combined[0, 0, :, ::2, :] = Y_real.permute(2, 0, 1)
Y_combined[0, 0, :, 1::2, :] = Y_imag.permute(2, 0, 1)

# Load the endmember/dictionary matrix D (64 x k complex) and repack it as a
# (1, 16, 8, k) real tensor with real/imag parts interleaved along dim 1.
fname4 = "Data/DC1/phasecha.mat"
mat4 = scipy.io.loadmat(fname4)
D = mat4["phasecha"]
if D.shape[0] != 64:
    raise ValueError("The number of rows in D is not compatible with 8x8 reshaping.")
D1 = D.reshape(8, 8, -1)

D_real = torch.from_numpy(np.real(D1)).float().cuda()
D_imag = torch.from_numpy(np.imag(D1)).float().cuda()

D_combined = torch.zeros((1, 16, 8, k), dtype=torch.float32).cuda()
# Vectorized equivalent of the per-atom loop: take the first k atoms.
D_combined[0, ::2, :, :] = D_real[:, :, :k]
D_combined[0, 1::2, :, :] = D_imag[:, :, :k]


#############################
# Helper functions and modules
#############################
class Concat(nn.Module):
    """Apply several sub-modules to the same input and concatenate their
    outputs along `dim`, center-cropping spatial dims (2 and 3) down to the
    smallest branch output when the branches disagree in size."""

    def __init__(self, dim, *args):
        super(Concat, self).__init__()
        self.dim = dim
        # Register each branch under its positional index.
        for idx, module in enumerate(args):
            self.add_module(str(idx), module)

    def forward(self, input):
        # Run every registered branch on the same input.
        outs = [branch(input) for branch in self._modules.values()]

        heights = [o.shape[2] for o in outs]
        widths = [o.shape[3] for o in outs]
        h_min = min(heights)
        w_min = min(widths)

        if all(h == h_min for h in heights) and all(w == w_min for w in widths):
            # All branches agree spatially; concatenate as-is.
            cropped = outs
        else:
            # Center-crop each branch output to the smallest spatial size.
            cropped = []
            for o in outs:
                dh = (o.size(2) - h_min) // 2
                dw = (o.size(3) - w_min) // 2
                cropped.append(o[:, :, dh: dh + h_min, dw: dw + w_min])

        return torch.cat(cropped, dim=self.dim)

def act(act_fun='LeakyReLU'):
    """Build an activation layer.

    `act_fun` is either a string naming a supported activation, or a
    zero-argument callable (e.g. an nn.Module class) that is invoked
    directly. Raises ValueError for an unknown string name.
    """
    if not isinstance(act_fun, str):
        return act_fun()

    factories = {
        'LeakyReLU': lambda: nn.LeakyReLU(0.1, inplace=True),
        'ReLU': lambda: nn.ReLU(inplace=True),
        'Sigmoid': nn.Sigmoid,
        'Tanh': nn.Tanh,
        'softmax': lambda: nn.Softmax(dim=1),
        'ELU': nn.ELU,
        'none': nn.Sequential,  # identity placeholder
    }
    if act_fun not in factories:
        raise ValueError("Unknown activation function.")
    return factories[act_fun]()

def bn(num_features):
    """Shorthand for a 2-D batch-norm layer over `num_features` channels."""
    return nn.BatchNorm2d(num_features=num_features)

def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero', downsample_mode='stride'):
    """2-D convolution wrapped in an nn.Sequential.

    pad='zero' relies on Conv2d's built-in zero padding; pad='reflection'
    prepends an explicit ReflectionPad2d layer instead. Padding keeps the
    spatial size for odd kernel sizes (at stride 1). `downsample_mode` is
    accepted for API compatibility but not used here.
    """
    to_pad = (kernel_size - 1) // 2
    layers = []
    if pad == 'reflection':
        layers.append(nn.ReflectionPad2d(to_pad))
        to_pad = 0  # padding already handled by the explicit layer
    layers.append(nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias))
    return nn.Sequential(*layers)


#############################
# UnmixArch network definition (Deep-Image-Prior-style skip network)
#############################
def UnmixArch(
        num_input_channels=2, num_output_channels=3, output=3,
        num_channels_down=[16, 32, 64, 128, 128], num_channels_up=[16, 32, 64, 128, 128], num_channels_skip=[4, 4, 4, 4, 4], 
        filter_size_down=3, filter_size_up=3, filter_skip_size=1,
        need_sigmoid=True, need_bias=True, 
        pad='zero', upsample_mode='nearest', downsample_mode='stride', act_fun='LeakyReLU', 
        need1x1_up=True):
    """Assemble an encoder-decoder network with per-scale skip connections.

    At each scale i a skip branch (conv/BN/act) runs in parallel with a
    "deeper" branch that downsamples via a stride-2 conv, recurses into
    scale i+1, and upsamples by 2x. The two are concatenated along channels,
    batch-normalized, and refined by further conv/BN/activation stages.
    Returns the assembled nn.Sequential model.

    NOTE(review): `output` and `downsample_mode` are accepted but never used
    in this body; downsampling is hard-coded as stride-2 convolution. The
    mutable list defaults are only read, never mutated, so they are safe in
    practice.
    """

    assert len(num_channels_down) == len(num_channels_up) == len(num_channels_skip)

    n_scales = len(num_channels_down) 

    # Broadcast scalar per-scale settings into lists of length n_scales.
    if not (isinstance(upsample_mode, list) or isinstance(upsample_mode, tuple)) :
        upsample_mode   = [upsample_mode]*n_scales

    if not (isinstance(downsample_mode, list)or isinstance(downsample_mode, tuple)):
        downsample_mode   = [downsample_mode]*n_scales
    
    if not (isinstance(filter_size_down, list) or isinstance(filter_size_down, tuple)) :
        filter_size_down   = [filter_size_down]*n_scales

    if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)) :
        filter_size_up   = [filter_size_up]*n_scales

    last_scale = n_scales - 1 
    model = nn.Sequential()
    model_tmp = model  # container the current scale's modules are added to

    input_depth = num_input_channels
    for i in range(len(num_channels_down)):

        deeper = nn.Sequential()
        skip = nn.Sequential()

        # Concatenate skip and deeper branch outputs along channels (dim 1);
        # with no skip channels at this scale, use the deeper branch alone.
        if num_channels_skip[i] != 0:
            concat_module = Concat(1, skip, deeper)
            model_tmp.add_module(f"concat_{i}", concat_module)
        else:
            model_tmp.add_module(f"deeper_{i}", deeper)
        
        # Channels after concatenation: skip channels plus what the deeper
        # branch emits (next scale's up-channels, or this scale's
        # down-channels at the deepest level).
        out_features = num_channels_skip[i] + (num_channels_up[i + 1] if i < last_scale else num_channels_down[i])
        model_tmp.add_module(f"bn_{i}", bn(out_features))

        if num_channels_skip[i] != 0:
            skip.add_module("conv_skip", conv(input_depth, num_channels_skip[i], filter_skip_size, bias=need_bias, pad=pad))
            skip.add_module("bn_skip", bn(num_channels_skip[i]))
            skip.add_module("act_skip", act(act_fun))
            
        # Downsampling stage: stride-2 conv followed by a stride-1 conv.
        deeper.add_module("conv_down_1", conv(input_depth, num_channels_down[i], filter_size_down[i], 2, bias=need_bias, pad=pad))
        deeper.add_module("bn_down_1", bn(num_channels_down[i]))
        deeper.add_module("act_down_1", act(act_fun))

        deeper.add_module("conv_down_2", conv(num_channels_down[i], num_channels_down[i], filter_size_down[i], bias=need_bias, pad=pad))
        deeper.add_module("bn_down_2", bn(num_channels_down[i]))
        deeper.add_module("act_down_2", act(act_fun))

        deeper_main = nn.Sequential()

        if i == len(num_channels_down) - 1:
            # Deepest level: no further recursion. (This local `k` shadows
            # the module-level constant `k`; it is the channel count of the
            # deeper branch entering the upsampler.)
            k = num_channels_down[i]
        else:
            # The next scale's modules are built inside deeper_main (see the
            # `model_tmp = deeper_main` rebinding at the end of the loop).
            deeper.add_module("deeper_main", deeper_main)
            k = num_channels_up[i + 1]

        deeper.add_module("upsample", nn.Upsample(scale_factor=2, mode=upsample_mode[i]))

        # Decoder stage for this scale, applied after the concatenation.
        model_tmp.add_module(f"conv_up_{i}", conv(num_channels_skip[i] + k, num_channels_up[i], filter_size_up[i], 1, bias=need_bias, pad=pad))
        model_tmp.add_module(f"bn_up_{i}", bn(num_channels_up[i]))
        model_tmp.add_module(f"act_up_{i}", act(act_fun))

        if need1x1_up:
            # Optional 1x1 refinement after each upsampling stage.
            model_tmp.add_module(f"conv_1x1_up_{i}", conv(num_channels_up[i], num_channels_up[i], 1, bias=need_bias, pad=pad))
            model_tmp.add_module(f"bn_1x1_up_{i}", bn(num_channels_up[i]))
            model_tmp.add_module(f"act_1x1_up_{i}", act(act_fun))

        input_depth = num_channels_down[i]
        model_tmp = deeper_main  # descend: next iteration fills deeper_main

    # Map the top scale's channels to the requested number of outputs.
    model.add_module("final_conv", conv(num_channels_up[0], num_output_channels, 1, bias=need_bias, pad=pad))
    if need_sigmoid:
        model.add_module("final_sigmoid", nn.Sigmoid())

    return model


#############################
# Instantiate the UnmixArch model
#############################
# UnmixArch consumes (N, C, H, W) input; fold the singleton dims of
# (1, 1, m, 16, 8) so the m time points become the channel axis.
Y_reshaped = Y_combined.view(1, m, 16, 8)

num_input_channels = m
# BUGFIX: the training loop below computes MSELoss(model(Y_reshaped), Y_reshaped),
# so the model output must match Y_reshaped's shape (1, m, 16, 8). The original
# value of 2 produced a (1, 2, 16, 8) output, whose channel dim cannot broadcast
# against m, making MSELoss raise a RuntimeError on the first iteration.
num_output_channels = m

model = UnmixArch(
    num_input_channels=num_input_channels,
    num_output_channels=num_output_channels,
    num_channels_down=[16, 32],
    num_channels_up=[16, 32],
    num_channels_skip=[4, 4],
    filter_size_down=3,
    filter_size_up=3,
    filter_skip_size=1,
    need_sigmoid=False,
    need_bias=True,
    pad='zero',
    upsample_mode='nearest',
    downsample_mode='stride',
    act_fun='LeakyReLU',
    need1x1_up=True
).cuda()

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop: self-reconstruction of Y_reshaped.
for epoch in range(epochs):
    model.train()
    optimizer.zero_grad()

    Y_reconstructed = model(Y_reshaped)
    # Placeholder objective: plain MSE against the input itself (per the
    # original author's note, the real pipeline should involve operations
    # with the abundance X and dictionary D).
    # NOTE(review): this requires the model output shape to match
    # Y_reshaped's (m channels) — verify num_output_channels above.
    # NOTE(review): lambda_sparsity is defined but no sparsity term is
    # added to the loss here.
    mse_loss = criterion(Y_reconstructed, Y_reshaped)

    mse_loss.backward()
    optimizer.step()

    if (epoch + 1) % 100 == 0:
        print(f"Epoch [{epoch + 1}/{epochs}], Loss: {mse_loss.item():.4f}")

# Final evaluation in inference mode (BatchNorm uses running statistics).
model.eval()
with torch.no_grad():
    Y_reconstructed = model(Y_reshaped)
mse_total = criterion(Y_reconstructed, Y_reshaped).item()
print(f"Final MSE: {mse_total:.6f}")
