import os
import gc
import time
import torch
import torchaudio
import wandb
import argparse
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch_stft import STFT
import librosa.display
import datetime
from torch.cuda.amp import GradScaler, autocast
import soundfile as sf
import scipy.fftpack
from torch.utils.data import DataLoader
from typing import Tuple


from pystct import sdct_torch, isdct_torch
from losses import StegoLoss, SNR
from loader import loader
from model import StegoUNet
from pydtw import SoftDTW


def _cli_bool(value):
    """Parse a command-line string into a bool ('false', '0', 'no', '' -> False).

    FIX: the original used type=bool, for which ANY non-empty string —
    including the literal 'False' — parses as True. This converter keeps the
    same CLI shape (--add_dtw_term VALUE) but interprets the value correctly.
    """
    return str(value).strip().lower() not in ('false', '0', 'no', '')


parser = argparse.ArgumentParser()
parser.add_argument('--beta', type=float, default=0.35, help='Beta hyperparameter for loss calculation')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--epochs', type=int, default=8, help='Number of training epochs')
parser.add_argument('--experiment', type=int, default=0, help='Experiment identifier')
parser.add_argument('--summary', type=str, default='', help='Summary for the wandb run')
parser.add_argument('--add_dtw_term', type=_cli_bool, default=False, help='Whether to add DTW term in loss')
parser.add_argument('--transform', type=str, default='cosine', help='Transform type: cosine or fourier')
args = parser.parse_args()

# Output directories for checkpoints, logs and spectrogram images.
results_folder = "./Train_Result/"  # was an f-string with no placeholders
os.makedirs(results_folder, exist_ok=True)
images_folder = os.path.join(results_folder, "images")
os.makedirs(images_folder, exist_ok=True)



def visualize_spectrogram(host_audio, container_audio, secret_audio, revealed_audio, epoch, iteration, sr=22050, n_fft=256, 
                          hop_length=260, save_to_disk=True, results_folder='results', images_folder='images'):
    """
    Plot spectrograms of the four audios and log the figure to wandb.

    Parameters:
    - host_audio, container_audio, secret_audio, revealed_audio: frequency-domain
      audio data (torch tensors or numpy arrays) to visualize.
    - epoch, iteration: current epoch/iteration, used to caption/name the image.
    - sr: sample rate used by librosa for axis scaling (default 22050).
    - n_fft: FFT window size (currently unused by the plotting calls below).
    - hop_length: default hop length (currently unused; hops are hard-coded below).
    - save_to_disk: whether to also save the figure as a PNG.
    - results_folder: base output directory.
    - images_folder: sub-directory for saved images.
    """
    # Create the image output directory if we are saving to disk.
    if save_to_disk:
        images_path = os.path.join(results_folder, images_folder)
        os.makedirs(images_path, exist_ok=True)

    # One row per audio, in plotting order: host, secret, container, revealed.
    fig, axes = plt.subplots(4, 1, figsize=(10, 8))
    titles = ["Host Audio", "Secret Audio", "Container Audio", "Revealed Audio"]
    
    for i, audio in enumerate([host_audio, secret_audio, container_audio, revealed_audio]):
        if isinstance(audio, np.ndarray):
            audio = audio.squeeze()  # ensure a 2D/1D numpy array
        else:
            audio = audio[0].cpu().squeeze().numpy()  # first batch item -> numpy

        # Inputs are already frequency-domain; just convert magnitude to dB.
        D = librosa.amplitude_to_db(np.abs(audio), ref=np.max)
        # FIX: the original tested `audio == host_audio and audio == container_audio`,
        # comparing the numpy copy against the input tensors elementwise — an
        # ambiguous truth value (raises), and the `and` could never hold anyway.
        # The intent was to select host (index 0) and container (index 2), which
        # are twice as long in time (512 vs 256 frames per the shape comments in
        # train()), so they use half the hop length for comparable time axes.
        if i in (0, 2):
            librosa.display.specshow(D, sr=sr, hop_length=130, x_axis='time', y_axis='log', ax=axes[i])
        else:
            librosa.display.specshow(D, sr=sr, hop_length=260, x_axis='time', y_axis='log', ax=axes[i])
        axes[i].set_title(titles[i])
        axes[i].label_outer()  # hide inner x/y labels

    plt.tight_layout()

    # Optionally save the PNG to disk.
    if save_to_disk:
        image_path = os.path.join(images_path, f"spectrogram_epoch_{epoch}_iter_{iteration}.png")
        plt.savefig(image_path)

    wandb.log({
        "Spectrogram": wandb.Image(fig, caption=f"Epoch {epoch} - Iteration {iteration}")
    })
    
    plt.close(fig)  # close this figure explicitly to avoid leaking figures



def save_audio_files(epoch, host_audio, container_audio, secret_audio, revealed_audio, results_folder='results', audio_folder='audio_files', sample_rate=44100):
    """
    Inverse-transform frequency-domain audio back to waveforms and save as WAV.

    Parameters:
    - epoch: current epoch, used in the output file names.
    - host_audio, container_audio, secret_audio, revealed_audio: DCT-domain
      tensors, shaped roughly [1, 1, freq_bins, time_steps].
    - results_folder: base output directory.
    - audio_folder: sub-directory for the WAV files.
    - sample_rate: sample rate written to the WAV header (default 44100,
      matching the previously hard-coded value).
    """
    # Ensure the output directory exists.
    output_path = os.path.join(results_folder, audio_folder)
    os.makedirs(output_path, exist_ok=True)
    
    # Parallel lists of file stems and the tensors to save.
    filenames = ['host_audio', 'container_audio', 'secret_audio', 'revealed_audio']
    audios = [host_audio, container_audio, secret_audio, revealed_audio]

    for name, audio in zip(filenames, audios):
        # Move to CPU and drop singleton dims before the inverse SDCT.
        audio = audio.cpu().squeeze()
        # NOTE(review): unlike the calls in train()/validate(), no frame_length
        # is passed here — confirm isdct_torch infers it from the frame axis.
        audio_signal = isdct_torch(audio, frame_step=260, window=torch.hamming_window)
        audio_file_path = os.path.join(output_path, f"{name}_epoch{epoch}.wav")
        torchaudio.save(audio_file_path, audio_signal.unsqueeze(0), sample_rate)



# Training loop.
def train(model : StegoUNet, tr_loader : DataLoader, vd_loader : DataLoader, lr : float, epochs : int = 8, 
          slide : int = 50, add_dtw_term : bool = False, 
          transform : str = 'cosine', on_phase : bool = False) -> tuple[StegoUNet, float]:
    """
    Train the StegoUNet, periodically validate, and checkpoint the best model.

    Parameters:
    - model: StegoUNet to train (wrapped in DataParallel when >1 GPU is visible).
    - tr_loader: training DataLoader yielding (host, secret) or (host, secret, phase).
    - vd_loader: validation DataLoader, forwarded to validate().
    - lr: Adam learning rate.
    - epochs: number of passes over tr_loader.
    - slide: window size of the moving averages logged to wandb/console.
    - add_dtw_term: if True, add a down-scaled Soft-DTW term to the loss.
    - transform: 'cosine' (SDCT inputs) or 'fourier' (STFT magnitude + phase).
    - on_phase: for 'fourier', embed/reveal on the phase instead of the magnitude.

    Returns:
    - (model, mean training loss). NOTE: train_loss is re-initialized every
      epoch, so the returned mean covers only the final epoch's batches.
    """

    # wandb run setup
    project : str = "WavInWav"
    name : str = "PixMethod-1.0-0"
    wandb.init(project=project, name=name)
    wandb.run.name = f"{name}"
    wandb.run.save()
    wandb.watch(model)

    # Select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f'Using device: {device}')

    # Multi-GPU data parallelism
    if torch.cuda.device_count() > 1:
        print(f"Let's use {torch.cuda.device_count()} GPUs!")
        model = nn.DataParallel(model)

    model.to(device)

    # Training mode
    model.train()

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
    ini = time.time()
    best_loss = np.inf

    # Soft-DTW criterion for waveform-domain similarity
    softDTW = SoftDTW(gamma=1.0, normalize=True) 


    # Initialize STFT transform constructor
    # NOTE(review): stft is only bound on the 'fourier' branch; every later use
    # sits behind a lazily-evaluated `transform == 'fourier'` check, so the
    # 'cosine' path never touches the name.
    if transform == 'fourier':
        stft = STFT(
			filter_length=2 ** 11 - 1,
			hop_length=132,
			win_length=2 ** 11 - 1,
			window='hann'
		).to(device)
        stft.num_samples = 67522

    # NOTE(review): scaler is created but never used below (loss.backward and
    # optimizer.step are called directly, with no autocast) — presumably
    # leftover AMP scaffolding.
    scaler = GradScaler()

    cnt = 0
    
    # Start training
    for epoch in range(epochs):
        # Per-epoch metric buffers (training and validation)
        train_loss, train_loss_host, train_loss_secret, snr, train_dtw_loss = [], [], [], [], []
        vd_loss, vd_loss_host, vd_loss_secret, vd_snr,vd_dtw= [], [], [], [],[]
        for i, data in enumerate(tr_loader):
            optimizer.zero_grad()
            # Load batch
            host_audio : torch.Tensor = data[0].to(device)
            secret_audio : torch.Tensor = data[1].to(device)
            if transform == 'fourier': phase : torch.Tensor = data[2].to(device)
            host_audio, secret_audio = host_audio.to(device), secret_audio.to(device)
            # [1, 1024, 256] -> [1, 1, 1024, 256]
            secret_audio = secret_audio.unsqueeze(1) if transform == 'cosine' else secret_audio
            # [1, 1, 1024, 256] -> [1, 2, 1024, 256]
            secret_audio = secret_audio.repeat(1,2,1,1)
            # [1, 1024, 512] -> [1, 1, 1024, 512]
            host_audio = host_audio.unsqueeze(1) if transform == 'cosine' else host_audio

            # Forward pass: hide the secret in the host, then reveal it
            containers, revealed = model(secret_audio, host_audio)

            # Reconstruct host/container waveforms for the DTW term
            if transform == 'cosine':
                original_wav=isdct_torch(host_audio.squeeze(0).squeeze(0), frame_length=1024, frame_step=260, window=torch.hamming_window)
                container_wav = isdct_torch(containers.squeeze(0).squeeze(0), frame_length=1024, frame_step=260, window=torch.hamming_window)
            elif transform == 'fourier':
                if on_phase:
                    original_wav = stft.inverse(host_audio.squeeze(1), phase.squeeze(1))
                    container_wav = stft.inverse(host_audio.squeeze(1), containers.squeeze(1))
                else:
                    original_wav = stft.inverse(host_audio.squeeze(1), phase.squeeze(1))
                    container_wav = stft.inverse(containers.squeeze(1), phase.squeeze(1))

            # Compute losses
            loss,loss_host,loss_secret = StegoLoss(host_audio, containers, secret_audio, revealed)
            
            snr_audio = SNR(
                host_audio, 
                containers, 
                phase=None if transform == 'cosine' else phase,
                transform=transform,
                transform_constructor= None if transform == 'cosine' else stft,
                on_phase=on_phase
            )
            dtw_loss = softDTW(original_wav.cpu().unsqueeze(0), container_wav.cpu().unsqueeze(0))
            if transform == 'fourier': dtw_loss = dtw_loss[0]
            if add_dtw_term:
                # 10**(floor(log10(1/33791))+1) == 1e-4: fixed down-scaling of the DTW term
                loss += 10**(np.floor(np.log10(1/33791)) + 1) * dtw_loss

            # Backward pass and optimizer step
            loss.backward()
            optimizer.step()

            # Record per-batch metrics
            train_loss.append(loss.detach().item())
            train_loss_host.append(loss_host.detach().item())
            train_loss_secret.append(loss_secret.detach().item())
            snr.append(snr_audio)
            train_dtw_loss.append(dtw_loss.detach().item())

            # Moving averages over the last `slide` batches
            avg_train_loss = np.mean(train_loss[-slide:])
            avg_train_loss_host = np.mean(train_loss_host[-slide:])
            avg_train_loss_secret = np.mean(train_loss_secret[-slide:])
            avg_snr = np.mean(snr[-slide:])
            avg_dtw_loss = np.mean(train_dtw_loss[-slide:])

            print(
				f'(#{i})[{np.round(time.time()-ini,2)}s]\
                Epoch [{epoch + 1}/{epochs}], \
				Train Loss {loss.detach().item()},\
				MSE host {loss_host.detach().item()},\
				MSE secret {loss_secret.detach().item()},\
				SNR {snr_audio},\
				DTW {dtw_loss.detach().item()}' 
			)

            wandb.log({
				'train_loss': avg_train_loss,
				'train_loss_host': avg_train_loss_host,
				'train_loss_secret': avg_train_loss_secret,
				'train_snr': avg_snr,
                })

            if (i % 300 == 0) and (i != 0):
                # Run validation every 300 training batches; cnt cycles the
                # batch index used by validate() for the spectrogram upload
                cnt = cnt + 1
                if(cnt == 300) :
                    cnt = 0
                avg_valid_loss, avg_valid_loss_host, avg_valid_loss_secret, avg_valid_snr, avg_valid_dtw = validate(model, vd_loader, cnt = cnt , transform = transform, 
                                                                                                                    transform_constructor = stft if transform=='fourier' else None, 
                                                                                                                    on_phase=on_phase, dtw_criterion=softDTW, epoch=epoch,
                                                                                                                    save_visualization=True)
                vd_loss.append(avg_valid_loss) 
                vd_loss_host.append(avg_valid_loss_host) 
                vd_loss_secret.append(avg_valid_loss_secret) 
                vd_snr.append(avg_valid_snr) 
                vd_dtw.append(avg_valid_dtw)

                # Checkpoint whenever validation loss improves
                is_best = avg_valid_loss < best_loss
                if is_best:
                    best_loss = avg_valid_loss
                    checkpoint_path = os.path.join(results_folder, f"checkpoint_epoch_{epoch}_iter_{i}.pth")
                    torch.save(model.state_dict(), checkpoint_path)

                
        # End-of-epoch summary
        print(
			f'Epoch [{epoch + 1}/{epochs}], \
			Average_loss: {avg_train_loss}, \
			Average_loss_host: {avg_train_loss_host}, \
			Average_loss_secret: {avg_train_loss_secret}, \
			Average SNR: {avg_snr}, \
			Average DTW: {avg_dtw_loss}'
		)

        # Write the per-epoch training log
        # NOTE(review): the written line contains a stray apostrophe before \n;
        # kept byte-identical here since it is a runtime string
        log_path = os.path.join(results_folder, f"train_log_epoch_{epoch}.txt")
        with open(log_path, "w") as file:
            file.write(f"Epoch {epoch}: Average_loss: {avg_train_loss}, \
                       Average_loss_host: {avg_train_loss_host}, \
                       Average_loss_secret: {avg_train_loss_secret}, \
                       Average SNR: {avg_snr}, \
                       Average DTW: {avg_dtw_loss}'\n")

    # Save the final model weights
    model_path = os.path.join(results_folder, "final_model.pth")
    torch.save(model.state_dict(), model_path)

    # Mean training loss (final epoch only — see docstring)
    total_avg_train_loss = np.mean(train_loss) if train_loss else 0

    # Return the trained model and the average training loss
    return model, total_avg_train_loss

# Validation loop.
def validate(model, vd_loader, cnt, transform='cosine',transform_constructor=None,on_phase=False,dtw_criterion=None, epoch=None,save_visualization=True):
    """
    Run one validation pass over vd_loader and log averaged metrics to wandb.

    Parameters:
    - model: StegoUNet (possibly DataParallel-wrapped) to evaluate.
    - vd_loader: validation DataLoader yielding (host, secret) or (host, secret, phase).
    - cnt: batch index at which the spectrogram visualization is logged.
    - transform: 'cosine' (SDCT inputs) or 'fourier' (STFT magnitude + phase).
    - transform_constructor: STFT instance (required when transform == 'fourier').
    - on_phase: for 'fourier', the secret rides on the STFT phase.
    - dtw_criterion: optional SoftDTW loss; when None the DTW metric is 0.0.
    - epoch: current epoch, used only to caption the visualization.
    - save_visualization: whether to log a spectrogram comparison to wandb.

    Returns:
    - (avg_loss, avg_loss_host, avg_loss_secret, avg_snr, avg_dtw).
    """

    # Select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f'Using device: {device}')

    model.eval()
    valid_loss, valid_loss_cover, valid_loss_secret, valid_snr, valid_dtw = [], [], [], [], []
    iniv = time.time()
    with torch.no_grad():
        for i, data in enumerate(vd_loader):
            # Load batch
            host_audio : torch.Tensor = data[0].to(device)
            secret_audio : torch.Tensor = data[1].to(device)
            if transform == 'fourier': phase : torch.Tensor = data[2].to(device)
            # [1, 1024, 256] -> [1, 1, 1024, 256]
            secret_audio = secret_audio.unsqueeze(1) if transform == 'cosine' else secret_audio
            secret_audio_visual = secret_audio  # single-channel copy kept for plotting
            # [1, 1, 1024, 256] -> [1, 2, 1024, 256]
            secret_audio = secret_audio.repeat(1,2,1,1)
            # [1, 1024, 512] -> [1, 1, 1024, 512]
            host_audio = host_audio.unsqueeze(1) if transform == 'cosine' else host_audio

            # Forward pass: hide the secret, then reveal it
            containers, revealed = model(secret_audio, host_audio)

            # Reconstruct the container waveform for the DTW metric
            if transform == 'cosine':
                container_wav = isdct_torch(containers.squeeze(0).squeeze(0), frame_length=1024, frame_step=260, window=torch.hamming_window)
            elif transform == 'fourier':
                if on_phase:
                    container_wav = transform_constructor.inverse(host_audio.squeeze(1), containers.squeeze(1))
                else:
                    container_wav = transform_constructor.inverse(containers.squeeze(1), phase.squeeze(1))

            loss, loss_cover, loss_secret = StegoLoss(host_audio, containers, secret_audio, revealed)

            vd_snr_audio = SNR(
                host_audio,
                containers,
                phase=None if transform == 'cosine' else phase,
                transform=transform,
                transform_constructor=None if transform == 'cosine' else transform_constructor,
                on_phase=on_phase
            )

            # FIX: the original referenced dtw_loss unconditionally when appending
            # and printing, which raised NameError whenever dtw_criterion was
            # None. The metric now defaults to 0.0 in that case.
            dtw_val = 0.0
            if dtw_criterion is not None:
                if transform == 'cosine':
                    original_wav = isdct_torch(host_audio.squeeze(0).squeeze(0), frame_length=1024, frame_step=260, window=torch.hamming_window)
                elif transform == 'fourier':
                    original_wav = transform_constructor.inverse(host_audio.squeeze(1), phase.squeeze(1))
                dtw_loss = dtw_criterion(original_wav.cpu().unsqueeze(0), container_wav.cpu().unsqueeze(0))
                if transform == 'fourier': dtw_loss = dtw_loss[0]
                dtw_val = dtw_loss.detach().item()

            # Record per-batch metrics
            valid_loss.append(loss.detach().item())
            valid_loss_cover.append(loss_cover.detach().item())
            valid_loss_secret.append(loss_secret.detach().item())
            valid_snr.append(vd_snr_audio)
            valid_dtw.append(dtw_val)

            print(
				f'(#{i})[{np.round(time.time()-iniv,2)}s]\
				Valid Loss {loss.detach().item()},\
				host_error {loss_cover.detach().item()},\
				secret_error {loss_secret.detach().item()},\
				DTW {dtw_val}'
			)
            # Log a spectrogram comparison once per validation run (batch == cnt)
            if save_visualization and i == cnt:
                visualize_spectrogram(host_audio, containers, secret_audio_visual, revealed, epoch, i)

            # Cap validation at 501 batches to bound runtime
            if i >= 500: break

        avg_valid_loss = np.mean(valid_loss)
        avg_valid_loss_host = np.mean(valid_loss_cover)
        avg_valid_loss_secret = np.mean(valid_loss_secret)
        avg_valid_snr = np.mean(valid_snr)
        avg_valid_dtw = np.mean(valid_dtw)

        wandb.log({
            'valid_loss': avg_valid_loss,
            'valid_loss_host': avg_valid_loss_host,
            'valid_loss_secret': avg_valid_loss_secret,
            'valid_snr': avg_valid_snr,
        })

        print(f"Validation took {time.time() - iniv} seconds")

    # Free the per-batch metric lists before returning
    del valid_loss, valid_loss_cover, valid_loss_secret, valid_snr, valid_dtw
    gc.collect()

    return avg_valid_loss, avg_valid_loss_host, avg_valid_loss_secret, avg_valid_snr, avg_valid_dtw


if __name__ == '__main__':
    # args is already parsed at module scope; the original re-parsed it here,
    # which was redundant and has been removed.

    # Data loaders for the train/test splits.
    train_loader = loader(root_dir='E:/JasonWang/WavInWav/data', folder='train')
    test_loader = loader(root_dir='E:/JasonWang/WavInWav/data', folder='test')

    # Build the model for the requested transform domain.
    model : StegoUNet = StegoUNet(transform=args.transform)

    # Train. FIX: --add_dtw_term was parsed but never forwarded, so the CLI
    # flag had no effect contrary to its help text; it is now passed through.
    # NOTE(review): --beta is still unused here — confirm whether StegoLoss
    # should receive it.
    trained_model, avg_train_loss = train(model, train_loader, test_loader, lr = args.lr, 
                                          epochs = args.epochs, transform = args.transform,
                                          add_dtw_term = args.add_dtw_term)

    # Persist the final average training loss.
    final_results_path = os.path.join(results_folder, "final_results.txt")
    with open(final_results_path, "w") as file:
        file.write(f"Final Average Training Loss: {avg_train_loss}\n")

#  python ./train.py