# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name:       main_aynet_das.py
   Project Name:    beamform_AYnet
   Author :         Chunshan YANG
   Date:            2025/1/31
   Device:          GTX2070
-------------------------------------------------
   Change Activity:
                   2025/1/31:
-------------------------------------------------
"""

import os
import time
import random
import logging

import torch
import torchvision
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import numpy as np
import scipy.stats as st
import skimage.metrics

from networks.model_aynet import AYNet
from utils.data_loader import ReconDataset

def update_lr(optimizer, lr):
    """Write a new learning rate into every parameter group of *optimizer*.

    Args:
        optimizer: a ``torch.optim`` optimizer whose ``param_groups`` are
            mutated in place.
        lr: the learning-rate value to assign to each group.
    """
    for group in optimizer.param_groups:
        group.update(lr=lr)


if __name__ == "__main__":

    logging.basicConfig(
        filename='E:/YCS_Temp/project/thesis_dataset/dataset/PA_DATA/app_0205.log',  # log file path
        level=logging.INFO,  # log level
        format='%(asctime)s - %(levelname)s - %(message)s')  # log format

    # Training configuration.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dataset_pathr = 'E:/YCS_Temp/project/thesis_dataset/dataset/PA_DATA/'
    modality = 'PA'
    learning_rate = 0.005
    batch_size = 4
    test_batch = 4
    start_epoch = 0
    loadcp = False  # resume from a saved checkpoint when True

    curr_lr = learning_rate

    # Each sample is (raw sensor data, reference image, DAS-beamformed image).
    train_dataset = ReconDataset(dataset_pathr, train=True, das=True, modality=modality)
    test_dataset = ReconDataset(dataset_pathr, train=False, das=True, modality=modality)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=test_batch, shuffle=True)

    # Model: Y-shaped network taking raw data and the DAS image as two inputs.
    model = AYNet(in_channels=1, up_mode='upsample', merge_mode='concat')
    model = nn.DataParallel(model)
    model = model.to(device)

    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    if loadcp:
        # map_location makes resuming work even when the checkpoint was saved
        # on GPU and we are now on CPU (or a different device).
        checkpoint = torch.load('reconstruction_Unet_2200.ckpt', map_location=device)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch'] - 1
        curr_lr = checkpoint['curr_lr']
        optimizer.load_state_dict(checkpoint['optimizer'])

    cudnn.benchmark = True
    total_step = len(train_loader)
    test_total_step = len(test_loader)

    def batch_metrics(target, prediction):
        """Return (mean SSIM, mean PSNR) over one batch of image tensors.

        Args:
            target: reference images, presumably shaped (batch, 1, H, W) —
                TODO confirm against ReconDataset.
            prediction: network output with the same shape as *target*.

        The data_range is taken from the target batch's min/max, matching the
        original script's convention.
        """
        target_np = target.detach().cpu().numpy().squeeze()
        pred_np = prediction.detach().cpu().numpy().squeeze()
        # A batch of size 1 squeezes down to a single 2-D image; restore the
        # batch axis so the per-sample loop below does not iterate over rows.
        if target_np.ndim == 2:
            target_np = target_np[None]
            pred_np = pred_np[None]
        data_range = target_np.max() - target_np.min()
        ssim_sum = 0.0
        psnr_sum = 0.0
        for idx in range(target_np.shape[0]):
            ssim_sum += skimage.metrics.structural_similarity(
                target_np[idx], pred_np[idx], data_range=data_range)
            psnr_sum += skimage.metrics.peak_signal_noise_ratio(
                target_np[idx], pred_np[idx], data_range=data_range)
        n_images = target_np.shape[0]
        return ssim_sum / n_images, psnr_sum / n_images

    epoch = start_epoch
    print("start")
    print('train_data :{}'.format(train_dataset.__len__()))
    print('test_data :{}'.format(test_dataset.__len__()))
    end = time.time()

    # Train
    while True:
        model.train()  # re-enable train-mode layers after any validation pass
        train_losses = []
        train_ssims = []
        train_psnrs = []
        for batch_idx, (rawdata, reimage, bfimg) in enumerate(train_loader):
            rawdata = rawdata.to(device)
            reimage = reimage.to(device)
            bfimg = bfimg.to(device)

            outputs = model(rawdata, bfimg)
            loss = criterion(outputs, reimage)
            train_losses.append(loss.detach().cpu().numpy())

            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            ssim, psnr = batch_metrics(reimage, outputs)
            train_ssims.append(ssim)
            train_psnrs.append(psnr)

            batch_time = time.time() - end
            end = time.time()

        # Report epoch averages; the original logged only the last batch's
        # loss/SSIM/PSNR, which misrepresented the epoch.
        avg_train_loss = np.mean(train_losses)
        ssim = np.mean(train_ssims)
        psnr = np.mean(train_psnrs)
        print('Epoch [{}], Start [{}], Step [{}/{}], Loss: {:.4f},Time:[{:.4f}], SSIM: {:.4f}, PSNR: {:.4f}'.format(
            epoch + 1, start_epoch, batch_idx + 1, total_step, avg_train_loss, batch_time, ssim, psnr))
        logging.info('training: Epoch [{}], Start [{}], Step [{}/{}], Loss: {:.4f},Time:[{:.4f}], SSIM: {:.4f}, PSNR: {:.4f}'.format(
            epoch + 1, start_epoch, batch_idx + 1, total_step, avg_train_loss, batch_time, ssim, psnr))

        # Validate every 10 epochs.
        if (epoch + 1) % 10 == 0:
            model.eval()  # put Dropout/BatchNorm-style layers in inference mode
            with torch.no_grad():
                validate_losses = []
                val_ssims = []
                val_psnrs = []
                for batch_idx, (rawdata, reimage, bfimg) in enumerate(test_loader):
                    rawdata = rawdata.to(device)
                    reimage = reimage.to(device)
                    bfimg = bfimg.to(device)
                    outputs = model(rawdata, bfimg)
                    validation_loss = criterion(outputs, reimage)
                    validate_losses.append(validation_loss.detach().cpu().numpy())

                    v_ssim, v_psnr = batch_metrics(reimage, outputs)
                    val_ssims.append(v_ssim)
                    val_psnrs.append(v_psnr)
                avg_validate_loss = np.mean(validate_losses)
                logging.info('validation: Epoch [{}],  Validation Loss: {:.4f}, SSIM: {:.4f}, PSNR: {:.4f}'.format(
                    epoch + 1, avg_validate_loss, np.mean(val_ssims), np.mean(val_psnrs)))

        # Decay learning rate every 50 epochs.
        if (epoch + 1) % 50 == 0:
            curr_lr /= 5
            update_lr(optimizer, curr_lr)

        # Checkpoint every 100 epochs.
        if (epoch + 1) % 100 == 0:
            torch.save({'epoch': epoch + 1,
                        'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'curr_lr': curr_lr},
                       'E:/YCS_Temp/project/thesis_dataset/dataset/PA_DATA/bfrec_ynet_3cat_das_{}.ckpt'.format(epoch + 1))
            print('Save ckpt successfully!')
        epoch = epoch + 1