# MPE CNN application (multi-parameter estimation for FMCW radar)
import sys
from typing import Dict
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from apps.fmcw.ias.mpe_cnn_model import MpeCnnModel
from apps.fmcw.ias.mpe_cnn_model_v2 import MpeCnnModelV2
from apps.fmcw.ias.mpe_ccnn_model import MpeCcnnModel
from dss.fmcw_mpe_dss import FmcwMpeDss
from apps.fmcw.conf.app_config import AppConfig as AF

class MpeCnnApp(object):
    """Training application for the FMCW multi-parameter-estimation CNN.

    Orchestrates model selection (real-valued vs. complex-valued network),
    a short warmup phase, the main training loop with checkpointing and
    early stopping, and periodic side-by-side prediction printouts.
    """

    # Shared state populated in startup() and read by the static methods.
    device = None        # torch.device used for all tensors
    threshold = 4.0      # squared-error threshold used as the "correct" criterion in test()
    y_min = None         # label lower bounds for denormalisation (from AppConfig)
    y_max = None         # label upper bounds for denormalisation (from AppConfig)

    def __init__(self):
        self.name = 'apps.fmcw.mpe_cnn_app.MpeCnnApp'

    @staticmethod
    def startup(params: Dict = None) -> None:
        """Entry point: build the model, warm it up, then train with early stopping.

        Args:
            params: configuration dict; key 'ann_dtype' selects the network
                (1 = real-valued V2 model, anything else = complex-valued model).
        """
        # BUGFIX: `params: Dict = {}` was a shared mutable default argument.
        if params is None:
            params = {}
        print(f'MPE CNN App v0.0.2')
        MpeCnnApp.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if params['ann_dtype'] == 1:
            print(f'实数神经网络... V2版')
            model = MpeCnnModelV2().to(MpeCnnApp.device)
        else:
            print(f'复数神经网络...')
            model = MpeCcnnModel().to(MpeCnnApp.device)
        print(model)
        MpeCnnApp.y_min = AF.Y_MIN.to(MpeCnnApp.device)
        MpeCnnApp.y_max = AF.Y_MAX.to(MpeCnnApp.device)
        loss_fn = nn.MSELoss()
        training_data = FmcwMpeDss()
        test_data = FmcwMpeDss()
        demo_data = FmcwMpeDss()
        train_dataloader = DataLoader(training_data, batch_size=16, shuffle=True)
        test_dataloader = DataLoader(test_data, batch_size=32, shuffle=True)
        # BUGFIX: was built from test_data, leaving demo_data unused.
        demo_dataloader = DataLoader(demo_data, batch_size=1, shuffle=True)
        # Warmup: a few epochs at a tiny learning rate to stabilise the weights.
        print(f'Warmup...')
        warmup_opt = torch.optim.AdamW(model.parameters(), lr=1e-8, betas=(0.9, 0.99), weight_decay=1e-2)
        warmup_epochs = 5
        for epoch in range(warmup_epochs):
            MpeCnnApp.train(train_dataloader, model, loss_fn, warmup_opt)
            MpeCnnApp.test(test_dataloader, model, loss_fn)
        print(f'Training...')
        # Define the main optimizer and a plateau-based LR scheduler.
        optimizer = torch.optim.AdamW(model.parameters(), lr=1e-7, betas=(0.9, 0.99), weight_decay=1e-2)
        scheduler = ReduceLROnPlateau(optimizer, 'min', threshold=1e-5, eps=1e-12)
        epochs = 2000000
        best_loss = sys.float_info.max
        improve_threshold = 0.000001
        cumulative_steps = 0     # epochs since the last meaningful improvement
        max_unimproves = 5000    # early-stopping patience, in epochs
        pt_fn = './work/ckpts/mpe_cnn/mpe_cnn.pt'
        for t in range(epochs):
            print(f"Epoch {t+1}\n-------------------------------")
            # BUGFIX: the scheduler was created but never passed through, so
            # ReduceLROnPlateau never stepped; train() already accepts it.
            loss = MpeCnnApp.train(train_dataloader, model, loss_fn, optimizer, scheduler)
            MpeCnnApp.test(test_dataloader, model, loss_fn)
            MpeCnnApp.compare_results(model, demo_dataloader)
            cumulative_steps += 1
            print(f'best_loss={best_loss}; vs loss={loss}; cumulative_steps={cumulative_steps};')
            if best_loss > loss and best_loss - loss > improve_threshold:
                # Checkpoint only on a meaningful improvement, then reset patience.
                torch.save(model, pt_fn)
                cumulative_steps = 0
                best_loss = loss
            if cumulative_steps > max_unimproves:
                print(f'Early Stopping!!!!!!')
                break
        print("Done!")
        print(f'^_^ The End! ^_^')

    @staticmethod
    def compare_results(model, dataloader):
        """Print one denormalised sample: ground truth vs. prediction for 3 targets.

        Assumes the model emits 12 normalised values (4 per target: presence,
        range, velocity, angle) scaled to [0, 1] — TODO confirm against model.
        """
        model.eval()
        with torch.no_grad():
            X, y = next(iter(dataloader))
            X = X.to(MpeCnnApp.device)
            y = y.to(MpeCnnApp.device)
            y_hat = model(X)
            # Map normalised outputs back to physical units via the AppConfig bounds.
            y_ = MpeCnnApp.y_min + y*(MpeCnnApp.y_max-MpeCnnApp.y_min)
            y_hat_ = MpeCnnApp.y_min + y_hat*(MpeCnnApp.y_max-MpeCnnApp.y_min)
            print(f'*****************************************************************************')
            print(f'目标1:{y_hat_[0][0]}, 距离：{y_[0][1]} vs {y_hat_[0][1]}; 速度：{y_[0][2]} vs {y_hat_[0][2]}; 角度：{y_[0][3]} vs {y_hat_[0][3]};')
            print(f'目标2:{y_hat_[0][4]}, 距离：{y_[0][5]} vs {y_hat_[0][5]}; 速度：{y_[0][6]} vs {y_hat_[0][6]}; 角度：{y_[0][7]} vs {y_hat_[0][7]};')
            print(f'目标3:{y_hat_[0][8]}, 距离：{y_[0][9]} vs {y_hat_[0][9]}; 速度：{y_[0][10]} vs {y_hat_[0][10]}; 角度：{y_[0][11]} vs {y_hat_[0][11]};')
            print(f'*****************************************************************************')

    @staticmethod
    def train(dataloader, model, loss_fn, optimizer, scheduler=None):
        """Run one training epoch and return the summed batch loss as a float.

        Args:
            dataloader: yields (X, y) batches.
            model: the network being trained.
            loss_fn: criterion (MSE).
            optimizer: stepped once per batch.
            scheduler: optional ReduceLROnPlateau, stepped once per epoch
                with the accumulated (summed, not averaged) epoch loss.

        Returns:
            float: total loss accumulated over the epoch.
        """
        size = len(dataloader.dataset)
        model.train()
        # BUGFIX: accumulate a plain float. The old `total_loss += loss` kept
        # every batch's autograd graph alive for the whole epoch (memory leak).
        total_loss = 0.0
        for batch, (X, y) in enumerate(dataloader):
            X, y = X.to(MpeCnnApp.device), y.to(MpeCnnApp.device)
            # Compute prediction error
            pred = model(X)
            loss = loss_fn(pred, y)
            total_loss += loss.item()
            # Backpropagation
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            if batch % 10 == 0:
                loss, current = loss.item(), (batch + 1) * len(X)
                print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")
        if scheduler is not None:
            scheduler.step(total_loss)
        return total_loss

    @staticmethod
    def test(dataloader, model, loss_fn):
        """Evaluate on a dataset; print average loss and an 'accuracy' figure."""
        size = len(dataloader.dataset)
        num_batches = len(dataloader)
        model.eval()
        test_loss, correct = 0, 0
        with torch.no_grad():
            for X, y in dataloader:
                X, y = X.to(MpeCnnApp.device), y.to(MpeCnnApp.device)
                pred = model(X)
                test_loss += loss_fn(pred, y).item()
                # NOTE(review): delta sums squared error over the WHOLE batch,
                # so `correct` increments at most once per batch; this was
                # probably meant per-sample (sum over dim=1). Behaviour kept
                # as-is pending confirmation.
                delta = torch.sum((y-pred)**2)
                correct += (delta < MpeCnnApp.threshold).type(torch.float).sum().item()
        test_loss /= num_batches
        correct /= size
        print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")