# MPE CNN应用
import sys
import time
from typing import Dict
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from apps.fmcw.ias.mpe_cnn_model import MpeCnnModel
from apps.fmcw.ias.mpe_cnn_model_v3 import MpeCnnModelV3
from apps.fmcw.ias.mpe_ccnn_model import MpeCcnnModel
from dss.fmcw_mpe_dss import FmcwMpeDss
from dss.fmcw_mpe_dss_v3 import FmcwMpeDssV3
from apps.fmcw.conf.app_config import AppConfig as AF

# nohup python -u -m apps.fmcw.fmcw_main --run_mode 3 --ann_dtype 1 > ./work/logs/v3.log 2>&1 &

class MpeCnnAppV3(object):
    """Training / evaluation driver for the V3 multi-target parameter
    estimation (MPE) CNN on FMCW radar data.

    The model maps a radar cube ``X`` to two heads:

    * ``y_hat_p``   — per-anchor target-presence probability (BCE loss);
    * ``y_hat_rvt`` — per-anchor (range, velocity, azimuth) offsets that are
      added to ``y_rvt_base`` to obtain absolute values (MSE loss).

    NOTE(review): exact tensor shapes come from ``FmcwMpeDssV3`` /
    ``MpeCnnModelV3`` which are outside this file — the indexing below
    implies ``y_p`` is (B, 1, H, W) and ``y_rvt`` is (B, 3, H, W); confirm
    against those classes.
    """

    # Shared run-time state, initialised by the entry points below.
    device = None      # torch.device all tensors and models are moved to
    threshold = 4.0    # squared-error budget used by test() to count a hit
    y_min = None       # per-channel lower bounds reshaped for broadcasting
    y_max = None       # per-channel upper bounds reshaped for broadcasting

    def __init__(self):
        # Legacy identifier carried over from the V1 app.
        self.name = 'apps.fmcw.mpe_cnn_app.MpeCnnApp'

    @staticmethod
    def t001() -> None:
        """Smoke test: build a fresh model and dump its predictions for one demo batch."""
        print(f'测试程序 v0.0.1')
        MpeCnnAppV3.device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')
        model = MpeCnnModelV3().to(MpeCnnAppV3.device)
        # Reshape the per-channel bounds (channels 1..3) to (1, C, 1, 1, 1)
        # so they broadcast over batched prediction tensors.
        MpeCnnAppV3.y_min = AF.Y_MIN[1:4].unsqueeze(0).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).to(MpeCnnAppV3.device)
        print(f'### y_min: {MpeCnnAppV3.y_min.shape}; ?????')
        MpeCnnAppV3.y_max = AF.Y_MAX[1:4].unsqueeze(0).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).to(MpeCnnAppV3.device)
        # BUG FIX: previously printed y_min's shape under the "y_max" label.
        print(f'### y_max: {MpeCnnAppV3.y_max.shape}; ?????')
        training_data = FmcwMpeDssV3()
        test_data = FmcwMpeDssV3()
        demo_data = FmcwMpeDssV3()
        train_dataloader = DataLoader(training_data, batch_size=16, shuffle=True)
        test_dataloader = DataLoader(test_data, batch_size=32, shuffle=True)
        # BUG FIX: the demo loader used to wrap test_data instead of demo_data.
        demo_dataloader = DataLoader(demo_data, batch_size=1, shuffle=True)
        MpeCnnAppV3.compare_results(model, demo_dataloader)

    @staticmethod
    def t002() -> None:
        """Inspect the current model on one demo sample: print the ground-truth
        targets, run a forward pass, and report both loss terms."""
        print(f'测试当前训练结果 v001')
        # FmcwMpeDssV3.prepare_rsp_yolo_ds_v3()
        MpeCnnAppV3.device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')
        AF.Y_MIN = AF.Y_MIN.to(MpeCnnAppV3.device)
        AF.Y_MAX = AF.Y_MAX.to(MpeCnnAppV3.device)
        model = MpeCnnModelV3()
        # To evaluate a trained checkpoint instead of a fresh model:
        # model = torch.load('./work/ckpts/mpe_cnn/mpe_cnn_v3.pt', weights_only=False)
        model = model.to(MpeCnnAppV3.device)
        model.eval()
        demo_data = FmcwMpeDssV3()
        demo_dataloader = DataLoader(demo_data, batch_size=1, shuffle=True)
        X, y_p, y_rvt, y_rvt_base = next(iter(demo_dataloader))
        X = X.to(MpeCnnAppV3.device)
        y_p = y_p.to(MpeCnnAppV3.device)
        y_rvt = y_rvt.to(MpeCnnAppV3.device)
        y_rvt_base = y_rvt_base.to(MpeCnnAppV3.device)
        print(f'### X: {X.shape} {X.dtype}; y_p: {y_p.shape}; y_rvt: {y_rvt.shape}; y_rvt_base: {y_rvt_base.shape};')
        # List the ground-truth targets: anchors whose presence label > 0.006.
        b_i = 0
        nz_idxs = torch.nonzero(y_p[b_i] > 0.006, as_tuple=False)
        idx = 1
        y_rvt += y_rvt_base  # de-normalise: stored offsets + anchor base
        for nzi in nz_idxs:
            target = y_rvt[b_i,:,nzi[1],nzi[2]]
            print(f'目标{idx}: {target[0]}, {target[1]}, {target[2]};')
            idx += 1
        y_hat_p, y_hat_rvt = model(X)
        print(f'y_hat_p: {y_hat_p.shape}; y_hat_rvt: {y_hat_rvt.shape}')
        prop_loss = nn.BCELoss()  # anchor target-presence loss
        regn_loss = nn.MSELoss()  # range / velocity / azimuth regression loss
        # BUG FIX: nn.BCELoss takes (input, target); the arguments were
        # swapped here, feeding the hard 0/1 labels through log() instead of
        # the predicted probabilities (cf. the calls in train()/test()).
        l1 = prop_loss(y_hat_p, y_p)
        # BUG FIX: y_rvt was already de-normalised above, so adding
        # y_rvt_base again double-counted the base for the ground truth.
        l2 = regn_loss(y_rvt, y_hat_rvt + y_rvt_base)
        print(f'### l1: {l1}; l2: {l2};  {type(l1)}; ????')
        MpeCnnAppV3.compare_results(model=model, dataloader=demo_dataloader)

    @staticmethod
    def t003() -> None:
        """Probe BCELoss behaviour when predictions saturate at 1 but targets are 0."""
        sigmoid = nn.Sigmoid()
        bce = nn.BCELoss(reduction='sum')
        # Huge positive logits saturate sigmoid() at exactly 1.0; BCELoss
        # clamps log(0) internally, so the summed loss stays finite.
        # (renamed from `input`, which shadowed the builtin)
        logits = torch.ones(16, 1, 256, 88, requires_grad=True) * 88888888888
        target = torch.zeros(16, 1, 256, 88, requires_grad=False)
        output = bce(sigmoid(logits), target)
        print(f'output: {output};')


    @staticmethod
    def startup(params: Dict = None) -> None:
        """Entry point: build the model, run a short warmup, then train with
        checkpointing and early stopping.

        Args:
            params: run options. ``params['ann_dtype'] == 1`` selects the
                real-valued network; anything else aborts (the complex-valued
                network is not implemented yet).
        """
        # BUG FIX: avoid the shared mutable default argument ``{}``.
        params = {} if params is None else params
        print(f'MPE CNN App v3 v0.0.3')
        i_debug = 10  # set to 1 to run the t003() probe instead of training
        if i_debug == 1:
            MpeCnnAppV3.t003()
            return
        MpeCnnAppV3.device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')
        if params['ann_dtype'] == 1:
            print(f'实数神经网络... V3版')
            model = MpeCnnModelV3().to(MpeCnnAppV3.device)
        else:
            print(f'复数神经网络...未实现！')
            return
        print(model)
        # Broadcastable per-channel bounds (channels 1..3), kept for the
        # (commented-out) de-normalisation steps elsewhere.
        MpeCnnAppV3.y_min = AF.Y_MIN[1:4].unsqueeze(0).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).to(MpeCnnAppV3.device)
        MpeCnnAppV3.y_max = AF.Y_MAX[1:4].unsqueeze(0).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).to(MpeCnnAppV3.device)
        prop_loss = nn.BCELoss(reduction='sum')  # anchor target-presence loss
        regn_loss = nn.MSELoss(reduction='sum')  # range / velocity / azimuth regression loss
        training_data = FmcwMpeDssV3()
        test_data = FmcwMpeDssV3()
        demo_data = FmcwMpeDssV3()
        train_dataloader = DataLoader(training_data, batch_size=16, shuffle=True)
        test_dataloader = DataLoader(test_data, batch_size=32, shuffle=True)
        # BUG FIX: the demo loader used to wrap test_data instead of demo_data.
        demo_dataloader = DataLoader(demo_data, batch_size=1, shuffle=True)
        # Warmup: a few epochs with a tiny regression-head LR to stabilise.
        print(f'Warmup...')
        warmup_rvt_opt = torch.optim.AdamW(model.parameters(), lr=1e-7, betas=(0.9, 0.99), weight_decay=1e-2)
        warmup_rvt_scheduler = ReduceLROnPlateau(warmup_rvt_opt, 'min', threshold=1e-4, eps=1e-8)
        warmup_p_opt = torch.optim.AdamW(model.parameters(), lr=1e-3, betas=(0.9, 0.99), weight_decay=1e-2)
        warmup_p_scheduler = ReduceLROnPlateau(warmup_p_opt, 'min', threshold=1e-4, eps=1e-8)
        warmup_epochs = 5
        for epoch in range(warmup_epochs):
            MpeCnnAppV3.train(train_dataloader, model, prop_loss, regn_loss, warmup_p_opt, warmup_rvt_opt, p_scheduler=warmup_p_scheduler, rvt_scheduler=warmup_rvt_scheduler)
        print(f'Training...')
        # Main loop: separate optimisers/schedulers per head (only the rvt
        # optimiser actually steps at the moment — see train()).
        p_optimizer = torch.optim.AdamW(model.parameters(), lr=1e-6, betas=(0.9, 0.99), weight_decay=1e-2)
        p_scheduler = ReduceLROnPlateau(p_optimizer, 'min', threshold=1e-4, eps=1e-8)
        rvt_optimizer = torch.optim.AdamW(model.parameters(), lr=1e-2, betas=(0.9, 0.99), weight_decay=1e-2)
        rvt_scheduler = ReduceLROnPlateau(rvt_optimizer, 'min', threshold=1e-4, eps=1e-8)
        epochs = 2000000  # effectively "until early stopping"
        best_loss = sys.float_info.max
        improve_threshold = 0.000001  # minimum decrease that counts as progress
        cumulative_steps = 0          # epochs since the last improvement
        max_unimproves = 5000
        pt_fn = './work/ckpts/mpe_cnn/mpe_cnn_v3.pt'
        for t in range(epochs):
            print(f"Epoch {t+1}\n-------------------------------")
            loss = MpeCnnAppV3.train(train_dataloader, model, prop_loss, regn_loss, p_optimizer, rvt_optimizer, p_scheduler=p_scheduler, rvt_scheduler=rvt_scheduler)
            MpeCnnAppV3.test(test_dataloader, model, prop_loss, regn_loss)
            cumulative_steps += 1
            print(f'best_loss={best_loss}; vs loss={loss}; cumulative_steps={cumulative_steps};')
            MpeCnnAppV3.compare_results(model=model, dataloader=test_dataloader)
            if best_loss > loss:
                if best_loss - loss > improve_threshold:
                    # Checkpoint the full model on any real improvement.
                    torch.save(model, pt_fn)
                    cumulative_steps = 0
                    best_loss = loss
            if cumulative_steps > max_unimproves:
                # BUG FIX: corrected the "Earlly" typo in the log message.
                print(f'Early Stopping!!!!!!')
                break
        print("Done!")
        print(f'^_^ The End! ^_^')

    @staticmethod
    def compare_results(model, dataloader):
        """Print the detected targets (probability > 0.6) for one batch.

        Bails out of the whole report as soon as any batch element looks
        unconverged — either no detections at all or an implausible flood
        (> 30) of them.
        """
        model.eval()
        AF.Y_MIN = AF.Y_MIN.to(MpeCnnAppV3.device)
        AF.Y_MAX = AF.Y_MAX.to(MpeCnnAppV3.device)
        with torch.no_grad():
            X, y_p, y_rvt, y_rvt_base = next(iter(dataloader))
            X = X.to(MpeCnnAppV3.device)
            y_p, y_rvt, y_rvt_base = y_p.to(MpeCnnAppV3.device), y_rvt.to(MpeCnnAppV3.device), y_rvt_base.to(MpeCnnAppV3.device)
            y_hat_p, y_hat_rvt = model(X)
            # De-normalise both truth and prediction: offsets + anchor base.
            y_rvt += y_rvt_base
            y_hat_rvt += y_rvt_base
            print(f'*****************************************************************************')
            for b_i in range(y_p.shape[0]):
                nz_idxs = torch.nonzero(y_hat_p[b_i] > 0.6, as_tuple=False)
                if nz_idxs.shape[0] < 1:
                    print(f'训练早期未收敛（未检测到物体）！nz_idxs: {nz_idxs.shape};')
                    return
                if nz_idxs.shape[0] > 30:
                    print(f'训练早期未收敛（检测到物体过多）！nz_idxs: {nz_idxs.shape};')
                    return
                idx = 1
                for nzi in nz_idxs:
                    target = y_hat_rvt[b_i,:,nzi[1],nzi[2]]
                    print(f'batch_{b_i}: 目标{idx}: 概率={y_hat_p[b_i][0][nzi[1]][nzi[2]]}, 距离＝{target[0]}, 速度＝{target[1]}, 水平到达角＝{target[2]};')
                    idx += 1
            print(f'*****************************************************************************')

    @staticmethod
    def train(dataloader, model, prop_loss, regn_loss, p_optimizer, rvt_optimizer, p_scheduler=None, rvt_scheduler=None):
        """Run one training epoch; return the summed epoch loss as a float.

        Both heads are currently optimised jointly through ``rvt_optimizer``
        with a combined loss of ``100 * presence + regression``;
        ``p_optimizer``/``p_scheduler`` stay in the signature for the
        earlier (disabled) two-optimiser scheme.
        """
        # NOTE(review): anomaly detection is a debugging aid and slows
        # training considerably — consider disabling it once stable.
        torch.autograd.set_detect_anomaly(True)
        size = len(dataloader.dataset)
        model.train()
        total_loss = torch.tensor([0.0]).to(MpeCnnAppV3.device)
        for batch, (X, y_p, y_rvt, y_rvt_base) in enumerate(dataloader):
            X, y_p, y_rvt, y_rvt_base = X.to(MpeCnnAppV3.device), y_p.to(MpeCnnAppV3.device), y_rvt.to(MpeCnnAppV3.device), y_rvt_base.to(MpeCnnAppV3.device)
            y_hat_p, y_hat_rvt = model(X)
            loss_p = prop_loss(y_hat_p, y_p)
            # Compare absolute values (offset + anchor base) on both sides.
            loss_rvt = regn_loss(y_hat_rvt + y_rvt_base, y_rvt + y_rvt_base)
            rvt_optimizer.zero_grad()
            # Weight the presence head up so the (summed) regression term
            # does not drown it out.
            loss = 100*loss_p + loss_rvt
            # BUG FIX: accumulate a detached copy — accumulating the live
            # tensor kept every batch's autograd graph alive for the whole
            # epoch (unbounded memory growth).
            total_loss += loss.detach()
            loss.backward()
            rvt_optimizer.step()
            if batch % 10 == 0:
                loss_val, current = loss.item(), (batch + 1) * len(X)
                print(f"loss: {loss_val:>7f}  [{current:>5d}/{size:>5d}]; P: {loss_p:>7f}; RVT: {loss_rvt:>7f};")
        if rvt_scheduler is not None:
            rvt_scheduler.step(total_loss)
        return total_loss.cpu().item()

    @staticmethod
    def test(dataloader, model, prop_loss, regn_loss):
        """Evaluate over the whole dataloader; print average loss and accuracy.

        NOTE(review): ``correct`` counts whole batches whose total squared
        error is under ``threshold`` yet is divided by the sample count, so
        the printed "Accuracy" is only a rough convergence indicator.
        """
        size = len(dataloader.dataset)
        num_batches = len(dataloader)
        model.eval()
        test_loss, correct = 0, 0
        with torch.no_grad():
            for X, y_p, y_rvt, y_rvt_base in dataloader:
                X, y_p, y_rvt, y_rvt_base = X.to(MpeCnnAppV3.device), y_p.to(MpeCnnAppV3.device), y_rvt.to(MpeCnnAppV3.device), y_rvt_base.to(MpeCnnAppV3.device)
                y_hat_p, y_hat_rvt = model(X)
                loss_p = prop_loss(y_hat_p, y_p)
                loss_rvt = regn_loss(y_hat_rvt + y_rvt_base, y_rvt + y_rvt_base)
                loss = loss_p + loss_rvt
                test_loss += loss.item()
                # Per-batch squared error across both heads.
                delta = torch.sum((y_hat_p-y_p)**2) + torch.sum((y_hat_rvt-y_rvt)**2)
                correct += (delta < MpeCnnAppV3.threshold).type(torch.float).sum().item()
        test_loss /= num_batches
        correct /= size
        print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")