import torch
import numpy as np
from tqdm import tqdm
from torchmetrics import R2Score
from src.dataset import PPDataset
from src.utils import *


# Root directory of the DiTing 50 Hz dataset (machine-local path; adjust per host).
dataset_path = "E:/RealSeisData/Diting50hz/"


class DistanceLoss(torch.nn.Module):
    """Loss driven by the offset between predicted and target argmax positions.

    Takes the mean normalized |argmax(x) - argmax(y)| over the batch and
    squashes it through a steep, shifted tanh so the result lies in (0, 1).

    NOTE(review): argmax is not differentiable, so this loss yields no
    gradient w.r.t. the network output -- confirm intended usage.
    """

    def __init__(self, wp=0.5):
        super().__init__()
        # P/S weights are stored but currently unused by forward().
        self.wp = wp
        self.ws = 1 - wp
        self.act = torch.nn.Tanh()

    def forward(self, x, y):
        # x, y: (batchsize, 2, len) probability traces.
        offset = torch.argmax(x, 2) - torch.argmax(y, 2)
        mean_dist = torch.mean(torch.abs(offset) / x.shape[2])
        # Steep sigmoid-like mapping centred at a normalized distance of 0.045.
        return (self.act((mean_dist - 0.045) * 100) + 1) / 2

class BCELoss_simple(torch.nn.Module):
    """Plain binary cross-entropy, hand-rolled with an epsilon guarding log(0)."""

    def __init__(self) -> None:
        super().__init__()

    def forward(self, x, d):
        # x: predicted probabilities, d: targets in [0, 1].
        eps = 1e-9
        term_pos = d * torch.log(x + eps)
        term_neg = (1 - d) * torch.log(1 - x + eps)
        return torch.mean(-(term_pos + term_neg))
    
class CELoss(torch.nn.Module):
    """Cross-entropy term only: mean(-d * log(x)), with an epsilon guarding log(0)."""

    def __init__(self) -> None:
        super().__init__()

    def forward(self, x, d):
        # x: predicted probabilities, d: targets.
        return torch.mean(-d * torch.log(x + 1e-9))

class WeighedBCELoss(torch.nn.Module):
    """Binary cross-entropy scaled by a linear weight term (1 + w*(yhat - y)).

    NOTE(review): per the original author's comments, y is the ground truth
    and yhat the prediction, yet log is applied to y. BCE normally takes log
    of the prediction -- verify the argument order at call sites.
    """

    def __init__(self, w=0.3) -> None:
        super().__init__()
        self.w = w

    def forward(self, y, yhat):
        # y: ground truth, yhat: prediction (per original author's comments).
        eps = 1e-9
        bce = yhat * torch.log(y + eps) + (1 - yhat) * torch.log(1 - y + eps)
        weight = 1 + self.w * (yhat - y)
        return torch.mean(-bce * weight)
    
class WeighedBCELossV2(torch.nn.Module):
    """Binary cross-entropy scaled by (|y - yhat| + w*(y - yhat)).

    With e = y - yhat, the multiplier is (1 + w)*|e| for e > 0 and
    (1 - w)*|e| for e < 0, penalising the two error directions asymmetrically.

    NOTE(review): as in WeighedBCELoss, log is applied to y (ground truth per
    the original comments) -- verify argument order at call sites.
    """

    def __init__(self, w=0.8) -> None:
        super().__init__()
        self.w = w

    def forward(self, y, yhat):
        # y: ground truth, yhat: prediction (per original author's comments).
        eps = 1e-9
        bce = yhat * torch.log(y + eps) + (1 - yhat) * torch.log(1 - y + eps)
        err = y - yhat
        weight = torch.abs(err) + self.w * err
        return torch.mean(-bce * weight)
    
class ModelAnalyzer:
    """Evaluate a trained P/S phase-picking model on a PPDataset split.

    A prediction is a (2, len) array of P and S probability traces; a pick is
    accepted when its probability reaches `thres`, and counted as a true
    positive when it lands within `maxdist` samples of the label argmax.
    """

    def __init__(self, dataset_csv, thres=0.3, maxdist=25, methodname='fcy2', dlen=6000) -> None:
        """
        Args:
            dataset_csv: catalog CSV file name under `dataset_path`.
            thres: probability threshold for accepting a pick.
            maxdist: max |prediction - label| (samples) for a true positive.
            methodname: preprocessing tag forwarded to PPDataset.
            dlen: waveform length forwarded to PPDataset.
        """
        print('[ModelAnalyzer]\ndataset_csv:', dataset_csv)
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        # NOTE(review): keyword 'methodmame' looks like a typo for
        # 'methodname'; left unchanged because PPDataset's actual keyword
        # is not visible from this file -- confirm before renaming.
        self.test_dataset = PPDataset(
            dataset_path, dataset_csv, methodmame=methodname, dlen=dlen)
        self.catalog = self.test_dataset.get_catalog()
        self.t = thres
        self.d = maxdist
        self.methodname = methodname

    @staticmethod
    def _calc_f1(r, p):
        """Harmonic mean of recall and precision.

        Returns 0.0 when both are zero (the original raised
        ZeroDivisionError in that case).
        """
        return 2 * p * r / (p + r) if (p + r) > 0 else 0.0

    def _load_model(self, model_path):
        """Load a pickled model and switch it to evaluation mode.

        eval() disables dropout / batch-norm updates during scoring; the
        original evaluated in train mode.
        """
        # NOTE(review): torch.load of a full module object; on torch>=2.6
        # the weights_only default may require weights_only=False here.
        model = torch.load(model_path)
        model.eval()
        return model

    def _predict(self, model, i):
        """Run the model on sample i.

        Returns:
            predict_y: (2, len) numpy array of P/S probability traces.
            p_label, s_label: argmax indices of the two label traces.
        """
        d = self.test_dataset[i]
        p_label, s_label = np.argmax(d[1], axis=1)
        x = np.expand_dims(d[0], 0).copy()
        x = torch.from_numpy(x).float().to(self.device)
        with torch.no_grad():  # inference only; skip autograd bookkeeping
            predict_y = model(x)[:, :2]
        return predict_y.squeeze(0).cpu().numpy(), p_label, s_label

    def _report(self, mode, datanum, tp_p, fp_p, tp_s, fp_s):
        """Compute, print, and return recall/precision/F1 for P and S picks.

        Recall uses `datanum` as denominator: every sample is assumed to
        contain exactly one P and one S arrival. Denominators are guarded so
        an empty detection set yields 0.0 instead of ZeroDivisionError.
        """
        p_recall = tp_p / datanum if datanum else 0.0
        p_precision = tp_p / (tp_p + fp_p) if (tp_p + fp_p) else 0.0
        s_recall = tp_s / datanum if datanum else 0.0
        s_precision = tp_s / (tp_s + fp_s) if (tp_s + fp_s) else 0.0
        p_f1 = self._calc_f1(p_recall, p_precision)
        s_f1 = self._calc_f1(s_recall, s_precision)
        print('\n[threshold={}, maxdist={}, prepro={}, {}]'.format(
            self.t, self.d, self.methodname, mode))
        print('   recall / precision / f1')
        print('p: {:.4f}% {:.4f}% {:.4f}'.format(
            p_recall*100, p_precision*100, p_f1))
        print('s: {:.4f}% {:.4f}% {:.4f}'.format(
            s_recall*100, s_precision*100, s_f1))
        return p_recall, p_precision, p_f1, s_recall, s_precision, s_f1

    def analyse_findmax(self, model_path):
        """Evaluate using the global argmax of each probability trace as the pick."""
        model = self._load_model(model_path)
        # //10: quick-test subsample of the dataset (kept from the original).
        datanum = len(self.test_dataset) // 10
        tp_p = fp_p = tp_s = fp_s = 0
        p_residual = []  # collected for offline residual inspection
        s_residual = []
        for i in tqdm(range(datanum), ncols=0):
            predict_y, p_label, s_label = self._predict(model, i)
            p_pred, s_pred = np.argmax(predict_y, axis=1)
            if predict_y[0][p_pred] >= self.t:
                # P pick accepted; true positive iff within maxdist samples.
                p_residual.append(p_pred - p_label)
                if abs(p_pred - p_label) <= self.d:
                    tp_p += 1
                else:
                    fp_p += 1
            if predict_y[1][s_pred] >= self.t:
                s_residual.append(s_pred - s_label)
                if abs(s_pred - s_label) <= self.d:
                    tp_s += 1
                else:
                    fp_s += 1
        return self._report('max', datanum, tp_p, fp_p, tp_s, fp_s)

    def analyse_findpeak(self, model_path):
        """Evaluate using detect_peaksV2 picks (possibly several per trace)."""
        model = self._load_model(model_path)
        # //10: quick-test subsample of the dataset (kept from the original).
        datanum = len(self.test_dataset) // 10
        tp_p = fp_p = tp_s = fp_s = 0
        p_residual = []
        s_residual = []
        pbar = tqdm(range(datanum), ncols=0, mininterval=1)
        pbar.set_description("analyse")
        # presumably 1.5 s at 50 Hz sampling (the dataset folder name
        # suggests 50 Hz) -- same association window as the benchmark;
        # TODO confirm.
        window = int(1.5 * 50)
        for i in pbar:
            predict_y, p_label, s_label = self._predict(model, i)
            p_pred_list, p_prob = detect_peaksV2(
                predict_y[0], self.t, self.d*2)
            s_pred_list, s_prob = detect_peaksV2(
                predict_y[1], self.t, self.d*2)
            for p_pred in p_pred_list:
                # only peaks inside the benchmark window are scored at all
                if abs(p_pred - p_label) < window:
                    p_residual.append(p_pred - p_label)
                    if abs(p_pred - p_label) <= self.d:
                        tp_p += 1
                    else:
                        fp_p += 1
            for s_pred in s_pred_list:
                if abs(s_pred - s_label) < window:
                    s_residual.append(s_pred - s_label)
                    if abs(s_pred - s_label) <= self.d:
                        tp_s += 1
                    else:
                        fp_s += 1
        return self._report('peak', datanum, tp_p, fp_p, tp_s, fp_s)


if __name__ == '__main__':
    # Quick sanity check: run WeighedBCELoss on random tensors and print it.
    l = WeighedBCELoss()
    a = torch.rand(10, requires_grad=True)
    b = torch.rand(10, requires_grad=True)
    loss = l(a, b)
    print(loss)
    # NOTE(review): exit() below makes the analyzer run unreachable -- this
    # looks like a debugging leftover; remove it to re-enable evaluation.
    exit()
    analyzer = ModelAnalyzer('DiTing330km_validation.csv', 0.2, 25, 'fcy2')
    model_fname = "./"+"model/unetstft2_231101154751_ep16.pth"
    # model_fname = "./"+"model/unetstft_230625162035_ep9.pth"
    # p_recall, p_precision, p_f1, p_residual, s_recall, s_precision, s_f1, s_residual = analyzer.analyse_findpeak(
    #     model_fname)
    p_recall, p_precision, p_f1, s_recall, s_precision, s_f1 = analyzer.analyse_findpeak(
        model_fname)
    # plt.subplot(121)
    # plt.hist(p_residual)
    # plt.subplot(122)
    # plt.hist(s_residual)
    # plt.show()
