import math
import numpy as np
import statsmodels.api as sm
import pandas as pd
import torch
import warnings
import folium
from folium.plugins import HeatMap
import branca
from sklearn.metrics import (accuracy_score,
                             mean_squared_error,
                             precision_score,
                             f1_score,
                             r2_score,
                             recall_score,
                             roc_auc_score)
from sklearn.linear_model import LassoLarsIC
#from memory_profiler import profile
import gc
import os
import torch
from torch.utils.checkpoint import checkpoint
from typing import Callable

def block_matrix_multiply(A, B, block_size):
    """
    Batched matrix multiplication computed tile-by-tile.

    Numerically equivalent to ``torch.matmul(A, B)`` for 3-D tensors, but
    accumulates the product in ``block_size``-sized tiles so each partial
    product stays small.

    :param A: tensor of shape (batch, m, n)
    :param B: tensor of shape (batch, n, q)
    :param block_size: tile edge length; need not divide m, n or q
    :return: tensor C of shape (batch, m, q) equal to A @ B
    """
    # Problem dimensions.
    batch_size, m, n = A.shape
    _, p, q = B.shape

    # Inner dimensions must agree for the product to exist.
    assert n == p, f"inner dimensions mismatch: {n} != {p}"

    # Allocate the result with A's dtype and device; the previous version used
    # the global defaults, which broke non-float32 and CUDA inputs (in-place
    # accumulation of a double/CUDA product into a float32/CPU buffer fails).
    C = torch.zeros((batch_size, m, q), dtype=A.dtype, device=A.device)

    for b in range(batch_size):
        for i in range(0, m, block_size):
            i_end = min(i + block_size, m)  # hoisted: invariant in j and k
            for j in range(0, q, block_size):
                j_end = min(j + block_size, q)  # hoisted: invariant in k
                for k in range(0, n, block_size):
                    k_end = min(k + block_size, n)
                    # Accumulate the contribution of the k-slab to tile (i, j).
                    C[b, i:i_end, j:j_end] += torch.matmul(
                        A[b, i:i_end, k:k_end], B[b, k:k_end, j:j_end])

    return C


def SSR(y_pred, y_data):
    """Sum of squared residuals between predictions and observations."""
    residual = y_pred - y_data
    return torch.sum(residual * residual)

def Hat_com(x_data):
    """Return (X^T X)^{-1} X^T, the OLS normal-equation component."""
    xt = x_data.transpose(-2, -1)
    gram_inv = torch.linalg.inv(torch.mm(xt, x_data))
    return torch.mm(gram_inv, xt)

def OLS_hat(x_data, hat_com):
    """
    Left-multiply ``hat_com`` by the design matrix; with ``hat_com`` produced
    by ``Hat_com`` this yields the OLS hat matrix H = X (X^T X)^{-1} X^T.
    """
    return torch.mm(x_data, hat_com)

def _t_multi(__x_data, __n):
    """
    Tile the design matrix once per sample and form per-sample Gram matrices.

    :param __x_data: (n, k) design matrix X
    :param __n: number of samples n
    :return: tuple ``(A, x_data_tile_t)`` where ``A[b] = X^T X`` and
             ``x_data_tile_t[b] = X^T`` for every batch index b
    """
    tiled = __x_data.repeat(__n, 1).view(__n, __n, -1)  # (n, n, k): n copies of X
    tiled_t = tiled.transpose(1, 2)                     # (n, k, n): n copies of X^T
    gram = torch.bmm(tiled_t, tiled)                    # (n, k, k)
    return gram, tiled_t

def __inverse_multi(A, x_data_tile_t):
    """Batch step: return ``A[b]^{-1} @ x_data_tile_t[b]`` for every batch b."""
    return torch.bmm(torch.inverse(A), x_data_tile_t)


def get_hatS_temp(__weight, B):
    """
    Apply the per-sample weights: expand each weight row into a diagonal
    matrix and left-multiply the matching batch of ``B``.

    :param __weight: weight rows; ``diag_embed`` turns each row into a diagonal matrix
    :param B: batched matrix to be weighted
    :return: ``diag(__weight[b]) @ B[b]`` per batch
    """
    weight_diag = torch.diag_embed(__weight)
    return torch.bmm(weight_diag, B)

def get_B(__x_data, __n):
    """
    Compute the batched least-squares component B with
    ``B[b] = (X^T X)^{-1} X^T`` for every sample index b.

    The tiling/inversion helpers are inlined here; the computation is
    identical to ``__inverse_multi(*_t_multi(__x_data, __n))``.

    :param __x_data: (n, k) design matrix X
    :param __n: number of samples n
    :return: tensor of shape (n, k, n)
    """
    tiled = __x_data.repeat(__n, 1).view(__n, __n, -1)  # (n, n, k): n copies of X
    tiled_t = tiled.transpose(1, 2)                     # (n, k, n): n copies of X^T
    gram = torch.bmm(tiled_t, tiled)                    # (n, k, k): X^T X per batch
    return torch.bmm(torch.inverse(gram), tiled_t)

def get_hatS(__x_data, hatS_temp, __n):
    """
    Form each row of the hat matrix: row b is ``x_b^T @ hatS_temp[b]``.

    :param __x_data: (n, k) design matrix; row b supplies the left vector
    :param hatS_temp: (n, k, n) weighted least-squares component
    :param __n: number of samples n
    :return: (n, n) hat matrix
    """
    rows = __x_data.unsqueeze(1)       # (n, 1, k): one row vector per batch
    hatS = torch.bmm(rows, hatS_temp)  # (n, 1, n)
    return hatS.reshape(-1, __n)       # (n, n)

def hat(__x_data, __n, __weight):
    """
    Assemble the full hat matrix S of the weighted regression.

    Pipeline: batched least-squares component B, weighted by ``__weight``,
    then contracted against the design-matrix rows.

    :param __x_data: (n, k) design matrix
    :param __n: number of samples n
    :param __weight: per-sample weights consumed by ``get_hatS_temp``
    :return: (n, n) hat matrix
    """
    least_squares = get_B(__x_data, __n)
    weighted = get_hatS_temp(__weight, least_squares)
    return get_hatS(__x_data, weighted, __n)

def create_get_osl_hat(__x_data, __ols_hat_file_path):
    """
    Return the OLS hat matrix, using ``__ols_hat_file_path`` as an on-disk
    cache: load it when the file exists, otherwise compute and save it.
    """
    if not os.path.exists(__ols_hat_file_path):
        __ols_hat = OLS_hat(__x_data, Hat_com(__x_data))
        torch.save(__ols_hat, __ols_hat_file_path)
        return __ols_hat
    return torch.load(__ols_hat_file_path)

def create_get_hat(__x_data, __weight, __n, __hat_file_path):
    """
    Return the weighted hat matrix, using ``__hat_file_path`` as an on-disk
    cache: load it when the file exists, otherwise compute and save it.
    """
    if not os.path.exists(__hat_file_path):
        __hat = hat(__x_data, __n, __weight)
        torch.save(__hat, __hat_file_path)
        return __hat
    return torch.load(__hat_file_path)

class DIAGNOSIS:
    """
    `DIAGNOSIS` is the class to calculate the diagnoses of the result of GNNWR/GTNNWR.
    These diagnoses include F1-test, F2-test, F3-test, AIC, AICc, R2, Adjust_R2, RMSE (Root Mean Square Error).
    The explanation of these diagnoses can be found in the paper
    `Geographically neural network weighted regression for the accurate estimation of spatial non-stationarity <https://doi.org/10.1080/13658816.2019.1707834>`.
    :param weight: output of the neural network
    :param x_data: the independent variables
    :param y_data: the dependent variables
    :param y_pred: output of the GNNWR/GTNNWR
    """

    # @profile
    def __init__(self, weight, x_data, y_data, y_pred, tmp_id_str, tmp_dir):
        self.__weight = weight.clone().to('cpu').detach()
        self.__x_data = x_data.clone().to('cpu').detach()
        self.__y_data = y_data.clone().to('cpu').detach()
        self.__y_pred = y_pred.clone().to('cpu').detach()
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        self.__ols_hat_file_path = f'{tmp_dir}/osl_hat_{tmp_id_str}.pt'
        self.__hat_file_path = f'{tmp_dir}/hat_{tmp_id_str}.pt'
        # print('self.__hat_file_path: ', self.__hat_file_path)
        # print('self.__x_data', self.__x_data.shape)
        # print('self.__y_data', self.__y_data.shape)
        # print('self.__y_pred', self.__y_pred.shape)
        # print('self.__weight', self.__weight.shape)
        self.__n = len(self.__y_data)
        self.__k = len(self.__x_data[0])
        self.__residual = self.__y_data - self.__y_pred
        self.f3_dict = None
        self.f3_dict_2 = None


    def F1_Global(self):
        """
        :return: F1-test
        """
        __ols_hat = create_get_osl_hat(self.__x_data, self.__ols_hat_file_path)
        __hat = create_get_hat(self.__x_data, self.__weight, self.__n, self.__hat_file_path)

        __ssr = SSR(self.__y_pred, self.__y_data)
        k1 = self.__n - 2 * torch.trace(__hat) + \
             torch.trace(torch.mm(__hat.transpose(-2, -1), __hat))
        del __hat
        gc.collect()

        k2 = self.__n - self.__k - 1
        rss_olr = torch.sum((self.__y_data - torch.mm(__ols_hat, self.__y_data)) ** 2)
        F_value = __ssr / k1 / (rss_olr / k2)
        # p_value = f.sf(F_value, k1, k2)
        return F_value

    def F2_Global(self):
        """
        :return: F2-test
        """
        # A = (I - H) - (I - S)^T*(I - S)
        __ols_hat = create_get_osl_hat(self.__x_data, self.__ols_hat_file_path)
        __hat = create_get_hat(self.__x_data, self.__weight, self.__n, self.__hat_file_path)

        A = (torch.eye(self.__n) - __ols_hat) - torch.mm(
            (torch.eye(self.__n) - __hat).transpose(-2, -1),
            (torch.eye(self.__n) - __hat))
        v1 = torch.trace(A)
        # DSS = y^T*A*y
        DSS = torch.mm(self.__y_data.transpose(-2, -1), torch.mm(A, self.__y_data))
        k2 = self.__n - self.__k - 1
        rss_olr = torch.sum(
            (torch.mean(self.__y_data) - torch.mm(__ols_hat, self.__y_data)) ** 2)

        return DSS / v1 / (rss_olr / k2)

    def F3_Local(self):
        """
        :return: F3-test of each variable
        """
        __ssr = SSR(self.__y_pred, self.__y_data)
        ek_dict = {}
        self.f3_dict = {}
        self.f3_dict_2 = {}
        B = get_B(self.__x_data, self.__n)
        __hat_temp = get_hatS_temp(self.__weight, B)
        for i in range(self.__x_data.size(1)):
            ek_zeros = torch.zeros([self.__x_data.size(1)])
            ek_zeros[i] = 1
            ek_dict['ek' + str(i)] = torch.reshape(torch.reshape(torch.tile(ek_zeros.clone().detach(), [self.__n]),
                                                                 [self.__n, -1]),
                                                   [-1, 1, self.__x_data.size(1)])
            hatB = torch.matmul(ek_dict['ek' + str(i)], __hat_temp)
            hatB = torch.reshape(hatB, [-1, self.__n])

            J_n = torch.ones([self.__n, self.__n]) / self.__n
            L = torch.matmul(hatB.transpose(-2, -1), torch.matmul(torch.eye(self.__n) - J_n, hatB))

            vk2 = 1 / self.__n * torch.matmul(self.__y_data.transpose(-2, -1), torch.matmul(L, self.__y_data))
            trace_L = torch.trace(1 / self.__n * L)
            f3 = torch.squeeze(vk2 / trace_L / (__ssr / self.__n))
            self.f3_dict['f3_param_' + str(i)] = f3

            bk = torch.matmul(hatB, self.__y_data)
            vk2_2 = 1 / self.__n * torch.sum((bk - torch.mean(bk)) ** 2)
            f3_2 = torch.squeeze(vk2_2 / trace_L / (__ssr / self.__n))
            self.f3_dict_2['f3_param_' + str(i)] = f3_2
        return self.f3_dict, self.f3_dict_2

    def AIC(self):
        """
        :return: AIC
        """
        __ssr = SSR(self.__y_pred, self.__y_data)
        return self.__n * (math.log(__ssr / self.__n * 2 * math.pi, math.e)) + self.__n + self.__k

    def AICc(self):
        """

        :return: AICc
        """
        __ssr = SSR(self.__y_pred, self.__y_data)
        __hat = create_get_hat(self.__x_data, self.__weight, self.__n, self.__hat_file_path)
        __S = torch.trace(__hat)
        return self.__n * (math.log(__ssr / self.__n * 2 * math.pi, math.e) + (self.__n + __S) / (
                self.__n - __S - 2))

    def R2(self):
        """
        :return: R2 of the result
        """
        return 1 - torch.sum(self.__residual ** 2) / torch.sum((self.__y_data - torch.mean(self.__y_data)) ** 2)

    def Adjust_R2(self):
        """

        :return: Adjust R2 of the result
        """
        return 1 - (1 - self.R2()) * (self.__n - 1) / (self.__n - self.__k - 1)

    def RMSE(self):
        """
        :return: RMSE of the result
        """
        return torch.sqrt(torch.sum(self.__residual ** 2) / self.__n)

    def AUC(self):
        y_shape = self.__y_pred.shape
        y_pred_1d = self.__y_pred.view(y_shape[0]*y_shape[1])
        y_data_1d = self.__y_data.view(y_shape[0]*y_shape[1])
        auc_val = roc_auc_score(y_data_1d.numpy(), y_pred_1d.numpy().clip(0,1))
        return auc_val

    def AUC_normal(self):
        pass

    def f1_val(self, ave_method='macro'):
        y_shape = self.__y_pred.shape
        y_pred_1d = self.__y_pred.view(y_shape[0] * y_shape[1])
        y_pred_class_1d = torch.where(y_pred_1d >= 0.5, torch.tensor(1.0), torch.tensor(0.0))
        y_data_1d = self.__y_data.view(y_shape[0] * y_shape[1])
        f1_val = f1_score(y_data_1d.numpy(), y_pred_class_1d.numpy(), average=ave_method)
        return f1_val

    def recall_val(self, ave_method='macro'):
        y_shape = self.__y_pred.shape
        y_pred_1d = self.__y_pred.view(y_shape[0] * y_shape[1])
        y_pred_class_1d = torch.where(y_pred_1d >= 0.5, torch.tensor(1.0), torch.tensor(0.0))
        y_data_1d = self.__y_data.view(y_shape[0] * y_shape[1])
        recall_val = recall_score(y_data_1d.numpy(), y_pred_class_1d.numpy(), average=ave_method)
        return recall_val
    def Accuracy(self):
        y_shape = self.__y_pred.shape
        y_pred_1d = self.__y_pred.view(y_shape[0] * y_shape[1])
        y_pred_class_1d = torch.where(y_pred_1d >= 0.5, torch.tensor(1.0), torch.tensor(0.0))
        y_data_1d = self.__y_data.view(y_shape[0] * y_shape[1])
        acc_val = accuracy_score(y_data_1d.numpy(), y_pred_class_1d.numpy())
        return acc_val

    def __del__(self):
        if os.path.exists(self.__ols_hat_file_path):
            os.remove(self.__ols_hat_file_path)
        if os.path.exists(self.__hat_file_path):
            os.remove(self.__hat_file_path)


class DIAGNOSIS_Opt:
    """
    Optimized DIAGNOSIS class with memory-efficient computations
    """

    def __init__(self, weight, x_data, y_data, y_pred, tmp_id_str, tmp_dir):
        # Clone and detach all inputs to avoid affecting original computation graph
        self.__weight = weight.clone().to('cpu').detach()
        self.__x_data = x_data.clone().to('cpu').detach()
        self.__y_data = y_data.clone().to('cpu').detach()
        self.__y_pred = y_pred.clone().to('cpu').detach()

        # Basic problem dimensions
        self.__n = len(self.__y_data)
        self.__k = self.__x_data.shape[1]
        self.__residual = self.__y_data - self.__y_pred
        self.__ssr = torch.sum(self.__residual ** 2)

        # Precompute frequently used values
        self.__y_mean = torch.mean(self.__y_data)
        self.__y_var = torch.var(self.__y_data)
        self.__sst = torch.sum((self.__y_data - self.__y_mean) ** 2)

        # Cache for expensive computations
        self.__cache = {}
        self.__trace_vectors = 50  # Default for Hutchinson estimator

    def set_trace_vectors(self, num_vectors):
        """Configure how many random probes the Hutchinson estimator draws."""
        self.__trace_vectors = num_vectors

    def _trace_estimator(self, operator: Callable, n=None):
        """
        Hutchinson randomized trace estimator
        Avoids O(n^2) memory by using matrix-free operations
        """
        if n is None:
            n = self.__n

        trace = 0.0
        for _ in range(self.__trace_vectors):
            v = torch.randn(n, 1, device=self.__x_data.device)
            v = v / torch.norm(v)  # Normalize for stability
            Av = operator(v)
            trace += torch.sum(v * Av)

        return trace / self.__trace_vectors

    def _hat_operator(self, v):
        """
        Matrix-free implementation of S*v operation
        Avoids explicit construction of hat matrix
        """
        # Step 1: Compute X^T * v
        Xtv = torch.mm(self.__x_data.transpose(0, 1), v)

        # Step 2: Solve (X^T X) w = X^T v using Cholesky
        XtX = torch.mm(self.__x_data.transpose(0, 1), self.__x_data)
        try:
            L = torch.linalg.cholesky(XtX)
            w = torch.cholesky_solve(Xtv, L)
        except:
            # Fallback to LU if not positive definite
            w = torch.linalg.solve(XtX, Xtv)

        # Step 3: Compute S*v = X * w * diag(weight)
        weighted_w = w * self.__weight.view(-1, 1)
        return torch.mm(self.__x_data, weighted_w)

    def F1_Global(self):
        """Optimized F1-test with Hutchinson estimator"""
        if 'f1' in self.__cache:
            return self.__cache['f1']

        # Avoid building full hat matrix
        def s_operator(v):
            return self._hat_operator(v)

        # Compute trace(S) and trace(S^T S) using Hutchinson
        tr_s = self._trace_estimator(s_operator)

        # Compute trace(S^T S) = ||S||_F^2
        def sts_operator(v):
            return s_operator(s_operator(v))

        tr_sts = self._trace_estimator(sts_operator)

        # Compute k1 = n - 2tr(S) + tr(S^T S)
        k1 = self.__n - 2 * tr_s + tr_sts

        # Compute OLS residuals without full hat matrix
        XtX = torch.mm(self.__x_data.transpose(0, 1), self.__x_data)
        try:
            L = torch.linalg.cholesky(XtX)
            w = torch.cholesky_solve(
                torch.mm(self.__x_data.transpose(0, 1), self.__y_data), L
            )
        except:
            w = torch.linalg.solve(XtX, torch.mm(self.__x_data.transpose(0, 1), self.__y_data))
        y_hat_ols = torch.mm(self.__x_data, w)
        rss_olr = torch.sum((self.__y_data - y_hat_ols) ** 2)

        # Final F-value
        k2 = self.__n - self.__k - 1
        F_value = self.__ssr / k1 / (rss_olr / k2)

        self.__cache['f1'] = F_value
        return F_value

    def F2_Global(self):
        """
        F2-test placeholder: no memory-efficient formulation is implemented
        in the optimized class yet, so a warning is emitted and ``None`` is
        returned instead of a statistic.
        """
        warnings.warn("F2_Global not optimized, returning None")
        return None

    def F3_Local(self):
        """
        Optimized F3-test computed variable-by-variable.

        For each independent variable i, the rows of hatB are assembled
        through n matrix-free hat-operator calls, then the centered
        quadratic statistic vk2 / tr(L) / (SSR / n) is formed, plus a
        second variant based on the variance of the estimated coefficients.

        NOTE(review): ``v * ek.view(-1, 1)`` multiplies an (n, 1) basis
        vector with a (k, 1) selector; this broadcasts only when n == k —
        confirm the intended selector masking against callers.

        :return: tuple of two dicts keyed ``f3_param_i``
        """
        f3_dict = {}
        f3_dict_2 = {}

        # Global residual sum of squares (precomputed in __init__).
        ssr = self.__ssr

        # Centering matrix: I - 11^T / n.
        J_n = torch.eye(self.__n) - torch.ones(self.__n, self.__n) / self.__n

        for i in range(self.__k):
            # Selector vector for the current variable.
            ek = torch.zeros(self.__k)
            ek[i] = 1

            # Build hatB one column at a time via the matrix-free operator
            # (n operator calls per variable).
            hatB = []
            for j in range(self.__n):
                # j-th standard basis vector selects row/column j.
                v = torch.zeros(self.__n, 1)
                v[j] = 1
                hatB_j = self._hat_operator(v * ek.view(-1, 1))
                hatB.append(hatB_j)
            hatB = torch.cat(hatB, dim=1)

            # L = hatB^T (I - 11^T/n) hatB, the centered Gram matrix.
            L = torch.mm(hatB.t(), torch.mm(J_n, hatB))

            # Normalized trace of L.
            tr_L = torch.trace(L) / self.__n

            # Quadratic form y^T L y / n.
            ytL = torch.mm(self.__y_data.t(), L)
            vk2 = torch.mm(ytL, self.__y_data) / self.__n

            # Final f3 statistic for variable i.
            f3 = (vk2 / tr_L) / (ssr / self.__n)
            f3_dict[f'f3_param_{i}'] = f3

            # Variant: variance of the estimated coefficients bk.
            bk = torch.mm(hatB, self.__y_data)
            vk2_2 = torch.sum((bk - torch.mean(bk)) ** 2) / self.__n
            f3_2 = (vk2_2 / tr_L) / (ssr / self.__n)
            f3_dict_2[f'f3_param_{i}'] = f3_2

        return f3_dict, f3_dict_2

    def AIC(self):
        """Direct computation without large matrices"""
        return self.__n * torch.log(self.__ssr / self.__n * 2 * math.pi) + self.__n + self.__k

    def AICc(self):
        """Optimized AICc with Hutchinson trace estimation"""
        if 'aicc' in self.__cache:
            return self.__cache['aicc']

        # Estimate trace(S) using Hutchinson
        def s_operator(v):
            return self._hat_operator(v)

        tr_s = self._trace_estimator(s_operator)

        # Compute AICc
        aicc = self.__n * (
                torch.log(self.__ssr / self.__n * 2 * math.pi) +
                (self.__n + tr_s) / (self.__n - tr_s - 2)
        )

        self.__cache['aicc'] = aicc
        return aicc

    def R2(self):
        """Efficient R2 computation"""
        return 1 - self.__ssr / self.__sst

    def Adjust_R2(self):
        """Efficient adjusted R2"""
        return 1 - (1 - self.R2()) * (self.__n - 1) / (self.__n - self.__k - 1)

    def RMSE(self):
        """Direct computation"""
        return torch.sqrt(self.__ssr / self.__n)

    def AUC(self):
        """ROC-AUC of the flattened predictions against the flattened labels."""
        labels = self.__y_data.reshape(-1).numpy()
        scores = self.__y_pred.reshape(-1).numpy()
        return roc_auc_score(labels, scores)

    def f1_val(self, ave_method='macro'):
        """F1 score of the thresholded (> 0.5) predictions via sklearn."""
        labels = self.__y_data.reshape(-1).numpy()
        classes = (self.__y_pred > 0.5).float().reshape(-1).numpy()
        return f1_score(labels, classes, average=ave_method)

    def recall_val(self, ave_method='macro'):
        """Recall of the thresholded (> 0.5) predictions via sklearn."""
        labels = self.__y_data.reshape(-1).numpy()
        classes = (self.__y_pred > 0.5).float().reshape(-1).numpy()
        return recall_score(labels, classes, average=ave_method)