import numpy as np
from numpy import *
from sympy import simplify, expand
from scipy.optimize import minimize
from contextlib import contextmanager
import threading
import _thread
import time
import subprocess
import re
from collections import Counter
class TimeoutException(Exception):
    """Raised when an operation exceeds its allotted time limit.

    Attributes
    ----------
    msg : str
        Human-readable description of the timed-out operation.
    """

    def __init__(self, msg=''):
        # Forward msg to the Exception base class so str(e) shows it.
        # (The original skipped super().__init__, leaving str(e) empty.)
        super().__init__(msg)
        self.msg = msg

@contextmanager
def time_limit(seconds, msg=''):
    """Context manager that aborts the enclosed block after `seconds` seconds.

    A background ``threading.Timer`` fires ``_thread.interrupt_main()``,
    which raises KeyboardInterrupt in the main thread; that interrupt is
    translated into :class:`TimeoutException`.

    NOTE(review): a genuine user Ctrl-C inside the block is
    indistinguishable from the timer firing and will also surface as
    TimeoutException. Must only be used from the main thread, since
    ``interrupt_main`` targets it specifically.

    Parameters
    ----------
    seconds : float
        Time budget for the ``with`` block.
    msg : str
        Label included in the TimeoutException message.

    Raises
    ------
    TimeoutException
        If the block does not finish within `seconds`.
    """
    timer = threading.Timer(seconds, lambda: _thread.interrupt_main())
    timer.start()
    try:
        yield
    except KeyboardInterrupt:
        raise TimeoutException("Timed out for operation {}".format(msg))
    finally:
        # if the action ends in specified time, timer is canceled
        timer.cancel()


def simplify_eq(eq):
    """Algebraically simplify and expand an equation string via sympy.

    Parameters
    ----------
    eq : str
        Equation in sympy-parsable syntax.

    Returns
    -------
    str
        String form of ``expand(simplify(eq))``.
    """
    # Removed the bare debug print of every candidate equation: it
    # flooded stdout on each scoring call during the search.
    return str(expand(simplify(eq)))

def prune_poly_c(eq):
    """Collapse any integer power of the constant placeholder C down to C.

    C is an unknown constant to be estimated later, so C**k is still just
    an unknown constant; collapsing it keeps coefficient estimation cheap.

    Parameters
    ----------
    eq : str
        Equation string possibly containing ``C**<int>`` terms.

    Returns
    -------
    str
        Simplified equation with all powers of C reduced to C.
    """
    eq = simplify_eq(eq)
    if 'C**' in eq:
        # A regex handles exponents of any number of digits. The old
        # per-string replace of 'C**0'..'C**9' corrupted multi-digit
        # exponents (e.g. 'C**10' became 'C0').
        eq = re.sub(r'C\*\*\d+', 'C', eq)
    return simplify_eq(eq)

def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^(-x)), mapping reals into (0, 1)."""
    denom = 1 + np.exp(-x)
    return 1 / denom

def BCE_loss(y_pred, y_true):
    """Mean binary cross-entropy between predictions and binary targets."""
    eps = 1e-15
    # Clip predictions away from exactly 0 and 1 so the logs stay finite.
    p = np.clip(y_pred, eps, 1 - eps)
    per_sample = y_true * np.log(p) + (1 - y_true) * np.log(1 - p)
    return -np.mean(per_sample)

def js_divergence(y_pred, y_true):
    """Jensen-Shannon divergence (base 2) between two distributions."""
    from scipy.spatial.distance import jensenshannon
    # jensenshannon returns the JS *distance* (the square root of the
    # divergence), so square it to recover the divergence.
    distance = jensenshannon(y_pred, y_true, base=2)
    return distance ** 2

def kl_divergence(y_pred, y_true):
    """Kullback-Leibler divergence KL(y_pred || y_true).

    Parameters
    ----------
    y_pred, y_true : array-like
        Probability-like vectors; zeros are replaced by 1e-10 so the
        log and the division are always defined.

    Returns
    -------
    float
        sum(y_pred * log(y_pred / y_true)).
    """
    # np.float was removed in NumPy 1.24; use the builtin float type.
    y_pred = np.asarray(y_pred, dtype=float)
    y_true = np.asarray(y_true, dtype=float)

    # Replace exact zeros so log(0) / division-by-zero cannot occur.
    y_pred = np.where(y_pred == 0, 1e-10, y_pred)
    y_true = np.where(y_true == 0, 1e-10, y_true)

    return np.sum(y_pred * np.log(y_pred / y_true))

def focal_loss(predict_y, target_y, focal_gamma=2, debug_log=False):
    """Class-balanced focal loss for imbalanced binary targets.

    Each class is weighted by the prevalence of the opposite class
    (alpha), and easy examples are down-weighted by a (1 - p)**gamma
    modulating factor.

    Parameters
    ----------
    predict_y : numpy array
        Predicted probabilities in [0, 1].
    target_y : numpy array
        Targets; entries > 0 count as positives, == 0 as negatives.
    focal_gamma : float, optional
        Focusing exponent, default 2.
    debug_log : bool, optional
        When True, print the shapes of the positive-term intermediates.

    Returns
    -------
    float
        Mean of the per-sample focal loss.
    """
    eps = 1e-3
    n_neg = len(np.where(target_y == 0)[0])
    n_pos = len(np.where(target_y > 0)[0])
    total = n_pos + n_neg
    # Weight each class by how common the *other* class is.
    alpha_pos = (n_neg + 1e-3) / total
    alpha_neg = (n_pos + 1e-3) / total

    # Positive-sample term.
    w_pos = np.power(1. - predict_y + eps, focal_gamma)
    f_pos = -alpha_pos * w_pos * np.log(predict_y + eps)
    l_pos = target_y * f_pos
    if debug_log:
        print(f"debug log focal loss weight positive: {w_pos.shape}")
        print(f"debug log focal loss focal_positive: {f_pos.shape}")
        print(f"debug log focal loss loss_positive: {l_pos.shape}")

    # Negative-sample term.
    w_neg = np.power(predict_y, focal_gamma)
    f_neg = -alpha_neg * w_neg * np.log(1. - predict_y + eps)
    l_neg = (1. - target_y) * f_neg

    return np.mean(l_pos + l_neg)

def MSE_loss(y_pred, y_true):
    """Mean squared error between prediction and target vectors."""
    residual = y_pred - y_true
    return np.linalg.norm(residual, 2) ** 2 / y_true.shape[0]

def Top_50_acc_loss(y_pred, y_true, percent=0.5):
    """Recall obtained by labelling the top `percent` fraction of
    predictions as positive.

    Parameters
    ----------
    y_pred : numpy array
        Prediction scores; higher means more likely positive.
    y_true : numpy array
        Ground-truth labels; nonzero entries are positives.
    percent : float, optional
        Fraction of samples to select as predicted positives, default 0.5.

    Returns
    -------
    float
        tp / (tp + fn) over the selected / rejected partition.
    """
    def _confusion(selected, rejected, labels):
        # One-line purpose: count tp/tn/fp/fn for a given index split.
        positives = np.nonzero(labels)[0]
        negatives = np.nonzero(labels == 0)[0]
        tp = len(set(selected).intersection(positives))
        fp = len(selected) - tp
        tn = len(set(rejected).intersection(negatives))
        fn = len(rejected) - tn
        return tp, tn, fp, fn

    order = np.argsort(y_pred, kind='stable')  # ascending scores
    n_total = len(y_pred)
    n_sel = int(percent * n_total)
    cut = n_total - n_sel
    selected = order[cut:]   # the n_sel highest-scoring indices
    rejected = order[:cut]
    tp, tn, fp, fn = _confusion(selected, rejected, y_true)
    return tp / (tp + fn)

def recall_loss(y_pred, y_true):
    """Loss that penalises missed recall plus over-selection.

    loss = 1 - recall + 0.3 * (fraction predicted positive)
    """
    tp = np.sum(y_true * y_pred)
    actual_pos = np.sum(y_true)
    predicted_pos = np.sum(y_pred)
    # Epsilon guards against division by zero.
    recall = tp / (actual_pos + 1e-7)
    pos_ratio = predicted_pos / len(y_pred)
    return 1 - recall + 0.3 * pos_ratio

def recall_mse_loss(y_pred, y_true):
    """Loss combining missed recall with mean squared error.

    loss = 1 - recall + MSE(y_pred, y_true)
    """
    tp = np.sum(y_true * y_pred)
    actual_pos = np.sum(y_true)
    recall = tp / (actual_pos + 1e-7)  # epsilon avoids division by zero
    return 1 - recall + MSE_loss(y_pred, y_true)

def GLNN_loss(y_pred, y_true, epsilon):
    """Blend of focal loss and MSE: epsilon * focal + (1 - epsilon) * mse.

    y_true is split into a binary label part (entries >= 1) and the
    fractional remainder; the focal term targets the labels and the MSE
    term targets the remainder.
    """
    labels = (y_true >= 1).astype('float32')
    remainder = y_true - labels
    focal_term = focal_loss(y_pred, labels)
    mse_term = MSE_loss(y_pred, remainder)
    return epsilon * focal_term + (1 - epsilon) * mse_term

    # # positive
    # weight_positive = torch.pow(1.-predict_y+epilson, focal_gamma)
    # focal_positive = -alpha_positive * \
    #     weight_positive * torch.log(predict_y+epilson)
    # loss_positive = target_y * focal_positive
    # if debug_log:
    #     print(
    #         f"debug log focal loss weight positive: {weight_positive.shape}")
    #     print(
    #         f"debug log focal loss focal_positive: {focal_positive.shape}")
    #     print(f"debug log focal loss loss_positive: {loss_positive.shape}")
    # # negative
    # weight_negative = torch.pow(predict_y, focal_gamma)
    # focal_negative = -alpha_negative * \
    #     weight_negative * torch.log(1.-predict_y+epilson)
    # loss_negative = (1.-target_y) * focal_negative

    # loss = torch.mean(loss_positive+loss_negative)
    # # print(f"debug log focal loss: {loss.shape}")
    # return loss

def score_with_est(eq, tree_size, data, loss_type, epsilon, feature_selection, t_limit = 1.0, eta=0.999):
    """
    Calculate the reward score for a complete parse tree.

    Reward = eta ** tree_size * base_score, where the base score is
    derived from the chosen loss between the evaluated equation and the
    measured target.

    Parameters
    ----------
    eq : str
        The discovered equation; may reference variables x_0 .. x_{n-1}.
    tree_size : int
        Number of production rules in the complete parse tree.
    data : 2-d numpy array
        Measurement data; rows are independent variables, last row is
        the dependent variable.
    loss_type : str
        One of 'mse_penalty', 'mse', 'binary_classification', 'focal',
        'kl', 'js', 'GLNN', 'recall', 'recall_mse', 'top_50'.
    epsilon : float
        Mixing weight forwarded to GLNN_loss.
    feature_selection : str
        When not 'lut', predictions are squashed through a sigmoid.
    t_limit : float, optional
        Time limit (seconds) for a single evaluation, default 1 second.
        NOTE(review): currently unused in this code path.
    eta : float, optional
        Per-production-rule penalty base, default 0.999.

    Returns
    -------
    score : float
        Reward score; 0 on any evaluation error.
    eq : str
        The (unchanged) equation string.
    """
    # Expose independent variables x_0..x_{n-1} and the target f_true as
    # globals so the eval'd equation string can reference them by name.
    num_var = data.shape[0] - 1
    for i in range(num_var):
        globals()[f'x_{i}'] = data[i, :]
    globals()['f_true'] = data[-1, :]
    try:
        # SECURITY: eval executes arbitrary Python. `eq` must only come
        # from the trusted grammar generator, never from untrusted input.
        f_pred = eval(eq)
        if feature_selection != 'lut':
            f_pred = sigmoid(f_pred)
        if loss_type == 'mse_penalty':
            # Base score only; the eta ** tree_size penalty is applied
            # exactly once below. (The original applied it both here and
            # below, penalising this branch twice.)
            r = float(1.0 / (1.0 + np.linalg.norm(f_pred - f_true, 2) ** 2 / f_true.shape[0]))
        elif loss_type == 'mse':
            r = 1 - MSE_loss(f_pred, f_true)
        elif loss_type == 'binary_classification':
            r = 1 - BCE_loss(f_pred, f_true)
        elif loss_type == 'focal':
            r = 1 - focal_loss(f_pred, f_true)
        elif loss_type == 'kl':
            r = 1 - kl_divergence(f_pred, f_true)
        elif loss_type == 'js':
            r = 1 - js_divergence(f_pred, f_true)
        elif loss_type == 'GLNN':
            # A second, unreachable duplicate 'GLNN' branch
            # (1 / (1 + loss)) was removed; this is the formulation
            # that actually executed.
            r = 1 - GLNN_loss(f_pred, f_true, epsilon)
        elif loss_type == 'recall':
            r = 1 - recall_loss(f_pred, f_true)
        elif loss_type == 'recall_mse':
            r = 1 - recall_mse_loss(f_pred, f_true)
        elif loss_type == 'top_50':
            r = Top_50_acc_loss(f_pred, f_true)
        else:
            raise ValueError("Unsupported loss type")
        if np.isnan(r):
            raise ValueError("Computed r is NaN")
        # Complexity penalty: discourage large parse trees.
        r = float(eta ** tree_size * r)
        return r, eq
    except Exception:
        # Deliberate best-effort scoring: any failure (bad eq syntax,
        # numeric error, unsupported loss) scores zero rather than
        # aborting the whole search.
        return 0, eq
    ## count number of numerical values in eq
    # c_count = eq.count('C')
    # start_time = time.time()
    # with time_limit(t_limit, 'sleep'):
    #     try: 
    #         if c_count == 0:       ## no numerical values
    #             f_pred = eval(eq)  ## eval函数执行字符串中的python语句，返回运算结果
    #             # print(f_pred)
    #         elif c_count >= 10:    ## discourage over complicated numerical estimations
    #             return 0, eq
    #         else:                  ## with numerical values: coefficient estimationwith Powell method

    #             # eq = prune_poly_c(eq)
    #             c_lst = ['c'+str(i) for i in range(c_count)]
    #             for c in c_lst: 
    #                 eq = eq.replace('C', c, 1)

    #             def eq_test(c):
    #                 for i in range(len(c)): globals()['c'+str(i)] = c[i]
    #                 return np.linalg.norm(eval(eq) - f_true, 2)

    #             x0 = [1.0] * len(c_lst)
    #             c_lst = minimize(eq_test, x0, method='Powell', tol=1e-6).x.tolist() 
    #             c_lst = [np.round(x, 4) if abs(x) > 1e-2 else 0 for x in c_lst]
    #             eq_est = eq
    #             for i in range(len(c_lst)):
    #                 eq_est = eq_est.replace('c'+str(i), str(c_lst[i]), 1)
    #             eq = eq_est.replace('+-', '-')
    #             f_pred = eval(eq)
    #     except: 
    #         print('wrong return for score.py')
    #         return 0, eq

    # r = float(eta ** tree_size * (1 - np.count_nonzero(f_pred != f_true)/len(f_pred)))
    # r = float(eta ** tree_size / (1.0 + np.linalg.norm(f_pred - f_true, 2) ** 2 / f_true.shape[0]))

    # added by yqbai 20240418
    # r = 1 - np.count_nonzero(f_pred != f_true)/len(f_pred)
    # r = 1 - np.count_nonzero(f_pred != f_true)/len(f_pred)


    # run_time = np.round(time.time() - start_time, 3)
    # print('runtime :', run_time,  eq,  np.round(r, 3))
    
    # return r, eq

