from sklearn.metrics import hamming_loss,jaccard_score,accuracy_score,\
                            precision_score,recall_score,fbeta_score,\
                            label_ranking_loss,label_ranking_average_precision_score,\
                            coverage_error,f1_score,roc_auc_score

import numpy as np

'''
    Input: predict, test
    where predict is a list of lists (one binary label vector per sample)
    and test is a list of lists of the same shape.
'''
class Example_based():
    """Example-based (per-sample) multi-label evaluation metrics.

    All methods are static. ``true`` and ``pred`` are parallel sequences of
    binary label-indicator vectors (one per sample); the ranking metrics take
    ``score`` — real-valued confidences per label — instead of hard
    predictions. Negative labels may be encoded as -1; the ranking metrics
    remap -1 -> 0 before delegating to sklearn.
    """

    def __init__(self):
        # Fixed: the original `def __init__():` was missing `self`, so
        # `Example_based()` raised TypeError and the class could never be
        # instantiated.
        pass

    @staticmethod
    def _check_lengths(true, pred):
        """Return the common sample count; raise if the lengths differ."""
        if len(pred) != len(true):
            raise Exception("预测大小与实际大小长度不等")
        return len(pred)

    @staticmethod
    def Subset_Accuracy(true, pred):
        """Fraction of samples whose entire label vector is predicted exactly."""
        return accuracy_score(true, pred)

    @staticmethod
    def Hamming_Loss(true, pred):
        """Mean per-sample Hamming loss (fraction of mispredicted labels)."""
        n = Example_based._check_lengths(true, pred)
        return np.mean([hamming_loss(true[i], pred[i]) for i in range(n)])

    @staticmethod
    def Accuracy_exam(true, pred):
        """Mean per-sample Jaccard similarity between true and predicted label sets."""
        n = Example_based._check_lengths(true, pred)
        return np.mean([jaccard_score(true[i], pred[i]) for i in range(n)])

    @staticmethod
    def Precision_Exam(true, pred):
        """Mean per-sample precision."""
        n = Example_based._check_lengths(true, pred)
        return np.mean([precision_score(true[i], pred[i]) for i in range(n)])

    @staticmethod
    def Recall_Exam(true, pred):
        """Mean per-sample recall."""
        n = Example_based._check_lengths(true, pred)
        return np.mean([recall_score(true[i], pred[i]) for i in range(n)])

    @staticmethod
    def F_beta_exam(true, pred, beta=1):
        """Mean per-sample F-beta score (beta=1 gives the F1 score)."""
        n = Example_based._check_lengths(true, pred)
        return np.mean([fbeta_score(true[i], pred[i], beta=beta) for i in range(n)])

    @staticmethod
    def One_Error(true, score):
        """Fraction of samples whose top-scored label is not a relevant label.

        Fixed: the original compared a plain Python list to the scalar 1
        (`true[i] == 1`), which yields a single bool instead of an
        element-wise mask, so `np.where` returned an empty index array and
        every sample was counted as an error. Converting each row with
        `np.asarray` restores the element-wise comparison.
        """
        N = len(true)
        OneError = 0
        for i in range(N):
            yi = np.asarray(true[i])
            if yi.max() == -1:
                # Sample has no positive label at all; flag it as in the original.
                print("该条数据哪一类都不是")
            positive = np.where(yi == 1)[0]
            if np.argmax(score[i]) not in positive:
                OneError += 1
        return OneError * 1.0 / N

    @staticmethod
    def Coverage(true, score):
        """Normalized coverage error: average search depth to cover all true labels, scaled by label count."""
        Q = len(true[0])
        true = np.array(true)
        score = np.array(score)
        true[true == -1] = 0  # sklearn expects {0,1} indicators
        return (coverage_error(true, score) - 1) / Q

    @staticmethod
    def Ranking_Loss(true, score):
        """Average fraction of label pairs that are incorrectly ordered by score."""
        true = np.array(true)
        score = np.array(score)
        true[true == -1] = 0  # sklearn expects {0,1} indicators
        return label_ranking_loss(true, score)

    @staticmethod
    def Average_Precision(true, score):
        """Label-ranking average precision over all samples."""
        true = np.array(true)
        score = np.array(score)
        true[true == -1] = 0  # sklearn expects {0,1} indicators
        return label_ranking_average_precision_score(true, score)
    
    
class Label_based():
    """Label-based multi-label evaluation metrics (macro/micro averaging, AUC)."""

    def __init__(self):
        # Fixed: the original `def __init__():` was missing `self`, so
        # `Label_based()` raised TypeError and the class could never be
        # instantiated.
        pass

    @staticmethod
    def Macro_Averaging(true, pred):
        """Mean of per-sample macro-averaged F1 scores.

        NOTE(review): this macro-averages over each sample's label *vector*
        and then averages across samples, which differs from the usual
        per-label macro-F1 over the whole matrix. The original author flagged
        it ("may be have problem") — confirm the intended definition before
        relying on it. Behavior preserved here.
        """
        if len(pred) != len(true):
            raise Exception("预测大小与实际大小长度不等")
        n = len(pred)
        return np.mean([f1_score(true[i], pred[i], average='macro') for i in range(n)])

    @staticmethod
    def Micro_Averaging(true, pred):
        """Mean of per-sample micro-averaged F1 scores.

        NOTE(review): same per-sample-then-mean caveat as Macro_Averaging;
        flagged by the original author. Behavior preserved.
        """
        if len(pred) != len(true):
            raise Exception("预测大小与实际大小长度不等")
        n = len(pred)
        return np.mean([f1_score(true[i], pred[i], average='micro') for i in range(n)])

    @staticmethod
    def AUC_Macro(true, score):
        """Macro-averaged ROC AUC over labels.

        Fixed: the original evaluated `roc_auc_score(true, score, ...)` on the
        *full* matrices n times inside a list comprehension (the loop index was
        never used) and then averaged n identical values — O(n) redundant work
        for the same result. Compute it once.
        """
        if len(score) != len(true):
            raise Exception("预测大小与实际大小长度不等")
        return roc_auc_score(true, score, average="macro")

    @staticmethod
    def AUC_Micro(true, score):
        """Micro-averaged ROC AUC over all label decisions.

        Fixed: same redundant n-fold recomputation as AUC_Macro; the score is
        now computed exactly once.
        """
        if len(score) != len(true):
            raise Exception("预测大小与实际大小长度不等")
        return roc_auc_score(true, score, average="micro")