from DATASET import Datasets
from METRICS import Example_based,Label_based

import numpy as np
import sys
from sklearn.cluster import KMeans
from sklearn.model_selection import KFold
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.svm import SVC
from copy import copy

'''
LIFT deals with the multi-label learning problem by introducing label-specific features.
Return:
    [HammingLoss, OneError, Coverage, RankingLoss, AveragePrecision, AUCMacro]
Description:
    train_data    -An M1xN ndarray, the ith training instance is stored in train_data(i,:)
    train_target  -An M1xQ ndarray, if the ith training instance belongs to the jth class, then train_target(i,j) equals +1, otherwise train_target(i,j) equals -1
    test_data     -An M2xN ndarray, the ith testing instance is stored in test_data(i,:)
    test_target   -An M2xQ ndarray, if the ith testing instance belongs to the jth class, test_target(i,j) equals +1, otherwise test_target(i,j) equals -1
    ratio         -The number of clusters (i.e. k1 for positive examples, k2 for negative examples) considered for the ith class is set to k2=k1=ceil(ratio*min(num_pi,num_ni)),
                   where num_pi and num_ni are the numbers of positive and negative examples for the ith class respectively.
                   ***The default configuration is ratio=0.1***
    svm           -The kernel passed to sklearn's SVC, e.g. 'linear', 'rbf' or 'poly'
'''
class LIFT():
    '''LIFT multi-label classifier.

    For every label, the positive and the negative training instances are
    clustered separately with KMeans; each instance is then re-represented
    by its Euclidean distances to all cluster centers, and one binary SVM
    is trained per label on that representation.
    '''
    def __init__(self,ratio,svm_type):
        # ratio    : clustering ratio; k1=k2=ceil(ratio*min(#pos,#neg)) per label
        # svm_type : sklearn SVC kernel name, e.g. 'linear', 'rbf' or 'poly'
        self.ratio=ratio
        self.type=svm_type
        self.model=[]   # one fitted SVC per label; None for degenerate labels
        self.binary=[]  # per label: +1/-1 if only one class was seen, else 2

    def fit(self,X,Y):
        '''Fit one label-specific SVM per label.

        X -- (num_train, num_dim) feature matrix
        Y -- (num_train, num_class) target matrix with entries in {+1, -1}
        '''
        train_data,train_target=copy(X),copy(Y)
        num_train,num_dim=train_data.shape
        num_class=train_target.shape[1]

        # Reset learned state so the same object can be refitted safely.
        self.model,self.binary=[],[]
        self.P_Centers,self.N_Centers=[],[]

        #Find key instances (cluster centers) of each label
        for i in range(num_class):
            print("Performing clustering for the %d/%d-th class"%(i+1,num_class))

            p_idx=np.where(train_target[:,i]==1)[0]
            n_idx=np.where(train_target[:,i]==-1)[0]
            p_data=train_data[p_idx,:]
            n_data=train_data[n_idx,:]

            # Both sides get the same number of clusters, bounded by the
            # minority-class size, so KMeans never sees k > n_samples.
            k1=int(np.ceil(self.ratio*min(len(p_idx),len(n_idx))))

            if k1==0:
                # Degenerate label: every instance is positive (or negative).
                # Remember the constant prediction; no clustering is needed.
                self.binary.append(1 if len(p_idx)>len(n_idx) else -1)
                POS_C,NEG_C=[],[]
            else:
                self.binary.append(2)  # 2 marks a genuinely binary label
                if p_data.shape[0]==1:
                    POS_C=p_data  # a single instance is its own center
                else:
                    POS_C=KMeans(n_clusters=k1).fit(p_data).cluster_centers_

                if n_data.shape[0]==1:
                    NEG_C=n_data
                else:
                    NEG_C=KMeans(n_clusters=k1).fit(n_data).cluster_centers_
            self.P_Centers.append(POS_C)
            self.N_Centers.append(NEG_C)

        #Perform representation transformation and training
        for i in range(num_class):
            print("Building classifiers: %d / %d"%(i+1,num_class))

            # BUGFIX: degenerate labels have empty center lists; the original
            # code still stacked them (giving a (2,0) matrix) and trained an
            # SVC on a single-class target, both of which raise. Append a
            # placeholder so self.model stays index-aligned with labels.
            if self.binary[i]!=2:
                self.model.append(None)
                continue

            centers=np.vstack((self.P_Centers[i],self.N_Centers[i]))
            num_centers=centers.shape[0]
            # BUGFIX: fit used a 50000 threshold while predict used 5000;
            # fail fast at training time with the stricter prediction bound.
            if num_centers>=5000:
                raise Exception('Too many cluster centers, please try to decrease the number of clusters (i.e. decreasing the value of ratio) and try again...')

            # Each instance is represented by its distances to all centers.
            training_instance_data=euclidean_distances(train_data,centers)
            training_label_data=train_target[:,i]

            self.model.append(SVC(kernel=self.type,probability=True).fit(training_instance_data,training_label_data))

    #Perform representation transformation and testing
    def predict(self,test_X,test_Y):
        '''Predict on test_X and evaluate against test_Y.

        Returns [HammingLoss, OneError, Coverage, RankingLoss,
                 AveragePrecision, AUCMacro].
        '''
        test_data,test_target=copy(test_X),copy(test_Y)
        num_test,num_class=test_target.shape

        pre_labels=np.zeros((num_test,num_class))
        outputs=np.zeros((num_test,num_class))

        for i in range(num_class):
            if self.binary[i]!=2:
                # Label was constant at training time: predict that constant.
                pre_labels[:,i]=self.binary[i]
                outputs[:,i]=1 if self.binary[i]==1 else 0
            else:
                centers=np.vstack((self.P_Centers[i],self.N_Centers[i]))
                num_centers=centers.shape[0]

                if num_centers>=5000:
                    raise Exception('Too many cluster centers, please try to decrease the number of clusters (i.e. decreasing the value of ratio) and try again...')

                testing_instance_data=euclidean_distances(test_data,centers)

                model=self.model[i]
                pre_labels[:,i]=model.predict(testing_instance_data)
                # Probability of the +1 class: look its column up through
                # classes_ instead of hard-coding index 1 (classes_ is the
                # sorted label array sklearn uses for predict_proba columns).
                pos_col=int(np.where(model.classes_==1)[0][0])
                outputs[:,i]=model.predict_proba(testing_instance_data)[:,pos_col]

        HammingLoss=Example_based.Hamming_Loss(test_target,pre_labels)
        OneError=Example_based.One_Error(test_target,outputs)
        Coverage=Example_based.Coverage(test_target,outputs)
        RankingLoss=Example_based.Ranking_Loss(test_target,outputs)
        AveragePrecision=Example_based.Average_Precision(test_target,outputs)
        AUCMacro=Label_based.AUC_Macro(test_target,outputs)

        return [HammingLoss, OneError, Coverage, RankingLoss, AveragePrecision,AUCMacro]
        


if __name__ == '__main__':
    # Other candidate datasets: birds, CAL500, corel5k, emotions, enron,
    # genbase, Image, languagelog, recreation, scene, slashdot, yeast.
    datasets_name=['image','yeast']

    dataset=Datasets()
    ratio = 0.1
    svm_type = 'linear'
    for i in range(1):  # currently only the first dataset is evaluated
        [data,target]=dataset.get_data(datasets_name[i])
        print('The attributes of the datasets named %s are as follows:'%datasets_name[i])
        print(dataset.attr)

        # 10-fold cross-validation with a fixed seed for reproducibility.
        folds=KFold(n_splits=10,shuffle=True,random_state=8888)
        result=[]
        for train_idx,test_idx in folds.split(data):
            classifier=LIFT(ratio,svm_type)
            classifier.fit(data[train_idx],target[train_idx])
            result.append(classifier.predict(data[test_idx],target[test_idx]))

        # Report mean +- std of each metric over the folds.
        res_mean=np.mean(result,axis=0)
        res_std=np.std(result,axis=0)
        metric_names=['Hamming Loss:','One Error:','Coverage:',
                      'Ranking Loss:','Average Precision:','Macro Averaging AUC:']
        for label,mean_val,std_val in zip(metric_names,res_mean,res_std):
            print(label,mean_val,'+-',std_val)