from DATASET import Datasets
from METRICS import Example_based, Label_based
from scipy.spatial.distance import pdist, squareform
from scipy import optimize


import numpy as np
import sys
from sklearn.model_selection import KFold
from sklearn.neighbors import NearestNeighbors
from copy import copy
from copy import deepcopy


class RLI():
    """Reliable Label Importance (RELIAB-style) multi-label classifier.

    First estimates implicit relative labeling-importance (RLI) degrees U
    over an augmented label space (a virtual "zeroth" label plus the q real
    labels), either by global label propagation ('global' mode) or by local
    neighbourhood reconstruction ('local' mode), then fits a softmax linear
    model theta by minimizing the objective V against those degrees.
    """

    def __init__(self, mode, tau, labda, alpha, k, rou):
        # mode : 'global' (label propagation) or 'local' (kNN reconstruction)
        # tau  : initial importance assigned to the virtual zeroth label
        # labda: trade-off weight between the two terms of the objective V
        # alpha: propagation restart weight (global mode only)
        # k    : number of neighbours (local mode only)
        # rou  : mix between own labels and reconstructed labels (local mode)
        self.mode = mode
        self.tau = tau
        self.labda = labda
        self.alpha = alpha
        self.k = k
        self.rou = rou

    def similarity(self, xi, xj):
        """Gaussian-style similarity between two feature vectors.

        NOTE(review): the Euclidean norm is NOT squared here, so this is
        exp(-||xi-xj|| / (2*sigma^2)) rather than the standard RBF kernel;
        kept as-is to preserve the original behaviour.
        """
        sigma = 1
        return np.exp(-1 * (np.linalg.norm((xi - xj))) / (2 * sigma * sigma))

    def f(self, theta, x):
        """Softmax outputs: row-normalized exp(x @ theta.T), shape (p, q+1)."""
        res = np.exp(x.dot(theta.T))
        res = res / (np.sum(res, axis=1).reshape(-1, 1))
        return res

    def V(self, theta):
        """Objective minimized over theta (passed flattened by the optimizer).

        Combines (1) agreement of the model outputs with the estimated RLI
        degrees self.U and (2) a ranking-style term pushing relevant labels
        above the virtual zeroth label and irrelevant labels below it.
        """
        num_class = self.train_target.shape[1]
        num_train, num_dim = self.train_data.shape
        theta = theta.reshape((num_class + 1, num_dim))

        # BUG FIX: the original referenced the module-level globals
        # `train_data` / `train_target` here, which only worked by accident
        # when run as the bundled script; use the fitted attributes so the
        # class also works when imported as a library.
        pre_result = self.f(theta, self.train_data)
        res_1 = -np.sum(self.U * pre_result)
        res_2 = 0
        for i in range(num_train):
            y0 = pre_result[i, 0]  # predicted mass of the virtual label

            # column 0 is the virtual label, so real labels shift by +1
            idx_related = np.where(self.train_target[i] == 1)[0] + 1
            idx_related_len = len(idx_related)
            related = pre_result[i, idx_related]

            idx_unrelated = np.where(self.train_target[i] == -1)[0] + 1
            idx_unrelated_len = len(idx_unrelated)
            unrelated = pre_result[i, idx_unrelated]

            # balance factor between the relevant and irrelevant sides
            r = idx_related_len / idx_unrelated_len

            res_2 += -((np.sum(related) - idx_related_len * y0)
                       + r * (idx_unrelated_len * y0 - np.sum(unrelated)))

        return res_1 + self.labda * res_2

    def fit(self, X, Y):
        """Estimate the RLI degrees U from (X, Y) and optimize theta.

        X : (p, d) feature matrix.
        Y : (p, q) label matrix with entries in {-1, +1}.

        Raises ValueError for an unknown self.mode.
        """
        train_data, train_target = copy(X), copy(Y)
        num_train, num_dim = train_data.shape
        num_target, num_class = train_target.shape

        if self.mode == 'global':
            # Construct the similarity matrix W (p x p)
            W = squareform(pdist(train_data, self.similarity))
            # Symmetrically normalized label propagation matrix P (p x p)
            D_pro = np.diag(np.sum(W, axis=1) ** (-0.5))
            P = D_pro.dot(W).dot(D_pro)
            # Initial-importance matrix Fai (p x (q+1)); column 0 is the
            # virtual label with constant importance tau
            Fai = np.ones((num_train, 1))
            Fai = Fai * self.tau
            temp = deepcopy(train_target)
            temp[temp == -1] = 0
            Fai = np.hstack((Fai, temp))
            # Closed-form converged solution of the propagation
            F_star = (1 - self.alpha) * np.linalg.inv(
                np.identity(num_train) - self.alpha * P).dot(Fai)
            # Implicit RLI degrees: rows normalized to sum to 1
            U = F_star / (np.sum(F_star, axis=1).reshape(-1, 1))
        elif self.mode == 'local':
            # Construct the augmented label space (p x (q+1))
            Y_ = np.ones((num_train, 1)) * self.tau
            temp = deepcopy(train_target)
            temp[temp == -1] = 0
            Y_ = np.hstack((Y_, temp))
            # k nearest neighbours of each point, excluding the point itself
            # (first neighbour returned by kneighbors is the query point)
            knn = NearestNeighbors(n_neighbors=self.k + 1).fit(train_data)
            indices = knn.kneighbors(X, return_distance=False)[:, 1:]
            X = train_data[indices]  # (p, k, d) neighbour features
            Y = Y_[indices]          # (p, k, q+1) neighbour labels
            # Reconstruction coefficients beta (p x k): least-squares fit of
            # each point as a combination of its neighbours
            beta = np.zeros((num_train, self.k))
            for i in range(num_train):
                temp_X = X[i].T  # (d, k)
                # np.linalg.solve is numerically safer than forming the
                # explicit inverse of the Gram matrix (same math)
                beta[i] = np.linalg.solve(temp_X.T.dot(temp_X),
                                          temp_X.T.dot(train_data[i]))

            # Confidence vectors g: mix of own labels and reconstruction,
            # clipped at zero
            g = np.zeros((num_train, num_class + 1))
            for i in range(num_train):
                temp_Y = Y[i].T
                g[i] = self.rou * Y_[i] + (1 - self.rou) * (temp_Y.dot(beta[i]))
                g[i][g[i] < 0] = 0
            # Implicit RLI degrees: rows normalized to sum to 1
            U = g / (np.sum(g, axis=1).reshape(-1, 1))
        else:
            # BUG FIX: the original silently fell through with U undefined
            # and later crashed with NameError; fail fast instead.
            raise ValueError("mode must be 'global' or 'local', got %r"
                             % (self.mode,))

        # Initialize model parameters theta uniformly and optimize V
        self.theta = np.ones((num_class + 1, num_dim)) / ((num_class + 1) * num_dim)
        self.train_data = train_data
        self.U = U
        self.train_target = train_target
        opt = optimize.minimize(self.V, self.theta.flatten(), method='L-BFGS-B')
        # BUG FIX: use attribute access on the OptimizeResult (documented
        # API) instead of dict-style .get(), which could silently yield None.
        self.theta = opt.x

    def predict(self, test_X, test_Y):
        """Predict on test_X and score against test_Y.

        Returns [OneError, Coverage, RankingLoss, AveragePrecision,
        Macro_averaging_F1, Micro_averaging_F1].
        """
        test_data, test_target = copy(test_X), copy(test_Y)
        num_test, num_dim = test_data.shape
        num_test, num_class = test_target.shape

        theta = self.theta.reshape((num_class + 1, num_dim))

        pre_result = self.f(theta, test_data)

        # A label is predicted relevant iff its probability exceeds the
        # virtual zeroth label's; everything else stays -1.
        # (FIX: the original re-filled the whole matrix with -1 inside the
        # per-row loop on every iteration -- same result, quadratic waste.)
        pre_labels = -np.ones((num_test, num_class))
        outputs = pre_result[:, 1:]

        for i in range(num_test):
            idx_related = np.where(pre_result[i, :] > pre_result[i, 0])[0] - 1
            pre_labels[i, idx_related] = 1

        OneError = Example_based.One_Error(test_target, outputs)
        Coverage = Example_based.Coverage(test_target, outputs)
        RankingLoss = Example_based.Ranking_Loss(test_target, outputs)
        AveragePrecision = Example_based.Average_Precision(test_target, outputs)
        Macro_averaging_F1 = Label_based.Macro_Averaging(test_target, pre_labels)
        Micro_averaging_F1 = Label_based.Micro_Averaging(test_target, pre_labels)

        return [OneError, Coverage, RankingLoss, AveragePrecision,
                Macro_averaging_F1, Micro_averaging_F1]
        
    
    
if __name__=='__main__':
    # Experiment driver: run RLI with 2-fold cross validation on the
    # selected dataset(s) and report the six standard multi-label metrics.
    datasets_name=['CAL500','image','yeast']
    dataset=Datasets()
    ###parameter######
    mode='local'
    tau=0.1#[0.1,0.15,0.2,0.25,0.3,0.3,0.4,0.45,0.5]
    labda=0.1#[0.001,0.01,0.1,10]
    
    if mode=='global':#RELIAB-LP
        alpha=0.5
        k=None
        rou=None
    elif mode=='local':
        alpha=None
        k=10#10 for regular-scale and 30 for large-scale
        rou=0.3
    else:
        pass
    ################    
    
    for i in range(1):
        [data,target]=dataset.get_data(datasets_name[i])
        print('The attributes of the datasets named %s are as follows:'%datasets_name[i])
        print(dataset.attr)
        
        Kf=KFold(n_splits=2,shuffle=True,random_state=87654)
        result=[]
        for dev_index, val_index in Kf.split(data):
            train_data,test_data=data[dev_index],data[val_index]
            train_target,test_target=target[dev_index],target[val_index]
            
            # BUG FIX: this row removal is a CAL500-specific workaround
            # (row 243 duplicates train_data[228] under this 2-fold split);
            # the original applied it unconditionally to every dataset,
            # which would delete a valid sample elsewhere.
            if datasets_name[i]=='CAL500':
                train_data=np.delete(train_data,243,axis=0)
                train_target=np.delete(train_target,243,axis=0)
            
            rli=RLI(mode,tau,labda,alpha,k,rou)
            rli.fit(train_data,train_target)
            result.append(rli.predict(test_data,test_target))
        # Aggregate the per-fold metric vectors into mean +- std
        res_mean=np.mean(result,axis=0)
        res_std=np.std(result,axis=0)
        print('One Error:',res_mean[0],'+-',res_std[0])
        print('Coverage:',res_mean[1],'+-',res_std[1])
        print('Ranking Loss:',res_mean[2],'+-',res_std[2])
        print('Average Precision:',res_mean[3],'+-',res_std[3])
        print('Macro averaging F1:',res_mean[4],'+-',res_std[4])
        print('Micro averaging F1:',res_mean[5],'+-',res_std[5])