#!/usr/bin/python3
# by Sun Smallwhite <niasw@pku.edu.cn>(https://github.com/niasw)
# this branch can choose methods and measure time cost

import sw.calc.calcMat
import sw.calc.gcfClus
import sw.calc.isoClus
import sw.calc.dypClus
import sw.calc.pckClus
import sw.io.loadCSV
import sw.io.saveJSON
import sw.io.adaptor
import scipy.sparse
import numpy
import json
import time

timestart=0.; # module-level timing placeholder
timefinal=0.; # NOTE(review): every function below rebinds timestart/timefinal as locals, so these globals appear unused — confirm before removing
# Dispatch table: method key -> clustering implementation (used by clusMethod).
methods={'gcf':sw.calc.gcfClus.calcClusters,'iso':sw.calc.isoClus.calcClusters,'dyp':sw.calc.dypClus.calcClusters};

def clusMethod(adjmat,clusnum=40,history=None,monitor=False,method='gcf'):
  '''
Cluster the nodes of adjmat with the eigenvector-based solver selected by
`method` ('gcf', 'iso' or 'dyp' — see the module-level `methods` table).

assign history=[] to enable history

input:
  adjmat: square adjacency / flow matrix
  clusnum: requested number of clusters
  history: None disables history output; [] enables it
  monitor: verbosity flag forwarded to the cluster solver
  method: key into the `methods` dispatch table
output:
  stats,clusters            (history is None)
  stats,clusters,histinit   (history enabled)
  '''
  stats=[]
  # time.clock() was removed in Python 3.8; perf_counter() is the
  # recommended monotonic replacement for measuring elapsed intervals.
  timestart=time.perf_counter() # >>>
  eigvs=sw.calc.calcMat.maxEigVec(adjmat)
  timefinal=time.perf_counter() # <<<
  print('Time for maxEigVec: '+str(timefinal-timestart)+' s')
  eigvec=eigvs['eigenvector']
  eigval=eigvs['eigenvalue']
  timestart=time.perf_counter() # >>>
  clusters=methods[method](eigvec,clusnum=clusnum,monitor=monitor)
  timefinal=time.perf_counter() # <<<
  print('Time for calcClusters ('+method+'): '+str(timefinal-timestart)+' s')
  stats.append({'time':(timefinal-timestart)})
  timestart=time.perf_counter() # >>>
  xvec=sw.calc.calcMat.calcXvector(clusters,len(eigvec))
  dotprod=xvec.dot(eigvec)
  # sqrt(1-(x.a)(x.a)/(x.x)/(a.a)): relative length of the component of x
  # perpendicular to the dominant eigenvector a.
  unparallel=numpy.sqrt(1-dotprod**2/numpy.float64(len(clusters))/eigvec.dot(eigvec))
  # eigval*(x.a)^2/(a.a) is the max component of the total flow; this is
  # the quantity the clustering maximizes.
  primarycomponent=dotprod**2*eigval/eigvec.dot(eigvec)
  # x//a is the best the continuous problem can reach; the discrete
  # solution must stay below this limit.
  continuouslimit=numpy.float64(len(clusters))*eigval
  totalflow=xvec.dot(xvec*adjmat) # total flow = x'Ax
  stats[0]['unparallel']=numpy.float64(unparallel.real)
  stats[0]['primarycomponent']=numpy.float64(primarycomponent.real)
  stats[0]['continuouslimit']=numpy.float64(continuouslimit.real)
  stats[0]['dotprod']=numpy.float64(dotprod.real)
  stats[0]['clusnum']=len(clusters)
  stats[0]['nodenum']=len(eigvec)
  stats[0]['totalflow']=numpy.float64(totalflow.real)
  timefinal=time.perf_counter() # <<<
  print('Time for calcXvector: '+str(timefinal-timestart)+' s')
  if (history is None):
    timestart=time.perf_counter() # >>>
    clusters=sw.calc.calcMat.simplify(None,clusters)
    timefinal=time.perf_counter() # <<<
    print('Time for organizing result: '+str(timefinal-timestart)+' s')
    return stats,clusters
  else:
    timestart=time.perf_counter() # >>>
    clusters,histinit,trash=sw.calc.calcMat.simplify(None,clusters,[])
    timefinal=time.perf_counter() # <<<
    print('Time for organizing result: '+str(timefinal-timestart)+' s')
    return stats,clusters,histinit

def pip_clusMethod(adjmat,numvec,lastcluslist=None,lastflowmat=None,lastnumvec=None,clusnum=40,lasthistory=None,monitor=False):
  '''
Pipeline stage of the stair clustering: cluster the (possibly already
aggregated) flow matrix with the 'pck' solver, weighted by the number vector.

assign lasthistory=[] to enable history in initialization
adjmat is the origin flow matrix
numvec is the origin number vector

input:
  adjmat: original flow matrix
  numvec: original number vector
  lastcluslist: clustering from the previous stage (None at pipeline start)
  lastflowmat/lastnumvec: aggregated matrix/vector from the previous stage;
    recomputed from lastcluslist when both are None
  clusnum: requested number of clusters
  lasthistory: None disables history; a list enables it
  monitor: verbosity flag forwarded to the solver
output:
  stats,newcluslist                          (history disabled)
  stats,newcluslist,newhistory,lasthistory   (history enabled)
  '''
  if ((lastflowmat is None) and (lastnumvec is None)): # need to calculate flowmat from last
    if (lastcluslist is not None): # in the middle of a pipeline
      timestart=time.perf_counter() # >>> (time.clock() was removed in Python 3.8)
      lastflowmat,lastnumvec=sw.calc.calcMat.pip_flowMat(adjmat,numvec,lastcluslist)
      timefinal=time.perf_counter() # <<<
      print('Time for pipeline flow matrix calculation: '+str(timefinal-timestart)+' s')
    else: # first time (pipeline start)
      lastflowmat=adjmat
      lastnumvec=numvec
  elif (lastflowmat.shape[0]!=lastflowmat.shape[1]):
    # str() the shape entries: concatenating int to str raises TypeError
    # and would mask the intended diagnostic.
    raise(Exception("Error: LastFlowMat is not square. Row: "+str(lastflowmat.shape[0])+", Col: "+str(lastflowmat.shape[1])))
  elif (lastflowmat.shape[0]!=len(lastnumvec)):
    raise(Exception("Error: LastFlowMat and LastNumVec do not match. Mat Size: "+str(lastflowmat.shape[0])+", Vec Size: "+str(len(lastnumvec))))
  stats=[]
  timestart=time.perf_counter() # >>>
  lastflowmat=scipy.sparse.coo_matrix(lastflowmat) # only sparse matrix has partly eigs solver.
  timefinal=time.perf_counter() # <<<
  print('Time for sparse structure transforming: '+str(timefinal-timestart)+' s')
  timestart=time.perf_counter() # >>>
  eigvs=sw.calc.calcMat.maxEigVec(lastflowmat)
  timefinal=time.perf_counter() # <<<
  print('Time for maxEigVec: '+str(timefinal-timestart)+' s')
  eigvec=eigvs['eigenvector']
  eigval=eigvs['eigenvalue']
  timestart=time.perf_counter() # >>>
  clusters=sw.calc.pckClus.calcClusters(eigvec,lastnumvec,clusnum=clusnum,monitor=monitor)
  timefinal=time.perf_counter() # <<<
  print('Time for calcClusters (pck): '+str(timefinal-timestart)+' s')
  stats.append({'time':(timefinal-timestart)})
  timestart=time.perf_counter() # >>>
  xvec=sw.calc.calcMat.calcXvector(clusters,len(eigvec))
  dotprod=xvec.dot(eigvec)
  # sqrt(1-(x.a)(x.a)/(x.x)/(a.a)): relative length of the component of x
  # perpendicular to the dominant eigenvector a.
  unparallel=numpy.sqrt(1-dotprod**2/numpy.float64(len(clusters))/eigvec.dot(eigvec))
  # eigval*(x.a)^2/(a.a) is the max component of the total flow; this is
  # the quantity the clustering maximizes.
  primarycomponent=dotprod**2*eigval/eigvec.dot(eigvec)
  # x//a is the best the continuous problem can reach; the discrete
  # solution must stay below this limit.
  continuouslimit=numpy.float64(len(clusters))*eigval
  totalflow=xvec.dot(xvec*lastflowmat) # total flow = x'Ax
  stats[0]['unparallel']=numpy.float64(unparallel.real)
  stats[0]['primarycomponent']=numpy.float64(primarycomponent.real)
  stats[0]['continuouslimit']=numpy.float64(continuouslimit.real)
  stats[0]['dotprod']=numpy.float64(dotprod.real)
  stats[0]['clusnum']=len(clusters)
  stats[0]['nodenum']=len(eigvec)
  stats[0]['totalflow']=numpy.float64(totalflow.real)
  if (lasthistory is None): # history flag off
    if (lastcluslist is not None): # in the middle of a pipeline
      timestart=time.perf_counter() # >>>
      newcluslist=sw.calc.calcMat.simplify(lastcluslist,clusters)
      timefinal=time.perf_counter() # <<<
      print('Time for organizing result: '+str(timefinal-timestart)+' s')
    else: # first time
      clusters=sw.calc.calcMat.simplify(None,clusters)
      newcluslist=clusters
    return stats,newcluslist
  else: # history flag on
    timestart=time.perf_counter() # >>>
    newcluslist,newhistory,lasthistory=sw.calc.calcMat.simplify(lastcluslist,clusters,lasthistory)
    timefinal=time.perf_counter() # <<<
    print('Time for organizing result: '+str(timefinal-timestart)+' s')
    return stats,newcluslist,newhistory,lasthistory

def Adaptor4VEGAS(in_linkfile,in_numvecfile,out_clusprefix,clusnumlist,indexstart=0,withdata=False,history=False,monitor=False,localoutput=False,method='gcf'):
  '''
# adaptor to adapt Xuan Yuan's IO from VEGAS
#
# input:
#  in_linkfile: the file stores links. (sparse matrix coo format)
#  in_numvecfile: the file stores number vector. (column csv)
#  out_clusprefix: output filename prefix
#  clusnumlist: the list of cluster number requests
#  indexstart: index start from 1
#  withdata: in_linkfile contains value column
#  history: whether generate history json data
#  monitor: see eigenvector figures and error reports
#  localoutput: create data file for Sun Sibai's preview engine
#  method: clustering method key forwarded to clusMethod
# output:
#  write clustering result into files: out_clusprefix.clustering_times.paper_number.cluster_number
  '''
  stats=[] # statistical items
  stat=[] # statistical item
  print('Loading Link Data & Number Vector Data ...')
  # time.clock() was removed in Python 3.8; perf_counter() is the
  # recommended replacement for interval timing.
  timestart=time.perf_counter() # >>>
  adjmat=sw.io.loadCSV.linkCSV2adjMat('../dat/'+in_linkfile,indexstart=indexstart,withdata=withdata)
  timefinal=time.perf_counter() # <<<
  print('Time for loading adjacent matrix: '+str(timefinal-timestart)+' s')
  print('Total Node Number: '+str(adjmat.shape[0]))
  if (withdata):
    timestart=time.perf_counter() # >>>
    numvec=sw.io.loadCSV.loadCSVvector('../dat/'+in_numvecfile)
    timefinal=time.perf_counter() # <<<
    print('Time for loading number vector: '+str(timefinal-timestart)+' s')
  else:
    timestart=time.perf_counter() # >>>
    numvec=numpy.ones(adjmat.shape[0],dtype=int) # every node counts one paper
    timefinal=time.perf_counter() # <<<
    print('Time for setting number vector: '+str(timefinal-timestart)+' s')
  print('Total Paper Number: '+str(numvec.sum()))
  if (adjmat.shape[0]!=adjmat.shape[1]):
    raise(Exception('Adjacent Matrix is not square. Row:'+str(adjmat.shape[0])+' ,Col:'+str(adjmat.shape[1])))
  if (adjmat.shape[0]!=len(numvec)):
    raise(Exception('Dimension of number vector and adjacent matrix do not match. Mat:'+str(adjmat.shape[0])+' ,Vec:'+str(len(numvec))))
  cluslist=None # first time: cluslist=None
  histlist=[]
  flowmat=None
  curnumvec=None
  for it in range(0,len(clusnumlist)):
    clusnum=clusnumlist[it]
    print('Stair Clustering ['+str(clusnum)+','+str(it+1)+'/'+str(len(clusnumlist))+'] ...')
    if (history): # history flag on
      if (cluslist is None): # first time
        if (withdata): # origin data with numvec, use incremental algorithm only
          stat,cluslist,histnew,histold=pip_clusMethod(adjmat,numvec,cluslist,lastflowmat=flowmat,lastnumvec=curnumvec,clusnum=clusnum,lasthistory=[],monitor=monitor)
          histlist=[histnew]
        else: # origin data without numvec, use normal algorithm to speed up
          stat,cluslist,histinit=clusMethod(adjmat,clusnum=clusnum,history=[],monitor=monitor,method=method)
          histlist=[histinit]
      else: # in the middle of a pipeline
        stat,cluslist,histnew,histold=pip_clusMethod(adjmat,numvec,cluslist,lastflowmat=flowmat,lastnumvec=curnumvec,clusnum=clusnum,lasthistory=histlist[0],monitor=monitor)
        histlist[0]=histold
        histlist.insert(0,histnew)
    else: # history flag off
      if (cluslist is None): # first time
        stat,cluslist=clusMethod(adjmat,clusnum=clusnum,monitor=monitor,method=method)
      else: # in the middle of a pipeline
        stat,cluslist=pip_clusMethod(adjmat,numvec,cluslist,lastflowmat=flowmat,lastnumvec=curnumvec,clusnum=clusnum,monitor=monitor)
    if (clusnum<len(numvec)): # otherwise the clusters do not change.
      timestart=time.perf_counter() # >>>
      flowmat,curnumvec=sw.calc.calcMat.pip_flowMat(adjmat,numvec,cluslist)
      timefinal=time.perf_counter() # <<<
      print('Time for pipeline flow matrix calculation: '+str(timefinal-timestart)+' s')
      stat[0]['timeflowmatcalc']=timefinal-timestart
      if monitor:
        print('-> Number of Clusters = '+str(len(cluslist)))
        print('-> Total Flow = '+str(flowmat.sum()))
        print('-> Total Flow Square = '+str(numpy.linalg.norm(flowmat,ord='fro')**2))
      if (localoutput):
        timestart=time.perf_counter() # >>>
        sw.io.saveJSON.saveJSON('../out/'+out_clusprefix+'_clus.'+str(it+1)+'.'+str(numvec.sum())+'.'+str(clusnum)+'.json',cluslist)
        sw.io.saveJSON.saveJSON('../out/'+out_clusprefix+'_flow.'+str(it+1)+'.'+str(numvec.sum())+'.'+str(clusnum)+'.json',flowmat.tolist())
        timefinal=time.perf_counter() # <<<
        print('Time for saving local results (flow and clus): '+str(timefinal-timestart)+' s')
    # flowmat is still None when the very first request had
    # clusnum>=len(numvec); skip the flow stat instead of crashing.
    if (flowmat is not None):
      stat[0]['flow']=flowmat.sum()
    stats.extend(stat)
    stat=[]
  timestart=time.perf_counter() # >>>
  sw.io.saveJSON.saveJSON('../out/'+out_clusprefix+'_stat.'+str(numvec.sum())+'.'+str(clusnumlist[0])+'.json',stats)
  timefinal=time.perf_counter() # <<<
  print('Time for quality criteria saving (parallel & total flow): '+str(timefinal-timestart)+' s')
  if (history):
    timestart=time.perf_counter() # >>>
    histlist=sw.io.adaptor.histIdxMap(histlist,indexstart)
    sw.io.saveJSON.saveJSON('../out/'+out_clusprefix+'_hist.'+str(numvec.sum())+'.'+str(clusnumlist[0])+'.json',histlist)
    timefinal=time.perf_counter() # <<<
    print('Time for saving history: '+str(timefinal-timestart)+' s')

if (__name__=='__main__'):
  '''
# python qualityCheck.py in_linkfile in_numvecfile out_clusprefix clusnumlist indexstart=0 withdata=True history=False monitor=False localoutput=False method='gcf'
# this branch can choose methods and measure time cost
  '''
  import sys
  # Defaults for omitted CLI arguments. clusnumlist defaults to the JSON
  # text '[]' (not a Python list): json.loads requires str/bytes, so the
  # old default [] raised TypeError whenever that argument was missing.
  args=[None,'','','','[]',0,'True','False','False','False','gcf']
  for it,arg in enumerate(sys.argv):
    args[it]=arg
  Adaptor4VEGAS(args[1],args[2],args[3],json.loads(args[4]),int(args[5]),args[6]=='True',args[7]=='True',args[8]=='True',args[9]=='True',args[10])
