#!/usr/local/bin/python
#-*- encoding:utf-8 -*-

import numpy as np;
import scipy.sparse as sp
from scipy import linalg
import time
import threading
import os
from multiprocessing import Process, Manager
import profile
import sys
import utils
import hashlib
from scipy.sparse.linalg import eigsh

# Hyper-parameters for the RMLS solver, read from the shared config file.
config = utils.get_config();

beta = config.getfloat('rmls', 'beta');        # soft-threshold level applied to Lx entries (see updateLxu)
gamma = config.getfloat('rmls', 'gamma')       # soft-threshold level applied to Ly entries (see updateLyv)
thetax = config.getfloat('rmls', 'thetax');    # target row norm for Lx rows (via calcC rescaling)
thetay = config.getfloat('rmls', 'thetay');    # target row norm for Ly rows (via calcC rescaling)
converge_threshhold= config.getfloat('rmls', 'converge_threshhold');  # stop when summed Frobenius change falls below this
# Feature dimensions of the query (x) side and the document (y) side.
dx = 20134
dy = 20134

# Latent dimension of the learned mappings Lx (dx x d) and Ly (dy x d).
d = 100

# Cumulative file-count boundaries of the five training batches:
# batch i processes logFileList[previous_boundary:boundary].
basic = 1;
firstbatch = 25;
secondbatch = 49;
thirdbatch = 73;
fourthbatch = 97;
# Precomputed column means loaded from disk. Not referenced elsewhere in this
# chunk -- presumably consumed via utils.computeWandN2; TODO confirm.
D_mean = np.matrix(np.load('D.mean.npy'));
print type(D_mean), D_mean.shape
Q_mean = np.matrix(np.load('Q.mean.npy'));
print type(Q_mean), Q_mean.shape

class COO_Struct:
    """Incremental builder of COO-format sparse-matrix triples.

    Keeps three parallel lists (row indices, column indices, values) that can
    be fed directly to scipy.sparse.coo_matrix((data, (row, col))).
    """

    def __init__(self):
        # One entry per stored triple, kept in insertion order.
        self.data = []
        self.row = []
        self.col = []

    def append(self, r, c, d):
        """Record value d at matrix position (r, c)."""
        self.row.append(r)
        self.col.append(c)
        self.data.append(d)

# Hook invoked on the assembled relevance matrix P after each batch; intended
# to compute a per query-document pair probability, currently a no-op.
def updateDig(dig):
    """Do nothing (placeholder for future per-pair probability updates)."""
    pass

def getRow2(s):
    """Parse a libsvm-style line 'idx:val idx:val ...' into feature tuples.

    Indices in the input are 1-based; the result holds
    (zero-based index, float value) pairs in input order.
    """
    parsed = []
    for token in s.strip().split(' '):
        fields = token.strip().split(':')
        parsed.append((int(fields[0]) - 1, float(fields[1])))
    return parsed

# Flat list of every log-file path discovered by listAllLogFile.
logFileList = []

def listAllLogFile(logDir):
    """Recursively collect regular files under logDir into logFileList.

    Directories are descended into; paths that are neither a directory nor a
    regular file (e.g. broken symlinks) are silently skipped.
    """
    if os.path.isfile(logDir):
        logFileList.append(logDir)
    elif os.path.isdir(logDir):
        for entry in os.listdir(logDir):
            listAllLogFile(os.path.join(logDir, entry))

def processline(line, D_Struct, Q_Struct, P_Struct, queryMap, count):
    global nSample;
    li = line.split('\t', 1);
    if (len(li) != 2):
        return;
    res = float(li[0]);
    print count;
    pair = li[1];
    if count % 10000 == 0:
        print 'processline %d' % count;
    digest = hashlib.sha224(pair).hexdigest();
    if queryMap.has_key(digest):
        P_Struct.append(queryMap[digest], queryMap[digest], res);
    else:
        instance = pair.split('\t');
        xi = getRow2(instance[0]);
        yij = getRow2(instance[1]);
    
        if len(xi) == 0:
            return;
        if len(yij) == 0:
            return;
        for j, data in xi:
            Q_Struct.append(nSample, j, data);
        for j, data in yij:
            D_Struct.append(nSample, j, data);

        P_Struct.append(nSample, nSample, res);
        queryMap[digest] = nSample;
        nSample += 1;

# Global sample counter, advanced by processline() for each new pair.
nSample = 0;
# Tricky part: D, P, Q must be constructed before n (the sample count) is
# known, hence the incremental COO_Struct builders.
def batchProcess(training, c, batchstop):
    """Build the sparse batch statistics from log files logFileList[c:batchstop].

    Parameters:
        training:  unused here (threaded through by the caller).
        c:         index of the first log file to process.
        batchstop: index one past the last log file to process.

    Returns (W, N, T) where W is the CSC statistic produced by
    utils.computeWandN2 from the relevance-weighted document matrix and the
    query matrix, N is W transposed, and T = 20 is the RMLS iteration cap.
    """
    D_Struct = COO_Struct();
    P_Struct = COO_Struct();
    Q_Struct = COO_Struct();

    #Make sure the dimension of D and Q
    # (a zero entry in the last column pins the column count to dx / dy even
    # if no real feature reaches that index)
    D_Struct.append(0, dx-1, 0);
    Q_Struct.append(0, dy-1, 0);

    queryMap = {}
    count = 0;
    global nSample;
    nSample = 0;  # restart sample numbering for every batch
    while c < batchstop:
        logFile = open(logFileList[c], 'r');
        print 'Processing %s' % logFileList[c];
        for line in logFile:
            if not line.startswith('*'):  # '*'-prefixed lines are skipped
                count += 1;
                processline(line, D_Struct, Q_Struct, P_Struct, queryMap, count);
        logFile.close();
        c += 1;

    # Row counts are implied by the largest row index appended above.
    D =  sp.coo_matrix((D_Struct.data, (D_Struct.row, D_Struct.col)), dtype=np.float64);
    P = sp.coo_matrix((P_Struct.data, (P_Struct.row, P_Struct.col)), dtype=np.float64);
    Q = sp.coo_matrix((Q_Struct.data, (Q_Struct.row, Q_Struct.col)), dtype =np.float64);
    updateDig(P);  # currently a no-op hook
   
    #P = P * 1/float(P.sum());
    # Weight each document row by its relevance score (P is diagonal-ish).
    D = P.tocsr().dot(D.tocsc());
    Q = Q.tocsc();
    # NOTE(review): computeWandN2's exact contract lives in utils --
    # presumably a cross statistic between D and Q; confirm there.
    W= sp.csc_matrix(utils.computeWandN2(D, Q, D.mean(0), Q.mean(0)));
    N = W.transpose().tocsc();
    #W = D.tocsc().transpose().dot(P.tocsc()).dot(Q.tocsc());
    #N = Q.tocsc().transpose().dot(P.tocsc()).dot(D.tocsc());
    T = 20;  # iteration budget handed to RMLS

    return (W, N, T);
    
def onlineControl():
    """Top-level driver: collect the log files, initialise the global Lx/Ly
    mappings, then run RMLS over five cumulative batches of logs, saving the
    model after each pass (see saveModel inside RMLS).
    """
    logDir = '/home/wangshuxin/destLog/';
    listAllLogFile(logDir);
    logFileList.sort();
    print logFileList;

    training = None;  # unused; threaded through to batchProcess as-is
    global Lx, Ly;
    # Seed Lx/Ly with d orthonormal columns: the top-d eigenvectors of the
    # dx x dx identity matrix (effectively a one-hot basis).
    I = sp.identity(dx);
    evals, evecs = eigsh(I, d, which='LM', ncv=3*d);
    Lx = sp.lil_matrix(evecs);
    # NOTE(review): Ly reuses the same eigenvectors, which assumes dy == dx.
    Ly = sp.lil_matrix(evecs);

    #Lx = sp.rand(dx, d, density=0.1, format='lil', dtype=np.float64);
    #Ly = sp.rand(dy, d, density=0.1, format='lil', dtype=np.float64);
    c = 0;

    #batch 0: files [0, basic)
    batch = 0;
    print 'Processing %d batch' % batch;
    W0,N0, T= batchProcess(training, c, basic); 

    print W0.shape, N0.shape;
    RMLS(W0.tocsc(), N0.tocsc(), T, batch);
    
    #batch 1: files [basic, firstbatch)
    batch = 1;
    print 'Processing %d batch' % batch;
    W1, N1, T = batchProcess(training, basic, firstbatch);
    #W1 = W1 + W0;
    #N1 = N1 + N0;
    RMLS(W1.tocsc(), N1.tocsc(), T, batch);

    #batch 2: files [firstbatch, secondbatch)
    batch = 2;
    print 'Processing %d batch' % batch;
    W2, N2,T = batchProcess(training, firstbatch, secondbatch);
    #W2 = W2 + W1;
    #N2 = N2 + N1;
    # Free the previous batch's statistics before the next RMLS pass.
    del(W1);
    del(N1);
    RMLS(W2.tocsc(), N2.tocsc(), T, batch);

    #batch 3: files [secondbatch, thirdbatch)
    batch = 3;
    print 'Processing %d batch' % batch;
    W3, N3, T = batchProcess(training, secondbatch, thirdbatch);

    #W3 = W3 + W2;
    #N3 = N3 + N2;
    del(W2);
    del(N2);
    RMLS(W3.tocsc(), N3.tocsc(), T, batch)
    
    #batch 4: files [thirdbatch, fourthbatch)
    batch = 4;
    print 'Processing %d batch' % batch;
    W4, N4, T = batchProcess(training, thirdbatch, fourthbatch);
    #W4 = W4 + W3;
    #N4 = N4 + N3;
    # Persist the final batch statistics, then run both the final (batch)
    # solver and the streaming RMLS on them.
    utils.save_sparse_csc('W4', W4.tocsc());
    utils.save_sparse_csc('N4', N4.tocsc());
    del(W3);
    del(N3);
    import RMLS_Final
    RMLS_Final.RMLS(W4.tocsc(), N4.tocsc(), T, 100);
    RMLS(W4.tocsc(), N4.tocsc(), T, batch)

def getColumnVector(s, d):
    """Parse 'idx:val idx:val ...' (1-based indices) into a d x 1 sparse column.

    Returns a scipy.sparse.csc_matrix of shape (d, 1) holding val at row
    idx-1 for every 'idx:val' token in s.
    """
    row = [];
    column = [];
    data = [];
    li = s.strip().split(' ');
    for l in li:
        pair = l.strip().split(':');
        index = int(pair[0]);
        value = float(pair[1]);
        row.append(index-1);
        column.append(0);
        data.append(value)
    # BUG FIX: the column-index list was referenced by the undefined name
    # 'col', raising NameError on every call; it is named 'column' above.
    return sp.csc_matrix((data,(row, column)), shape=(d,1));


def saveModel(Lx, Ly, batch):
    print 'Saving Model For Batch %d' % batch;

    LxFile = 'Lx_%d.txt' % batch;
    LyFile = 'Ly_%d.txt' % batch;

    utils.save_sparse_csr(LxFile, Lx.tocsr());
    utils.save_sparse_csr(LyFile, Ly.tocsr());
  

def calcC(row, theta):
    """Return the scaling factor theta / ||row||_2, or 0 for an all-zero row.

    Multiplying row by this factor rescales it to Euclidean norm theta.
    """
    norm = np.linalg.norm(row)
    if norm == 0:
        return 0
    return theta / norm

def checkConverge(LPx, LPy, Lx, Ly):
    """Dense convergence test between successive (Lx, Ly) iterates.

    Densifies the sparse deltas and sums their Frobenius norms; returns
    (converged?, distance) where converged means the sum dropped below the
    module-level converge_threshhold.
    """
    dist = np.linalg.norm((Lx - LPx).toarray()) + np.linalg.norm((Ly - LPy).toarray())
    converged = bool(dist < converge_threshhold)
    return (converged, dist)

def checkConvergeM(LPx, LPy, Lx, Ly):
    """Sparse variant of checkConverge: measures the deltas directly with
    utils.sparseFroNorm instead of densifying them first.

    Returns (converged?, summed Frobenius distance).
    """
    dist = utils.sparseFroNorm(LPx - Lx) + utils.sparseFroNorm(LPy - Ly)
    converged = bool(dist < converge_threshhold)
    return (converged, dist)

def compute(x, t):
    """Soft-threshold x by t: shrink |x| toward zero by t (clamped at 0),
    keeping the sign of x."""
    shrunk = max(np.abs(x) - t, 0)
    return np.sign(x) * shrunk

def updateLxu(slx, Lx, pid, start, end ,debug=False):
    #print 'pid: %d, start: %d, end: %d' % (pid, start, end);
    compute_lxu = np.frompyfunc(lambda x: compute(x, beta), 1, 1);
    for u in range(start, end):
        if debug:
            start = time.time();
        wu = WX.getcol(u);

        lxu= compute_lxu(wu.transpose().todense());

        cu = calcC(lxu, thetax);
        ##WARNING
        Lx[u] = cu * lxu;

        if debug:
            print  u, time.time() -start;
    slx.append((Lx, start, end));

def updateLyv(sly, Ly, pid, start, end, debug=False):
    #print 'pid: %d, start: %d, end: %d' % (pid, start, end);
    compute_lyv = np.frompyfunc(lambda x:compute(x, gamma), 1, 1);
    for v in range(start, end):
        if debug:
            start = time.time();
        nv = NY.getcol(v);
        lyv = compute_lyv(nv.transpose().todense());
        cv = calcC(lyv, thetay);
        Ly[v] = cv * lyv;

        if debug:
            print v,time.time() -start;
    sly.append((Ly, start, end));

def update(M, sl):
    """Splice worker results back into M.

    sl holds (R, s, e) triples produced by the updateLxu/updateLyv workers;
    rows s:e of each R overwrite rows s:e of M. Always returns 0 (the caller
    accumulates the return value as a norm placeholder).
    """
    for R, s, e in sl:
        M[s:e] = R[s:e]
    return 0
def RMLS(W, N, T=25, batchno=-1):
    """Alternating multi-process solver for the global mappings Lx and Ly.

    Per iteration:
      1. WX = Ly^T W, then every row of Lx is re-solved from WX by K worker
         processes (updateLxu).
      2. NY = Lx^T N, then every row of Ly is re-solved likewise (updateLyv).
    Stops when checkConvergeM reports the summed sparse Frobenius change of
    (Lx, Ly) below converge_threshhold, or after T iterations; then saves
    the model via saveModel(Lx, Ly, batchno).

    W, N: CSC statistics from batchProcess (N is W transposed).
    """
    con = -1;
  
    global WX, NY, Lx, Ly;
    K = 20;  # worker-process count for each update phase
    manager = Manager();
    t = 0
    # Seed the "last convergence" report with the model's current size.
    con = utils.sparseFroNorm(Lx) + utils.sparseFroNorm(Ly);
    global WX,NY;  # duplicate of the declaration above; harmless
    while True:
        t += 1;
        LPx = Lx.copy();
        LPy = Ly.copy();
        print 'This is %d iteration, last converge is %f' %(t, con);
        start = time.time();
        # Workers inherit WX at fork time, so it must be set before start().
        WX = Ly.tocsr().transpose().dot(W);
        #print 'WX', type(WX);
        ranx = dx/K;  # Python 2 integer division: rows per worker

        processListx = [];
        u = 0;
        slx = manager.list();  # shared list collecting (Lx, start, end) slices
        for pid in range(K-1):
            processListx.append(Process(target=updateLxu, args=(slx, Lx, pid, u, u+ranx)));
            u = u + ranx;
        # Last worker takes the remainder rows up to dx.
        processListx.append(Process(target=updateLxu, args=(slx, Lx, K-1, u, dx, False)));

        for pid in range(K):
            processListx[pid].start();
        for pid in range(K):
            processListx[pid].join();
    
        # Splice every worker's slice back into the shared Lx.
        norm =  update(Lx, slx);
 
        print 'Update Lx cost: ' + str(time.time() -start); 
    
        start = time.time();
        NY = Lx.tocsr().transpose().dot(N);
        #print 'NY', type(NY);
        #Update Ly (same fan-out scheme as the Lx phase above)
        rany = dy/K;
        processListy = [];
        v  = 0;
        sly  = manager.list();
        for pid in range(K-1):
            processListy.append(Process(target=updateLyv, args=(sly,Ly,pid, v, v+rany)));
            v = v + rany;
        processListy.append(Process(target=updateLyv, args=(sly, Ly, K-1, v, dy, False)));

        for pid in range(K):
            processListy[pid].start();
        for pid in range(K):
            processListy[pid].join();

        norm +=  update(Ly, sly)    
        print 'Update Ly cost: ' + str(time.time() - start);

        #print LPx, Lx;
        flag, con = checkConvergeM(LPx, LPy, Lx, Ly);
       
        print 'Converge: ', flag, con;
        if flag:
            break;
        if t >= T:
            break;

    saveModel(Lx, Ly, batchno);


if __name__ == '__main__':
    # Entry point: run the full multi-batch training pipeline.
    onlineControl();
