#!/usr/local/bin/python
#-*- encoding:utf-8 -*-

'''This script goes through the training file to compute word covariances for query-document pairs.'''

import numpy as np;
import scipy.sparse as sp
from scipy import linalg
import time
import threading
import os
from multiprocessing import Process, Manager
import profile
import sys
import utils
import hashlib
import resource
import argparse

# Raise the soft limit on the process data segment so the large sparse
# matrices built below can be allocated without hitting the default cap.
rsrc = resource.RLIMIT_DATA
soft, hard = resource.getrlimit(rsrc)
print 'Soft limit starts as  :', soft

resource.setrlimit(rsrc, (1024*1024*1024*180, hard)) # raise soft limit to 180 GB (hard limit unchanged)

soft, hard = resource.getrlimit(rsrc)
print 'Soft limit changed to :', soft

# Accumulates every regular file discovered by listAllLogFile.
logFileList = []

def listAllLogFile(logDir):
    """Recursively collect all regular files under logDir into logFileList.

    Directories are descended into; anything that is neither a directory
    nor a regular file (broken symlink, socket, ...) is silently skipped.
    """
    if os.path.isdir(logDir):
        for entry in os.listdir(logDir):
            listAllLogFile(os.path.join(logDir, entry))
    elif os.path.isfile(logDir):
        logFileList.append(logDir)

class COO_Struct:
    """Three parallel lists of (row, col, value) triplets, accumulated
    incrementally and later fed directly to scipy.sparse.coo_matrix."""

    def __init__(self):
        # Kept as three separate lists (not tuples) because coo_matrix
        # consumes them as (data, (row, col)).
        self.row = []
        self.col = []
        self.data = []

    def append(self, r, c, d):
        """Record a single triplet: value d at position (r, c)."""
        self.data.append(d)
        self.row.append(r)
        self.col.append(c)

#for a given query-document pair, we need to compute the probability of this pair
def updateDig(dig):
    """Placeholder hook, currently a no-op.

    batchProcess calls this on the diagonal response matrix P before
    computing W; presumably intended to reweight/normalize the diagonal
    entries — TODO confirm intended semantics before implementing.
    """
    pass

#Only for lil_matrix: relies on the S.data / S.rows row-wise storage layout.
def normlize(S):
    """Subtract each column's mean from the explicitly stored entries of S, in place.

    NOTE(review): only stored (nonzero) entries are shifted; implicit zeros
    are left untouched, so this is not a full dense column-centering.
    """
    # Column means as a flat 1-D ndarray (mean() returns a matrix; getA1 flattens).
    C = S.mean(axis=0).getA1();
    print C;
    # Walk LIL storage directly: S.data[i][k] is the k-th stored value in
    # row i, located at column S.rows[i][k].
    for i in range(S.shape[0]):
        for k in range(len(S.data[i])):
            S.data[i][k] -= C[S.rows[i][k]];
def getRow(s):
    """Parse a sparse feature string "idx:val idx:val ..." into a list of
    (int index, float value) tuples.

    Returns [] when the stripped input is empty or the literal string 'None'.
    """
    text = s.strip()
    if text in ('', 'None'):
        return []
    # Split each token on ':' and keep only the first two fields, matching
    # the original tolerant behavior for tokens with extra colons.
    tokens = [tok.strip().split(':') for tok in text.split(' ')]
    return [(int(parts[0]), float(parts[1])) for parts in tokens]

def processline(line, D_Struct, Q_Struct, P_Struct, queryMap, count):
    """Parse one training line and append its data to the COO structures.

    line     -- "response\\tquery_features\\tdoc_features"; feature fields
                are space-separated "index:value" pairs (see getRow)
    D_Struct -- COO_Struct collecting document feature triplets
    Q_Struct -- COO_Struct collecting query feature triplets
    P_Struct -- COO_Struct collecting the diagonal response matrix
    queryMap -- maps sha224 digest of the pair text to its sample row, so a
                duplicate pair only accumulates into P at its existing row
    count    -- running line counter, used for progress logging only

    Advances the module-level nSample counter when a new pair is seen.
    Malformed lines (no tab, empty feature fields) are silently skipped.
    """
    global nSample
    li = line.strip().split('\t', 1)
    if len(li) != 2:
        return
    res = float(li[0])
    pair = li[1]
    if count % 10000 == 0:
        print('processline %d' % count)
    # The digest of the full feature text identifies a query-document pair.
    digest = hashlib.sha224(pair).hexdigest()
    if digest in queryMap:  # was dict.has_key (deprecated/removed in py3)
        # Duplicate pair: accumulate its response at the existing row.
        P_Struct.append(queryMap[digest], queryMap[digest], res)
    else:
        instance = pair.split('\t')
        xi = getRow(instance[0])
        yij = getRow(instance[1])

        # Skip lines where either feature vector is empty.
        if len(xi) == 0 or len(yij) == 0:
            return

        for j, data in xi:
            Q_Struct.append(nSample, j, data)
        for j, data in yij:
            D_Struct.append(nSample, j, data)
        P_Struct.append(nSample, nSample, res)
        queryMap[digest] = nSample
        nSample += 1
nSample = 0;  # module-level counter: row index assigned to the next new query-document pair
#Computing W and N from a list of training files
#Tricky part: D, P, Q must be constructed before n (the sample count) is known
def batchProcess(logFileList, trainsetflag):
    D_Struct = COO_Struct();
    P_Struct = COO_Struct();
    Q_Struct = COO_Struct();
    #Trict to make sure D and Q 
    config = utils.get_config();
    dx = int(config.get('rmls', 'dx'));
    dy = int(config.get('rmls', 'dy'));

    D_Struct.append(0, dx-1, 0);
    Q_Struct.append(0, dy-1, 0);

    queryMap = {}
    count = 0;
    global nSample;
    nSample = 0;
    for c  in range(len(logFileList)):
        logFile = open(logFileList[c], 'r');
        print 'Processing %s' % logFileList[c];
        for line in logFile:
            if not line.startswith('*') and not line.startswith('#'):
                count += 1;
                #print line;
                processline(line, D_Struct, Q_Struct, P_Struct, queryMap, count);
        logFile.close();
        c += 1;

    D =  sp.coo_matrix((D_Struct.data, (D_Struct.row, D_Struct.col)), dtype=np.float64);
    P = sp.coo_matrix((P_Struct.data, (P_Struct.row, P_Struct.col)), dtype=np.float64);
    Q = sp.coo_matrix((Q_Struct.data, (Q_Struct.row, Q_Struct.col)), dtype =np.float64);
    updateDig(P);

    # D and Q in CSR_MATRIX FORMAT  
    #D = utils.normlize(D);
    #Q = utils.normlize(Q); 
    print P.sum();
    #P = P * 1/float(P.sum());
    PD = P.tocsr().dot(D.tocsc());
    PQ = P.tocsr().dot(Q.tocsc());

    D_Mean = PD.sum(0) /float(P.sum());
    Q_Mean = PQ.sum(0) /float(P.sum());

    D_Mean_Name, Q_Mean_Name = utils.spellMeanSet(trainsetflag);
    np.save(D_Mean_Name, D_Mean.getA1());
    np.save(Q_Mean_Name, Q_Mean.getA1());

    print 'D.shape',D.shape, 'Q.shape', Q.shape, 'nSample', nSample, 'P.shape', P.shape;
    
    Q = Q.tocsc();
    W = D.transpose().dot(P).dot(Q).tocsc();
    print 'P.sum', P.sum();
    
    #W= sp.csc_matrix(utils.computeWandN2(D, Q, D_Mean, Q_Mean, P));
    #for pos, d in enumerate(W.data):
    #    if abs(d) < 0.0001:
    #        W.data[pos] = 0;
    
    W.eliminate_zeros();

    N = W.transpose().tocsc();
    #W = D.transpose().dot(P.tocsc()).dot(Q);
    #N = Q.transpose().dot(P.tocsc()).dot(D);
    T = count;
    print type(W);

    return (W, N, T);

if __name__ == '__main__':

    # NOTE(review): 'Docment' is a typo in the user-visible help text.
    parser = argparse.ArgumentParser(description='Get Covariance Matrix of Query and Docment');
    parser.add_argument('--trainsetflag', choices=['five', 'week', 'month'], default='five', help='specify trainset');
    args = parser.parse_args();
    trainsetflag = args.trainsetflag;

    # Resolve the single training file for the chosen trainset and process it.
    train = utils.spellTrainSet(trainsetflag);
    #logDir = '/home/wangshuxin/destLog/';
    #listAllLogFile(logDir);
    #logFileList.sort();
    logFileList.append(train);
    W, N, T = batchProcess(logFileList, trainsetflag);
    # Persist W and its transpose N in compressed-sparse-column format.
    WName, NName = utils.spellWNSet(trainsetflag);
    utils.save_sparse_csc(WName, W);
    utils.save_sparse_csc(NName, N);

