#!/usr/local/bin/python
# -*- encoding:utf-8 -*-
import numpy as np;
import scipy.sparse as sp
from scipy import linalg
import time
import threading
import os
from scipy.sparse.linalg import eigsh
from multiprocessing import Process, Manager
import sys
import utils
'''
USING SGD TO SOLVE
'''

'''This file is designed to compute the Regularized Partial
Least Squares algorithm. The target is to minimize the following loss:
L = min 1/2 \sum_{i=1}^{dx} \sum_{j=1}^{dy} I_{ij} (C_{ij} - U_i V_j^T)^2
    + \alpha/2 \sum s_{if} ||u_i - u_f||^2
    + \beta/2  \sum s_{jq} ||v_j - v_q||^2
    + \lambda_x/2 ||U||^2
    + \lambda_y/2 ||V||^2

    alpha and beta control the relational data weights.
    gamma_1 and gamma_2 stand for the learning rates of SGD.
'''

'''This file reads the covariance matrix C_xy, the query synonym words file,
and the document synonym words file'''

config = utils.get_config();
# Relational regularization weights (see module docstring): alpha scales the
# U-side synonym penalty, beta the V-side one.
# Bug fix: the original read the 'alpha' key into a variable named `beta` and
# the 'beta' key into `gamma`, leaving `alpha` undefined for
# computeRelationalGradientU (NameError at runtime).  Names are realigned
# with their config keys here.
alpha = config.getfloat('rpls', 'alpha');
beta = config.getfloat('rpls', 'beta');
# Kept for backward compatibility with any external reader of this module.
# NOTE(review): previously held the 'beta' value -- confirm nothing depends on it.
gamma = config.getfloat('rpls', 'beta');
converge_threshhold = config.getfloat('rpls', 'converge_threshhold');
# Matrix dimensions: C is dx x dy, latent dimensionality is d.
dx = config.getint('rpls', 'dx');
dy = config.getint('rpls', 'dy');
d = config.getint('rpls', 'd');
# L2 shrinkage weights for U and V respectively.
lambda_1 = config.getfloat('rpls', 'lambda_1');
lambda_2 = config.getfloat('rpls', 'lambda_2');
#Learning rate for U
gamma_1 = config.getfloat('rpls', 'gamma_1');
#Learning rate for V
gamma_2 = config.getfloat('rpls', 'gamma_2');


def updateLxu(slx, Lx, pid, start, end ,debug=False):
    """Worker: recompute rows [start, end) of Lx and append the partial
    result (Lx, start, end) to the shared list slx.

    NOTE(review): relies on module-level globals W and Y, and calls
    getRelationGradientLx, which is not defined anywhere in this file --
    confirm it is supplied elsewhere before using this function.
    """
    if debug:
        print 'pid: %d, start: %d, end: %d' % (pid, start, end);
    for u in range(start, end):
        if debug:
            start_time = time.time();
        # Row u of W plus its relational gradient, projected through Y.
        wu = W.getrow(u);
        lxu = (wu + getRelationGradientLx(u)).dot(Y);
        #cu = calcC(lxu, 1.0);
        Lx[u] = lxu #* cu;
        if debug:
            print  u, time.time() -start_time;
    # (start, end) tells the consumer which slice this worker updated.
    slx.append((Lx, start, end));

def saveModel(Lx, Ly, batch):
    print 'Saving Model For Batch %d' % batch;
    LxFile = 'Lx_%d.txt' % batch;
    LyFile = 'Ly_%d.txt' % batch;

    utils.save_sparse_csr(LxFile, Lx.tocsr());
    utils.save_sparse_csr(LyFile, Ly.tocsr());
   
def calcC(row, theta):
    """Return theta divided by the L2 norm of row, or 0 for an all-zero row."""
    norm = np.linalg.norm(row);
    return 0 if norm == 0 else theta / norm;

def checkConverge(LPx,LPy, Lx, Ly):
    """Dense convergence test.

    Returns (converged, delta) where delta sums the Frobenius norms of the
    factor updates and converged is True when delta drops below the
    module-level converge_threshhold.
    """
    du = np.linalg.norm((Lx-LPx).toarray());
    dv = np.linalg.norm((Ly-LPy).toarray());
    delta = du + dv;
    return (delta < converge_threshhold, delta);

def checkConvergeM(LPx, LPy, Lx, Ly):
    """Sparse variant of checkConverge: uses utils.sparseFroNorm so the
    difference matrices are never densified.  Returns (converged, delta)."""
    delta = utils.sparseFroNorm(LPx - Lx) + utils.sparseFroNorm(LPy - Ly);
    if delta < converge_threshhold:
        return (True, delta);
    return (False, delta);


def computeGradientU(delta, u, v):
    """SGD ascent direction for a row of U: residual term minus L2 shrinkage."""
    shrinkage = lambda_1 * u;
    return delta * v - shrinkage;

def computeGradientV(delta, u, v):
    """SGD ascent direction for a row of V: residual term minus L2 shrinkage."""
    shrinkage = lambda_2 * v;
    return delta * u - shrinkage;

def computeRelationalGradientU(i, U):
    """Relational (synonym-graph) gradient for row i of U.

    Sums s * (U[i] - U[j]) over the neighbours (j, s) of i stored in the
    module-level rU_dict, scaled by the global weight alpha.
    Raises KeyError if i has no relational entries.

    NOTE(review): alpha is read from module scope -- ensure it is defined
    when the config is loaded.
    """
    neighbours = rU_dict[i];
    # Bug fix: the module imports numpy as np, so the original bare
    # `numpy.matrix(...)` raised NameError on first use.
    s_u = np.matrix(np.zeros(U[i].shape));
    for j, s in neighbours:
        s_u += s * (U[i] - U[j])

    return alpha * s_u;

def computeRelationalGradientV(j, V):
    """Relational (synonym-graph) gradient for row j of V.

    Sums s * (V[j] - V[q]) over the neighbours (q, s) of j stored in the
    module-level rV_dict, scaled by the global weight beta.
    Raises KeyError if j has no relational entries.
    """
    neighbours = rV_dict[j];
    # Bug fix: the module imports numpy as np, so the original bare
    # `numpy.matrix(...)` raised NameError on first use.
    s_v = np.matrix(np.zeros(V[j].shape));

    for q, s in neighbours:
        s_v += s * (V[j] - V[q]);

    return beta * s_v;

def computeDelta(data, u, v):
    """Return the scalar residual data - <u, v> for 1 x d sparse row vectors.

    Bug fix: the original indexed the densified 1x1 result with [0][0];
    row-indexing an np.matrix returns another 1x1 matrix, so a matrix (not
    a scalar) leaked into every SGD update.  [0, 0] extracts the scalar.
    """
    return data - u.dot(v.transpose()).todense()[0, 0];

def RPLS(C, T=25, batchno=-1):
    """Factorize the sparse matrix C by SGD into Lx (dx x d) and Ly (dy x d);
    see the module docstring for the full loss being minimized.

    C        -- scipy sparse matrix traversed row by row via indptr/indices
                (assumes CSR-style attributes -- confirm callers pass CSR/CSC).
    T        -- maximum number of full passes over the data.
    batchno  -- batch label; currently unused inside this function.
    Returns (Lx, Ly), the L2-row-normalized sparse factor matrices.
    """
    con = -1;
    # NOTE(review): WX and NY are declared global but never assigned here;
    # U and V are published as globals, presumably so the KeyboardInterrupt
    # handler in __main__ can still reach them -- confirm intended.
    global WX, NY, U, V;
    K = 20;  # NOTE(review): unused in this function.

    #Generate U and V;
    U = sp.rand(dx, d, density=0.1, format='lil', dtype=np.float64);
    V = sp.rand(dy, d, density=0.1, format='lil', dtype=np.float64);
    t = 0
    con = utils.sparseFroNorm(U) + utils.sparseFroNorm(V);
    print C.shape, U.shape, V.shape;
    while True:
        t += 1;
        # Snapshot both factors so convergence can be measured after the pass.
        UB = U.copy();
        VB = V.copy();
        print 'This is %d iteration, last converge is %f' %(t, con);
        start = time.time();
        print len(C.data), len(C.indices);

        i = 0;
        # One SGD sweep over every stored (i, j) entry of C.
        while(i < C.shape[0]):
            row_data = C.data[C.indptr[i]:C.indptr[i+1]];
            row_index = C.indices[C.indptr[i]:C.indptr[i+1]];
            for pos, data in enumerate(row_data):
                j = row_index[pos];
                delta = computeDelta(data, U[i], V[j]);
                print (i,j)
                #U[i] = U[i] + gamma_1*(computeGradientU(delta, U[i], V[j]) - computeRelationalGradientU(i, U));
                #V[i] = V[i] + gamma_2*(computeGradientV(delta, U[i], V[j]) - computeRelationalGradientV(j, V));
            
                # Plain (non-relational) gradient steps with learning rates
                # gamma_1 / gamma_2 from the config.
                U[i] = U[i] + gamma_1*(computeGradientU(delta, U[i], V[j]));
                V[j] = V[j] + gamma_2*(computeGradientV(delta, U[i], V[j]));
            i += 1;
        #converge test
        flag, con = checkConvergeM(UB, VB, U, V);
        if  flag or t >= T:
            break;
    #Get the Lx, Ly from U, V

    # Normalize each row to unit L2 norm before returning.
    utils.l2norm(U, d);
    utils.l2norm(V, d);
    Lx = U;
    Ly = V;
    return (Lx, Ly);
   

def parse(relational):
    """Parse tab-separated relational lines ("i\\tj\\ts") into a symmetric dict.

    relational -- any iterable of lines (e.g. an open file).
    Returns di where di[i] contains (j, s) and di[j] contains (i, s) for
    every input line, preserving input order within each list.
    """
    di = {};

    for line in relational:
        fields = line.strip().split('\t');
        i = int(fields[0]);
        j = int(fields[1]);
        s = float(fields[2]);
        # setdefault replaces the deprecated dict.has_key branching
        # (identical behavior, also valid Python 3).
        di.setdefault(i, []).append((j, s));
        di.setdefault(j, []).append((i, s));

    return di;

if __name__ == '__main__':
    batchno = 5;
    # Relational (synonym) graphs for the two sides; the parsed dicts are
    # read as module globals by computeRelationalGradientU/V.
    relationalU = open('relationalU', 'r');
    rU_dict = parse(relationalU);
    relationalU.close();
    relationalV = open('relationalV', 'r');
    rV_dict = parse(relationalV);
    relationalV.close();

    try:
        # Load the covariance matrix and factorize its transpose.
        W = utils.load_sparse_csc('W_Week.npz');
        N = W.transpose().tocsc();
        print W.max(), W.min(), N.max(), N.min();
        Lx, Ly = RPLS(N, 20, batchno); 
        saveModel(Lx, Ly, batchno);
    except KeyboardInterrupt:
        # Best-effort save on Ctrl-C.
        # NOTE(review): Lx/Ly are only bound after RPLS returns, so an
        # interrupt earlier raises NameError here -- confirm intended.
        saveModel(Lx, Ly, batchno);
        sys.exit(0);
