#!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import division
import numpy as np
import logging
import h5py
import os
from test_logging import logging_conf
import scipy.io as sio
from kmeans_alg import *
from sklearn import preprocessing


# Configure application-wide logging; handler/format setup lives in
# test_logging.logging_conf (project-local) — TODO confirm its side effects.
logging_conf()
logger = logging.getLogger('load_dataset.py')

def load_txt(file_name):
    """Load a whitespace-delimited text file of integers.

    Parameters
    ----------
    file_name : str
        Path to the file; every whitespace-separated token must parse
        as an int.

    Returns
    -------
    list of list of int
        One inner list per line of the file (a blank line yields an
        empty inner list, matching the original reader's behavior).
    """
    # BUGFIX: use a context manager so the file handle is released even if
    # int() raises on a malformed token (the original leaked on exception).
    with open(file_name, "r") as f:
        return [[int(tok) for tok in line.split()] for line in f]

def construct_dataset(adj_path, content_path):
    """Stack the modularity matrix and the rescaled Markov matrix column-wise.

    The Markov matrix M is rescaled by the ratio of squared Frobenius
    norms ||B||^2 / ||M||^2 so that the link part and the content part
    contribute on comparable scales.

    Parameters
    ----------
    adj_path : str
        Path to the 0/1 adjacency-matrix text file.
    content_path : str
        Path to the content-matrix text file.

    Returns
    -------
    numpy.ndarray
        The block matrix [B | (||B||^2 / ||M||^2) * M].
    """
    modularity = np.asarray(load_modularity(adj_path, content_path))
    markov = np.asarray(load_ncut(adj_path, content_path))
    scale = np.sum(modularity ** 2) / np.sum(markov ** 2)
    logger.info("the result by combining link data and content data is:")
    return np.column_stack((modularity, scale * markov))


def load_modularity(adj_path, content_path):
    """Build the modularity matrix B = A - P from an adjacency-matrix file.

    P is the configuration null model: P[i][j] = d_i * d_j / (2m), with
    d the degree vector and m the number of undirected edges.

    Parameters
    ----------
    adj_path : str
        Path to the 0/1 adjacency-matrix text file.
    content_path : str
        Path to the content-matrix text file; it is read (and must parse)
        but its values are not used by this function.

    Returns
    -------
    numpy.ndarray
        The modularity matrix B of shape (n, n), dtype float.
    """
    # Read the content file to keep the original pipeline's side effect
    # (it must exist and parse); its values are unused here.
    load_txt(content_path)

    adj = np.asarray(load_txt(adj_path))
    logger.info("adj matrix is :")

    # Each undirected edge contributes 2 to the sum of A.
    edge_num = np.sum(adj) / 2
    if edge_num == 0:
        # BUGFIX: the original divided by zero for an edgeless graph;
        # with no edges the null model P is all zeros, so B = A.
        return adj.astype(float)

    degrees = np.sum(adj, axis=1)
    # Vectorized null model — replaces the original O(n^2) Python loop.
    p = np.outer(degrees, degrees) / (2 * edge_num)
    return adj - p

def load_ncut(adj_path, content_path):
    """Build the Markov (random-walk) matrix M = D^-1 * S from content.

    S is a sparse k-NN cosine-similarity matrix: for every sample only its
    k most similar samples (self included) keep their similarity; all other
    entries are 0. D is the diagonal matrix of row sums of S.

    Parameters
    ----------
    adj_path : str
        Unused; kept for signature compatibility with load_modularity.
    content_path : str
        Path to the content-matrix text file (one row per sample).

    Returns
    -------
    numpy.ndarray
        Row-stochastic matrix M of shape (n, n).
    """
    content = np.asarray(load_txt(content_path), dtype=float)
    sample_num = content.shape[0]

    # Cosine similarity, vectorized: S[i][j] = <x_i, x_j> / (|x_i| |x_j|).
    # Replaces the original O(n^2) per-pair Python loop (which also printed
    # a progress line for every single pair).
    norms = np.sqrt(np.sum(content ** 2, axis=1))
    distance_mat = np.dot(content, content.T) / np.outer(norms, norms)

    # Keep only the k most similar samples per row (descending order).
    sorted_distance_mat = np.argsort(-distance_mat, axis=1)
    # TODO: how to choose the value of K
    k = 9
    similar = np.zeros((sample_num, sample_num))
    for i in range(sample_num):
        nearest = sorted_distance_mat[i, :k]
        similar[i, nearest] = distance_mat[i, nearest]

    # M = D^-1 * S: inverting a diagonal matrix is just dividing each row
    # by its sum (replaces the deprecated np.matrix(...).getI()).
    row_sums = np.sum(similar, axis=1)
    return similar / row_sums[:, np.newaxis]

def load_groundtruth(ground_truth_path):
    """Load ground-truth labels: the first integer token of every line.

    Parameters
    ----------
    ground_truth_path : str
        Path to the label file; each non-blank line starts with an int.

    Returns
    -------
    list of int
        One label per non-blank line of the file.
    """
    labels = []
    # BUGFIX: context manager releases the handle on error; the original
    # also raised IndexError on a blank line — those are now skipped.
    with open(ground_truth_path, "r") as f:
        for line in f:
            tokens = line.split()
            if tokens:
                labels.append(int(tokens[0]))
    return labels

def compute_cosin_similarity(vec1, vec2):
    """Return the cosine similarity <v1, v2> / (|v1| |v2|) of two vectors."""
    a = np.asarray(vec1, dtype=float)
    b = np.asarray(vec2, dtype=float)
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


def load_adj_semi(semi_path):
    """Load a constraint matrix and return its graph Laplacian D - W.

    Parameters
    ----------
    semi_path : str
        Path to the constraint (weight) matrix text file.

    Returns
    -------
    numpy.ndarray
        The Laplacian of the constraint graph.
    """
    weights = np.asarray(load_txt(semi_path))
    degree_mat = np.diag(np.sum(weights, axis=1))
    return np.asarray(degree_mat - weights)

def construct_adj_semi(adj_path, content_path):
    """Build a constraint Laplacian from links confirmed by content similarity.

    For every node i, look at its deg(i) most content-similar nodes; each of
    them that is also a real neighbour in the adjacency matrix becomes a
    symmetric must-link constraint. Returns the Laplacian D - W of the
    resulting constraint graph.

    Parameters
    ----------
    adj_path : str
        Path to the 0/1 adjacency-matrix text file.
    content_path : str
        Path to the content-matrix text file (one row per sample).

    Returns
    -------
    numpy.ndarray
        Laplacian of the constraint graph, shape (n, n).
    """
    adj = np.asarray(load_txt(adj_path))
    content = np.asarray(load_txt(content_path), dtype=float)
    sample_num = content.shape[0]
    adj_degree = np.sum(adj, axis=1)

    # Cosine similarity, vectorized — replaces the duplicated O(n^2)
    # per-pair Python loop that appeared in three functions of this module.
    norms = np.sqrt(np.sum(content ** 2, axis=1))
    distance_mat = np.dot(content, content.T) / np.outer(norms, norms)
    sorted_distance_mat = np.argsort(-distance_mat, axis=1)

    constraints = np.zeros((sample_num, sample_num), dtype=int)
    for i in range(sample_num):
        # k_i adapts to the node's degree: better-connected nodes may
        # confirm more of their content neighbours.
        for index in sorted_distance_mat[i, :adj_degree[i]]:
            if adj[i][index] == 1:
                constraints[i][index] = 1
                constraints[index][i] = 1

    degree_mat = np.diag(np.sum(constraints, axis=1))
    return np.asarray(degree_mat - constraints)

def construct_adj_semi_union(adj_path, content_path):
    """Build the Laplacian of the union of real links and k-NN content links.

    Every node gets symmetric edges to its k=9 most content-similar nodes
    (cosine similarity) in addition to the original adjacency; the Laplacian
    D - W of the augmented graph is returned.

    Parameters
    ----------
    adj_path : str
        Path to the 0/1 adjacency-matrix text file.
    content_path : str
        Path to the content-matrix text file (one row per sample).

    Returns
    -------
    numpy.ndarray
        Laplacian of the augmented graph, shape (n, n).
    """
    adj = np.asarray(load_txt(adj_path))
    content = np.asarray(load_txt(content_path), dtype=float)
    sample_num = content.shape[0]

    # Cosine similarity, vectorized — replaces the duplicated O(n^2)
    # per-pair Python loop. (Unused locals of the original are dropped.)
    norms = np.sqrt(np.sum(content ** 2, axis=1))
    distance_mat = np.dot(content, content.T) / np.outer(norms, norms)
    sorted_distance_mat = np.argsort(-distance_mat, axis=1)

    # TODO: how to choose the value of K
    k = 9
    union = np.array(adj)  # work on a copy; don't mutate the loaded matrix
    for i in range(sample_num):
        for index in sorted_distance_mat[i, :k]:
            union[i][index] = 1
            union[index][i] = 1

    degree_mat = np.diag(np.sum(union, axis=1))
    return np.asarray(degree_mat - union)

def construct_adj_semi_www13(adj_path, content_path):
    """Build a pairwise-constraint Laplacian following the WWW'13 scheme.

    Steps:
      1. Augment each node's neighbourhood with its k=9 most content-similar
         nodes (cosine similarity).
      2. For every node i and each augmented neighbour j, compute a
         topological similarity (cosine of adjacency rows) and a content
         similarity, standardize both, and blend them with weight alpha.
      3. Sample ceil(sqrt(#neighbours)) top-ranked neighbours per node as
         symmetric pairwise constraints and return the Laplacian D - W of
         the constraint graph.

    Parameters
    ----------
    adj_path : str
        Path to the 0/1 adjacency-matrix text file.
    content_path : str
        Path to the content-matrix text file (one row per sample).

    Returns
    -------
    numpy.ndarray
        Laplacian of the sampled constraint graph, shape (n, n).
    """
    adj = np.asarray(load_txt(adj_path))
    content = np.asarray(load_txt(content_path), dtype=float)
    sample_num = content.shape[0]

    # Content cosine similarity, vectorized — replaces the O(n^2) loop.
    norms = np.sqrt(np.sum(content ** 2, axis=1))
    distance_mat = np.dot(content, content.T) / np.outer(norms, norms)
    sorted_distance_mat = np.argsort(-distance_mat, axis=1)

    # Augmented neighbourhood: original links plus k content-nearest nodes.
    # BUGFIX: the original bound ngbr = adj (aliasing), mutating the
    # adjacency matrix itself; we copy so the raw topology stays intact for
    # the topological-similarity step below.
    # TODO: how to choose the value of K
    k = 9
    ngbr = np.array(adj)
    for i in range(sample_num):
        for index in sorted_distance_mat[i, :k]:
            ngbr[i][index] = 1
            ngbr[index][i] = 1

    # BUGFIX: the original wrote `sim_c = sim_t`, aliasing one list of
    # lists, so the content similarity overwrote the topological one and
    # the alpha blend below was a no-op. Build two independent matrices.
    sim_t = np.zeros((sample_num, sample_num))
    sim_c = np.zeros((sample_num, sample_num))
    for i in range(sample_num):
        for j in find_index_in_list(ngbr[i]):
            sim_t[i][j] = compute_cosin_similarity(adj[i], adj[j])
            sim_c[i][j] = distance_mat[i][j]

    # BUGFIX: the original fitted a StandardScaler on data that was already
    # standardized by preprocessing.scale and then transformed the *raw*
    # matrices with those (mean~0, std~1) statistics — a near no-op.
    # Standardize each matrix once, column-wise.
    sim_t_norm = preprocessing.scale(sim_t)
    sim_c_norm = preprocessing.scale(sim_c)

    # TODO: alpha may need tuning.
    alpha = 0.5
    sim = alpha * sim_t_norm + (1 - alpha) * sim_c_norm
    sorted_sim = np.argsort(-np.asarray(sim), axis=1)

    pairwise = np.zeros((sample_num, sample_num), dtype=int)
    for i in range(sample_num):
        ngbr_count = len(find_index_in_list(ngbr[i]))
        # TODO: the sqrt(#neighbours) sample-size heuristic may be revisited.
        get_sample_num = int(np.ceil(np.sqrt(ngbr_count)))
        for v_sample in sorted_sim[i, :get_sample_num]:
            pairwise[i][v_sample] = 1
            pairwise[v_sample][i] = 1

    degree_mat = np.diag(np.sum(pairwise, axis=1))
    return np.asarray(degree_mat - pairwise)

def find_index_in_list(values):
    """Return the indices of all entries in *values* equal to 1.

    The parameter was renamed from ``list`` to stop shadowing the builtin;
    all call sites in this module pass it positionally.
    """
    return [i for i, v in enumerate(values) if v == 1]

def construct_adj_semi_by_kmeans(adj_path, content_path):
    # Cluster the content matrix with k-means (k=5) and persist the labels
    # to a .mat file for later use as semi-supervision.
    # NOTE(review): `adj_path` is accepted but never used here.
    # `kmeans` and `array` come from the wildcard import of kmeans_alg;
    # presumably `array` is numpy's — confirm against kmeans_alg.
    # assumes clusterAssment column 0 holds the cluster index — TODO confirm
    content = load_txt(content_path)
    centroids, clusterAssment = kmeans(np.asarray(content), 5)
    cluster_result_label = list(array(clusterAssment)[:, 0])

    print (cluster_result_label)
    sio.savemat('../similarity/adj_semi_by_kmeans.mat', {'cluster_result_label': cluster_result_label})

if __name__ == '__main__':
    # Build and store the stacked feature matrix and the WWW'13 constraint
    # Laplacian for every dataset directory under ../data/.
    # BUGFIX: the original loop body mixed tabs and spaces, which is a
    # TabError under Python 3; re-indented consistently with 4 spaces.
    datapath = "../data/"
    data_list = os.listdir(datapath)
    print(data_list)
    for item in data_list:
        aa = construct_adj_semi_www13("../data/%s/%s_adj.txt" % (item, item), "../data/%s/%s_content.txt" % (item, item))
        bb = construct_dataset("../data/%s/%s_adj.txt" % (item, item), "../data/%s/%s_content.txt" % (item, item))
        with h5py.File("../h5_file/%s_stack.h5" % item, "w") as f:
            f.create_dataset("aa", shape=aa.shape, data=aa)
            f.create_dataset("bb", shape=bb.shape, data=bb)
