#coding=utf-8

from __future__ import print_function

import logging

import numpy as np
from gensim.models.word2vec import Word2Vec
from scipy import sparse, stats
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.grid_search import RandomizedSearchCV  # NOTE(review): moved to sklearn.model_selection in sklearn >= 0.18
from sklearn.svm import SVC, LinearSVC

from word2sparsevec import *
import word2sparsevec_inner as inner

def test_k_max_filter_1d():
    """Smoke-test inner.k_max_filter_1d on a random float32 vector.

    Prints the input, the filtered array, and the indices of the k largest
    entries for visual inspection (no assertions).
    """
    arr = np.random.random(10).astype(np.float32)
    k = 3
    new_arr, k_max_index = inner.k_max_filter_1d(arr, k)
    # Python-3-compatible print function (print_function imported at file top).
    print(arr)
    print(new_arr)
    print(k_max_index)

def test_min_1d():
    """Smoke-test inner.min_1d: minimum value and its index in a 1-D array.

    Prints input and results for visual inspection (no assertions).
    """
    arr = np.random.random(10).astype(np.float32)
    min_value, min_index = inner.min_1d(arr)
    print(arr)
    print(min_value)
    print(min_index)

def test_find_k_max_1d():
    """Smoke-test inner.find_k_max_1d: k largest values and their indices.

    Prints input and results for visual inspection (no assertions).
    """
    arr = np.random.random(10).astype(np.float32)
    k = 3
    k_max_value, k_max_index = inner.find_k_max_1d(arr, k)
    print(arr)
    print(k_max_value)
    print(k_max_index)

def test_sort_value_by_index():
    """Smoke-test inner.sort_value_by_index_1d.

    The routine appears to reorder `value` (and `index`) in place by the
    contents of `index` — TODO confirm against the cython implementation.
    Prints both arrays after the call for visual inspection.
    """
    value = np.array([1.2, 1.1, 1.0, 1.5], dtype=np.float32)
    index = np.array([5, 4, 3, 6], dtype=np.uint32)
    inner.sort_value_by_index_1d(value, index)
    print(value)
    print(index)

def test_Word2SparseVec():
    """Train a Word2SparseVec model on the douban corpus and persist it to disk.

    Uses hierarchical softmax (hs=1, negative=0) with 200-dimensional vectors
    and keeps the top k=10 components per word.
    """
    corpus_path = "../data/douban/douban.data"
    output_path = "../model/douban_hs_model.pkl"
    sentences = LineSentence(corpus_path, 10)
    trained = Word2SparseVec(
        sentences, size=200, min_count=2, workers=2, k=10, hs=1, negative=0)
    trained.save(output_path)

def test_similarity():
    """Load a trained model and inspect word vectors and similarities.

    Prints the sparse vectors for two city names, their pairwise similarity,
    and the nearest neighbours of the first word (no assertions).
    """
    model_f = "../model/zh_hs_model.pkl"
    model = Word2SparseVec.load(model_f)
    # Column vectors in CSC form so only the non-zero components are shown.
    print(sparse.csc_matrix(model[u"武汉"][:, np.newaxis]))
    print('=' * 10)
    print(sparse.csc_matrix(model[u"南京"][:, np.newaxis]))
    print('=' * 10)
    print(model.similarity(u"武汉", u"南京"))
    print('=' * 10)
    # Multi-arg print relies on the file-level print_function import to keep
    # Python-2 output identical (space-separated, not a tuple).
    for word, sim in model.most_similar(u"武汉"):
        print(word, sim)
    # print(model.accuracy("../data/questions-words.txt"))

def test_dim_semantic():
    """Print the words most associated with every vector dimension.

    Iterates all layer1_size dimensions of the loaded model and prints
    (dimension, word, similarity) triples (no assertions).
    """
    model_f = "../model/zh_hs_model.pkl"
    model = Word2SparseVec.load(model_f)
    for dim in range(model.layer1_size):
        for word, sim in model.dim_semantic(dim):
            # Multi-arg print: needs the file-level print_function import
            # so Python 2 prints space-separated values, not a tuple.
            print(dim, word, sim)

def test_distribution():
    """Dump the value distribution of a trained model to a text file.

    Loads the hierarchical-softmax model and writes its distribution
    (sampled at 1500 — presumably a bin/sample count; verify against
    Word2SparseVec.distribution) to output.txt.
    """
    model = Word2SparseVec.load("../model/zh_hs_model.pkl")
    model.distribution("output.txt", 1500)


def test_fast_sentence_sg_hs():
    """Smoke-test one skip-gram/hierarchical-softmax update step.

    Builds a tiny random vocabulary of 10 words with 5-dim vectors and runs
    the compiled training routine once; only checks it executes without error.
    """
    point = np.array([1, 2], dtype=np.uint32)       # huffman tree node ids
    code = np.array([0, 1], dtype=np.uint8)         # huffman code bits
    input_vecs = np.random.random((10, 5)).astype(np.float32)
    output_vecs = np.random.random((10, 5)).astype(np.float32)
    context_index = 8
    learn_rate = 0.1
    top_k = 3
    scratch = np.zeros((top_k,), dtype=np.float32)  # work buffer for the k-max filter
    inner.fast_sentence_sg_hs(
        point, code, code.shape[0], input_vecs, output_vecs,
        context_index, learn_rate, top_k, scratch)


def test_sentences2csc():
    """Convert two tokenized sentences to a sparse CSC feature matrix.

    Prints the matrix shape and contents for visual inspection (no assertions).
    """
    sentences = [u"这部 电影 不错".split(), u"苹果 手机 很 好用".split()]
    model_f = "../model/zh_hs_model.pkl"
    model = Word2SparseVec.load(model_f)
    csc = model.sentences2csc(sentences, 2)
    print(csc.shape)
    print(csc)

def test_classification():
    """Compare Word2SparseVec features against a bag-of-words baseline.

    Trains a LinearSVC on sparse sentence vectors built from the douban
    train/test split and prints its accuracy, then repeats with plain
    CountVectorizer token counts from the same raw files.
    """
    model_f = "../model/zh_hs_model.pkl"
    model = Word2SparseVec.load(model_f)
    k_max = 50
    method = 1
    model.train(LineSentence("../data/douban/douban.data"))

    print("transforming train data")
    train_sentence = LineSentence("../data/douban/train2.data")
    train_label = np.fromfile("../data/douban/train2.label", sep='\n', dtype=np.int32)
    train_csc = model.sentences2csc(train_sentence, k_max, method)

    print("transforming test data")
    test_sentence = LineSentence("../data/douban/test2.data")
    test_label = np.fromfile("../data/douban/test2.label", sep='\n', dtype=np.int32)
    test_csc = model.sentences2csc(test_sentence, k_max, method)

    print("training")
    # svc = RandomizedSearchCV(LinearSVC(), {'C': stats.expon(scale=100)})
    svc = LinearSVC()
    svc.fit(train_csc, train_label)
    print("score: %f" % svc.score(test_csc, test_label))

    # Bag-of-words baseline on the identical split. Context managers close
    # the file handles deterministically (the original leaked both).
    cv = CountVectorizer()
    with open("../data/douban/train2.data") as f1:
        X1 = cv.fit_transform(f1)
    with open("../data/douban/test2.data") as f2:
        X2 = cv.transform(f2)
    svc.fit(X1, train_label)
    print("score: %f" % svc.score(X2, test_label))


if __name__ == '__main__':
    # Enable timestamped INFO logging so training progress from the
    # Word2SparseVec/gensim machinery is visible on the console.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    # Manual test driver: uncomment exactly one of the scenarios below.
    # test_Word2SparseVec()
    # test_similarity()
    # test_dim_semantic()
    # test_find_k_max_1d()
    # test_sort_value_by_index()
    # test_fast_sentence_sg_hs()
    # test_distribution()
    # test_sentences2csc()
    test_classification()
