import numpy as np
from scipy.sparse import *
from sklearn.cluster import spectral_clustering
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
import word2vector


def read_data(path, splitor=None, encoding='utf-8'):
    """Read a text file as a list of token lists, one list per line.

    path: file to read.
    splitor: delimiter passed to str.split; None splits on runs of
        whitespace (str.split(None) is exactly str.split(), so no
        branching is needed).
    encoding: text encoding used to open the file.

    Returns a list of lists of tokens, in file order.
    """
    # `with` guarantees the handle is closed even if decoding fails mid-file.
    with open(path, 'r', encoding=encoding) as f:
        return [line.strip().split(splitor) for line in f]


def read_wordmap(path, splitor=None, encoding='utf-8'):
    """Build a word -> integer-id map from the tokens in a text file.

    Ids are assigned in first-seen order, starting at 0.

    path: file to read.
    splitor: delimiter passed to str.split; None splits on runs of
        whitespace (identical to the original if/else, since
        str.split(None) is str.split()).
    encoding: text encoding used to open the file.

    Returns a dict mapping each distinct token to its id.
    """
    wordmap = {}
    # `with` guarantees the handle is closed even on an exception.
    with open(path, 'r', encoding=encoding) as f:
        for line in f:
            for token in line.strip().split(splitor):
                if token not in wordmap:
                    # len(wordmap) yields the next unused id.
                    wordmap[token] = len(wordmap)
    return wordmap


def format_data(path):
    """Build a word co-occurrence matrix for the corpus at *path*.

    Side effect: writes "<path>.wordmap" mapping each word to its id
    ("word id" per line).

    Returns a scipy COO sparse matrix where entry (i, j) counts how many
    times words i and j appear on the same line (i != j), summed over
    repeated occurrences.
    """
    corpus = read_data(path)
    # max(1, ...) prevents ZeroDivisionError below when the corpus has
    # fewer than 100 lines (the original int(len/100) could be 0).
    part_num = max(1, len(corpus) // 100)
    wordmap = read_wordmap(path)
    with open(path + ".wordmap", "w", encoding="utf-8") as f:
        for key, idx in wordmap.items():
            f.write(key + " " + str(idx) + "\n")

    word_size = len(wordmap)
    # int32 instead of int8: co-occurrence counts easily exceed 127 on a
    # real corpus, and int8 would silently wrap around.
    data = lil_matrix((word_size, word_size), dtype=np.int32)
    for index, line in enumerate(corpus):
        if index % part_num == 0:
            # Integer division so progress reads "0%", "1%", ... not "0.0%".
            print(str(index // part_num) + "%")
        for word1 in line:
            for word2 in line:
                if word1 != word2:
                    data[wordmap[word1], wordmap[word2]] += 1
    return data.tocoo()


def spectral_cluster(path):
    """Cluster the word co-occurrence graph of *path* into 100 topics.

    Uses sklearn spectral clustering on the affinity matrix produced by
    format_data(). Side effect: writes one cluster label per word (in
    wordmap-id order) to "<path>.discretize_100.wordtopics".
    """
    the_label = "discretize"
    topic_count = 100
    labels = spectral_clustering(
        format_data(path),
        n_clusters=topic_count,
        assign_labels=the_label,
        n_init=20,
    )
    print(len(labels))
    out_path = path + "." + the_label + "_" + str(topic_count) + ".wordtopics"
    # `with` ensures the output file is closed even if a write fails.
    with open(out_path, "w", encoding="utf-8") as f:
        for label in labels:
            f.write(str(label) + "\n")


def dbscan(path):
    """Train word vectors on *path* and cluster them with DBSCAN.

    Side effects:
      - writes the vocabulary, one word per line, to "<path>.wordmap";
      - writes the per-word DBSCAN cluster label (same order) to
        "<path>.vector".
    """
    model = word2vector.doTrainModel(path)
    data = []
    # `with` ensures the file is closed even if model lookup raises.
    with open(path + ".wordmap", 'w', encoding='utf-8') as fw:
        # NOTE(review): assumes model.vocab iterates words and model[word]
        # returns the embedding vector (gensim pre-4.0 style API) — confirm
        # against word2vector.doTrainModel.
        for word in model.vocab:
            data.append(model[word])
            fw.write(word + "\n")

    data = np.array(data)
    print(data.shape)
    db = DBSCAN(eps=30).fit(data)
    labels = db.labels_
    # DBSCAN marks noise points with label -1; it is not a cluster.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    print(n_clusters_)
    with open(path + ".vector", 'w', encoding='utf-8') as f:
        for label in labels:
            f.write(str(label) + "\n")


def print_topics(path, wordmapPath, orgDatapath, topic_num):
    """Print each topic's word list and write per-line topic proportions.

    path: file with one topic label per line, line index = word id.
    wordmapPath: "word id" pairs, one per line.
    orgDatapath: original corpus; every token must appear in the wordmap.
    topic_num: total number of topics (length of each theta row).

    Side effect: writes "<orgDatapath>.theta" with one space-separated row
    of topic proportions per corpus line.
    """
    wordmap = {}   # id -> word
    pwordmap = {}  # word -> id
    with open(wordmapPath, 'r', encoding='utf-8') as f:
        for line in f:
            lines = line.split()
            wordmap[int(lines[1])] = lines[0]
            pwordmap[lines[0]] = int(lines[1])

    topics = {}
    word2Topic = {}  # word id -> topic label (int)
    with open(path, 'r', encoding='utf-8') as f:
        for index, line in enumerate(f):
            label = line.strip()
            word2Topic[index] = int(label)
            # Group words (not ids) under their topic label string.
            topics.setdefault(label, []).append(wordmap[index])

    for key in topics:
        print(key)
        print(topics[key])

    orgData = read_data(orgDatapath)
    with open(orgDatapath + ".theta", 'w', encoding='utf-8') as f:
        for line in orgData:
            counts = [0] * topic_num
            for item in line:
                counts[word2Topic[pwordmap[item]]] += 1
            # Guard: an empty corpus line would otherwise divide by zero;
            # it yields an all-zero theta row instead.
            total = len(line) or 1
            f.write(' '.join([str(c / total) for c in counts]) + "\n")


