#!/usr/bin/env python3

# Framework for scientific collections clustering
# Python 3

from itertools import combinations

import numpy as np
import scipy
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
from sklearn import metrics

import matplotlib.pyplot as plt

def load_data(filename):
    """Read a tab-separated data table.

    The first row holds feature names, the first column holds point
    identifiers, and the remaining cells hold numeric feature values.

    Returns a tuple ``(features, points, data)`` where ``features`` is
    the list of feature names, ``points`` the list of point identifiers
    and ``data`` a 2-D float ndarray with one row per point.
    """
    with open(filename, "r", encoding="UTF8") as fin:
        table = fin.readlines()
    features = table[0].strip().split("\t")[1:]
    # Skip blank lines (e.g. a trailing newline) that would otherwise
    # yield an empty, malformed row.
    rows = [line.strip().split("\t") for line in table[1:] if line.strip()]
    points = [row[0] for row in rows]
    data = [row[1:] for row in rows]
    # Convert to float here: the raw cells are strings, and a string
    # ndarray would break StandardScaler / DBSCAN downstream.
    return features, points, np.array(data, dtype=float)

def load_true_labels(filename="config/true_labels.txt"):
    """Load the ground-truth cluster labels.

    Each non-blank line of *filename* is ``<point_id>\\t<int_label>``.
    The path is parameterized (default keeps the original behavior).

    Returns a dict mapping point identifier to its integer label.
    """
    labels = {}
    with open(filename, "r", encoding="UTF8") as fin:
        for line in fin:
            line = line.strip()
            if not line:
                continue
            items = line.split("\t")
            labels[items[0]] = int(items[1])
    return labels

def choose_columns(data, indices):
    """Return only the columns of *data* whose entry in *indices* is falsy.

    *indices* acts as a per-column "drop" mask: truthy entries are
    removed, falsy entries are kept.
    """
    keep = np.logical_not(indices)
    return np.compress(keep, data, axis=1)

def scale_data(data):
    """Standardize every feature column (zero mean, unit variance)."""
    scaler = StandardScaler()
    scaled = scaler.fit_transform(data)
    return scaled

def make_mask(n, positions):
    """Build a length-*n* list of 1s with a 0 at every index in *positions*.

    The 0 entries mark the columns selected for a clustering run; the
    1 entries mark columns to drop (see ``choose_columns``).
    """
    zeroed = set(positions)
    return [0 if i in zeroed else 1 for i in range(n)]

def cluster(X, feats, points, ctr, make_plot=True):
    """Run DBSCAN on *X*, report clustering metrics, optionally plot.

    Parameters
    ----------
    X : 2-D ndarray of (scaled) feature values, one row per point.
    feats : names of the two features in X (used for axis labels).
    points : point identifiers, aligned with the rows of X.
    ctr : integer counter embedded in the saved plot filename.
    make_plot : when False, stop after printing the metrics.

    NOTE(review): reads the module-level ``true_labels`` dict, which the
    __main__ section must have populated before this is called.
    """
    settings = (0.9, 5)  # (eps, min_samples) for DBSCAN

    db = DBSCAN(eps=settings[0], min_samples=settings[1]).fit(X)
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_

    # Number of clusters in labels, ignoring noise (-1) if present.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)

    print("Features:", feats)
    print("Predicted labels:", labels)
    # Align ground truth with the rows of X via the point identifiers.
    # The previous sorted(true_labels.items()) ordering silently assumed
    # the data file listed points in sorted-key order; looking up each
    # point id directly removes that assumption (raises KeyError if a
    # point has no ground-truth label, which surfaces the mismatch).
    labels_true = np.array([true_labels[p] for p in points])

    print('Estimated number of clusters: %d' % n_clusters_)
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
    print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
    print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
    print("Adjusted Rand Index: %0.3f"
          % metrics.adjusted_rand_score(labels_true, labels))
    print("Adjusted Mutual Information: %0.3f"
          % metrics.adjusted_mutual_info_score(labels_true, labels))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(X, labels))
    if not make_plot:
        return True

    # Plot result.
    # Black is removed from the palette and used for noise instead.
    unique_labels = set(labels)
    colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
    for k, col in zip(unique_labels, colors):
        if k == -1:
            # Black used for noise.
            col = 'k'

        class_member_mask = (labels == k)

        # Core members: large markers.
        xy = X[class_member_mask & core_samples_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
                 markeredgecolor='k', markersize=14)

        # Border (non-core) members: small markers.
        xy = X[class_member_mask & ~core_samples_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
                 markeredgecolor='k', markersize=6)

    # Annotate every point exactly once. The original ran this loop
    # inside the per-cluster loop above, stacking one duplicate label on
    # each point per cluster.
    for point, x, y in zip(points, X[:, 0], X[:, 1]):
        plt.annotate(
            point,
            xy=(x, y), xytext=(70, -40),
            textcoords='offset points', ha='right', va='bottom',
            arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
            )

    plt.title('Estimated number of clusters: %d' % n_clusters_)
    plt.xlabel(feats[0])
    plt.ylabel(feats[1])
    plt.savefig("plots/DBSCAN{0:02d}_{1:.2f}_{2:d}.png".format(ctr, *settings))
    plt.clf()
    return True

def cluster_mult(features, points, data, make_plot=True):
    """Run the clusterer on every 2-feature projection of *data*.

    For each unordered pair of feature columns, a mask is built that
    keeps exactly that pair, the data is projected onto it, and
    ``cluster`` is invoked with a running counter (used in the plot
    filename). Always returns True.
    """
    n_feats = len(features)
    pair_masks = (make_mask(n_feats, pair)
                  for pair in combinations(range(n_feats), 2))

    for ctr, mask in enumerate(pair_masks, start=1):
        drop_flags = [bool(bit) for bit in mask]
        projected = choose_columns(data, drop_flags)
        kept_feats = tuple(name for name, bit in zip(features, mask)
                           if not bit)
        cluster(projected, kept_feats, points, ctr, make_plot)

    return True

if __name__ == "__main__":

    # Input table: first row = feature names, first column = point ids.
    filename = "data/all.txt"
    features, points, raw_data = load_data(filename)
    # NOTE: cluster() reads this module-level name directly, so it must
    # be bound here before cluster_mult() runs.
    true_labels = load_true_labels()
    
    # Standardize features before DBSCAN (eps is distance-based).
    data = scale_data(raw_data)

    # Cluster every 2-feature projection; True => save a plot per pair.
    cluster_mult(features, points, data, True)


    
