import numpy as np
import matplotlib.pyplot as plt
from python_ai.common.xcommon import sep

from sklearn.metrics import homogeneity_score, completeness_score, v_measure_score
from python_math.app.LA.xperm import perm


def x_my_metrics():
    """
    Print clustering quality metrics for the global ground truth ``y``
    against the global prediction ``h``: homogeneity, completeness and
    V-measure. Then grid-search every permutation of the predicted label
    ids and report the relabelling that maximises the V-measure.

    NOTE: these metrics are label-permutation-invariant by construction,
    so the grid search should report the same score for every mapping —
    it is kept as an empirical check of that property (the original
    "Does not it matter ???" question).

    Reference: https://sklearn.apachecn.org/docs/master/22.html
    2.3.10.3 homogeneity, completeness and V-measure

    Side effect: rebinds the global ``h`` to ``np.array(h)``.

    :return: None
    """
    global h
    h = np.array(h)
    hom = homogeneity_score(y, h)
    com = completeness_score(y, h)
    vm = v_measure_score(y, h)
    print(f'同质性（均一性）： {hom}')
    print(f'完整性： {com}')
    print(f'V-measure： {vm}')

    # Grid search the best class map (it should not matter — see docstring).
    ny = len(np.unique(y))
    nh = len(np.unique(h))
    print(ny, nh)
    nmax = max(ny, nh)
    # perm() yields 1-based permutations; shift to 0-based label maps.
    xperm = np.array(perm(nmax, nh), dtype=np.int64) - 1
    # NOTE(review): the relabelling below assumes labels are 0..nh-1;
    # DBSCAN's noise label (-1) would be left unmapped — confirm intent.
    max_vm = None
    max_vm_p = None
    max_vm_r = None
    best_map = None
    for label_map in xperm:
        # BUGFIX: relabel a *copy* so every permutation starts from the
        # original prediction. The previous code mutated h in place and
        # cumulatively, so each permutation after the first was applied
        # to an already-remapped array (the *1000 // 1000 trick only
        # avoided collisions within a single permutation).
        hh = h.copy()
        for old_label, new_label in enumerate(label_map):
            hh[h == old_label] = new_label
        p = homogeneity_score(y, hh)
        r = completeness_score(y, hh)
        vm = v_measure_score(y, hh)
        print(f'({label_map}): {vm}')
        if max_vm is None or vm > max_vm:
            max_vm = vm
            best_map = label_map
            max_vm_p = p
            max_vm_r = r
    print(f'Best map: {best_map}')
    print(f'同质性（均一性）： {max_vm_p}')
    print(f'完整性： {max_vm_r}')
    print(f'Max vm: {max_vm}')


# Figure with a 2x2 grid of subplots; spn tracks the next subplot slot.
plt.figure(figsize=[8, 8])
spr, spc, spn = 2, 2, 0

# Data set: two concentric circles plus one extra blob, with the blob's
# class id shifted so it does not collide with the circle labels.
from sklearn.datasets import make_blobs, make_circles
circle_x, circle_y = make_circles(n_samples=5000, factor=0.6, noise=0.05, random_state=9)
blob_x, blob_y = make_blobs(n_samples=1000, n_features=2, centers=[[1.2,1.2]],
                            cluster_std=[[.1]],
                            random_state=9,
                            )
sep('y2 += max(y1)')
offset = max(np.unique(circle_y)) + 1
blob_y += offset
x = np.vstack((circle_x, blob_x))
y = np.concatenate((circle_y, blob_y))
spn += 1
plt.subplot(spr, spc, spn)
plt.title('data')
plt.scatter(x[:, 0], x[:, 1], s=1)

# Cluster with k-means (3 clusters matches the 3 ground-truth classes)
# and plot the predicted labelling.
title = 'by k-means'
sep(title)
from sklearn.cluster import KMeans
km = KMeans(n_clusters=3,
            random_state=9)
h = km.fit(x).predict(x)  # fit() returns the estimator, so we can chain
x_my_metrics()
spn += 1
plt.subplot(spr, spc, spn)
plt.title(title)
plt.scatter(x[:, 0], x[:, 1], s=1, c=h)

# Cluster with DBSCAN at its default parameters and plot the labelling.
title = 'by dbscan'
sep(title)
from sklearn.cluster import DBSCAN
model = DBSCAN()
model.fit(x)
h = model.labels_  # fit_predict() is exactly fit().labels_
x_my_metrics()
spn += 1
plt.subplot(spr, spc, spn)
plt.title(title)
plt.scatter(x[:, 0], x[:, 1], s=1, c=h)

# DBSCAN again, with a smaller eps tuned for the concentric circles.
# (The duplicate `from sklearn.cluster import DBSCAN` was removed —
# DBSCAN is already imported by the previous section above.)
title = 'by dbscan(params)'
sep(title)
model = DBSCAN(eps=0.08)
h = model.fit_predict(x)
x_my_metrics()
spn += 1
plt.subplot(spr, spc, spn)
plt.title(title)
plt.scatter(x[:, 0], x[:, 1], s=1, c=h)

# Finally show all drawings.
plt.show()