import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# Load the two-feature dataset: columns 0-1 are the features, column 2 the label.
data = np.loadtxt(r'../../logic_regression/data/ex2data2.txt', delimiter=',')
x = data[:, :-1]
y = data[:, -1]

# Split FIRST, then fit the scaler on the training portion only.
# Fitting StandardScaler on the full dataset before splitting leaks
# test-set statistics (mean/std) into the training data.
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=666)

scaler = StandardScaler().fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# Keep the full feature matrix scaled with the SAME (train-fit) scaler,
# so the scatter/contour plots share the train/test coordinate system.
x = scaler.transform(x)


def try_gamma(gamma):
    """Fit an RBF-kernel SVC with the given ``gamma``, print train/test
    accuracy, and draw its decision boundary in the next subplot slot.

    Relies on module-level globals: the data (``x``, ``y``), the split
    (``x_train``, ``x_test``, ``y_train``, ``y_test``) and the subplot
    layout (``spr`` rows, ``spc`` cols, ``spn`` current slot — advanced
    by one per call).

    Returns (train_score, test_score).
    """
    global spn
    spn += 1
    plt.subplot(spr, spc, spn)

    kernel = 'rbf'
    clf = SVC(gamma=gamma, kernel=kernel)
    clf.fit(x_train, y_train)
    s_train = clf.score(x_train, y_train)
    print(f'Training score = {s_train}')
    s_test = clf.score(x_test, y_test)
    print(f'Testing score = {s_test}')

    plt.title(f'gamma:{gamma} kernel:{kernel}')
    # Pass the colormap by name: plt.cm.get_cmap() was deprecated in
    # matplotlib 3.7 and removed in 3.9, so the old call crashes on
    # current matplotlib.
    plt.scatter(x[:, 0], x[:, 1], c=y, s=5, cmap='rainbow', zorder=100,
                label=f'{s_train:.2f}, {s_test:.2f}')
    # 100x100 grid spanning the data range; classify every grid node so
    # the contour traces the decision boundary behind the scatter points.
    xx, yy = np.mgrid[x[:, 0].min():x[:, 0].max():100j,
                      x[:, 1].min():x[:, 1].max():100j]
    zz = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    plt.contour(xx, yy, zz, zorder=0)
    plt.grid()
    plt.legend()
    return s_train, s_test


# Subplot grid: spr rows x spc cols; spn is the current slot,
# advanced by try_gamma on each call.
spr, spc, spn = 2, 3, 0

# Sweep a range of RBF gamma values and plot one decision boundary each.
gammas = [1, 2.5, 5, 7.5, 10, 15]
for g in gammas:
    try_gamma(g)

plt.show()
