#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# author    : Xiangwei Wang
# email     : wangxw-cn@qq.com
# datetime  : 2021/5/11 13:04

"""
"""

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
import numpy as np

import numpy

import torch
from torch import nn, Tensor, optim
from torch.autograd import Variable
import torch.nn.functional as F
from typing import (
    TypeVar, Type, Union, Optional, Any,
    List, Dict, Tuple, Callable, NamedTuple
)

import random
import time
import os
import copy
import re
import logging
from concurrent.futures import ThreadPoolExecutor
from concurrent import futures
import itertools

import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from utils import Args, D, timeit

from sklearn.model_selection import cross_val_score


# iris = datasets.load_iris()
# print(iris)
# exit(0)
# x = iris.data
# y = iris.target

def _load_split(path):
    """Load one pickled feature/label array (entries are [features, label] pairs)."""
    return np.load(path, allow_pickle=True)


# Pre-extracted feature splits: 90/10 train/test split plus the full 100% set.
defence_90 = _load_split("./feature_extraction/defence_90.npy")
defence_10 = _load_split("./feature_extraction/defence_10.npy")
defence_100 = _load_split("./feature_extraction/defence_100.npy")

# features_train = []
# labels_train = []
# for sample in defence_90:
#     features_train.append(np.array(sample[0]) / np.array([2.26070000e+04,9.87146530e-01,1.69400000e+03,9.98482549e-01,2.42060000e+04,6.97181938e+03,1.04920932e+04,6.97472436e+03,1.22164046e+04,9.25031865e+00,1.99393939e+01,2.00000000e+01,2.00000000e+01,3.00000000e+01,3.00000000e+01,3.00000000e+01,3.00000000e+01,7.84896552e+02,8.53581712e+02,4.01000000e+02,2.73600000e+03,1.14666412e+01,2.73586412e-01,2.99794775e-01,1.85138514e+01,9.27920989e-01,1.41925807e+00,2.17992468e+01,5.66521710e-01,1.28559677e+00,9.13071063e+00,1.93695652e+01,2.00000000e+01,2.00000000e+01,2.27045571e+01,4.85500000e+01,5.00000000e+01,5.00000000e+01,2.22952061e+01,4.80000000e+01,5.00000000e+01,5.00000000e+01,1.00000000e+02,1.00000000e+02,1.00000000e+02,1.00000000e+02]))
#     # features_train.append(sample[0])
#     labels_train.append(sample[1])
# features_test = []
# labels_test = []
# for sample in defence_10:
#     features_test.append(np.array(sample[0]) / np.array([2.26070000e+04,9.87146530e-01,1.69400000e+03,9.98482549e-01,2.42060000e+04,6.97181938e+03,1.04920932e+04,6.97472436e+03,1.22164046e+04,9.25031865e+00,1.99393939e+01,2.00000000e+01,2.00000000e+01,3.00000000e+01,3.00000000e+01,3.00000000e+01,3.00000000e+01,7.84896552e+02,8.53581712e+02,4.01000000e+02,2.73600000e+03,1.14666412e+01,2.73586412e-01,2.99794775e-01,1.85138514e+01,9.27920989e-01,1.41925807e+00,2.17992468e+01,5.66521710e-01,1.28559677e+00,9.13071063e+00,1.93695652e+01,2.00000000e+01,2.00000000e+01,2.27045571e+01,4.85500000e+01,5.00000000e+01,5.00000000e+01,2.22952061e+01,4.80000000e+01,5.00000000e+01,5.00000000e+01,1.00000000e+02,1.00000000e+02,1.00000000e+02,1.00000000e+02]))
#     # features_test.append(sample[0])
#     labels_test.append(sample[1])

# Normalize every feature column of the full dataset to [0, 1] by its
# per-feature maximum across all samples.
#
# BUG FIX: the previous code computed `np.array(defence_100)[:,0].max(axis=0)`.
# On an object array whose cells are per-sample feature lists, `.max` compares
# the *whole lists* lexicographically and returns a single sample's feature
# vector — not the element-wise column maxima that the zero-replacement guard
# below clearly intends. Stacking the features into a float matrix first
# yields the intended per-feature maxima.
# NOTE(review): assumes each entry of defence_100 is a [feature_vector, label]
# pair (as the original per-sample loop did) — confirm against the .npy files.
feature_matrix = np.array([np.asarray(sample[0], dtype=float) for sample in defence_100])
max_features = feature_matrix.max(axis=0)
print(max_features)
# Replace all-zero columns with 1 so the normalization never divides by zero.
max_features[max_features == 0] = 1
print(max_features)
features_100_train = list(feature_matrix / max_features)
labels_100_train = [sample[1] for sample in defence_100]
# exit(0)
# print(features_100_train)

# Baseline: k-nearest neighbours (k=50), 10-fold cross-validation.
# Accuracy on the defence set is roughly 24%.
knn_classifier = KNeighborsClassifier(50)
knn_acc = cross_val_score(
    knn_classifier, features_100_train, labels_100_train, cv=10
).mean()
print('knn', knn_acc)

# Random forest (100 gini trees, all cores), 10-fold cross-validation.
# Accuracy is roughly 46%.
rfc = RandomForestClassifier(
    n_estimators=100,
    n_jobs=-1,
    random_state=0,
    criterion='gini',
)
cross_score = cross_val_score(
    rfc, features_100_train, labels_100_train, cv=10
).mean()
print(f'RandomForest cross-validate, estimators=100, acc={cross_score*100}%')

# 随机森林，大约47%
# rfc = RandomForestClassifier(n_estimators=100,n_jobs=-1,random_state=0)
# rfc.fit(features_train, labels_train)
# score = rfc.score(features_test, labels_test)
# print(f'RandomForest, estimators = 100, {score}')

# SVM，大约42%
from sklearn.svm import SVC, NuSVC, LinearSVC
svm = NuSVC(kernel='rbf')
score = cross_val_score(svm, features_100_train, labels_100_train, cv=10).mean()
print(f'SVC, {score}')
svm = NuSVC(kernel='poly')
score = cross_val_score(svm, features_100_train, labels_100_train, cv=10).mean()
print(f'SVC, {score}')
svm = NuSVC(kernel='sigmoid')
score = cross_val_score(svm, features_100_train, labels_100_train, cv=10).mean()
print(f'SVC, {score}')





