from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
import git
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
import mlflow
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from azureml.core.model import Model
import pickle

class svmImp(object):
    '''
    Train, validate, convert (ONNX) and register an SVM classifier.

    Wraps sklearn's SVC with grid-searched hyper-parameters and logs
    training/validation results to MLflow and AzureML.
    '''
    def __init__(self,dataset,X_train,y_train,X_val,y_val,run):
        self.X_train = X_train
        self.y_train = y_train
        self.X_val = X_val
        self.y_val = y_val
        self.dataset = dataset  # AzureML dataset; .name/.version are used as registration tags
        self.run = run          # AzureML run handle (kept for callers; not used directly here)
        self.svc = svm.SVC()
        # Hyper-parameter grid explored by GridSearchCV in svmImpTrn().
        self.parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}

    def svmImpTrn(self):
        '''
        Model training with SVM method. First we use grid search to find out the best svm hyper-parameters.
        Then we refit a fresh SVC with the winning combination and log it to MLflow.
        '''
        print("We use Grid Search to find out the best hyper-parameters for svm training.")
        svc_grid = GridSearchCV(self.svc, self.parameters)
        svc_grid.fit(self.X_train, self.y_train)
        # BUG FIX: get_params(deep=True)['estimator__C'/'estimator__kernel']
        # returns the *initial* parameters of the estimator handed to
        # GridSearchCV (C=1.0, kernel='rbf'), not the search result — the
        # grid search outcome was being discarded. best_params_ holds the
        # combination that actually won the search.
        best = svc_grid.best_params_
        print("Best hyper-parameters found by grid search: ", best)
        print("Now we implement the best hyperparameter to svm training.")
        self.svc = SVC(C=best['C'], kernel=best['kernel'])
        self.svc.fit(self.X_train, self.y_train)
        mlflow.log_param("C", best['C'])
        mlflow.log_param("Kernel", best['kernel'])

    def svmImpVal(self):
        '''
        Model testing with SVM method.
        Computes accuracy, macro F-score, macro precision and macro recall
        on the hold-out set and logs them (plus the git commit sha) to MLflow.
        '''
        predicted_svc = self.svc.predict(self.X_val)
        self.acc = accuracy_score(self.y_val, predicted_svc)
        self.fscore = f1_score(self.y_val, predicted_svc, average="macro")
        self.precision = precision_score(self.y_val, predicted_svc, average="macro")
        self.recall = recall_score(self.y_val, predicted_svc, average="macro")
        print("Validation result: Accuracy: {0}; Fscore: {1}; Precision: {2}; Recall: {3}"\
              .format(self.acc, self.fscore, self.precision, self.recall))
        repo = git.Repo(search_parent_directories=True)
        self.sha = repo.head.object.hexsha
        # Log to AzureML and MLflow.
        # BUG FIX: these numeric results are metrics, not parameters
        # (MLflow params are write-once and re-logging a key raises), and
        # recall was logged under the duplicate key "Test_accuracy",
        # mislabeling it and conflicting with the accuracy entry.
        mlflow.log_metric("Test_accuracy", self.acc)
        mlflow.log_metric("Precision", self.precision)
        mlflow.log_metric("Recall", self.recall)
        mlflow.log_metric("F-Score", self.fscore)
        mlflow.log_param("Git-sha", self.sha)
        mlflow.sklearn.log_model(self.svc, 'outputs')

    def onnxModelSave(self):
        '''
        Convert svm model into ONNX format file and save to local path.

        NOTE(review): the input signature is hard-coded to 6 float features —
        confirm this matches the training data's column count.
        '''
        initial_type = [('float_input', FloatTensorType([None, 6]))]
        onx = convert_sklearn(self.svc, initial_types=initial_type)
        with open("outputs/svc.onnx", "wb") as f:
            f.write(onx.SerializeToString())

    def modelRegister(self,workspace):
        '''
        Register the ONNX model file on the AzureML workspace.

        :param workspace: azureml.core.Workspace to register the model in.
        '''
        # TODO(review): 'hyparameter-C' and 'testdata-accuracy' are
        # hard-coded, potentially stale values — consider tagging with the
        # actual self.svc.C and self.acc from the current run instead.
        model = Model.register(model_path = './outputs/svc.onnx', # this points to a local file 
                            model_name = "support-vector-classifier", # this is the name the model is registered as
                            tags = {'dataset': self.dataset.name, 'version': self.dataset.version, 'hyparameter-C': '1', 'testdata-accuracy': '0.9519'}, 
                            model_framework='pandas==0.23.4',
                            description = "Support vector classifier to predict weather at port of Turku",
                            workspace = workspace)

        print('Name:', model.name)
        print('Version:', model.version)
        # Save the model to the outputs directory for capture.
        # NOTE(review): this logs the sklearn estimator (as a pickle) under an
        # artifact path named like an ONNX file — confirm that is intended.
        mlflow.sklearn.log_model(self.svc, 'outputs/svc.onnx')

    def modelRegisterSC(self, sc, workspace):
        '''
        Register sc (StandardScaler) for the data standardization parameters.

        Pickles the fitted scaler locally, then registers the file on the
        AzureML workspace so inference can reuse the same scaling.

        :param sc: fitted scaler (e.g. StandardScaler) used at training time.
        :param workspace: azureml.core.Workspace to register the scaler in.
        '''
        with open('./outputs/scaler.pkl', 'wb') as scaler_pkl:
            pickle.dump(sc, scaler_pkl)
        # Register Model on AzureML WS
        scaler = Model.register(model_path = './outputs/scaler.pkl', # this points to a local file 
                            model_name = "scaler", # this is the name the model is registered as
                            tags = {'dataset': self.dataset.name, 'version': self.dataset.version}, 
                            model_framework='pandas==0.23.4',
                            description = "Scaler used for scaling incoming inference data",
                            workspace = workspace)
        print('Name:', scaler.name)
        print('Version:', scaler.version)