#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sklearn
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
# Fix the global NumPy RNG so any stochastic steps are reproducible run-to-run.
np.random.seed(0)
def main():
    """Train and evaluate a logistic-regression acetylation-site predictor.

    Reads 'acetylation_learning.txt' (tab-separated columns: ID, Sequence,
    State), encodes the first three residues of each sequence as 60 binary
    features, standardizes them, grid-searches the regularization strength C
    with 5-fold cross-validation, persists the scaler and best estimator to
    'scaler_lr.pkl' / 'predictor_lr.pkl', and prints training-set metrics
    plus the fitted model parameters.
    """
    # `import sklearn` alone does NOT make submodules available as
    # attributes — import them explicitly to avoid AttributeError at runtime.
    from sklearn import metrics, model_selection, preprocessing
    try:
        from sklearn.externals import joblib  # scikit-learn < 0.23
    except ImportError:
        import joblib  # sklearn.externals.joblib was removed in 0.23+

    # 1. reading data
    learning_data = pd.read_table("acetylation_learning.txt", sep='\t',
                                  header=None,
                                  names=['ID', 'Sequence', 'State'])

    # Per-residue encoding over the 20 standard amino acids. NOTE(review):
    # 'A' maps to the all-zero vector (no column of its own), i.e. it acts as
    # the reference category; every other residue sets exactly one of the 20
    # positions. Presumably intentional (dummy coding) — confirm; a full
    # one-hot would set position 0 for 'A'.
    amino_acids = "ARNDCQEGHILKMFPSTWYV"
    encode = {}
    for idx, aa in enumerate(amino_acids):
        vec = [0] * 20
        if idx > 0:  # 'A' (idx 0) deliberately stays all-zero
            vec[idx] = 1
        encode[aa] = vec

    # Build the design matrix from the first three residues of each sequence
    # (3 positions x 20 indicator columns = 60 features). Named-column
    # iteration replaces deprecated positional Series indexing (row[1][1]).
    xtrain, ttrain = [], []
    for seq, state in zip(learning_data['Sequence'], learning_data['State']):
        code = []
        for char in seq[0:3]:
            code += encode[char]
        xtrain.append(code)
        ttrain.append(1 if state == "Ac" else 0)
    xtrain = np.asarray(xtrain, dtype=np.float32)
    ttrain = np.asarray(ttrain, dtype=np.int32)

    # 2. standardization of data (scaler persisted for later prediction runs)
    scaler = preprocessing.StandardScaler()
    xtrain = scaler.fit_transform(xtrain)
    joblib.dump(scaler, "scaler_lr.pkl", compress=True)

    # 3. learning, cross-validation: grid-search C over six decades
    diparameter = {"C": [10**i for i in range(-2, 4)], "random_state": [123]}
    licv = model_selection.GridSearchCV(LogisticRegression(),
                                        param_grid=diparameter, cv=5)
    licv.fit(xtrain, ttrain)
    predictor = licv.best_estimator_
    joblib.dump(predictor, "predictor_lr.pkl", compress=True)

    # 4. evaluating the performance of the predictor
    # (on the training set itself, so these figures are optimistic)
    liprediction = predictor.predict(xtrain)
    table = metrics.confusion_matrix(ttrain, liprediction)
    # Cast to Python ints: the MCC denominator multiplies four marginals and
    # would silently overflow numpy's fixed-width int64 on large datasets.
    tn, fp, fn, tp = (int(v) for v in table.ravel())
    print("TPR\t{0:.3f}".format(tp/(tp+fn)))              # sensitivity/recall
    print("SPC\t{0:.3f}".format(tn/(tn+fp)))              # specificity
    print("PPV\t{0:.3f}".format(tp/(tp+fp)))              # precision
    print("ACC\t{0:.3f}".format((tp+tn)/(tp+fp+fn+tn)))
    print("MCC\t{0:.3f}".format((tp*tn-fp*fn)/((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))**(1/2)))
    print("F1\t{0:.3f}".format((2*tp)/(2*tp+fp+fn)))

    # 5. printing parameters of the predictor
    print(sorted(predictor.get_params(True).items()))
    print(predictor.coef_)
    print(predictor.intercept_)
# Run the training pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()