import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from joblib import dump
from rdkit import DataStructs
from rdkit.Chem import AllChem, MACCSkeys
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import precision_score, classification_report, recall_score, f1_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

# pandas warnings
pd.options.mode.chained_assignment = None  # default='warn'

# import files
# import files
df = pd.read_excel("database_0714.xlsx")


def _to_ml_class(raw):
    """Condense a raw reaction class number (1-45) into an ML class (1-32).

    Several raw classes are merged: 9-11 fold into class 5, 29-30 into 24,
    and 39-45 into 32; the remaining ranges are shifted to close the gaps.
    Returns None for values outside 1-45 (caller then keeps the existing
    "ML class no." cell untouched, matching the original elif ladder).
    """
    n = int(raw)
    if 1 <= n <= 8:
        return n
    if 9 <= n <= 11:
        return 5
    if 12 <= n <= 28:
        return n - 3
    if n in (29, 30):
        return 24
    if 31 <= n <= 36:
        return n - 6
    if n in (37, 38):
        return n - 7
    if 39 <= n <= 45:
        return 32
    return None


for i in range(len(df)):
    mapped = _to_ml_class(df.loc[i, "reaction class no."])
    if mapped is not None:
        # .loc avoids the chained-assignment write the original relied on
        df.loc[i, "ML class no."] = mapped
# print(df.head(2))

# One row per unique substrate, each carrying the full list of ML class
# numbers (duplicates included, in file order) observed for that substrate.
data = df[["PUBCHEM CID", "source_substrate_smiles", "substrate_smiles_canonical"]].copy()
data.drop_duplicates(inplace=True)
data = data.reset_index(drop=True)
# groupby preserves within-group row order, so each list matches what the
# original per-row boolean filter (`df[df[...] == smi]...to_list()`) produced,
# but in a single O(n) pass instead of O(n*m) repeated scans.
class_lists = df.groupby("substrate_smiles_canonical")["ML class no."].apply(list)
data["ML class no."] = data["substrate_smiles_canonical"].map(class_lists)
# print(data.head(2))
# print(data.head(2))

# transform reaction class numbers into a multi-hot target matrix y:
# y[i, c-1] == 1.0 iff substrate i was observed in ML class c.
arr_length = int(df["ML class no."].max())  # number of ML classes (expected 32)
# Preallocate once instead of growing row-by-row with np.append (which copies
# the whole array each iteration); also removes the copy-pasted `range(1)`
# bootstrap loop the original used to seed `y`.
y = np.zeros((len(data), arr_length), dtype=float)
for i in range(len(data)):
    for cls in data["ML class no."][i]:
        y[i, int(cls) - 1] = 1.0
# print(y.shape)
# print(y.shape)

# transform smiles into numpy array for model
# MACCS (166-bit structural keys)
ls = []
for i in range(len(data)):
    smi = data['substrate_smiles_canonical'][i]
    m = AllChem.MolFromSmiles(smi)
    if m is None:
        # MolFromSmiles returns None for unparsable SMILES — report and skip.
        # NOTE(review): skipping a row here desynchronizes X from y (y keeps
        # the row); fine only while every SMILES in the dataset parses.
        print(smi)
        print(i)
        continue
    maccs_keys = MACCSkeys.GenMACCSKeys(m)
    array = np.zeros((0,), dtype=np.float64)
    DataStructs.ConvertToNumpyArray(maccs_keys, array)
    ls.append(array)
X = np.array(ls)
X = X.astype(float)
# print(X.shape)
# print(X.shape)
# MORGAN (radius-3, 2048-bit circular fingerprint)
ls = []
for i in range(len(data)):
    smi = data['substrate_smiles_canonical'][i]
    m = AllChem.MolFromSmiles(smi)
    if m is None:
        # Unparsable SMILES — report and skip (see MACCS loop for the
        # X/y alignment caveat).
        print(smi)
        print(i)
        continue
    fps = AllChem.GetMorganFingerprintAsBitVect(m, 3, nBits=2048)
    array = np.zeros((0,), dtype=np.float64)
    DataStructs.ConvertToNumpyArray(fps, array)
    ls.append(array)
X2 = np.array(ls)
X2 = X2.astype(float)
# print(X2.shape)
# Concatenate MACCS + Morgan feature blocks column-wise: (n, 167 + 2048).
X_append = np.hstack((X, X2))
print(X_append.shape)
X_train, X_test, y_train, y_test = train_test_split(X_append, y, test_size=0.2, random_state=42)

# ExtraTreesClassifier: multi-label fit on the combined fingerprint matrix.
clf = ExtraTreesClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Weighted averaging accounts for class imbalance across the 32 labels.
for metric_name, metric_fn in (('precision_score', precision_score),
                               ('recall_score', recall_score),
                               ('f1_score', f1_score)):
    print(metric_name, metric_fn(y_test, y_pred, average='weighted'))
dump(clf, 'clf.joblib')

# Multi-layer Perceptron classifier: two hidden layers sized to the
# fingerprint width, Adam optimizer with default L2 penalty.
mlp = MLPClassifier(
    hidden_layer_sizes=(2048, 100),
    activation='relu',
    solver='adam',
    alpha=0.0001,
    learning_rate_init=0.001,
    validation_fraction=0.1,
)
mlp.fit(X_train, y_train)
y_pred1 = mlp.predict(X_test)
for metric_name, metric_fn in (('precision_score', precision_score),
                               ('recall_score', recall_score),
                               ('f1_score', f1_score)):
    print(metric_name, metric_fn(y_test, y_pred1, average='weighted'))
print(classification_report(y_test, y_pred1))
# plt.plot(mlp.loss_curve_)
dump(mlp, 'mlp.joblib')

# Confusion matrix — single-label view: argmax collapses the multi-hot rows
# to the strongest class per sample.
# NOTE(review): built from y_pred (ExtraTrees); use y_pred1 to inspect the MLP.
model_conf = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1))
plt.imshow(model_conf, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(32)  # 32 condensed ML classes
plt.xticks(tick_marks, rotation=45)
plt.yticks(tick_marks)
# confusion_matrix rows are the TRUE labels and imshow maps rows to the
# y axis, so "Actual" belongs on y and "Predicted" on x (original had
# the two labels swapped).
plt.xlabel("Predicted Species")
plt.ylabel("Actual Species")
plt.show()