import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'   # silence TensorFlow C++ log spam before TF is imported
import warnings
warnings.filterwarnings("ignore")

import base64
import pickle
import random

import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image

from rdkit import Chem
from rdkit.Chem import AllChem

from sklearn.ensemble import RandomForestRegressor   # class needed when unpickling the RF model
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
def create_model(optimizer='RMSprop', learn_rate=0.1, momentum=0.4, activation='sigmoid', dropout_rate=0.0):
    """Build the fully connected Keras regressor used on ECFC fingerprints.

    Note: `train_encoded` (the ECFC-encoded training matrix) only exists in the
    training pipeline; this builder is kept for reference and is not called by the app.
    """
    keras_model = Sequential()
    keras_model.add(Dense(128, input_dim=train_encoded.shape[1], activation=activation))
    keras_model.add(Dropout(dropout_rate))
    keras_model.add(Dense(32, activation=activation))
    keras_model.add(Dropout(dropout_rate))
    keras_model.add(Dense(8, activation=activation))
    keras_model.add(Dropout(dropout_rate))
    keras_model.add(Dense(1, activation='linear'))
    keras_model.summary()
    keras_model.compile(loss='mean_squared_error', optimizer=optimizer)
    return keras_model
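# Illustrative sketch (commented out; not executed by the app): how create_model could be
# wrapped for the scikit-learn grid search the training pipeline appears to have used.
# The parameter grid below is a placeholder, not the settings used to train the shipped
# model, and `train_encoded` / `train_labels` exist only in the training code.
#
# from keras.wrappers.scikit_learn import KerasRegressor
# from sklearn.model_selection import GridSearchCV
#
# estimator = KerasRegressor(build_fn=create_model, epochs=50, batch_size=32, verbose=0)
# param_grid = {'activation': ['sigmoid', 'relu'], 'dropout_rate': [0.0, 0.2]}
# grid = GridSearchCV(estimator=estimator, param_grid=param_grid, scoring='neg_mean_squared_error', cv=3)
# grid_result = grid.fit(train_encoded, train_labels)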
def get_ecfc(smiles_list, radius=2, nBits=2048, useCounts=True):
    """Encode a list of SMILES as hashed Morgan (ECFP/ECFC) fingerprints of length nBits."""
    ecfp_fingerprints = []
    erroneous_smiles = []
    for smiles in smiles_list:
        mol = Chem.MolFromSmiles(smiles)
        if mol is None:
            # Keep a placeholder row so rows stay aligned with the input; dropped below.
            ecfp_fingerprints.append([None] * nBits)
            erroneous_smiles.append(smiles)
        else:
            mol = Chem.AddHs(mol)
            if useCounts:
                # Count-based fingerprint (ECFC)
                ecfp_fingerprints.append(list(AllChem.GetHashedMorganFingerprint(mol, radius, nBits)))
            else:
                # Binary fingerprint (ECFP); convert the '0'/'1' characters to ints
                ecfp_fingerprints.append([int(b) for b in AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits).ToBitString()])
    df_ecfp_fingerprints = pd.DataFrame(data=ecfp_fingerprints, index=smiles_list)
    if len(erroneous_smiles) > 0:
        print("The following erroneous SMILES have been found in the data:\n{}.\n"
              "The erroneous SMILES will be removed from the data.".format('\n'.join(map(str, erroneous_smiles))))
        df_ecfp_fingerprints = df_ecfp_fingerprints.dropna(how='any')
    return df_ecfp_fingerprints
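# Illustrative example (commented out): two valid SMILES give a 2 x 2048 DataFrame of
# Morgan count fingerprints, indexed by the input strings.
#
# ecfc_demo = get_ecfc(["c1ccccc1", "CCO"])
# print(ecfc_demo.shape)   # -> (2, 2048)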
import deepchem as dc
from deepchem.models import GraphConvModel

def generate(SMILES, verbose=False):
    """Wrap a list of SMILES in a DeepChem NumpyDataset for the graph-convolution model."""
    featurizer = dc.feat.ConvMolFeaturizer()
    gcn = featurizer.featurize(SMILES)
    # The y values are small random placeholders; only the featurized X is used for prediction.
    properties = [random.randint(-1, 1) / 100 for i in range(0, len(SMILES))]
    dataset = dc.data.NumpyDataset(X=gcn, y=np.array(properties))
    return dataset
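# Illustrative example (commented out): the featurizer turns each SMILES into a ConvMol
# graph object (atom features plus connectivity) that GraphConvModel consumes.
#
# demo_ds = generate(["c1ccccc1"])
# print(demo_ds.X.shape)   # -> (1,)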
st.write("""# Accelerated reaction energy prediction for redox batteries 🧪 """)
st.write('By: [Alishba Imran](https://www.linkedin.com/in/alishba-imran-/)')
about_part = st.expander("Learn More About Project", expanded=False)
with about_part:
    st.write('''
#### About
Redox flow batteries (RFBs) are being widely explored as a class of electrochemical energy storage devices for large-scale energy storage applications. They convert electrical energy to chemical energy via reversible electrochemical reactions (oxidation and reduction) of compounds.

To develop next-generation redox flow batteries with high cycle life and energy density, we need to speed up the discovery of electroactive materials with the desired properties. This process is currently slow and expensive given how large and diverse the chemical space of candidate compounds is.

Using an attention-based graph convolutional neural network, I've developed a model that takes reactants as SMILES strings and predicts the reaction energy of the redox reaction.

A lot of this work was inspired by and built on top of this [paper](https://chemrxiv.org/engage/chemrxiv/article-details/60c7575f469df44a40f45465). Feel free to give it a try and reach out with any feedback. Email: alishbai734@gmail.com.
''')
st.write('**Enter your SMILES**')
st.write('Type one SMILES per line for the reactants in the redox reaction. The model will output the predicted reaction energy.')
SMILES_input = "Oc1cccc(c12)c(O)c(nn2)O\nc1cccc(c12)cc(nn2)O\nOc1c(O)ccc(c12)cc(nn2)O"
SMILES = st.text_area('Press Ctrl+Enter to run the model!', SMILES_input, height=20)
SMILES = SMILES.split('\n')
SMILES = list(filter(None, SMILES))   # drop empty lines
if len(SMILES) > 1000:
    # Cap the input at 1000 molecules per request.
    SMILES = SMILES[0:1000]
ecfc_encoder = get_ecfc(SMILES)        # 2048-dimensional Morgan count fingerprints for the Keras and RF models
generated_dataset = generate(SMILES)   # DeepChem dataset for the graph-convolution model
# DeepChem transformers fitted during training; predict() uses them to undo output normalisation.
filename = 'final_models/transformers.pkl'
with open(filename, 'rb') as infile:
    transformers = pickle.load(infile)

# Graph-convolution regressor restored from the pretrained checkpoint.
model_dir = 'final_models/tf_chp_initial'
gcne_model = dc.models.GraphConvModel(n_tasks=1, batch_size=100, mode='regression', dropout=0.25,
                                      model_dir=model_dir, random_seed=0)
gcne_model.restore('final_models/tf_chp_initial/ckpt-94/ckpt-197')
pred_gcne = gcne_model.predict(generated_dataset, transformers)
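# pred_gcne has shape (n, 1): one reaction-energy estimate per input SMILES, already
# mapped back to the original units by `transformers`.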
# Fully connected Keras model (architecture and weights stored separately).
from tensorflow.keras.models import model_from_json
with open('./final_models/keras_final_model_architecture.json') as f:
    keras_final_model = model_from_json(f.read())
keras_final_model.load_weights('./final_models/keras_final_model_weights.h5')

# Random-forest model (pickled, despite the .txt extension).
rf_final_model = pickle.load(open(r'./final_models/rf_final_model.txt', "rb"))

pred_keras = keras_final_model.predict(ecfc_encoder)   # shape (n, 1)
pred_rf = rf_final_model.predict(ecfc_encoder)         # shape (n,)
pred_rf_r = pred_rf.reshape((len(pred_rf), 1))         # reshape to (n, 1) to match the other models
# Simple unweighted consensus of the three models (not used for the displayed results).
pred_consensus = (pred_keras + pred_gcne + pred_rf_r) / 3
# Held-out MAEs of the three models on two test sets, in the order
# [GCN, Keras NN, random forest]; used below to weight the ensemble.
test1_mae = [0.00705, 0.00416, 0.0035]
test2_mae = [0.00589, 0.00483, 0.00799]
# Weight each model by the inverse cube of its mean test-set MAE.
w_gcne = np.power(2 / (test1_mae[0] + test2_mae[0]), 3)
w_keras = np.power(2 / (test1_mae[1] + test2_mae[1]), 3)
w_rf = np.power(2 / (test1_mae[2] + test2_mae[2]), 3)
weighted_pred_0_1_3 = (w_gcne * pred_gcne + w_keras * pred_keras + w_rf * pred_rf_r) / (w_gcne + w_keras + w_rf)
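# Worked illustration of the weighting above (values rounded): each weight is
# (2 / (test1 MAE + test2 MAE))**3, i.e. the inverse cube of the model's mean
# held-out MAE, so the most accurate model dominates the ensemble.
#   GCN:   (2 / (0.00705 + 0.00589))**3 ≈ 3.69e6
#   Keras: (2 / (0.00416 + 0.00483))**3 ≈ 1.10e7
#   RF:    (2 / (0.00350 + 0.00799))**3 ≈ 5.27e6
# Normalised weights ≈ 0.18 / 0.55 / 0.26 for the GCN, Keras and RF predictions.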
pred_weighted = (pred_gcne + pred_keras + pred_rf_r)/3
# Assemble and display the results table.
df_results = pd.DataFrame(SMILES, columns=['SMILES Reactant'])
df_results["Predicted Reaction Energy"] = weighted_pred_0_1_3.ravel()
df_results = df_results.round(6)
st.header('Prediction of Reaction Energy for RFB')
st.write(df_results)