# Load the pre-trained "brain" model (training snapshot of 2022-09-18).
from myclasses.a4_train_methods import get_trained_brain

brain = get_trained_brain(version="20220918")


# Fetch evaluation samples (dataset version of 2022-09-19).
# NOTE(review): downstream indexing suggests sample is
# (visual, auditory, labels) at indices 0/1/2 — confirm against loader.
from myclasses.a2_load_dataset import get_samples

sample = get_samples(version="20220919")

# Baseline ("enhance") condition: feed the model everything except the
# label entry and score the predictions against the labels in sample[2].
from QfUtil.TrainTool.train_classifier import tell_accuracy

enhance_predict = brain(sample[:-1])
enhance_accuracy = tell_accuracy(predict=enhance_predict, label=sample[2])
print(enhance_accuracy)

# Degrade the auditory channel: replace it entirely with low-amplitude
# Gaussian noise (pure noise, not a noisy version of the signal), so the
# model effectively has only the visual input to rely on.
import torch

noise_auditory = torch.randn_like(sample[1]) * 0.01
pure_visual_predict = brain((sample[0], noise_auditory))
pva = tell_accuracy(pure_visual_predict, sample[2])
print('when input make human confused, the model is also confused with accuracy: ', pva)

# What happened when the model got confused?
# Inspect the auditory route's raw response for one class-6 sample,
# first with strong added noise, then clean, for visual comparison.
from matplotlib import pyplot as plt


def _plot_auditory_route(auditory_input):
    """Run auditory_input through brain.auditory_route, plot the first
    output on a fresh figure, show it, and return the figure."""
    response = brain.auditory_route(auditory_input)
    figure = plt.figure()
    plt.plot(response[0].cpu().detach())
    figure.show()
    return figure


# Hoist the class-6 slice (labels live in sample[2]); the original
# recomputed this indexing three times.
class6_auditory = sample[1][sample[2] == 6][0:1]
# Heavily noised version (noise scaled x5 dwarfs the signal)...
ar_figure = _plot_auditory_route(class6_auditory + torch.randn_like(class6_auditory) * 5)
# ...then the clean version.
ar_figure = _plot_auditory_route(class6_auditory)
print('hold on')