import os
import numpy as np
from math import sqrt,log
from scipy import stats
from torch_geometric.data import InMemoryDataset, DataLoader
from torch_geometric import data as DATA
import torch
from torch import nn
import torch.nn.functional as F
from sklearn.metrics import accuracy_score
import tensorflow as tf
import time
import matplotlib.pyplot as plt

class TestbedDataset(InMemoryDataset):
    def __init__(self, root='/tmp', dataset='davis', 
                 xd=None, xt=None, y=None, transform=None,
                 pre_transform=None,smile_graph=None):

        #root is required for save preprocessed data, default is '/tmp'
        super(TestbedDataset, self).__init__(root, transform, pre_transform)  #TestbedDataset引用于哪个包未知
        # benchmark dataset, default = 'davis'
        self.dataset = dataset
        if os.path.isfile(self.processed_paths[0]):   #processed_paths代表的内容未知
            print('Pre-processed data found: {}, loading ...'.format(self.processed_paths[0]))
            #torch.load(f, map_location=None, pickle_module=<module 'pickle' from '/home/jenkins/miniconda/lib/python3.5/pickle.py'>)
            #f – 类文件对象 (返回文件描述符)或一个保存文件名的字符串
            #从磁盘文件中读取一个通过torch.save()保存的对象。
            #用处是读取预训练路径？不是用于训练？因为不存在可以立刻重训练？
            self.data, self.slices = torch.load(self.processed_paths[0])
        else:
            print('Pre-processed data {} not found, doing pre-processing...'.format(self.processed_paths[0]))
            self.process(xd, xt, y,smile_graph)
            self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_file_names(self):
        pass
        #return ['some_file_1', 'some_file_2', ...]

    @property
    def processed_file_names(self):
        return [self.dataset + '.pt']

    def download(self):
        # Download to `self.raw_dir`.
        pass

    def _download(self):
        pass

    def _process(self):
        if not os.path.exists(self.processed_dir):
            os.makedirs(self.processed_dir)

    # Customize the process method to fit the task of drug-target affinity prediction
    # Inputs:
    # XD - list of SMILES, XT: list of encoded target (categorical or one-hot),
    # Y: list of labels (i.e. affinity)
    # Return: PyTorch-Geometric format processed data
    def process(self, xd, xt, y,smile_graph):
        assert (len(xd) == len(xt) and len(xt) == len(y)), "The three lists must be the same length!"
        data_list = []
        data_len = len(xd)
        for i in range(data_len):
            print('Converting SMILES to graph: {}/{}'.format(i+1, data_len))
            smiles = xd[i]
            target = xt[i]
            labels = y[i]
            # convert SMILES to molecular representation using rdkit
            c_size, features, edge_index = smile_graph[smiles]
            # make the graph ready for PyTorch Geometrics GCN algorithms:
            # print("smile:",smiles)
            # print("edge index:",edge_index)
            """
            torch.transpose(input, dim0, dim1, out=None) → Tensor
            返回输入矩阵input的转置。交换维度dim0和dim1。 输出张量与输入张量共享内存，所以改变其中一个会导致另外一个也被修改。
            """
            GCNData = DATA.Data(x=torch.Tensor(features),
                                edge_index=torch.LongTensor(edge_index).transpose(1, 0),
                                y=torch.FloatTensor([labels]))
            GCNData.target = torch.LongTensor([target])
            GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
            # append graph, label and target sequence to data list
            data_list.append(GCNData)

        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        print('Graph construction done. Saving to file.')
        data, slices = self.collate(data_list)
        # save preprocessed data:
        torch.save((data, slices), self.processed_paths[0])

def rmse(y, f):
    """Root-mean-squared error between labels ``y`` and predictions ``f`` (numpy arrays)."""
    squared_errors = (y - f) ** 2
    return sqrt(squared_errors.mean(axis=0))
def mse(y, f):
    """Mean squared error between labels ``y`` and predictions ``f`` (numpy arrays)."""
    diff = y - f
    return (diff ** 2).mean(axis=0)
def pearson(y, f):
    """Pearson correlation coefficient between ``y`` and ``f``."""
    return np.corrcoef(y, f)[0, 1]
def spearman(y, f):
    """Spearman rank correlation between ``y`` and ``f``."""
    correlation, _ = stats.spearmanr(y, f)
    return correlation
def ci(y, f):
    """Concordance index between labels ``y`` and predictions ``f``.

    y, f: 1-D numpy arrays of the same length. For every pair with distinct
    labels, a pair is concordant (counts 1) when the prediction order agrees
    with the label order, and ties in prediction count 0.5.

    Returns S/z where z is the number of comparable pairs. Fix: when every
    label is tied there are no comparable pairs (z == 0); return 0.0 instead
    of raising ZeroDivisionError.
    """
    ind = np.argsort(y)
    y = y[ind]
    f = f[ind]
    i = len(y) - 1
    j = i - 1
    z = 0.0
    S = 0.0
    # O(n^2) scan over all pairs, comparing only pairs with y[i] > y[j].
    while i > 0:
        while j >= 0:
            if y[i] > y[j]:
                z = z + 1
                u = f[i] - f[j]
                if u > 0:
                    S = S + 1
                elif u == 0:
                    S = S + 0.5
            j = j - 1
        i = i - 1
        j = i - 1
    # No comparable pairs (all labels equal, or fewer than 2 samples).
    return S / z if z > 0 else 0.0

# y: ground-truth labels, f: raw model outputs (scores/logits),
# both torch tensors (flattened upstream from total_labels / total_preds).
def crossentropy(y, f):
    """Mean multi-class cross-entropy between logits ``f`` and labels ``y``.

    f: (N, C) float tensor of unnormalized scores — F.cross_entropy applies
       log-softmax internally, so ``f`` must NOT be pre-softmaxed.
    y: float tensor of class labels; squeezed and cast to long before use.
    Returns the loss as a Python float.
    """
    targets = y.squeeze().long()
    loss = F.cross_entropy(f, targets)
    return loss.item()

def accuracy(y, f):
    """Classification accuracy of argmax predictions.

    y: torch tensor of ground-truth class labels (flattened to 1-D numpy).
    f: (N, C) torch tensor of raw scores; predicted class is the argmax of
       the softmax over dim 1 (softmax does not change the argmax, but is
       kept to match the other metrics in this file).
    Returns the fraction of correct predictions via sklearn accuracy_score.
    """
    true_labels = y.numpy().flatten()
    predicted = torch.max(F.softmax(f, dim=1), 1)[1]
    predicted_labels = predicted.data.numpy().squeeze()
    return accuracy_score(true_labels, predicted_labels)

def tf_accuracy(y_true, y_pred):
    """Binary accuracy via Keras' BinaryAccuracy metric (threshold 0.5)."""
    metric = tf.keras.metrics.BinaryAccuracy()
    metric.update_state(y_true, y_pred)
    return metric.result().numpy()

def precision(y, f):
    """Precision of argmax predictions, computed with Keras' Precision metric.

    y: torch tensor of ground-truth labels; f: (N, C) torch tensor of scores.
    """
    true_labels = y.numpy().flatten()
    predicted = torch.max(F.softmax(f, dim=1), 1)[1]
    predicted_labels = predicted.data.numpy().squeeze()

    metric = tf.keras.metrics.Precision()
    metric.update_state(true_labels, predicted_labels)
    return metric.result().numpy()

def recall(y, f):
    """Recall of argmax predictions, computed with Keras' Recall metric.

    y: torch tensor of ground-truth labels; f: (N, C) torch tensor of scores.
    """
    true_labels = y.numpy().flatten()
    predicted = torch.max(F.softmax(f, dim=1), 1)[1]
    predicted_labels = predicted.data.numpy().squeeze()

    metric = tf.keras.metrics.Recall()
    metric.update_state(true_labels, predicted_labels)
    return metric.result().numpy()

def auROC(y, f):
    """Area under the ROC curve via tf.keras.metrics.AUC.

    y: torch tensor of binary ground-truth labels.
    f: (N, 2) torch tensor of raw class scores.

    Fix: AUC must be computed from continuous scores, not hard argmax
    labels — binarizing first collapses the ROC curve to a single
    threshold point and distorts the AUC. We therefore feed the softmax
    probability of the positive class (column 1), which lies in [0, 1]
    as the metric expects.
    """
    m = tf.keras.metrics.AUC()

    y_true = y.numpy().flatten()
    # Continuous positive-class probability instead of argmax labels.
    y_score = F.softmax(f, dim=1)[:, 1].data.numpy()

    m.update_state(y_true, y_score)
    return m.result().numpy()

def auPR(y, f):
    """Area under the precision-recall curve via tf.keras.metrics.AUC.

    y: torch tensor of binary ground-truth labels.
    f: (N, 2) torch tensor of raw class scores.

    Fix: like ROC-AUC, PR-AUC needs continuous scores — hard argmax labels
    reduce the curve to a single threshold point. Feed the softmax
    probability of the positive class (column 1) instead.
    """
    m = tf.keras.metrics.AUC(curve='PR')

    y_true = y.numpy().flatten()
    # Continuous positive-class probability instead of argmax labels.
    y_score = F.softmax(f, dim=1)[:, 1].data.numpy()

    m.update_state(y_true, y_score)
    return m.result().numpy()

def plotLoss(loss, test_loss, foldind, filepath='./'):
    """Plot train/test loss curves for one fold and save them as a PNG.

    loss / test_loss: per-epoch loss sequences (train and test).
    foldind: fold index, embedded in the output filename.
    filepath: output directory; created (including parents) if missing.
    """
    # makedirs handles nested paths; os.mkdir would fail on "a/b".
    os.makedirs(filepath, exist_ok=True)
    figname = str(foldind) + "-" + str(
        time.strftime("%b_%d_%Y_%H_%M_%S"))

    plt.figure()
    plt.plot(loss)
    plt.plot(test_loss)
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['trainloss', 'testloss'], loc='upper left')
    # os.path.join works whether or not filepath ends with a slash.
    # The papertype/frameon kwargs were removed from savefig in
    # matplotlib 3.5 and would raise TypeError, so they are dropped.
    plt.savefig(os.path.join(filepath, figname + ".png"), dpi=None,
                facecolor='w', edgecolor='w', orientation='portrait',
                transparent=False, bbox_inches=None, pad_inches=0.1)
    plt.close()