import numpy as np
import os
import gzip
import util
from pandas import DataFrame
from sklearn import preprocessing


#-------------------------------------------------------------------------------------------------------------------
# Function: Get the residue sequence and corresponding secondary structure of each link (protein chain) from a dataset.
# Input: data_path and a boolean value to differentiate between training data and test data.
# Output: Three lists: the sequences, the labels, and the per-residue PSSM profiles.
#-------------------------------------------------------------------------------------------------------------------
def get_sequence(data_path, istrain):
    """Decode one dataset file into sequences, labels and PSSM profiles.

    Each dataset row is one link (protein chain) of 700 residue slots with 57
    channels per slot: [:22] orthogonal residue coding, [22:31] one-hot
    secondary-structure label, [35:57] the 22-dim PSSM profile.

    Parameters:
        data_path -- path to the .npy.gz dataset file.
        istrain   -- 1 to load the CullPDB training data, anything else for CB513.

    Returns:
        (sequences, label, pssm) -- lists of residue strings, label strings
        (H/E/C), and per-residue 22-dim PSSM slices (flat across all links).

    Raises:
        FileNotFoundError -- if data_path does not exist.
        ValueError        -- if a decoded sequence/label pair is inconsistent.
    """
    if not os.path.isfile(data_path):
        raise FileNotFoundError(
            'data does not exist, you can download it from '
            'http://www.princeton.edu/~jzthree/datasets/ICML2014')

    sequences = []
    label = []
    pssm = []

    def decode(link, positions, stop_at_noseq=True):
        # Decode one link: orthogonal coding -> single-letter residues,
        # one-hot coding -> H/E/C labels; append each residue's 22-dim PSSM
        # profile to the shared flat list.
        seq = ''
        ss = ''
        for j in positions:
            if stop_at_noseq and link[j][21] == 1:   # 'Noseq' padding starts here
                break
            seq += util.code2amino(link[j][:22])
            ss += util.code2label(link[j][22:31])
            pssm.append(link[j][35:57])
        return seq, ss

    if istrain == 1:                          # training data (CullPDB)
        print('Loading CullPDB data ...')
        # X = util.load_gz(data_path)
        X = np.load(data_path, 'r')           # loaded numpy array's shape is (5534*39900)
        X = np.reshape(X, (5534, 700, 57))
        for i in range(1000):                 # only the first 1000 links are used
            seq, ss = decode(X[i], range(700))
            sequences.append(seq)
            label.append(ss)
    else:                                     # test data (CB513)
        print('Loading CB513 data ...')
        # (514*39900): it should only contain 513 links, but one link is longer
        # than 700 AA and was split into 2 rows at the end of the dataset.
        X = np.load(data_path, 'r')
        X = np.reshape(X, (514, 700, 57))
        for i in range(512):                  # the 512 links not longer than 700 AA
            seq, ss = decode(X[i], range(700))
            if len(seq) != len(ss):
                raise ValueError('convert error')
            sequences.append(seq)
            label.append(ss)
        # Rows 512 and 513 hold the two halves of the single 754 AA link;
        # rejoin them into one sequence (no Noseq check: both spans are real).
        seq_a, ss_a = decode(X[512], range(700), stop_at_noseq=False)
        seq_b, ss_b = decode(X[513], range(646, 700), stop_at_noseq=False)
        sequences.append(seq_a + seq_b)
        label.append(ss_a + ss_b)
    return (sequences, label, pssm)

#----------------------------------------------------------------------------------------------
# Function: Calculate how often each amino acid appears in the 3 secondary-structure types in the CullPDB dataset.
# Input: The training data's sequence list and label list obtained from the 'get_sequence' function.
# Output: Three dictionaries giving each residue's propensity for H/E/C:
# log(percentage of that class's residues that are this amino acid / the amino acid's overall occurrence in proteins)
#----------------------------------------------------------------------------------------------
def Cal_occur(train_X, train_Y):
    """Compute per-residue log propensities for the H/E/C structure classes.

    For each amino-acid letter a and class s in {H, E, C}:
        occur_s[a] = log( percentage of class-s residues that are a
                          / overall percentage occurrence of a in proteins )
    rounded to 3 decimals.

    Parameters:
        train_X -- list of residue sequences (single-letter codes).
        train_Y -- list of matching label strings over the alphabet H/E/C.

    Returns:
        (occur_H, occur_E, occur_C) -- three dicts keyed by the 20 standard
        amino-acid letters.

    Raises:
        ValueError -- if sequences and labels are inconsistent, the class
        totals do not add up, or some standard amino acid was never observed
        in one of the three classes.
    """
    if len(train_X) != len(train_Y):
        raise ValueError('training data is not consistent with its labels')

    total_acid = 0                       # total number of labelled residues
    parts_H = []                         # residues labelled H, in order; etc.
    parts_E = []
    parts_C = []
    for seq, labels in zip(train_X, train_Y):
        for aa, ss in zip(seq, labels):
            total_acid += 1
            if ss == 'H':
                parts_H.append(aa)
            elif ss == 'E':
                parts_E.append(aa)
            elif ss == 'C':
                parts_C.append(aa)
            else:
                print('There are unexpected label except H/E/C:', ss)
    # Join once instead of quadratic string += accumulation.
    str_H = ''.join(parts_H)
    str_E = ''.join(parts_E)
    str_C = ''.join(parts_C)
    total_H = len(str_H)
    total_E = len(str_E)
    total_C = len(str_C)

    if total_acid != total_H + total_E + total_C:
        raise ValueError('Statistic error !')
    print('Cullpdb total:', total_acid, 'H:', total_H, 'E', total_E, 'C', total_C)

    # Percentage of each residue type within each structure class.
    count_H = {a: (str_H.count(a) * 100 / total_H) for a in set(str_H)}
    count_E = {a: (str_E.count(a) * 100 / total_E) for a in set(str_E)}
    count_C = {a: (str_C.count(a) * 100 / total_C) for a in set(str_C)}

    # Each amino acid's overall occurrence (%) in proteins (literature values).
    oip = {'A': 8.76, 'R': 5.78, 'N': 3.93, 'D': 5.49, 'C': 1.38, 'E': 6.32,
           'Q': 3.9, 'G': 7.03, 'H': 2.26, 'I': 5.49, 'L': 9.68, 'K': 5.19,
           'M': 2.32, 'F': 3.87, 'P': 5.02, 'S': 7.14, 'T': 5.53, 'W': 1.25,
           'Y': 2.91, 'V': 6.73}

    # Every standard amino acid must appear in every class, otherwise the
    # log-ratio below is undefined for it.
    missing = [a for a in oip
               if a not in count_H or a not in count_E or a not in count_C]
    if missing:
        raise ValueError('amino acids never observed in some class: %s' % missing)

    occur_H = {k: float('{:.3f}'.format(np.log(count_H[k] / oip[k]))) for k in oip}
    occur_E = {k: float('{:.3f}'.format(np.log(count_E[k] / oip[k]))) for k in oip}
    occur_C = {k: float('{:.3f}'.format(np.log(count_C[k] / oip[k]))) for k in oip}
    return (occur_H, occur_E, occur_C)

#-----------------------------------------------------------------------------------------------------------------------------------
# Function: Get an 8-dimensional feature vector from a residue's single-letter name.
# Input: A residue's single-letter name.
# Output: A list of the corresponding 8 attributes (values obtained from Wikipedia).
#-----------------------------------------------------------------------------------------------------------------------------------
def get_attributes(aa):
    """Return the min-max scaled 8-dim feature vector of one residue letter.

    Per-residue features: [five-class type, polarity, charge, hydropathy,
    molecular weight, occur_H, occur_E, occur_C], scaled column-wise to
    [0, 1] over the 21-row table ('J' is the all-zero Noseq padding row).

    The scaled table is built once on first call and cached, since this
    function is invoked once per residue per window position.  It reads the
    module-level occur_H/occur_E/occur_C dicts, which must be defined before
    the first call.

    Parameters:
        aa -- single-letter residue code, or 'J' for Noseq padding.

    Returns:
        1-D numpy array of 8 floats in [0, 1].

    Raises:
        KeyError -- if aa is not one of the 21 known letters.
    """
    table = getattr(get_attributes, '_table', None)
    if table is None:
        # 'J' represents Noseq, used as padding before the sliding operation.
        aalist = ['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I', 'L',
                  'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'J']
        raw = np.array([
            [5, 4, 2, 1.8, 89.094, occur_H['A'], occur_E['A'], occur_C['A']],
            [1, 2, 3, -4.5, 174.203, occur_H['R'], occur_E['R'], occur_C['R']],
            [3, 3, 2, -3.5, 132.119, occur_H['N'], occur_E['N'], occur_C['N']],
            [2, 1, 1, -3.5, 133.104, occur_H['D'], occur_E['D'], occur_C['D']],
            [4, 4, 2, 2.5, 121.154, occur_H['C'], occur_E['C'], occur_C['C']],
            [2, 1, 1, -3.5, 147.131, occur_H['E'], occur_E['E'], occur_C['E']],
            [3, 3, 2, -3.5, 146.146, occur_H['Q'], occur_E['Q'], occur_C['Q']],
            [4, 4, 2, -0.4, 75.067, occur_H['G'], occur_E['G'], occur_C['G']],
            [1, 2, 4, -3.2, 151.156, occur_H['H'], occur_E['H'], occur_C['H']],
            [5, 4, 2, 4.5, 131.175, occur_H['I'], occur_E['I'], occur_C['I']],
            [5, 4, 2, 3.8, 131.175, occur_H['L'], occur_E['L'], occur_C['L']],
            [1, 2, 3, -3.9, 146.189, occur_H['K'], occur_E['K'], occur_C['K']],
            [5, 4, 2, 1.9, 149.208, occur_H['M'], occur_E['M'], occur_C['M']],
            [5, 4, 2, 2.8, 165.192, occur_H['F'], occur_E['F'], occur_C['F']],
            [4, 4, 2, -1.6, 115.132, occur_H['P'], occur_E['P'], occur_C['P']],
            [3, 3, 2, -0.8, 105.093, occur_H['S'], occur_E['S'], occur_C['S']],
            [3, 3, 2, -0.7, 119.119, occur_H['T'], occur_E['T'], occur_C['T']],
            [5, 4, 2, -0.9, 204.228, occur_H['W'], occur_E['W'], occur_C['W']],
            [5, 3, 2, -1.3, 181.191, occur_H['Y'], occur_E['Y'], occur_C['Y']],
            [5, 4, 2, 4.2, 117.148, occur_H['V'], occur_E['V'], occur_C['V']],
            [0, 0, 0, 0, 0, 0, 0, 0],   # J (Noseq padding)
        ], dtype=float)
        # Column-wise min-max scaling to [0, 1].  Equivalent here to
        # sklearn.preprocessing.MinMaxScaler().fit_transform(raw): no column
        # is constant, so there is no zero-range division to special-case.
        lo = raw.min(axis=0)
        scaled = (raw - lo) / (raw.max(axis=0) - lo)
        table = dict(zip(aalist, scaled))
        get_attributes._table = table
    return table[aa]

#--------------------------------------------------------------------------------------------
# Function: Build sliding windows over the training or test data with a flexible window size.
# Input: The sequence list obtained from the 'get_sequence' function, the flat PSSM list, and winsize.
# Output: numpy array of sliding-window data with shape (samples, winsize, 30).
#--------------------------------------------------------------------------------------------
def buildwindow(X, pssm, winsize):
    """Build one sliding window per residue over every sequence.

    Each window row carries 30 features: 8 scaled physicochemical attributes
    (from get_attributes) plus the residue's 22-dim PSSM profile.

    Parameters:
        X       -- list of residue sequences (single-letter codes).
        pssm    -- flat list of 22-dim PSSM slices, one per residue across all
                   sequences, in the same order as the residues in X.
        winsize -- window length in residues (assumed odd, so the centre
                   residue is the one being classified).

    Returns:
        numpy array of shape (total_residues, winsize, 30).

    Raises:
        ValueError -- if the number of windows does not match len(pssm).
    """
    num_pad = int(winsize / 2)
    # Total number of residues, i.e. number of windows to allocate up front.
    samples = sum(len(line) for line in X)
    pad = 'J' * num_pad                   # 'Noseq' padding at both termini
    x1 = np.zeros((samples, winsize, 30))
    num = 0
    for line in X:
        padded = pad + line + pad
        for i in range(len(padded) - (winsize - 1)):
            for j in range(winsize):
                # First 8 dims of each window row are the residue attributes.
                x1[num][j][:8] = get_attributes(padded[i + j])
            num += 1
    if num != len(pssm):
        raise ValueError('length error')
    # Fill the 22 PSSM dims.  Window `i` is centred on flat residue `i`, so
    # the residue at window offset j (j in [-num_pad, num_pad]) is flat
    # residue i+j -- valid because padding positions are skipped below and
    # real positions of a window never cross into a neighbouring sequence.
    for i in range(num):
        for j in range(-num_pad, num_pad + 1):
            # Scaled attribute 0 is exactly 0 only for the all-zero 'J'
            # padding row, so this test skips positions outside the sequence.
            if x1[i][j + num_pad][0] != 0:
                x1[i][j + num_pad][8:] = pssm[i + j]
    return x1

#--------------------------------------------------------------------------------------------
# Function: Get one-hot encodings of the labels.
# Input: The training or test data's label list obtained from the 'get_sequence' function.
# Output: numpy array of one-hot labels with shape (samples, 3).
#--------------------------------------------------------------------------------------------
def getlabel(Y):
    """One-hot encode secondary-structure labels.

    Parameters:
        Y -- list of label strings over the alphabet H/E/C.

    Returns:
        numpy array of shape (total_residues, 3) with rows [1,0,0] for H,
        [0,1,0] for E, [0,0,1] for C; any other character leaves an all-zero
        row in place.
    """
    onehot = {'H': [1, 0, 0], 'E': [0, 1, 0], 'C': [0, 0, 1]}
    total = sum(len(labels) for labels in Y)
    y1 = np.zeros((total, 3))
    row = 0
    for labels in Y:
        for ss in labels:
            if ss in onehot:
                y1[row] = onehot[ss]
            row += 1
    return y1





# ---------------------------------------------------------------------------
# Driver: load both datasets, compute residue propensities, build sliding
# windows + one-hot labels, and save everything to a single .npz archive.
# ---------------------------------------------------------------------------
Train_path = 'D:/毕业设计/LSTM/cullpdb+profile_6133_filtered.npy.gz'
Test_path = 'D:/毕业设计/LSTM/cb513+profile_split1.npy.gz'
save_path = 'D:/毕业设计/LSTM/data.npz'
winsize = 13               # sliding-window length in residues (odd)

print('load data ...')
X_train, Y_train, pssm_train = get_sequence(Train_path, 1)
X_test, Y_test, pssm_test = get_sequence(Test_path, 0)
# Module-level occur_H/occur_E/occur_C are read by get_attributes().
occur_H, occur_E, occur_C = Cal_occur(X_train, Y_train)

print('begin sliding on dataset ...')
X_train = buildwindow(X_train, pssm_train, winsize=winsize)
Y_train = getlabel(Y_train)
print('train data pre-processing complete, window size=', winsize)

X_test = buildwindow(X_test, pssm_test, winsize=winsize)
Y_test = getlabel(Y_test)
print('test data pre-processing complete.')

print('The shape of datas show as follows:')
print('X_train:', np.shape(X_train))
print('Y_train:', np.shape(Y_train))
print('X_test:', np.shape(X_test))
print('Y_test:', np.shape(Y_test))

np.savez(save_path, trax=X_train, tray=Y_train, tex=X_test, tey=Y_test)
print('All data load and save completely')

