import sys
import numpy
from scipy.fftpack import dct
from scipy.fftpack import idct
import random
from sklearn import cross_validation
import os
from sklearn import svm
from collections import OrderedDict
from sklearn.externals import joblib
import urllib2
import shutil

# -- Command-line interface ---------------------------------------------------
# Expected call:
#   python generate_classifier.py ppi_file.txt proteins.fasta <top_gos>
#       <max_ppis_per_classifier> <uniref_file> <ignoring_threshold>
if len(sys.argv) != 7:
    print "Error: Wrong number of arguments."
    print "Please use python generate_classifier.py ppi_file.txt proteins.fasta #number_of_top_gos #max_ppis_per_classifier uniref #ignoring_threshold"
    sys.exit()

# Echo the configuration back to the user (argv entries are already strings).
print 'Interactions File:', sys.argv[1]
print 'Fasta File:', sys.argv[2]
print 'Number of TOP GOs Considered:', sys.argv[3]
print 'Max interactions per GO:', sys.argv[4]
print 'Uniref File:', sys.argv[5]
print 'GO ignoring threshold:', sys.argv[6]

# Read the raw PPI list once; each line is "<protein_1> <protein_2> ...".
f_interactions = open(sys.argv[1], "r")
interactions = f_interactions.readlines()
f_interactions.close()

# Numeric parameters parsed up front so bad input fails early.
number_of_top_gos = int(sys.argv[3])
max_ppis_per_classifier = int(sys.argv[4])
uniref = sys.argv[5]
threshold = int(sys.argv[6])

# -- FASTA Parsing --
# Maps UniProt accession -> amino-acid sequence (filled right below).
genetic_code_dictionary = {}

def read_fasta(fp):
    """Yield (header, sequence) pairs from an open FASTA file handle.

    The header is the full '>' line (leading '>' included); the sequence is
    the concatenation of all following lines up to the next header.
    """
    header = None
    chunks = []
    for raw in fp:
        raw = raw.rstrip()
        if raw.startswith(">"):
            # Emit the previous record before starting a new one.
            if header is not None:
                yield (header, ''.join(chunks))
            header, chunks = raw, []
        else:
            chunks.append(raw)
    # Emit the final record, if the file was not empty.
    if header is not None:
        yield (header, ''.join(chunks))
    
# -- Load every FASTA record into genetic_code_dictionary ---------------------
# Headers look like ">db|ACCESSION|name"; the accession (field 1) is the key.
# The first occurrence of an accession wins.
# BUG FIX: membership used "name not in dict.keys()", an O(n) list scan per
# record under Python 2, and "l1" was rebuilt on every loop iteration (and
# left undefined for an empty FASTA); test the dict directly and compute l1
# once after the loop.
with open(sys.argv[2]) as fp:
    for name, seq in read_fasta(fp):
        name = name.split("|")[1]
        if name not in genetic_code_dictionary:
            genetic_code_dictionary[name] = seq
l1 = genetic_code_dictionary.keys()


use_gos = int(sys.argv[4])

f_uniref = open(uniref,'r')

# -- Load GOs --
GO_dict = {}
GOs = ['go%3a1071','go%3a3824','go%3a4872','go%3a5085','go%3a5198','go%3a5215','go%3a5488','go%3a9055','go%3a16015','go%3a16209','go%3a16247','go%3a16530','go%3a30234','go%3a30545','go%3a31386','go%3a36370','go%3a42056','go%3a45182','go%3a45499','go%3a45735','go%3a60089','go%3a5515']
#os.mkdir('classificador')

out_uniref = open('uniref_aux.txt','w')

line = f_uniref.readline()
while (line !=''):
    aux = line.split()
    if aux[1] == 'UniRef90':
        out_uniref.write(aux[0]+ ' '+ aux[2]+"\n")
        #aux2 = aux[2].split('_')
    line = f_uniref.readline()

f_uniref.close()
out_uniref.close()
print 'uniref reference built.'
'''
for go in GOs:
    url = "http://www.uniprot.org/uniprot/?query="+go+"&force=yes&format=list"

    file_name = "uniprot-"+go+".list"
    mp3file = urllib2.urlopen(url)
    output = open(file_name,'wb')
    output.write(mp3file.read())
    output.close()    
    print go +" Download Sucessfully"
'''
for go in GOs:

    f_go = open("uniprot-"+go+".list", "r")
    prot_l = f_go.readlines()
    
    for elem in prot_l:
        GO_dict[elem.rstrip()] = go
        
    f_go.close()
    print go +" Load Sucessfully"


# Per-GO bookkeeping used while assembling the training datasets.
go_interactions = {}
GO_Datasets = {}

# -- GET Features --
contains = []          # proteins whose feature vector is already computed
protein_features = {}  # accession -> normalized 600-dim feature vector
aux = []

def substitution(amino_acids):
    """Translate an amino-acid string into a comma-separated group chain.

    Each residue is replaced by the index (as a string, "0".."6") of its
    physicochemical group; residues outside the 20 standard letters are
    silently dropped, exactly like the original if/elif ladder.
    """
    groups = {
        'A': "0", 'G': "0", 'V': "0",
        'I': "1", 'L': "1", 'F': "1", 'P': "1",
        'Y': "2", 'M': "2", 'T': "2", 'S': "2",
        'H': "3", 'N': "3", 'Q': "3", 'W': "3",
        'R': "4", 'K': "4",
        'D': "5", 'E': "5",
        'C': "6",
    }
    return ','.join(groups[aa] for aa in amino_acids if aa in groups)

def getPCChain(amino_acids):
    """Return the physicochemical group chain for *amino_acids*.

    Thin alias for substitution(), kept for readability at call sites.
    """
    return substitution(amino_acids)

def _dct_feature_vector(amino_acids):
    """Compute the normalized 600-dim DCT feature vector for one sequence.

    Pipeline: amino acids -> physicochemical group indices -> DCT ->
    truncate/zero-pad to 600 coefficients -> inverse DCT -> z-score
    normalization of the reconstructed signal.
    """
    valores = numpy.array([float(v) for v in getPCChain(amino_acids).split(",")],
                          dtype='float')
    # Keep at most the first 600 DCT coefficients, zero-padding short
    # sequences.  (The original also had a branch for len(J) > 600, but it
    # was unreachable after the truncation below.)
    J = dct(valores)[:600]
    c = numpy.zeros(600)
    c[:len(J)] += J
    reconstructed = idct(c)
    # Z-score normalization, vectorized (same elementwise arithmetic as the
    # original per-index loop).
    media = numpy.average(reconstructed)
    desvioPadrao = numpy.std(reconstructed)
    return (reconstructed - media) / desvioPadrao

# Compute each protein's feature vector exactly once.  The original code
# duplicated the full DCT pipeline for both interaction partners; both now
# share _dct_feature_vector().
for protein_interaction in interactions:
    for protein in protein_interaction.split()[:2]:
        if protein not in contains:
            protein_features[protein] = _dct_feature_vector(
                genetic_code_dictionary[protein])
            contains.append(protein)
    

GO_contains_proteins = {}

# Group the positive interactions (label "1") by GO-pair key.  When both
# proteins carry a GO annotation the key is the lexicographically smaller of
# the two concatenation orders, making it order-independent; all other pairs
# are pooled under "no_go".
# BUG FIX / cleanup: the original "if key < key2: key = key" branch was a
# no-op, and the four copy-pasted insert blocks are replaced by setdefault().
for protein_interaction in interactions:
    protein_interaction = protein_interaction.split()

    protein_1 = protein_interaction[0]
    protein_2 = protein_interaction[1]

    if protein_1 in GO_dict and protein_2 in GO_dict:
        key = min(GO_dict[protein_1] + GO_dict[protein_2],
                  GO_dict[protein_2] + GO_dict[protein_1])
    else:
        key = "no_go"

    GO_Datasets.setdefault(key, []).append(protein_1 + " " + protein_2 + " " + str(1))

    proteins_of_key = GO_contains_proteins.setdefault(key, [])
    proteins_of_key.append(protein_1)
    proteins_of_key.append(protein_2)

print ("Ignoring GOs:")
f=open('ignore_gos','w')
# threshold nao inter
for k in sorted(GO_Datasets, key=lambda k: len(GO_Datasets[k])):
    if k!="no_go":
        if len(GO_Datasets[k]) < threshold:
            f.write(k+" ")
            print k," interactions: ",len(GO_Datasets[k])
f.close()

print ("Chosen GOs:")
# Ordena
new_dict = {}   
for k in sorted(GO_Datasets, key=lambda k: len(GO_Datasets[k]), reverse=True)[:number_of_top_gos]:
    new_dict[k] = GO_Datasets[k]
    print k, len(GO_Datasets[k])
if not ("no_go" in new_dict):
    new_dict["no_go"] = GO_Datasets["no_go"]
GO_Datasets = new_dict

# Cap each dataset at half the per-classifier budget; the other half is
# filled with negative samples in the training loop below.
# BUG FIX: use explicit floor division -- "/" on ints floors only under
# Python 2, so "//" keeps the slice index an int everywhere.  The half
# budget is also hoisted out of the loop.
half_budget = max_ppis_per_classifier // 2
for k in GO_Datasets.keys():
    if len(GO_Datasets[k]) > half_budget:
        GO_Datasets[k] = GO_Datasets[k][:half_budget]
    # Deduplicate the protein pool while preserving first-seen order.
    GO_contains_proteins[k] = list(OrderedDict.fromkeys(GO_contains_proteins[k]))
    
# -- Per-GO classifier training ----------------------------------------------
# For every surviving GO key: balance the dataset with random negatives,
# build the design matrix, cross-validate, fit, and persist the SVM.
for k in sorted(GO_Datasets.keys()):
        
        # Negative sampling: append one random protein pair (label 0) per
        # positive.  range() is evaluated once up front, so the appends below
        # do not extend this loop.  NOTE(review): a sampled pair may coincide
        # with a true interaction; no collision check is made -- confirm this
        # is acceptable.
        for i in range(0,len(GO_Datasets[k])):
            prot1 =  GO_contains_proteins[k][random.randint(0,(len(GO_contains_proteins[k]))-1)]
            prot2 =  GO_contains_proteins[k][random.randint(0,(len(GO_contains_proteins[k]))-1)]
            GO_Datasets[k].append(prot1+" "+prot2+" "+str(0))
            
        # Design matrix: one row per pair, the concatenation of the two
        # proteins' 600-dim feature vectors (1200 columns); output holds the
        # 0/1 labels.
        index = 0;
        dataset = numpy.zeros((len(GO_Datasets[k]),1200))
        output = numpy.zeros(len(GO_Datasets[k])) 
        
        for protein_interaction in GO_Datasets[k]:     
            protein_interaction = protein_interaction.split()
        
            protein_1 = protein_interaction[0];
            protein_2 = protein_interaction[1];
            
            y = protein_interaction[2];
            
            feat1 = protein_features[protein_1];
            feat2 = protein_features[protein_2];
        
            interaction = numpy.concatenate((feat1, feat2))
        
            dataset[index] = interaction
            output[index] = y
        
            index+=1
            #print index;
            
        xdata = dataset;
        ydata = output;
                
        size = len(ydata)
           
        # Shuffle rows and labels with the same random permutation so they
        # stay aligned.
        allIDX = numpy.arange(size)
        numpy.random.shuffle(allIDX);
           
        xdata= xdata[allIDX];
        ydata= ydata[allIDX];
        
        # Fixed hyper-parameters picked from the usual grid-search ranges:
        # c = C_range[3] = 10.0, gamm = gamma_range[2] = 0.001.
        C_range = 10.0 ** numpy.arange(-2, 9)
        gamma_range = 10.0 ** numpy.arange(-5, 4)
        c = C_range[3]
        gamm = gamma_range[2]
        
        print str(k)+ " Interactions: "+str(len(GO_Datasets[k]))
        # Report 3-fold cross-validation accuracy, then refit on the full
        # dataset and persist the model as "<key>_classifier".
        classifier2 = svm.SVC(C = c,gamma = gamm, kernel='rbf', probability = True)
        scores = cross_validation.cross_val_score(classifier2, xdata, ydata, cv=3)
        print "Accuracy: %0.3f (+/- %0.3f)" % (scores.mean(), scores.std() * 2)
        classifier2.fit(xdata, ydata)
        filename = str(k)+"_classifier"
        joblib.dump(classifier2, filename, compress=9)
    