package helper
import nci60.administration.*
import nci60.data.protein.Protein
import nci60.data.protein.SourceOrganism
import nci60.data.maxquant.*
import nci60.data.maxquant.proteingroup.*
import nci60.data.maxquant.peptide.*
import nci60.data.maxquant.msms.*
import nci60.data.connected.LeadingProtein
import nci60.query.Suggestion

/**
 * GRAILS <a href="http://grails.org/doc/latest/guide/single.html#services" target="_blank">Services</a> 
 * This service contains methods/closures, which perform tasks for uploading data to the database. 
 *
 * @author <a href="mailto:Florian.J.Auer@googlemail.com">Florian J. Auer</a>
*/
class UploadService {

    /**
     * Gets the column names of a MaxQuant output file.<p />
     * This is basically the first line of the file. 
     * 
     * @param file  file to read from
     * @return  List of the column names (ordered)
     */
    List getColumnNames(File file) {
        // The header row of a MaxQuant output file is its first line,
        // tab-separated. Guard against an empty file: readLine() returns
        // null there, and calling split() on it would throw an NPE.
        List columns = []
        file.withReader { reader ->
            String header = reader.readLine()
            if (header != null) {
                columns = header.split('\t') as List
            }
        }
        return columns
    }
    
    
    /**
     * Creates/updates an organism<p />
     * The short and full name of the organism is updated,
     * or the organism is created, if none with the given id exists.
     * 
     * @param id        id of the organism
     * @param shortName short name of the organism
     * @param fullName  full name of the organism
     * @see nci60.data.protein.SourceOrganism
     */
    void updateOrganism(int id, String shortName, String fullName){
        // Upsert: update the names of an existing organism, or create
        // a brand-new one when no organism with this id exists yet.
        def organism = SourceOrganism.findById(id)
        if (organism) {
            organism.name = shortName
            organism.fullName = fullName
        } else {
            organism = new SourceOrganism(name: shortName, fullName: fullName)
        }
        // Persist immediately; dump validation errors on failure.
        if (!organism.save(flush: true)) {
            organism.errors.each { error -> println error }
        }
    }
    
    /**
     * Finds/creates an organism<p />
     * Returns an organism searched by its id.
     * If none matches, one is created.
     * If short and full name are given in params, the names are updated
     * 
     * @param params    Map containing at least "id". Additionally it can contain "shortName" for the short name and "fullName" for the full name
     * @return          Found/updated/created organism
     * @see nci60.data.protein.SourceOrganism
     */
    SourceOrganism getOrganism(Map params){
        // Resolve the organism: look it up by id when one is supplied,
        // otherwise (or when the lookup misses) create a fresh record.
        def organism = null

        if (params.containsKey('id')) {
            println 'search organism by id'
            organism = SourceOrganism.findById(params.id)
        }

        if (organism) {
            // Existing organism: overwrite only the names that were supplied.
            if (params?.shortName != null) {
                organism.name = params.shortName
            }
            if (params?.fullName != null) {
                organism.fullName = params.fullName
            }
        } else {
            println 'generate new organism'
            organism = new SourceOrganism(name: params.shortName, fullName: params.fullName)
        }

        // Persist immediately; dump validation errors on failure.
        if (!organism.save(flush: true)) {
            organism.errors.each { err -> println err }
        }

        println 'use organism:'
        organism.print()
        println ''

        return organism
    }
    
    /**
     * Uploads all protein information from IPI into the corresponding classes from {@link nci60.data.protein}<p />
     * Additionally a file is created containing all Uniprot id necessary for extending the database {@link nci60.data.protein.uniprot}
     * 
     * @param sourceFile        IPI dat file, which contains the protein information
     * @param organism          organism to which this protein information belongs to
     * @param tempFolder        temporary folder in which the generated sql files are written
     * @param uniprotOutFile    file containing the Uniprot ids
     * @param upload            if true, the sql files are automatically uploaded to the database after writing is completed
     * @see nci60.data.protein.Protein
     */
    void uploadProteins(File sourceFile, SourceOrganism organism, String tempFolder, File uniprotOutFile,  boolean upload) {
        String separator = File.separator
        
        // Collects every Uniprot AC encountered, one per line.
        // NOTE(review): not closed if an exception is thrown before the
        // normal close() below — consider try-with-resources/withWriter.
        def uniprotOut = new BufferedWriter(new FileWriter(uniprotOutFile))
        
        // Column lists for the generated SQL files; order must match the
        // value lists pushed via `<<` further down.
        def proteinsC = ['id', 'description', 'id_version', 'main_id', 'organism_id', 'sequence', 'weight']
        def chromosomeC = ['version', 'chromosome', 'end', 'organism_id', 'sourceipi_id', 'start', 'strand']
        def altNameC = ['version', 'alt_name', 'sourceipi_id']
        def geneC = ['version', 'name', 'protein_id', 'is_main_name']
        def referenceC = ['id', 'version', 'dbname', 'class', 'ac_number', 'gene_id', 'peptide_id', 'gi_number', 'status', 'cdna', 'cluster']
        def sourceC = ['protein_source_references_id', 'protein_reference_id']
        def refidC = ['protein_reference_ids_id', 'protein_reference_id']
        
        // One SQLUploader per target table; each writes an .sql file into
        // tempFolder and can later bulk-upload it (see `upload` flag).
        def sql = [:]
        sql.put('proteins', new SQLUploader(new File(''+ tempFolder + separator + 'proteins.sql'), 'proteins', proteinsC))
        sql.put('chromosome', new SQLUploader(new File(''+ tempFolder + separator + 'protein_chromosome.sql'), 'protein_chromosome', chromosomeC))
        sql.put('altName', new SQLUploader(new File(''+ tempFolder + separator + 'protein_alt_name.sql'), 'protein_alt_name', altNameC))
        sql.put('gene', new SQLUploader(new File(''+ tempFolder + separator + 'gene_name.sql'), 'gene_name', geneC))
        sql.put('references', new SQLUploader(new File(''+ tempFolder + separator + 'protein_reference.sql'), 'protein_reference', referenceC))
        sql.put('sources', new SQLUploader(new File(''+ tempFolder + separator + 'ipi_sourcereferences.sql'), 'ipi_sourcereferences', sourceC))
        sql.put('refs', new SQLUploader(new File(''+ tempFolder + separator + 'ipi_referenceids.sql'), 'ipi_referenceids', refidC))
        
        // Continue primary keys after whatever is already in the database
        // (1 when the tables are empty).
        def maxId = Protein.executeQuery('select max(id) from Protein')[0]
        def id = (maxId==null)?1:maxId+1
        def startId = id
        
        def maxRefId = Protein.executeQuery('select max(id) from ProteinReference')[0]
        def refId = (maxRefId==null)?1:maxRefId+1
        
        // Per-record parser state, reset at every '//' record terminator.
        boolean readSequence = false
        String mainId
        Integer version
        String description = ''
        String sequence = ''
        Integer weight
        def sourceReference =[]
        def altNames=[]
        def chromosomes = []
        def genes = []
        def referenceIds = []

        println 'Reading file: '+sourceFile.absolutePath

        try{
            BufferedReader file = new BufferedReader(new InputStreamReader(new FileInputStream(sourceFile)));
            String line;
            while ((line = file.readLine()) != null) {
                // One matcher per known IPI line type. NOTE(review): these
                // patterns are recompiled for every input line; hoisting
                // them to static final Patterns would speed up large files.
                def matchID = (line =~ /ID\s+([A-Z0-9]+)\.(\d+)\s+(.+)/)
                def matchAltNames = (line =~ /AC\s+.+/)
                def matchChromosome = (line =~ /CC\s+-!-\sGENE_LOCATION: Chr\. (.+):(\d+)-([A-Z0-9]+):(-?\d)\./)
                def matchUniprotSwissProt = (line =~ /DR   UniProtKB\/Swiss-Prot; ([A-Z0-9]+).+/)
                def matchUniprotTrembl = (line =~ /DR   UniProtKB\/TrEMBL; ([A-Z0-9]+).+/)
                def matchEnsembls = (line =~ /DR   ENSEMBL; ([A-Z0-9]+); ([A-Z0-9]+);.+/)
                def matchEnsemblHavana = (line =~ /DR   ENSEMBL_HAVANA; ([A-Z0-9]+); ([A-Z0-9]+);.+/)
                def matchHInvDB = (line =~ /DR   H-InvDB; ([A-Z0-9]+); ([A-Z0-9]+).+/)
                def matchRefseq = (line =~ /DR   REFSEQ_([A-Z]+); (.+); (GI:\d+);.+/)
                def matchVega = (line =~ /DR   Vega; ([A-Z0-9]+); ([A-Z0-9]+).+/)
                // A master-entry marker ("; M.") decides whether a DR line
                // goes into sourceReference or referenceIds below.
                def matchMasterEntry = (line =~ /DR   (.+); M\./)

                // '//' on a line of its own terminates the current record.
                def matchEnd = (line =~ /\/\//)
                
                
                
                if(matchID.matches()) {
                    // ID line: accession and its version number.
                    mainId = matchID[0][1]
                    version = matchID[0][2].toInteger()
                }else if(matchAltNames.matches()) {
                    // AC line: semicolon-separated alternative accessions.
                    line[5..-1].replaceAll(" ","").split(";").each{ element ->
                            altNames.add(element)
                    }
                }else if(matchEnd.matches()){
                    // Record complete: flush the accumulated state into the
                    // SQL writers, then reset for the next record.
                    sql.proteins << [id, '\''+description.replace('\'','\\\'')+'\'', version, '\''+mainId+'\'', organism.id, '\''+sequence+'\'', weight]
                    chromosomes.each{sql.chromosome << [1, '\''+it.chromosome+'\'', it.end, organism.id, id, it.start, it.strand]}
                    altNames.each{sql.altName << [1, '\''+it+'\'', id]}
                    // Shortest gene name is treated as the main name.
                    genes.sort{it.size()}.eachWithIndex {name,index->
                        def isMainId = (index==0)?1:0
                        sql.gene << [1, '\''+name+'\'', id, isMainId]
                    }

                    referenceIds.each {
                        sql.references << [refId, 1, '\''+it.dbname+'\'', '\''+it.clss+'\'', it.ac_number, it.gene_id, it.peptide_id, it.gi_number, it.status, it.cdna, it.cluster]
                        sql.refs << [id, refId]
                        refId++
                    }
                    sourceReference.each {
                        sql.references << [refId, 1, '\''+it.dbname+'\'', '\''+it.clss+'\'', it.ac_number, it.gene_id, it.peptide_id, it.gi_number, it.status, it.cdna, it.cluster]
                        sql.sources << [id, refId]
                        refId++
                    }

                    mainId = ""
                    version = 1
                    description = ''
                    sequence = ''
                    weight = 0
                    sourceReference = []
                    altNames=[]
                    chromosomes = []
                    genes = []
                    referenceIds = []

                    readSequence = false


                    id++
                }else if(line[0..1]=="DE") {
                    // DE line: description fragment (trailing char dropped).
                    description += line[5..-2]
                }else if(matchChromosome.matches()) {
                    chromosomes.add([chromosome:matchChromosome[0][1], start:matchChromosome[0][2],end:matchChromosome[0][3],strand:matchChromosome[0][4]])
                }else if(line.startsWith('DR   HGNC')){
                    genes.add(line.split('; ')[2])
                }else if(matchUniprotSwissProt.matches()) {
                    // Each DR handler builds a reference row template `t` and
                    // routes it by master-entry status; Swiss-Prot ACs are
                    // additionally appended to the Uniprot id output file.
                    def t = [version:1, dbname:'UniProt/Swiss-Prot', 'clss':'nci60.data.protein.ProteinUniprotSwissProt', ac_number:'\''+matchUniprotSwissProt[0][1]+'\'', gene_id:'null', peptide_id:'null', gi_number:'null', status:'null', cdna:'null', cluster:'null']
                    if(matchMasterEntry.matches()) {
                        sourceReference.add(t)
                    }else{
                        referenceIds.add(t)
                    }
                    uniprotOut.write(matchUniprotSwissProt[0][1]+'\n')
                }else if(matchUniprotTrembl.matches()) {
                    def t = [version:1, dbname:'UniProt/TrEMBL', 'clss':'nci60.data.protein.ProteinUniprotTrembl', ac_number:'\''+matchUniprotTrembl[0][1]+'\'', gene_id:'null', peptide_id:'null', gi_number:'null', status:'null', cdna:'null', cluster:'null']
                    if(matchMasterEntry.matches()) {
                        sourceReference.add(t)
                    }else{
                        referenceIds.add(t)
                    }
                }else if(matchEnsembls.matches()) {
                    def t = [version:1, dbname:'ENSEMBL', 'clss':'nci60.data.protein.ProteinEnsemble', ac_number:'null', gene_id:'\''+matchEnsembls[0][2]+'\'', peptide_id:'\''+matchEnsembls[0][1]+'\'', gi_number:'null', status:'null', cdna:'null', cluster:'null']
                    if(matchMasterEntry.matches()) {
                        sourceReference.add(t)
                    }else{
                        referenceIds.add(t)
                    }
                }else if(matchEnsemblHavana.matches()) {
                    def t = [version:1, dbname:'ENSEMBL_HAVANA', 'clss':'nci60.data.protein.ProteinEnsembleHavana', ac_number:'null', gene_id:'\''+matchEnsemblHavana[0][2]+'\'', peptide_id:'\''+matchEnsemblHavana[0][1]+'\'', gi_number:'null', status:'null', cdna:'null', cluster:'null']
                    if(matchMasterEntry.matches()) {
                        sourceReference.add(t)
                    }else{
                        referenceIds.add(t)
                    }
                }else if(matchHInvDB.matches()) {
                    def t = [version:1, dbname:'H-InvDB', 'clss':'nci60.data.protein.ProteinHInvDB', ac_number:'null', gene_id:'null', peptide_id:'null', gi_number:'null', status:'null', cdna:'\''+matchHInvDB[0][1]+'\'', cluster:'\''+matchHInvDB[0][2]+'\'']
                    if(matchMasterEntry.matches()) {
                        sourceReference.add(t)
                    }else{
                        referenceIds.add(t)
                    }
                }else if(matchRefseq.matches()) {
                    def t = [version:1, dbname:'REFSEQ', 'clss':'nci60.data.protein.ProteinRefseq', ac_number:'\''+matchRefseq[0][2]+'\'', gene_id:'null', peptide_id:'null', gi_number:'\''+matchRefseq[0][3]+'\'', status:'\''+matchRefseq[0][1]+'\'', cdna:'null', cluster:'null']
                    if(matchMasterEntry.matches()) {
                        sourceReference.add(t)
                    }else{
                        referenceIds.add(t)
                    }
                }else if(matchVega.matches()) {
                    def t = [version:1, dbname:'Vega', 'clss':'nci60.data.protein.ProteinVega', ac_number:'null', gene_id:'\''+matchVega[0][2]+'\'', peptide_id:'\''+matchVega[0][1]+'\'', gi_number:'null', status:'null', cdna:'null', cluster:'null']
                    if(matchMasterEntry.matches()) {
                        sourceReference.add(t)
                    }else{
                        referenceIds.add(t)
                    }
                }else if(line[0..1] == 'SQ') {
                    // SQ line starts the sequence section; it also carries
                    // the molecular weight ("MW" field after the first ';').
                    readSequence = true
                    weight = line.split(';')[1].replaceAll(' ', '').replaceAll('MW','').toInteger()
                }else if(readSequence) {
                    // Any line after SQ (until '//') is sequence data.
                    sequence += line.replaceAll(' ','')
                }
            }
            file.close()
            uniprotOut.close()
            
            // Flush/close all generated SQL files.
            sql.proteins.finish()
            sql.chromosome.finish()
            sql.altName.finish()
            sql.gene.finish()
            sql.references.finish()
            sql.sources.finish()
            sql.refs.finish()
            
            // Manifest describing the order in which the SQL files must be
            // uploaded (to satisfy foreign-key dependencies — TODO confirm).
            new File(''+ tempFolder + separator + 'upload_order_proteins_ipi.txt').withWriter { out ->
                out.writeLine('proteins.sql')
                out.writeLine('protein_chromosome.sql')
                out.writeLine('protein_alt_name.sql')
                out.writeLine('gene_name.sql')
                out.writeLine('protein_reference.sql')
                out.writeLine('ipi_referenceids.sql')
                out.writeLine('ipi_sourcereferences.sql')
            }
            
            // NOTE(review): `id` is incremented after each completed record,
            // so the number of proteins read is (id-startId); the '+1' here
            // over-reports the count by one.
            println '\nfinished with reading dat file: '+(id-startId+1)+' Proteins found'
            
            if(upload){
                println 'Uploading generated sql files...'
                print 'uploading proteins (proteins.sql)'
                sql.proteins.upload()
                println '\tdone!'
                
                print 'uploading chromosomes (protein_chromosome.sql)'
                sql.chromosome.upload()
                println '\tdone!'
                
                print 'uploading alternative names (protein_alt_name.sql)'
                sql.altName.upload()
                println '\tdone!'
                
                print 'uploading genes names (gene_name.sql)'
                sql.gene.upload()
                println '\tdone!'
                
                print 'uploading references (protein_reference.sql)'
                sql.references.upload()
                println '\tdone!'
                
                print 'uploading references (ipi_referenceids.sql)'
                sql.refs.upload()
                println '\tdone!'
                
                print 'uploading source references (ipi_sourcereferences.sql)'
                sql.sources.upload()
                println '\tdone!'
            }     
                
        } catch (FileNotFoundException e) {
                System.err.println("!: file not found: "+sourceFile.absolutePath)
                e.printStackTrace(System.out)
        } catch (IOException e) {
                System.err.println("!: could not read file: "+sourceFile.absolutePath)
                e.printStackTrace(System.out)
        }
        
    }
    

    /**
     * Uploads all protein information from Uniprot into the corresponding classes from {@link nci60.data.protein.uniprot}<p />
     * 
     * @param file              Uniprot dat file, which contains the protein information
     * @param tempFolder        temporary folder in which the generated sql files are written
     * @param upload            if true, the sql files are automatically uploaded to the database after writing is completed
     * @see nci60.data.protein.uniprot.Uniprot
     */
    void uploadUniprot(File file, String tempFolder, boolean upload) {
        String separator = File.separator
        
        println 'prepare for uploading uniprot information...'
        println 'getting protein ids and uniprot AC from database'
        def acToPid = [:]
        Protein.executeQuery('select p.id, r.acNumber from Protein p join p.sourceReferences r where r.DBname=\'UniProt/Swiss-Prot\'').each(){
            def ac = it[1]
            def id = it[0]
            if(!acToPid.containsKey(ac)){
                acToPid.put(ac, [])
            }
            acToPid[ac].add(id)
        }

        println 'reading Uniprot File: '+file.absolutePath

        def uniprotC = ['id', 'identifier',  'reviewed', 'full_name', 'flags', 'protein_id']
        def uniprotAltNameC = ['id', 'full_name']
        def uniprotShortNameC = ['id', 'name']
        def uniprotECC = ['id', 'number']
        def uniprotContainC = ['id', 'full_name', 'parent_id']
        def uniprotIncludeC = ['id', 'full_name', 'parent_id']
        def uniprotGOC = ['id', 'go_class_id', 'identifier', 'description', 'evidence_id', 'source', 'protein_id']
        def uniprotGNC = ['id', 'name', 'protein_id']
        def uniprotGNSynC = ['id', 'name', 'gene_id']

        def uniprotHasAltNamesC = ['uniprot_alt_names_id', 'uniprot_alt_name_id']
        def uniprotHasShortNamesC = ['uniprot_short_names_id', 'uniprot_short_name_id']
        def uniprotHasECsC = ['uniprot_ecs_id', 'uniprotec_id']
        def uniprotAltNameHasShortNamesC = ['uniprot_alt_name_short_names_id', 'uniprot_short_name_id']
        def uniprotAltNameHasECsC = ['uniprot_alt_name_ecs_id', 'uniprotec_id']
        def uniprotContainHasAltNamesC = ['uniprot_contain_alt_names_id', 'uniprot_alt_name_id']
        def uniprotContainHasShortNamesC = ['uniprot_contain_short_names_id', 'uniprot_short_name_id']
        def uniprotContainHasECsC = ['uniprot_contain_ecs_id', 'uniprotec_id']
        def uniprotIncludeHasAltNamesC = ['uniprot_include_alt_names_id', 'uniprot_alt_name_id']
        def uniprotIncludeHasShortNamesC = ['uniprot_include_short_names_id', 'uniprot_short_name_id']
        def uniprotIncludeHasECsC = ['uniprot_include_ecs_id', 'uniprotec_id']

        def sql = [:]
        sql.put('uniprot', new SQLUploader(new File(''+tempFolder + separator + 'uniprot.sql'), 'uniprot', uniprotC))
        sql.put('uniprotAltName', new SQLUploader(new File(''+tempFolder + separator + 'uniprotAltName.sql'), 'uniprot_alt_name', uniprotAltNameC))
        sql.put('uniprotShortName', new SQLUploader(new File(''+tempFolder + separator + 'uniprotShortName.sql'), 'uniprot_short_name', uniprotShortNameC))
        sql.put('uniprotEC', new SQLUploader(new File(''+tempFolder + separator + 'uniprotEC.sql'), 'uniprotec', uniprotECC))
        sql.put('uniprotContain', new SQLUploader(new File(''+tempFolder + separator + 'uniprotContain.sql'), 'uniprot_contain', uniprotContainC))
        sql.put('uniprotInclude', new SQLUploader(new File(''+tempFolder + separator + 'uniprotInclude.sql'), 'uniprot_include', uniprotIncludeC))
        sql.put('uniprotGO', new SQLUploader(new File(''+tempFolder + separator + 'uniprotGO.sql'), 'uniprotgo', uniprotGOC))
        sql.put('uniprotGN', new SQLUploader(new File(''+tempFolder + separator + 'uniprotGN.sql'), 'uniprot_gene', uniprotGNC))
        sql.put('uniprotGNSyn', new SQLUploader(new File(''+tempFolder + separator + 'uniprotGNSyn.sql'), 'uniprot_gene_synonym', uniprotGNSynC))

        sql.put('uniprotHasAltNames', new SQLUploader(new File(''+tempFolder + separator + 'uniprotHasAltNames.sql'), 'uniprot_has_altnames', uniprotHasAltNamesC))
        sql.put('uniprotHasShortNames', new SQLUploader(new File(''+tempFolder + separator + 'uniprotHasShortNames.sql'), 'uniprot_has_shortnames', uniprotHasShortNamesC))
        sql.put('uniprotHasECs', new SQLUploader(new File(''+tempFolder + separator + 'uniprotHasECs.sql'), 'uniprot_has_ecs', uniprotHasECsC))
        sql.put('uniprotAltNameHasShortNames', new SQLUploader(new File(''+tempFolder + separator + 'uniprotAltNameHasShortNames.sql'), 'uniprotAltName_has_shortnames', uniprotAltNameHasShortNamesC))
        sql.put('uniprotAltNameHasECs', new SQLUploader(new File(''+tempFolder + separator + 'uniprotAltNameHasECs.sql'), 'uniprotAltName_has_ecs', uniprotAltNameHasECsC))
        sql.put('uniprotContainHasAltNames', new SQLUploader(new File(''+tempFolder + separator + 'uniprotContainHasAltNames.sql'), 'uniprotContain_has_altnames', uniprotContainHasAltNamesC))
        sql.put('uniprotContainHasShortNames', new SQLUploader(new File(''+tempFolder + separator + 'uniprotContainHasShortNames.sql'), 'uniprotContain_has_shortnames', uniprotContainHasShortNamesC))
        sql.put('uniprotContainHasECs', new SQLUploader(new File(''+tempFolder + separator + 'uniprotContainHasECs.sql'), 'uniprotContain_has_ecs', uniprotContainHasECsC))
        sql.put('uniprotIncludeHasAltNames', new SQLUploader(new File(''+tempFolder + separator + 'uniprotIncludeHasAltNames.sql'), 'uniprotInclude_has_altnames', uniprotIncludeHasAltNamesC))
        sql.put('uniprotIncludeHasShortNames', new SQLUploader(new File(''+tempFolder + separator + 'uniprotIncludeHasShortNames.sql'), 'uniprotInclude_has_shortnames', uniprotIncludeHasShortNamesC))
        sql.put('uniprotIncludeHasECs', new SQLUploader(new File(''+tempFolder + separator + 'uniprotIncludeHasECs.sql'), 'uniprotInclude_has_ecs', uniprotIncludeHasECsC))

        def uniprotID = 1
        def uniprotAltNameID = 1
        def uniprotShortNameID = 1
        def uniprotECID = 1
        def uniprotContainID = 1
        def uniprotIncludeID = 1
        def uniprotGOID = 1
        def uniprotGNID = 1
        def uniprotGNSynID = 1


        def goClassesToId = [:]
        UniprotGOClass.findAll().each(){
            goClassesToId.put(it.letter.toString(), it.id)
        }

        def goEvidenceToId = [:]
        UniprotGOEvidence.findAll().each(){
            goEvidenceToId.put(it.letters.toString(), it.id)
        }


        boolean reviewed = false
        def ac = []
        def prot = [:]
        def curEle = [:]
        def curProt = prot
        def an = []
        curProt.put('AltName', an)
        def shor = []
        curEle.put('Short', an)
        def ec = []
        curEle.put('EC', ec)
        def con = []
        curProt.put('Contains', con)
        def inc = []
        curProt.put('Includes', inc)
        def go = []
        curProt.put('GO', go)
        def curGene = [:]
        def gene = []
        curProt.put('GN', gene)
        def skip = false
        try {
                BufferedReader ufile = new BufferedReader(new InputStreamReader(new FileInputStream(file.absolutePath)));
                String line;
                int counter = 1
                int lineNr = 0
                while ((line = ufile.readLine()) != null) {
                        lineNr++
                        if(line.startsWith('ID')){
                                reviewed = (line.contains('Reviewed;')) ? true : false
                        }else if(line.startsWith('AC')){
                                def tm = line[2..-1].replaceAll(' ','').split(';')
                                tm.each(){
                                    ac.add(it)
                                }
                        }else if(line.startsWith('GN')){
                            (line[5..-1]+' ').split('; ').each(){gn->
                                if(gn.startsWith('Name=')){
                                    curGene = ['name':gn[5..-1], 'Synonym':[]]
                                    prot['GN'].add(curGene)
                                }else if(gn.startsWith('Synonyms=')){
                                    gn[9..-1].split(', ').each(){
                                        curGene['Synonym'].add(it)
                                    }
                                }
                            }
                        }else if(line.startsWith('DE')){

                                if(line.contains('Contains:')){
                                        curProt = [:]
                                        prot['Contains'].add(curProt)
                                        def ant = []
                                        curProt.put('AltName', ant)
                                }else if(line.contains('Includes:')){
                                        curProt = [:]
                                        prot['Includes'].add(curProt)
                                        def ant = []
                                        curProt.put('AltName', ant)
                                }

                                if(line.contains('RecName: ')){
                                        curEle = [:]
                                        curProt.put('ProtName',curEle)
                                        shor = []
                                        curEle.put('Short',shor)
                                        ec = []
                                        curEle.put('EC', ec)
                                        skip = false
                                }else if(line.contains('AltName: ')){
                                        curEle = [:]
                                        curProt['AltName'].add(curEle)
                                        shor = []
                                        curEle.put('Short',shor)
                                        ec = []
                                        curEle.put('EC', ec)
                                        skip = false
                                }else if(line.contains('SubName: ')){
                                        skip = true
                                }else if(line.contains('Flags: ')){
                                        def tmp = line.split('Flags: ')[1]
                                        prot.put('Flags', tmp[0..-2])
                                }
                                if(! skip){
                                        if(line.contains('Full=')){
                                                def tmp = line.split('Full=')[1]
                                                curEle.put('Full', tmp[0..-2])
                                        }else if(line.contains('Short=')){
                                                def tmp = line.split('Short=')[1]
                                                curEle['Short'].add(tmp[0..-2])
                                        }else if(line.contains('EC=')){
                                                def tmp = line.split('EC=')[1]
                                                curEle['EC'].add(tmp[0..-2])
                                        }else if(line.contains('Allergen=')){
                                                def tmp = line.split('Allergen=')[1]
                                                curEle.put('Allergen', tmp[0..-2])
                                        }else if(line.contains('Biotech=')){
                                                def tmp = line.split('Biotech=')[1]
                                                curEle.put('Biotech', tmp[0..-2])
                                        }else if(line.contains('CD_antigen=')){
                                                def tmp = line.split('CD_antigen=')[1]
                                                curEle.put('CD_antigen', tmp[0..-2])
                                        }else if(line.contains('INN=')){
                                                def tmp = line.split('INN=')[1]
                                                curEle.put('INN', tmp[0..-2])
                                        }
                                }
                        }else if(line.startsWith('DR   GO; ')){
                            def goline = line[9..-2].split('; ')
                            def gotmp = [:]
                            gotmp.put('term',goline[0])
                            gotmp.put('process',goline[1][0])
                            gotmp.put('description',goline[1][2..-1])
                            def tttt = goline[2].split(':')
                            gotmp.put('evidence',tttt[0])
                            gotmp.put('class',tttt[1])
                            prot['GO'].add(gotmp)
                        }else if(line.startsWith('//')){
                            if(reviewed){
                                ac.each(){acNumber->
                                    if(acToPid.containsKey(acNumber)){
                                        acToPid[acNumber].each(){ipi->
                                            def reviewedTemp = (reviewed) ? 1 : 0
                                            def flags = (prot.containsKey('Flags')) ? '\"'+prot['Flags']+'\"' : 'null'
                                            def u1 = [uniprotID, '\"'+acNumber+'\"', reviewedTemp, '\"'+prot['ProtName']['Full']+'\"', flags, ipi]
                                            sql['uniprot'] << u1
                                            prot['ProtName']['Short'].each(){shortName->
                                                sql['uniprotShortName'] << [uniprotShortNameID, '\"'+shortName+'\"']
                                                sql['uniprotHasShortNames'] << [uniprotID, uniprotShortNameID]
                                                uniprotShortNameID++
                                            }
                                            prot['ProtName']['EC'].each(){ecNumber->
                                                sql['uniprotEC'] << [uniprotECID, '\"'+ecNumber+'\"']
                                                sql['uniprotHasECs'] << [uniprotID, uniprotECID]
                                                uniprotECID++
                                            }

                                            prot['GN'].each(){g->
                                                sql['uniprotGN'] << [uniprotGNID, '\"'+g['name']+'\"', uniprotID]
                                                g['Synonym'].each(){
                                                    sql['uniprotGNSyn'] << [uniprotGNSynID, '\"'+it+'\"', uniprotGNID]
                                                    uniprotGNSynID++
                                                }
                                                uniprotGNID++
                                            }
                                            prot['AltName'].each(){altName->
                                                if(altName['Full']!=null){
                                                    sql['uniprotAltName'] << [uniprotAltNameID, '\"'+altName['Full']+'\"']
                                                    sql['uniprotHasAltNames'] << [uniprotID, uniprotAltNameID]
                                                    altName['Short'].each(){shortName->
                                                        sql['uniprotShortName'] << [uniprotShortNameID, '\"'+shortName+'\"']
                                                        sql['uniprotAltNameHasShortNames'] << [uniprotAltNameID, uniprotShortNameID]
                                                        uniprotShortNameID++
                                                    }
                                                    altName['EC'].each(){ecNumber->
                                                        sql['uniprotEC'] << [uniprotECID, '\"'+ecNumber+'\"']
                                                        sql['uniprotAltNameHasECs'] << [uniprotAltNameID, uniprotECID]
                                                        uniprotECID++
                                                    }
                                                    uniprotAltNameID++
                                                }
                                            }
                                            prot['Contains'].each(){cont->
                                                sql['uniprotContain'] << [uniprotContainID, '\"'+cont['ProtName']['Full']+'\"', uniprotID]
                                                cont['Short'].each(){shortName->
                                                    sql['uniprotShortName'] << [uniprotShortNameID, '\"'+shortName+'\"']
                                                    sql['uniprotContainHasShortNames'] << [uniprotContainID, uniprotShortNameID]
                                                    uniprotShortNameID++
                                                }
                                                cont['EC'].each(){ecNumber->
                                                    sql['uniprotEC'] << [uniprotECID, '\"'+ecNumber+'\"']
                                                    sql['uniprotContainHasECs'] << [uniprotContainID, uniprotECID]
                                                    uniprotECID++
                                                }
                                                cont['AltName'].each(){altName->
                                                    if(altName['Full']!=null){
                                                        sql['uniprotAltName'] << [uniprotAltNameID, '\"'+altName['Full']+'\"']
                                                        sql['uniprotContainHasAltNames'] << [uniprotContainID, uniprotAltNameID]
                                                        altName['Short'].each(){shortName->
                                                            sql['uniprotShortName'] << [uniprotShortNameID, '\"'+shortName+'\"']
                                                            sql['uniprotAltNameHasShortNames'] << [uniprotAltNameID, uniprotShortNameID]
                                                            uniprotShortNameID++
                                                        }
                                                        altName['EC'].each(){ecNumber->
                                                            sql['uniprotEC'] << [uniprotECID, '\"'+ecNumber+'\"']
                                                            sql['uniprotAltNameHasECs'] << [uniprotAltNameID, uniprotECID]
                                                            uniprotECID++
                                                        }
                                                        uniprotAltNameID++
                                                    }
                                                }
                                                uniprotContainID++
                                            }
                                            prot['Includes'].each(){cont->
                                                sql['uniprotInclude'] << [uniprotIncludeID, '\"'+cont['ProtName']['Full']+'\"', uniprotID]
                                                cont['Short'].each(){shortName->
                                                    sql['uniprotShortName'] << [uniprotShortNameID, '\"'+shortName+'\"']
                                                    sql['uniprotIncludeHasShortNames'] << [uniprotIncludeID, uniprotShortNameID]
                                                    uniprotShortNameID++
                                                }
                                                cont['EC'].each(){ecNumber->
                                                    sql['uniprotEC'] << [uniprotECID, '\"'+ecNumber+'\"']
                                                    sql['uniprotIncludeHasECs'] << [uniprotIncludeID, uniprotECID]
                                                    uniprotECID++
                                                }
                                                cont['AltName'].each(){altName->
                                                    if(altName['Full']!=null){
                                                        sql['uniprotAltName'] << [uniprotAltNameID, '\"'+altName['Full']+'\"']
                                                        sql['uniprotIncludeHasAltNames'] << [uniprotIncludeID, uniprotAltNameID]
                                                        altName['Short'].each(){shortName->
                                                            sql['uniprotShortName'] << [uniprotShortNameID, '\"'+shortName+'\"']
                                                            sql['uniprotAltNameHasShortNames'] << [uniprotAltNameID, uniprotShortNameID]
                                                            uniprotShortNameID++
                                                        }
                                                        altName['EC'].each(){ecNumber->
                                                            sql['uniprotEC'] << [uniprotECID, '\"'+ecNumber+'\"']
                                                            sql['uniprotAltNameHasECs'] << [uniprotAltNameID, uniprotECID]
                                                            uniprotECID++
                                                        }
                                                        uniprotAltNameID++
                                                    }
                                                }
                                                uniprotIncludeID++
                                            }
                                            prot['GO'].each(){g->
                                                sql['uniprotGO'] << [uniprotGOID, goClassesToId[g.process], '\"'+g.term+'\"', '\"'+g.description+'\"', goEvidenceToId[g.evidence], '\"'+g['class']+'\"', uniprotID]
                                                uniprotGOID++
                                            }
                                            uniprotID++
                                            counter++
                                        }
                                    }
                                }
                            }
                                prot = [:]
                                ac = []
                                curEle = [:]
                                curProt = prot
                                def ant = []
                                curProt.put('AltName', ant)
                                con = []
                                curProt.put('Contains', con)
                                inc = []
                                curProt.put('Includes', inc)
                                go = []
                                curProt.put('GO', go)
                                shor = []
                                curEle.put('Short', an)
                                ec = []
                                curEle.put('EC', ec)
                                gene = []
                                curProt.put('GN', gene)
                                skip = false
                        }
                }
        
            sql['uniprot'].finish()
            sql['uniprotAltName'].finish()
            sql['uniprotShortName'].finish()
            sql['uniprotEC'].finish()
            sql['uniprotContain'].finish()
            sql['uniprotInclude'].finish()
            sql['uniprotGO'].finish()
            sql['uniprotGN'].finish()
            sql['uniprotGNSyn'].finish()

            sql['uniprotHasAltNames'].finish()
            sql['uniprotHasShortNames'].finish()
            sql['uniprotHasECs'].finish()
            sql['uniprotAltNameHasShortNames'].finish()
            sql['uniprotAltNameHasECs'].finish()
            sql['uniprotContainHasAltNames'].finish()
            sql['uniprotContainHasShortNames'].finish()
            sql['uniprotContainHasECs'].finish()
            sql['uniprotIncludeHasAltNames'].finish()
            sql['uniprotIncludeHasShortNames'].finish()
            sql['uniprotIncludeHasECs'].finish()
            
            println 'finished with reading uniprot flat file'
            
            new File(''+ tempFolder + separator + 'upload_order_proteins_uniprot.txt').withWriter { out ->
                out.writeLine('uniprot.sql')
                out.writeLine('uniprotAltName.sql')
                out.writeLine('uniprotShortName.sql')
                out.writeLine('uniprotEC.sql')
                out.writeLine('uniprotContain.sql')
                out.writeLine('uniprotInclude.sql')
                out.writeLine('uniprotGO.sql')
                out.writeLine('uniprotGN.sql')
                out.writeLine('uniprotGNSyn.sql')

                out.writeLine('uniprotHasAltNames.sql')
                out.writeLine('uniprotHasShortNames.sql')
                out.writeLine('uniprotHasECs.sql')
                out.writeLine('uniprotAltNameHasShortNames.sql')
                out.writeLine('uniprotAltNameHasECs.sql')
                out.writeLine('uniprotContainHasAltNames.sql')
                out.writeLine('uniprotContainHasShortNames.sql')
                out.writeLine('uniprotContainHasECs.sql')
                out.writeLine('uniprotIncludeHasAltNames.sql')
                out.writeLine('uniprotIncludeHasShortNames.sql')
                out.writeLine('uniprotIncludeHasECs.sql')
            }
            
            if(upload){
                println 'upload uniprot information from sql file to database'
                println 'uploading uniprot.sql'
                sql['uniprot'].upload()
                print '\tdone!'
                println 'uploading uniprotAltName.sql'
                sql['uniprotAltName'].upload()
                print '\tdone!'
                println 'uploading uniprotShortName.sql'
                sql['uniprotShortName'].upload()
                print '\tdone!'
                println 'uploading uniprotEC.sql'
                sql['uniprotEC'].upload()
                print '\tdone!'
                println 'uploading uniprotContain.sql'
                sql['uniprotContain'].upload()
                print '\tdone!'
                println 'uploading uniprotInclude.sql'
                sql['uniprotInclude'].upload()
                print '\tdone!'
                println 'uploading uniprotGO.sql'
                sql['uniprotGO'].upload()
                print '\tdone!'
                println 'uploading uniprotGN.sql'
                sql['uniprotGN'].upload()
                print '\tdone!'
                println 'uploading uniprotGNSyn.sql'
                sql['uniprotGNSyn'].upload()

                print '\tdone!'
                println 'uploading uniprotHasAltNames.sql'
                sql['uniprotHasAltNames'].upload()
                print '\tdone!'
                println 'uploading uniprotHasShortNames.sql'
                sql['uniprotHasShortNames'].upload()
                print '\tdone!'
                println 'uploading uniprotHasECs.sql'
                sql['uniprotHasECs'].upload()
                print '\tdone!'
                println 'uploading uniprotAltNameHasShortNames.sql'
                sql['uniprotAltNameHasShortNames'].upload()
                print '\tdone!'
                println 'uploading uniprotAltNameHasECs.sql'
                sql['uniprotAltNameHasECs'].upload()
                print '\tdone!'
                println 'uploading uniprotContainHasAltNames.sql'
                sql['uniprotContainHasAltNames'].upload()
                print '\tdone!'
                println 'uploading uniprotContainHasShortNames.sql'
                sql['uniprotContainHasShortNames'].upload()
                print '\tdone!'
                println 'uploading uniprotContainHasECs.sql'
                sql['uniprotContainHasECs'].upload()
                print '\tdone!'
                println 'uploading uniprotIncludeHasAltNames.sql'
                sql['uniprotIncludeHasAltNames'].upload()
                print '\tdone!'
                println 'uploading uniprotIncludeHasShortNames.sql'
                sql['uniprotIncludeHasShortNames'].upload()
                print '\tdone!'
                println 'uploading uniprotIncludeHasECs.sql'
                sql['uniprotIncludeHasECs'].upload()
                print '\tdone!'
            }

        } catch (FileNotFoundException e) {
                System.err.println("!: file not found: "+file.absolutePath)
                e.printStackTrace(System.out)
        } catch (IOException e) {
                System.err.println("!: could not read file: "+file.absolutePath)
                e.printStackTrace(System.out)
        }
    }
                             
    
    /**
     * Uploads information for protein groups from MaxQuant into {@link nci60.data.maxquant.proteingroup.ProteinGroup}<p />
     * 
     * @param sourceFile        protein group file from MaxQuant, which contains the protein group information
     * @param tempFolder        temporary folder in which the generated sql files are written
     * @param properties        contains the used properties of the {@link nci60.data.maxquant.proteingroup.ProteinGroup} class and corresponding column names in the file, e.g. molWeight:'Mol. weight [kDa]'
     * @param currentProject    project (dataset) this information belongs to
     * @see nci60.data.maxquant.proteingroup.ProteinGroup
     */
    void uploadProteinGroupTable(File sourceFile, String tempFolder, Map properties,  Project currentProject){
        println '-preparing for uploading protein_group table'
        String separator = File.separator

        //find the position of the properties in the columns of the file
        def columns = getColumnNames(sourceFile)
        def indices = [:]
        properties.each{k,v->
            indices.put(k, columns.indexOf(v))
        }

        //defining the column names; camelCase property names map to snake_case columns
        def columnNames = ['version', 'current_project_id']
        properties.keySet().each{property->
            //'iBAQ' is special-cased, it would otherwise become 'i_b_a_q'
            columnNames.add(property=='iBAQ' ? 'ibaq' : property.replaceAll("[A-Z]") {'_'+it[0].toLowerCase()})
        }

        //defining the sql outputfile and setting up the SQLUploader object
        def sqlFile = new File(''+tempFolder+separator+'ProteinGroup.sql')
        def sqlUploader = new SQLUploader(sqlFile, 'protein_group', columnNames)

        println '--reading protein groups file: '+sourceFile.absolutePath
        //reading the sourcefile
        def temp = new ProteinGroup()
        try{
            //withReader closes the reader even if an exception is thrown
            //(the former BufferedReader/close() pair leaked the reader on error)
            sourceFile.withReader{reader->
                String line
                def i = 0
                while ((line = reader.readLine()) != null) {
                    if(i!=0){    //skip the header line
                        def splittedLine = line.split('\t')
                        def values = [1, currentProject.id]
                        properties.keySet().each{property->
                            values.add(temp.modifySQL(property,splittedLine[indices[property]]))
                        }
                        sqlUploader << values
                    }
                    i++
                }
            }

            println '--uploading protein group sql file'
            sqlUploader.upload()
        } catch (FileNotFoundException e) {
                println "!: file not found: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        } catch (IOException e) {
                println "!: could not read file: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        }
        
        println '--uploading protein_group table done!'
    }
    
    /**
     * Uploads information for protein groups from MaxQuant for each experiment (cell line) into {@link nci60.data.maxquant.proteingroup.ProteinGroupExperiment}<p />
     * 
     * @param sourceFile        protein group file from MaxQuant, which contains the protein group information
     * @param tempFolder        temporary folder in which the generated sql files are written
     * @param properties        contains the used properties of the {@link nci60.data.maxquant.proteingroup.ProteinGroupExperiment} class and corresponding column names in the file, e.g. intensity = 'Intensity [Experiment]', where "Experiment" is replaced by the p-numbers in the file
     * @param idInFile          column name of the id used in the MaxQuant file
     * @param currentProject    project (dataset) this information belongs to
     * @see nci60.data.maxquant.proteingroup.ProteinGroup
     * @see nci60.data.maxquant.proteingroup.ProteinGroupExperiment
     */
    void uploadProteinGroupExperimentTable(File sourceFile, String tempFolder, Map properties, String idInFile, Project currentProject){
        println '-preparing for uploading protein_group_experiment table'
        String separator = File.separator
        
        //map the p-numbers of this project to their experiments
        def experimentMap = [:]
        Experiment.findAllByCurrentProject(currentProject).each(){exp->
            experimentMap.put(exp.pNumber, exp)
        }

        //find the position of the per-experiment properties in the columns of the file;
        //the '[Experiment]' placeholder of each column pattern is replaced by every p-number
        def columns = getColumnNames(sourceFile)
        def indices = [:]
        properties.each(){p,v->
            experimentMap.keySet().each(){pNumber->
                def newParam = v.replace('[Experiment]',pNumber)
                indices.put(newParam, columns.indexOf(newParam))
            }
        }
        def idInFilePosition = columns.indexOf(idInFile)

        //defining the column names; camelCase property names map to snake_case columns
        def columnNames = ['version', 'current_project_id', 'experiment_id', 'group_id']
        properties.keySet().each(){p->
            //'iBAQ' is special-cased, it would otherwise become 'i_b_a_q'
            columnNames.add(p=='iBAQ' ? 'ibaq' : p.replaceAll("[A-Z]") {'_'+it[0].toLowerCase()})
        }

        //map the group ids used in the file to the database ids of the protein groups
        def groupIdMap = [:]
        ProteinGroup.executeQuery('select p.idInFile, p.id from ProteinGroup p where p.currentProject.id='+currentProject.id).each(){
            groupIdMap.put(it[0], it[1])
        }

        //defining the sql outputfile and setting up the SQLUploader object
        def sqlFile = new File(''+tempFolder+separator+'ProteinGroupExperiment.sql')
        def sqlUploader = new SQLUploader(sqlFile, 'protein_group_experiment', columnNames)

        println '--reading protein groups file: '+sourceFile.absolutePath

        def temp = new ProteinGroupExperiment()
        try{
            //withReader closes the reader even if an exception is thrown
            //(the former BufferedReader/close() pair leaked the reader on error)
            sourceFile.withReader{reader->
                String line
                def i = 0
                while ((line = reader.readLine()) != null) {
                    if(i!=0){    //skip the header line
                        def splittedLine = line.split('\t')
                        experimentMap.keySet().each(){e->
                            boolean skip = false
                            def values = [1, currentProject.id, experimentMap[e].id,
                                          groupIdMap[splittedLine[idInFilePosition].toInteger()]]
                            properties.keySet().each(){p->
                                def newParam = properties[p].replace('[Experiment]',e)
                                values.add(temp.modifySQL(p,splittedLine[indices[newParam]]))
                                //rows without any identified peptides for this experiment are not uploaded
                                if((p=='numberOfPeptides')&&(values[-1]=='0')){
                                    skip = true
                                }
                            }
                            if(!skip){
                                sqlUploader << values
                            }
                        }
                    }
                    i++
                }
            }

            println '--uploading protein group experiment sql file'
            sqlUploader.upload()
        } catch (FileNotFoundException e) {
                println "!: file not found: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        } catch (IOException e) {
                println "!: could not read file: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        }
        
        println '--uploading of protein_group_experiment done!'
    }

    /**
     * Connects the identifications from {@link nci60.data.maxquant.proteingroup.ProteinGroup} with {@link nci60.data.protein.Protein}<p />
     * 
     * @param sourceFile         protein group file from MaxQuant, which contains the protein group information
     * @param tempFolder         temporary folder in which the generated sql files are written
     * @param proteinIdInFile    column name of the IPI ids used in the MaxQuant file
     * @param groupIdInFile      column name of the id used in the MaxQuant file
     * @param contaminantInFile  column name of the contaminant used in the MaxQuant file
     * @param reverseInFile      column name of the reversed used in the MaxQuant file
     * @param currentProject     project (dataset) this information belongs to
     * @see nci60.data.connected.LeadingProtein
     * @see nci60.data.connected.IdentifiedProtein        
     */
    void identifyProteins( File sourceFile, String tempFolder, String proteinIdInFile, String groupIdInFile, String contaminantInFile, String reverseInFile, Project currentProject) {
        println '-preparing for get leading and identified proteins'

        String separator = File.separator
        
        //find the position of the needed columns in the file
        def columns = getColumnNames(sourceFile)
        def groupIdInFilePosition = columns.indexOf(groupIdInFile)
        def proteinIdInFilePosition = columns.indexOf(proteinIdInFile)
        def contaminantInFilePosition = columns.indexOf(contaminantInFile)
        def reverseInFilePosition = columns.indexOf(reverseInFile)
        
        println '--reading protein group file: '+sourceFile.absolutePath

        //protein id -> group id (as used in the file); the first protein of a
        //semicolon-separated list is the leading protein, the rest are only identified
        def identified = [:]
        def leading = [:]
        try{
            //withReader closes the reader even if an exception is thrown
            //(the former BufferedReader/close() pair leaked the reader on error)
            sourceFile.withReader{reader->
                String line
                def i = 0
                while ((line = reader.readLine()) != null) {
                    if(i!=0){    //skip the header line
                        def splittedLine = line.split('\t')
                        //ignore contaminants and reversed (decoy) hits
                        if((splittedLine[contaminantInFilePosition]!='+')&&(splittedLine[reverseInFilePosition]!='+')){
                            splittedLine[proteinIdInFilePosition].split(';').eachWithIndex(){id,index->
                                if(index==0){
                                    leading.put(id,splittedLine[groupIdInFilePosition])
                                }else{
                                    identified.put(id,splittedLine[groupIdInFilePosition])
                                }
                            }
                        }
                    }
                    i++
                }
            }

            println '--get information from database'

            //map the group ids used in the file to the database ids of the protein groups
            def groupIdMap = [:]
            ProteinGroup.executeQuery('select p.idInFile, p.id from ProteinGroup p where p.currentProject.id='+currentProject.id).each(){it->
                groupIdMap.put(it[0].toString(),it[1])
            }

            //map the protein main ids to their database ids
            def prots = [:]
            Protein.executeQuery('select p.mainId, p.id from Protein p').each(){
                prots.put(it[0],it[1])
            }


            println '--writing leading proteins sql file'
            def sqlFile = new File(''+tempFolder+separator+'LeadingProteins.sql')
            def sqlUploaderLeading = new SQLUploader(sqlFile, 'leading_protein', ['version', 'protein_id', 'group_id', 'current_project_id'])

            leading.each(){key, value->
                if(prots[key]!=null){
                    sqlUploaderLeading << ['0', prots[key], groupIdMap[value], currentProject.id]
                }else{
                    //protein is unknown in the database, so it cannot be connected
                    println 'Not included in leading proteins: '+key
                }
            }

            println '--writing identified proteins sql file'
            sqlFile = new File(''+tempFolder+separator+'IdentifiedProteins.sql')
            def sqlUploaderIdentified = new SQLUploader(sqlFile, 'identified_protein', ['version', 'protein_id', 'group_id', 'current_project_id'])

            identified.each(){key, value->
                if(prots[key]!=null){
                    sqlUploaderIdentified << ['0', prots[key], groupIdMap[value], currentProject.id]
                }else{
                    println 'Not included in identified proteins: '+key
                }
            }


            println '--uploading leading proteins sql file'
            sqlUploaderLeading.upload()
            println '--uploading identified proteins sql file'
            sqlUploaderIdentified.upload()
        } catch (FileNotFoundException e) {
                println "!: file not found: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        } catch (IOException e) {
                println "!: could not read file: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        }
        
        println '--uploading leading and identified proteins done!'
    }

    /**
     * Uploads information for peptides from MaxQuant into {@link nci60.data.maxquant.peptide.Peptide}<p />
     * 
     * @param sourceFile            peptides file from MaxQuant, which contains the peptide information
     * @param tempFolder            temporary folder in which the generated sql files are written
     * @param pepParams             contains the used properties of the {@link nci60.data.maxquant.peptide.Peptide} class and corresponding column names in the file, e.g. score:'Score'
     * @param leadingRazorProtName  column name of the protein used as leading razor protein used in the MaxQuant file
     * @param currentProject        project (dataset) this information belongs to
     * @see nci60.data.maxquant.peptide.Peptide
     */
    void uploadPeptideTable(File sourceFile, String tempFolder, Map pepParams, String leadingRazorProtName, Project currentProject){
        println '-preparing for uploading peptide table'
        
        String separator = File.separator
        
        //find the position of the properties in the columns of the file
        def columns = getColumnNames(sourceFile)
        def pepIndices = [:]
        pepParams.keySet().each(){p->
            //last occurrence wins, a missing column falls back to index 0
            //(identical behavior to the former eachWithIndex search loop)
            def index = columns.lastIndexOf(pepParams[p])
            pepIndices.put(p, index<0 ? 0 : index)
        }
        def leadingRazorProtIndex = columns.indexOf(leadingRazorProtName)

        //map the protein main ids (IPI) to their database ids
        def ipiToId = [:]
        Protein.executeQuery('select p.mainId, p.id from Protein p').each(){prot->
            ipiToId.put(prot[0], prot[1])
        }

        //defining the column names; camelCase property names map to snake_case columns
        //NOTE(review): unlike the other upload methods no 'version' column is written here — confirm this is intended
        def pepColumnNames = ['current_project_id', 'leading_razor_protein_id']
        pepParams.keySet().each(){p->
            //'iBAQ' is special-cased, it would otherwise become 'i_b_a_q'
            pepColumnNames.add(p=='iBAQ' ? 'ibaq' : p.replaceAll("[A-Z]") {'_'+it[0].toLowerCase()})
        }

        //defining the sql outputfile and setting up the SQLUploader object
        def pepSqlFile = new File(''+tempFolder + separator + 'peptides.sql')
        def pepSqlUploader = new SQLUploader(pepSqlFile, 'peptide', pepColumnNames)

        println '--reading peptides file: '+sourceFile.absolutePath
        //reading the sourcefile
        def temp = new Peptide()
        try{
            //withReader closes the reader even if an exception is thrown
            //(the former BufferedReader/close() pair leaked the reader on error)
            sourceFile.withReader{reader->
                String line
                def i = 0
                while ((line = reader.readLine()) != null) {
                    if(i!=0){    //skip the header line
                        def splittedLine = line.split('\t')
                        def pepValues = [currentProject.id, ipiToId[splittedLine[leadingRazorProtIndex]]]
                        pepParams.keySet().each(){p->
                            pepValues.add(temp.modifySQL(p,splittedLine[pepIndices[p]]))
                        }
                        pepSqlUploader << pepValues
                    }
                    i++
                }
            }

            println '--uploading peptide table sql file'
            //upload data to the database
            pepSqlUploader.upload()
        } catch (FileNotFoundException e) {
                println "!: file not found: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        } catch (IOException e) {
                println "!: could not read file: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        }
        
        println '--uploading peptide table done!'
    }

    /**
     * Uploads information for peptides from MaxQuant into {@link nci60.data.maxquant.peptide.PeptideExperiment}<p />
     * 
     * @param sourceFile            peptides file from MaxQuant, which contains the peptide information
     * @param tempFolder            temporary folder in which the generated sql files are written
     * @param pepExpParams          contains the used properties of the {@link nci60.data.maxquant.peptide.PeptideExperiment} class and corresponding column names in the file, e.g. intensity = 'Intensity [Experiment]', where "Experiment" is replaced by the p-numbers in the file
     * @param idInFileName          column name of the id used in the MaxQuant file
     * @param currentProject        project (dataset) this information belongs to
     * @see nci60.data.maxquant.peptide.Peptide
     * @see nci60.data.maxquant.peptide.PeptideExperiment
     */
    void uploadPeptideExperimentTable(File sourceFile, String tempFolder, Map pepExpParams, String idInFileName, Project currentProject){
        println '-preparing to upload peptide_experiment table'
        
        String separator = File.separator
        
        //get p-Numbers and Experiments
        def experimentMap = [:]
        Experiment.findAllByCurrentProject(currentProject).each(){exp->
            experimentMap.put(exp.pNumber, exp)
        }

        //find the position of the properties in the columns of the file
        def columns = getColumnNames(sourceFile)
        def pepExpIndices = [:]
        pepExpParams.keySet().each(){p->
            experimentMap.keySet().each(){e->
                def newParam = pepExpParams[p].replace('[Experiment]',e)
                def index = 0
                columns.eachWithIndex(){c, i->
                    if(c==newParam){
                        index = i
                    }
                }
                pepExpIndices.put(newParam,index)
            }

        }
        def idInFilePosition = columns.indexOf(idInFileName)

        //Defining the column names for peptide_experiment_table
        def pepExpColumnNames = []
        pepExpColumnNames.add('version')
        pepExpColumnNames.add('current_project_id')
        pepExpColumnNames.add('experiment_id')
        pepExpColumnNames.add('peptide_id')
        pepExpParams.keySet().each(){p->
            if(p=='iBAQ'){
                pepExpColumnNames.add('ibaq')
            }else{
                pepExpColumnNames.add(p.replaceAll("[A-Z]") {'_'+it[0].toLowerCase()})
            }
        }

       //getting peptide Ids
        def pepIdMap = [:]
        Peptide.executeQuery('select p.idInFile, p.id from Peptide p where p.currentProject.id='+currentProject.id).each(){
            pepIdMap.put(it[0], it[1])
        }

        //defining the sql outputfile
        def sqlFile = new File(''+tempFolder + separator + 'peptide_experiments.sql')
        def sqlUploader = new SQLUploader(sqlFile, 'peptide_experiment', pepExpColumnNames)
        
        println '--reading peptides file: '+sourceFile.absolutePath
        
        def temp = new PeptideExperiment()
        try{
            BufferedReader file = new BufferedReader(new InputStreamReader(new FileInputStream(sourceFile)));
            String line;
            def i = 0
            while ((line = file.readLine()) != null) {
                if(i!=0){
                    def splittedLine = line.split('\t')
                    experimentMap.keySet().each(){e->
                        boolean skip = false
                        def values = []
                        values.add(1)
                        values.add(currentProject.id)
                        values.add(experimentMap[e].id)
                        values.add(pepIdMap[splittedLine[idInFilePosition]])

                        pepExpParams.keySet().each(){p->
                            def newParam = pepExpParams[p].replace('[Experiment]',e)
                            values.add(temp.modifySQL(p,splittedLine[pepExpIndices[newParam]]))
                            if((p=='numberOfEvidences')&&(values[-1]=='0')){
                                skip =true
                            }
                        }
                        if(!skip){
                            sqlUploader << values
                        }
                    }
                }
                i++
            }
            file.close()

            println '--uploading peptide_experiments sql file'
            //upload data to the database
            sqlUploader.upload()
        } catch (FileNotFoundException e) {
                println "!: file not found: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        } catch (IOException e) {
                println "!: could not read file: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        }
        
        println '--uploading peptide_experiment table done!'
    }
    
    /**
     * Uploads charge information for peptides from MaxQuant into {@link nci60.data.maxquant.peptide.PeptideCharge}<p />
     * 
     * @param sourceFile            peptides file from MaxQuant, which contains the peptide information
     * @param tempFolder            temporary folder in which the generated sql files are written
     * @param idInFileName          column name of the id used in the MaxQuant file
     * @param chargeName            column name of the charges used in the MaxQuant file
     * @param currentProject        project (dataset) this information belongs to
     * @see nci60.data.maxquant.peptide.Peptide
     * @see nci60.data.maxquant.peptide.PeptideExperiment
     * @see nci60.data.maxquant.peptide.PeptideCharge
     */
    void uploadPeptideChargeTable(File sourceFile, String tempFolder, String idInFileName, String chargeName, Project currentProject){
        println '-preparing to upload peptide_charge table'
        
        String separator = File.separator
        
        //find the position of the properties in the columns of the file
        def columns = getColumnNames(sourceFile)
        def idInFileIndex = columns.indexOf(idInFileName)
        def chargeIndex = columns.indexOf(chargeName)
        
        //make sure PeptideCharge rows for charges 1..10 exist and map charge -> db id
        def chargeToId = [:]
        (1..10).each{charge->
            def c = PeptideCharge.findByCharge(charge.toString())
            if(c==null){
                c = new PeptideCharge(charge:charge.toString())
                if (!c.save(flush:true)) {
                    c.errors.each {println it}
                }
            }
            chargeToId.put(c.charge.toString(), c.id)
        }

        //map MaxQuant peptide id (idInFile) -> database id
        def pepFileToDbId = [:]
        Peptide.executeQuery('select p.idInFile, p.id from Peptide p where p.currentProject.id='+currentProject.id).each(){
            pepFileToDbId.put(it[0], it[1])
        }

        //Defining the column names (join table peptide <-> peptide_charge)
        def columnNames = []
        columnNames.add('peptide_charges_id')
        columnNames.add('peptide_charge_id')


        //defining the sql outputfile
        def sqlFile = new File(''+tempFolder + separator + 'peptide_charge.sql')

        println '--reading peptides file: '+sourceFile.absolutePath

        //setting up the SQLUploader Object
        def sqlUploader = new SQLUploader(sqlFile, 'peptide_charges', columnNames)

        //reading the sourcefile
        try{
            //withReader closes the reader even if an exception is thrown
            //while reading (the previous implementation leaked it on error)
            sourceFile.withReader{reader->
                String line
                def i = 0
                while ((line = reader.readLine()) != null) {
                    if(i!=0){   //skip the header line
                        def splittedLine = line.split('\t')
                        //a peptide can occur with several charges, separated by ','
                        def charges = splittedLine[chargeIndex].split(',')
                        charges.eachWithIndex(){charge,cIndex->
                            def values = []
                            values.add(pepFileToDbId[splittedLine[idInFileIndex]])
                            values.add(chargeToId[charge])
                            sqlUploader << values
                        }
                    }
                    i++
                }
            }

            println '--uploading peptide_charge sql file'
            //upload data to the database
            sqlUploader.upload()
        } catch (FileNotFoundException e) {
                println "!: file not found: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        } catch (IOException e) {
                println "!: could not read file: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        }
        
        println '--upload peptide_charge table done!'
    }
    
    /**
     * Connects the identified {@link nci60.data.maxquant.peptide.Peptide} with the {@link nci60.data.protein.Protein}<p />
     * 
     * @param sourceFile         peptides file from MaxQuant, which contains the peptide information
     * @param tempFolder         temporary folder in which the generated sql files are written
     * @param idInFileName       column name of the id used in the MaxQuant file
     * @param proteinsName       column name of the protein ids (IPI identifier) used in the MaxQuant file
     * @param sequenceName       column name of the sequence used in the MaxQuant file
     * @param contaminantName    column name of the contaminant used in the MaxQuant file
     * @param reverseName        column name of the reversed used in the MaxQuant file
     * @param currentProject     project (dataset) this information belongs to
     * @see nci60.data.maxquant.peptide.Peptide
     * @see nci60.data.protein.Protein     
     * @see nci60.data.connected.PeptideInProtein   
     */
    void uploadPeptideInProteinTable(File sourceFile, String tempFolder, String idInFileName, String proteinsName, String sequenceName, String contaminantName, String reverseName, Project currentProject){
        println '-preparing to upload peptide_in_protein table'
        
        String separator = File.separator
        
         //find the position of the properties in the columns of the file
        def columns = getColumnNames(sourceFile)
        def idInFileIndex = columns.indexOf(idInFileName)
        def proteinsInFile = columns.indexOf(proteinsName)
        def sequenceInFile = columns.indexOf(sequenceName)
        def contaminantInFile = columns.indexOf(contaminantName)
        def reverseInFile = columns.indexOf(reverseName)

        //map protein mainId -> db id, and db id -> protein sequence
        def proteinMainIdToId = [:]
        def proteinIdToSequence = [:]
        Protein.executeQuery('select l.protein.mainId, l.protein.id, l.protein.sequence from LeadingProtein l where l.currentProject.id='+currentProject.id).each(){
            proteinMainIdToId.put(it[0], it[1])
            proteinIdToSequence.put(it[1], it[2].replaceAll('L', 'I'))  //renaming all L to I makes the two sequences comparable (L and I have the same mass)
        }

        //map MaxQuant peptide id (idInFile) -> database id
        def pepFileToDbId = [:]
        Peptide.executeQuery('select p.idInFile, p.id from Peptide p where p.currentProject.id='+currentProject.id).each(){
            pepFileToDbId.put(it[0], it[1])
        }

        //Defining the column names
        def columnNames = []
        columnNames.add('version')
        columnNames.add('current_project_id')
        columnNames.add('protein_id')
        columnNames.add('peptide_id')
        columnNames.add('position')


        //defining the sql outputfile
        def sqlFile = new File(''+tempFolder + separator + 'peptide_in_protein.sql')

        //setting up the SQLUploader Object
        def sqlUploader = new SQLUploader(sqlFile, 'peptide_in_protein', columnNames)

        println '--reading peptides file: '+sourceFile.absolutePath
        
        //reading the sourcefile
        try{
            //withReader guarantees the reader is closed; the previous
            //implementation never closed it at all
            sourceFile.withReader{reader->
                String line
                def i = 0
                while ((line = reader.readLine()) != null) {
                    if(i!=0){   //skip the header line
                        def splittedLine = line.split('\t')
                        //skipping entries, which are marked as contaminant or reverse
                        if((splittedLine[contaminantInFile]!='+')&&(splittedLine[reverseInFile]!='+')){
                            //rename all L to I to make the two sequences comparable
                            def seq = splittedLine[sequenceInFile].replaceAll('L','I')
                            def protIds = splittedLine[proteinsInFile].split(';')
                            protIds.eachWithIndex(){p,pIndex->
                                //check if ipi identifier is in leading proteins
                                if(proteinMainIdToId[p]!=null){
                                    def values = []
                                    values.add(1)                   //version
                                    values.add(currentProject.id)
                                    values.add(proteinMainIdToId[p])
                                    values.add(pepFileToDbId[splittedLine[idInFileIndex]])
                                    //position of the peptide within the protein sequence
                                    values.add(proteinIdToSequence[proteinMainIdToId[p]].indexOf(seq))
                                    sqlUploader << values
                                }
                            }
                        }
                    }
                    i++
                }
            }

            println '--uploading peptide_in_protein sql file'
            //upload data to the database
            sqlUploader.upload()
        } catch (FileNotFoundException e) {
                println "!: file not found: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        } catch (IOException e) {
                println "!: could not read file: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        }
        
        println '--upload peptide_in_protein table done!'
    }

    /**
     * Updates the peptides in {@link nci60.data.connected.PeptideInProtein} which are razor. <p />
     * This information is stored in the protein group file from MaxQuant. <p />
     * For this no sql file is needed.
     * 
     * @param sourceFile                        protein group file from MaxQuant, which contains the peptide information
     * @param protIdInFilePositionName          column name of the protein id (IPI identifier) used in the MaxQuant file
     * @param contaminantInFilePositionName     column name of the contaminant used in the MaxQuant file
     * @param reverseInFilePositionName         column name of the reversed used in the MaxQuant file
     * @param peptideIDsInFilePositionName      column name of the (peptide) id used in the MaxQuant file
     * @param peptideIsRazorInFilePositionName  column name showing if the peptide is razor in the MaxQuant file
     * @param currentProject                    project (dataset) this information belongs to
     * @see nci60.data.connected.PeptideInProtein   
     */
    void updatePeptideIsRazor(File sourceFile, String protIdInFilePositionName, String contaminantInFilePositionName, String reverseInFilePositionName, String peptideIDsInFilePositionName, String peptideIsRazorInFilePositionName, Project currentProject){
        println 'preparing for updating razor peptides in peptide_in_protein table'
        
        //find the position of the properties in the columns of the file
        def columns = getColumnNames(sourceFile)
        def protIdInFilePosition = columns.indexOf(protIdInFilePositionName)
        def contaminantInFilePosition = columns.indexOf(contaminantInFilePositionName)
        def reverseInFilePosition = columns.indexOf(reverseInFilePositionName)
        def peptideIDsInFilePosition = columns.indexOf(peptideIDsInFilePositionName)
        def peptideIsRazorInFilePosition = columns.indexOf(peptideIsRazorInFilePositionName)
        
        //pepInProt[protein mainId][peptide idInFile] = PeptideInProtein db id
        def pepInProt = [:]
        Peptide.executeQuery('select p.protein.mainId, p.peptide.idInFile, p.id from PeptideInProtein p where p.currentProject=:proj',[proj:currentProject]).each{lin->
            if(!pepInProt.containsKey(lin[0])){pepInProt.put(lin[0], [:])}
            pepInProt[lin[0]].put(lin[1], lin[2])
        }

        println '--reading protein groups file: ' + sourceFile.absolutePath
        
        //used as a set of PeptideInProtein ids to update (value unused)
        def razorPeptideIds = [:]
        try{
            //withReader closes the reader even if an exception is thrown
            //while reading (the previous implementation leaked it on error)
            sourceFile.withReader{reader->
                String line
                def i = 0
                while ((line = reader.readLine()) != null) {
                    if(i!=0){   //skip the header line
                        def splittedLine = line.split('\t')
                        //skipping entries, which are marked as contaminant or reverse
                        if((splittedLine[contaminantInFilePosition]!='+')&&(splittedLine[reverseInFilePosition]!='+')){
                            def pepIDs = splittedLine[peptideIDsInFilePosition].split(';')
                            def pepIsRazor = splittedLine[peptideIsRazorInFilePosition].split(';')
                            //only the first (leading) protein of the group is used
                            def prot = splittedLine[protIdInFilePosition].split(';')[0]
                            pepIDs.eachWithIndex{pepId,index->
                                if(pepIsRazor[index].toLowerCase()=='true'){
                                    if((pepInProt.containsKey(prot))&&(pepInProt[prot].containsKey(pepId))){
                                        razorPeptideIds.put(pepInProt[prot][pepId], 0)
                                    }else{
                                        println 'could not update peptide in protein for peptide: '+pepId+' in protein: '+prot
                                    }
                                }
                            }
                        }
                    }
                    i++
                }
            }
            println '-number of peptides to update: '+razorPeptideIds.size()

            //split the ids into batches of ~1000 to keep the IN clause small
            def update = []
            def curr = []
            update.add(curr)
            razorPeptideIds.keySet().eachWithIndex{id,index->
                if(index%1000==999){
                    curr = []
                    update.add(curr)
                }
                curr.add(id)
            }

            println '--updating razor peptides'

            update.each{ids->
                if(ids.size()!=0) Peptide.executeUpdate('update PeptideInProtein set razor=true where id in :pep', [pep:ids])
            }
            
            println '--updating protein group experiments'
            //link each protein group experiment to its leading protein
            Peptide.executeQuery('select l.group.id, l.protein.id from LeadingProtein l where l.currentProject=:proj', [proj:currentProject]).each{gp->
                def gid = gp[0].toInteger()
                def pid = gp[1].toInteger()
                Protein.executeUpdate('update ProteinGroupExperiment set protein=:pid where group.id=:gid', [pid:pid, gid:gid])
            }
        } catch (FileNotFoundException e) {
                println "!: file not found: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        } catch (IOException e) {
                println "!: could not read file: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        }
        
        println '--updating razor peptides done!'
    }
    
    
    /**
     * Uploads information for msms from MaxQuant into {@link nci60.data.maxquant.msms.MsMs}<p />
     * NOTE: the passed msmsParams map is modified by this method: the keys
     * 'reverse', 'modifiedSequence', 'peptide' and 'rawFile' are removed,
     * because they are handled separately and are no ms_ms columns.
     * 
     * @param sourceFile            msms file from MaxQuant, which contains the msms information
     * @param tempFolder            temporary folder in which the generated sql files are written
     * @param msmsParams            contains the used properties of the {@link nci60.data.maxquant.msms.MsMs} class and corresponding column names in the file, e.g. score:'Score'
     * @param proj                  project (dataset) this information belongs to
     * @see nci60.data.maxquant.msms.MsMs
     */
    void uploadMsMs(File sourceFile, String tempFolder, Map msmsParams, Project proj){
        println '-preparing for uploading ms_ms table'
        
        String separator = File.separator
        
        //get Id and ExperimentId from RawFile
        def rawToId = [:]
        def rawToExpId = [:]
        RawFile.findAllByCurrentProject(proj).each{r->
            rawToId.put(r.name, r.id)
            rawToExpId.put(r.name, r.experiment.id)
        }

        def columns = getColumnNames(sourceFile)

        //find the position of the properties in the columns of the file
        //NOTE(review): a missing column silently resolves to index 0
        def indices = [:]
        msmsParams.each(){p,columnName->
            def index = 0
            columns.eachWithIndex(){c, i->
                if(c==columnName){
                    index = i
                }
            }
            indices.put(p,index)
        }

        //these keys are handled separately below and must not become
        //ordinary ms_ms columns (this mutates the caller's map)
        msmsParams.remove('reverse')
        msmsParams.remove('modifiedSequence')
        msmsParams.remove('peptide')
        msmsParams.remove('rawFile')

        //map MaxQuant peptide id (idInFile) -> database id
        def pepIdInFileToId = [:]
        Protein.executeQuery('select p.idInFile, p.id from Peptide p where p.currentProject=:proj',[proj:proj]).each(){line->
            pepIdInFileToId.put(line[0], line[1])
        }

        //Defining the column names
        def columnNames = []
        columnNames.add('current_project_id')
        columnNames.add('raw_file_id')
        columnNames.add('experiment_id')
        columnNames.add('peptide_id')
        columnNames.add('oxidations')
        columnNames.add('acetylation')
        msmsParams.keySet().each(){p->
            if(p=='precursorMZ'){
                columnNames.add('precursormz')  //special case: plain lowercase
            }else{
                //camelCase property name -> snake_case column name
                columnNames.add(p.replaceAll("[A-Z]") {'_'+it[0].toLowerCase()})
            }
        }

        def sql = new SQLUploader(new File(''+tempFolder + separator + 'MsMs.sql'), 'ms_ms', columnNames)
        
        println '--reading msms file: ' + sourceFile.absolutePath
        
        //temp instance is only used for its modifySQL() value conversion
        def temp = new MsMs()
        try {
                //withReader closes the reader even if an exception is thrown
                //while reading (the previous implementation leaked it on error);
                //the former per-line debug println was removed, it flooded the
                //console with one line per msms entry
                sourceFile.withReader{reader->
                    String line
                    def i = 0
                    while ((line = reader.readLine()) != null) {
                        if(i!=0){   //skip the header line
                            line = line+'\tx'   //to avoid Index out of bounds exception
                            def splittedLine = line.split('\t')
                            //skipping entries, which are marked as reverse
                            if(splittedLine[indices['reverse']]!='+'){
                                def values = []
                                values.add(proj.id)
                                values.add(rawToId[splittedLine[indices['rawFile']]])
                                values.add(rawToExpId[splittedLine[indices['rawFile']]])
                                values.add(pepIdInFileToId[splittedLine[indices['peptide']]])
                                values.add('\''+findMods(splittedLine[indices['modifiedSequence']])+'\'')
                                //peptide is N-terminally acetylated if the sequence starts with _(ac)
                                values.add(((splittedLine[indices['modifiedSequence']].startsWith('_(ac)'))?'true':'false'))
                                msmsParams.keySet().each(){p->
                                    values.add(temp.modifySQL(p,splittedLine[indices[p]]))
                                }
                                sql << values
                            }
                        }
                        i++
                    }
                }
                
                println '--uploading ms_ms sql file'

                sql.upload()
        } catch (FileNotFoundException e) {
                println "!: file not found: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        } catch (IOException e) {
                println "!: could not read file: "+sourceFile.absolutePath
                e.printStackTrace(System.out)
        }catch (ArrayIndexOutOfBoundsException e) {
                e.printStackTrace(System.out)
        }
        
        println '--uploading msms spectra done'
    }

    /**
     * Finds oxidation modifications in a MaxQuant modified sequence.<p />
     * The sequence looks like e.g. "_AAM(ox)K_": residues between the
     * underscores, each modification in round brackets directly after the
     * residue it belongs to.
     * 
     * @param line    modified-sequence column value from the msms file
     * @return  ';'-separated 0-based residue positions of all '(ox)' modifications (empty string if none)
     * @see nci60.data.maxquant.msms.MsMs
     */
    String findMods(String line){
        def positions = []      //residue indices carrying an oxidation
        int residue = -1        //index of the last residue seen (-1 = none yet)
        boolean inBracket = false
        for(int pos = 0; pos < line.size(); pos++){
            def letter = line[pos]
            if(letter != '_' && !inBracket){
                if(letter == '('){
                    inBracket = true
                    //the modification applies to the residue just before the bracket
                    if(line[pos..(pos+3)] == '(ox)'){
                        positions << residue
                    }
                }else{
                    residue++
                }
            }else if(letter == ')'){
                inBracket = false
            }
        }
        return positions.join(';')
    }
    
    
    /**
     * Uploads the full spectrum information from MaxQuant apl files into {@link nci60.data.maxquant.msms.MsMs}<p />
     * 
     * @param files         list of apl files from MaxQuant, which contains the spectral information
     * @param tempFolder    temporary folder in which the generated sql files are written
     * @param proj          project (dataset) this information belongs to
     * @see nci60.data.maxquant.msms.MsMs
     * @see nci60.data.maxquant.msms.FullSpectrum
     * @see nci60.data.maxquant.RawFile
     */
    void uploadAPLSpectra(List files, String tempFolder, Project proj){
        println '-preparing for uploading full_spectrum table'
        String separator = File.separator
        
        //identifier[rawFileId][scanNumber]=boolean   default:false; true:entry is already imported
        def identifier = [:]
        def rawFileNameToId = [:]
        RawFile.executeQuery('select r.name, r.id from RawFile r where r.currentProject=:proj',[proj:proj]).each{line->
            def rawFileName = line[0]
            def rawFileId = line[1]
            rawFileNameToId.put(rawFileName, rawFileId)
        }
        //get raw file and scan number info from db
        MsMs.executeQuery('select m.rawFile.id, m.scanNumber from MsMs m where m.currentProject.id=:proj',[proj:proj.id]).each{line->
            def rawFileId = line[0]
            def scanNr = line[1].toString()
            if(!identifier.containsKey(rawFileId)){identifier.put(rawFileId, [:])}
            identifier[rawFileId].put(scanNr, false)
        }

        def sql = new SQLUploader(new File(''+tempFolder + separator + 'fullSpectra.sql'), 'full_spectrum', ['current_project_id', 'raw_file_id', 'scan_number', 'masses', 'intensities'], 100)
        
        def doUpload = true
        files.each{file->
            try {
                println '--reading file: ' + file.absolutePath
                //withReader guarantees the reader is closed; the previous
                //implementation never closed it at all
                file.withReader{reader->
                    String line
                    def rawFileId          //db id of the raw file the current peak list belongs to
                    def scanNumber
                    def readPeaks = false  //true while inside a wanted peak list
                    def masses = []
                    def intensities = []
                    while ((line = reader.readLine()) != null) {
                        if(line.startsWith('header=RawFile:')){
                            def splitted = line.split(' ')
                            //resolve the raw file name from the header to its database id
                            def headerRawId = rawFileNameToId[splitted[1]]
                            scanNumber = splitted[3]
                            //only import a spectrum that is expected in the db and not imported yet
                            if(identifier.containsKey(headerRawId)&&identifier[headerRawId].containsKey(scanNumber)&&(!identifier[headerRawId][scanNumber])){
                                readPeaks = true
                                identifier[headerRawId][scanNumber]=true
                                rawFileId = headerRawId
                            }
                        }else if(readPeaks){
                            if(line.startsWith('peaklist end')){
                                readPeaks = false
                                sql << [proj.id, rawFileId, scanNumber, '\"'+mergeList(masses, ';')+'\"', '\"'+mergeList(intensities, ';')+'\"']
                                masses = []
                                intensities = []
                            }else{
                                //peak line: mass <tab> intensity
                                def splitted = line.split('\t')
                                masses.add(splitted[0])
                                intensities.add(splitted[1])
                            }
                        }
                    }
                }
            } catch (FileNotFoundException e) {
                    println "!: file not found: "+file.absolutePath
                    e.printStackTrace(System.out)
                    doUpload = false
            } catch (IOException e) {
                    println "!: could not read file: "+file.absolutePath
                    e.printStackTrace(System.out)
                    doUpload = false
            }
        }
        if(doUpload){
            println '--uploading full_spectrum sql file'
            sql.upload()
            println '--uploading of full spectra done'
        }else{
            println '!:cannot upload full_spectrum sql file, because it is incomplete'
        }
    }

    /**
     * Merges the elements of a list using a given separator<p />
     * 
     * @param list         list with some elements
     * @param separator    used separator for joining
     * @return  String with joined elements (empty string for an empty list)
     */
    String mergeList(List list, String separator){
        //Groovy's Collection.join already implements exactly this:
        //'' for an empty list, no leading/trailing separator otherwise
        return list.join(separator)
    }
    
    
    /**
     * Updates the suggestions given by select2 for the search of proteins. <p />
     * This should usually be done each time after inserting a new project (dataset) <p />
     * Old suggestions are deleted.
     * 
     * @param tempFolder    temporary folder in which the generated sql files are written
     * @see nci60.query.Suggestion
     */
    void makeSuggestions(String tempFolder){
        println '-prepare for generating suggestions table'
        String separator = File.separator
        
        // map of HQL query -> suggestion type; each query yields
        // (protein id, phrase) pairs that become suggestion rows
        def queries = ['select l.protein.id , l.protein.description from LeadingProtein l ':'description',
            'select l.protein.id , go.identifier from LeadingProtein l join l.protein.uniprot u join u.go go':'go',
            'select l.protein.id , go.description from LeadingProtein l join l.protein.uniprot u join u.go go':'go',
            'select l.protein.id , u.fullName from LeadingProtein l join l.protein.uniprot u':'uniprot',
            'select l.protein.id , us.name from LeadingProtein l join l.protein.uniprot u join u.shortNames us':'uniprot',
            'select l.protein.id , ua.fullName from LeadingProtein l join l.protein.uniprot u join u.altNames ua':'uniprot',
            'select l.protein.id , uas.name from LeadingProtein l join l.protein.uniprot u join u.altNames ua join ua.shortNames uas':'uniprot',
            'select l.protein.id , uc.fullName from LeadingProtein l join l.protein.uniprot u join u.contains uc':'uniprot',
            'select l.protein.id , ucs.name from LeadingProtein l join l.protein.uniprot u join u.contains uc join uc.shortNames ucs':'uniprot',
            'select l.protein.id , uca.fullName from LeadingProtein l join l.protein.uniprot u join u.contains uc join uc.altNames uca':'uniprot',
            'select l.protein.id , ucas.name from LeadingProtein l join l.protein.uniprot u join u.contains uc join uc.altNames uca join uca.shortNames ucas':'uniprot',
            'select l.protein.id , ui.fullName from LeadingProtein l join l.protein.uniprot u join u.includes ui':'uniprot',
            'select l.protein.id , uis.name from LeadingProtein l join l.protein.uniprot u join u.includes ui join ui.shortNames uis':'uniprot',
            'select l.protein.id , uia.fullName from LeadingProtein l join l.protein.uniprot u join u.includes ui join ui.altNames uia':'uniprot',
            'select l.protein.id , uias.name from LeadingProtein l join l.protein.uniprot u join u.includes ui join ui.altNames uia join uia.shortNames uias':'uniprot']

        // suggestions are bulk-inserted via a generated sql file
        def sql = new SQLUploader(new File(''+tempFolder + separator + 'suggestion.sql'), 'suggestion', ['phrase', 'protein_id', 'type'])

        println '--deleting old suggestions'
        
        // drop all existing suggestions before regenerating them
        Suggestion.executeUpdate('DELETE Suggestion s')
        
        println '--querying the database for new suggestions'
        
        // one suggestion row per (phrase, protein, type) result;
        // grouping by protein id removes duplicate phrases per protein
        queries.each(){q,t->
            println 'query '+q+' group by l.protein.id'
            LeadingProtein.executeQuery(q+' group by l.protein.id').each(){r->
                sql << ['\"'+r[1]+'\"', r[0], '\"'+t+'\"']
            }
        }

        // gene names come from several sources, so they are collected in a
        // nested map first to remove duplicates per protein before writing
        queries = ['select l.protein.id , gn.name from LeadingProtein l join l.protein.uniprot u join u.genes gn group by l.protein.id':'gene',
            'select l.protein.id , gns.name from LeadingProtein l join l.protein.uniprot u join u.genes gn join gn.synonyms gns group by l.protein.id':'gene',
            'select g.protein.id, g.name from GeneName g, LeadingProtein l where l.protein=g.protein group by g.protein.id':'gene']

        def genes = [:]
        queries.each(){q,t->
//            println '---query: '+q
            LeadingProtein.executeQuery(q).each(){r->
                if(!genes.containsKey(r[0])){genes.put(r[0], [:])}
                genes[r[0]].put(r[1], 0)    // inner map is used as a set (value unused)
            }
        }

        genes.each{prot,foundGenes->
            foundGenes.each{gene,bla->
                sql << ['\"'+gene+'\"', prot, '\"gene\"']
            }
        }

        println '--uploading suggestion sql file'

        sql.upload()
        
        println '--generating suggestions done!'
    }
}
