package uk.co.jgo2rdf.tairidmapping;

import com.csvreader.CsvReader;
import org.apache.commons.net.ftp.FTP;
import org.apache.commons.net.ftp.FTPClient;
import org.apache.log4j.Logger;
import org.apache.lucene.analysis.KeywordAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.util.Version;
import uk.co.jgo2rdf.uniprotidmapping.UniprotIdMappingIndex;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.zip.GZIPInputStream;

/**
 * Builds and queries a Lucene index that maps TAIR (Arabidopsis thaliana)
 * accessions to NCBI RefSeq RNA and protein accessions, using the TAIR10
 * NCBI mapping files published on the TAIR FTP site.
 */
public class TAIRNCBIIDMapping {

    private String urlRNAID = "ftp://ftp.arabidopsis.org/home/tair/Genes/TAIR10_genome_release/TAIR10_NCBI_mapping_files/TAIR10_NCBI_REFSEQ_mapping_PROT";
    private String urlProteinID = "ftp://ftp.arabidopsis.org/home/tair/Genes/TAIR10_genome_release/TAIR10_NCBI_mapping_files/TAIR10_NCBI_REFSEQ_mapping_PROT";

    private static String NCBI_RNA = "NCBI_RNA";
    private static String NCBI_PROTEIN = "NCBI_PROTEIN";
    private static String TAIR = "TAIR";

    private static String DB = "DB";
    private static String ID = "ID";

    private NIOFSDirectory directory;
    private String indexDir;

    private IndexSearcher indexSearcher; //is automatically init after parsing

    private static final Logger log = Logger.getLogger(UniprotIdMappingIndex.class);

    /**
     * @param indexDir  directory of lucene index
     * @param loadIndex if true it will load the existing index at the dir else it is deleted
     */
    public TAIRNCBIIDMapping(String indexDir, boolean loadIndex) throws IOException {
        this.indexDir = indexDir;

        File f = new File(indexDir);
        if (!loadIndex) { //this is the fist run
            f.delete();
            f.mkdirs();
            f.mkdir();
        }

        directory = new NIOFSDirectory(f);

        if (loadIndex) {
            indexSearcher = new IndexSearcher(IndexReader.open(directory, true), null);
        }
    }

    public void parse() throws IOException {
        parse(new URL(urlRNAID), NCBI_RNA);
        parse(new URL(urlProteinID), NCBI_PROTEIN);
    }

    public void parse(URL url, String database) throws IOException {
        if (url.getProtocol().equalsIgnoreCase("ftp")) {
            log.info("Fetch " + url.getFile() + " from " + url.getHost());

            FTPClient ftp = new FTPClient();
            try {
                ftp.connect(url.getHost());
                ftp.login("anonymous", "anonymous");
                ftp.setFileTransferMode(FTP.STREAM_TRANSFER_MODE);
                ftp.setFileType(FTP.BINARY_FILE_TYPE);
                if (url.getFile().endsWith(".gz")) {
                    parse(new GZIPInputStream(
                            ftp.retrieveFileStream(url.getFile())), database);
                } else {
                    parse(ftp.retrieveFileStream(url.getFile()), database);
                }
            } finally {
                ftp.disconnect();
            }
        } else {
            if (url.getFile().endsWith(".gz")) {
                parse(new GZIPInputStream(url.openStream()), database);
            } else {
                parse(url.openStream(), database);
            }
        }
    }


    public void parse(InputStream is, String database) throws IOException {

        if (indexSearcher != null)
            indexSearcher.close();

        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_30,
                new KeywordAnalyzer());
        IndexWriter writer = new IndexWriter(directory, config);

        CsvReader csvReader = new CsvReader(is,
                '\t', Charset.defaultCharset());
        csvReader.setSafetySwitch(false);
        try {
            csvReader.setComment('!');
            csvReader.setUseComments(true);
            csvReader.setTrimWhitespace(false);
            csvReader.setHeaders(new String[]{"GENEID", ID,TAIR});

            int i = 0;
            while (csvReader.readRecord()) {
                if (i % 10000 == 0)
                    System.out.println("Processed " + i + " valid records");

                String dbid = csvReader.get(ID);
                if (dbid == null || dbid.length() == 0) continue;

                String tair = csvReader.get(TAIR);
                if (tair == null || tair.length() == 0) continue;

                i++;
                Document doc = new Document();
                doc.add(new Field(ID,
                        dbid.trim().toUpperCase(),
                        Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
                doc.add(new Field(DB,
                        database.trim().toUpperCase(),
                        Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
                doc.add(new Field(TAIR,
                        tair.trim().toUpperCase(),
                        Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
                writer.addDocument(doc);

                int version = tair.lastIndexOf('.');
                if (version > 0) {
                    Document docuv = new Document();
                    docuv.add(new Field(ID,
                            dbid.trim().toUpperCase(),
                            Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
                    docuv.add(new Field(DB,
                            database.trim().toUpperCase(),
                            Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
                    docuv.add(new Field(TAIR,
                            tair.trim().toUpperCase().substring(0, version),
                            Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
                    writer.addDocument(docuv);
                }
            }
        } finally {
            csvReader.close();
            is.close();
            writer.prepareCommit();
            writer.commit();
            writer.close(true);
        }
        indexSearcher = new IndexSearcher(IndexReader.open(directory, true), null);
    }

    public Set<String> getRefSeqRNAAccessions(String tairaccession) throws IOException {
        return getAccessions(tairaccession, NCBI_RNA);
    }

    public Set<String> getRefSeqProteinAccessions(String tairaccession) throws IOException {
        return getAccessions(tairaccession, NCBI_PROTEIN);
    }

    public Set<String> getAccessions(String tairaccession, final String database) throws IOException {
        final Set<String> accessions = new HashSet<String>();

        BooleanQuery bq = new BooleanQuery();
        bq.add(new TermQuery(new Term(TAIR, tairaccession.toUpperCase())), BooleanClause.Occur.MUST);
        if (database != null)
            bq.add(new TermQuery(new Term(DB, database.toUpperCase())), BooleanClause.Occur.MUST);

        TermQuery q = new TermQuery(new Term(TAIR, tairaccession.trim().toUpperCase()));

        final Collector docCollector = new Collector() {

            private int docBase;

            @Override
            public void setScorer(Scorer scorer) throws IOException {
            }

            @Override
            public void collect(int doc) throws IOException {
                try {
                    accessions.add(indexSearcher.doc(doc + docBase).get(ID));
                } catch (Exception e) {
                    log.error(e);
                    e.printStackTrace();
                }
            }

            @Override
            public void setNextReader(IndexReader reader, int docBase) throws IOException {
                this.docBase = docBase;
            }

            @Override
            public boolean acceptsDocsOutOfOrder() {
                return true;
            }
        };

        indexSearcher.search(q, docCollector);

        return accessions;
    }

    public static void main(String[] args) throws IOException {
        TAIRNCBIIDMapping index = new TAIRNCBIIDMapping("/home/mhindle/UniProt/tairidncbimapping", false);
        index.parse();

        Set<String> ids = index.getRefSeqRNAAccessions("At3g17750.1");
        System.out.println(Arrays.toString(ids.toArray(new String[ids.size()])));

        ids = index.getRefSeqProteinAccessions("AT3G17750.1");
        System.out.println(Arrays.toString(ids.toArray(new String[ids.size()])));

    }

}
