#!/usr/bin/python
# __title__   = eLMSG_v2
# __created__ = 2018/6/8
# __author__  = Yang Xiao

from pymongo import MongoClient
from bs4 import BeautifulSoup
from retry import retry
import multiprocessing
from tqdm import tqdm
import requests
import hashlib
import pickle
import getopt
import shutil
import json
import sys
import os

# Module-level mutable state. These are filled in by the __main__ block or by
# Yangxiao_pipeline() before any worker function runs; every function below
# reads them as globals.
CACHE_DIR=None
DOWNLOAD_PROCESS_SIZE=None
collection_lmsg_gca=None
collection_16SrRNA=None
collection_elmsg=None

def create_cache_dir():
    """Ensure the download-cache directory (module global CACHE_DIR) exists."""
    print("Create Cache Folder  ./%s" % CACHE_DIR)
    if os.path.exists(CACHE_DIR):
        return
    os.mkdir(CACHE_DIR)


def remove_cache_dir():
    """Delete the download-cache directory (and its contents) if present."""
    print("Delete Cache Folder  ./%s" % CACHE_DIR)
    if not os.path.exists(CACHE_DIR):
        return
    shutil.rmtree(CACHE_DIR)


def write_lines(lines, file_path, encoding="utf-8", _filter=lambda x: True, transer=lambda x: x):
    """Write each element of *lines* to *file_path*, one element per line.

    An element is written only when ``_filter(element)`` is truthy; it is
    passed through ``transer`` before being formatted onto its own line.
    """
    kept = (transer(item) for item in lines if _filter(item))
    with open(file_path, "w", encoding=encoding) as out:
        out.writelines("{}\n".format(value) for value in kept)


@retry(delay=5)
def cache_requests(url, recache=False):
    """HTTP GET with an on-disk pickle cache.

    The response for *url* is cached under CACHE_DIR, keyed by the MD5 of the
    URL. Subsequent calls return the cached Response unless *recache* is true.
    Decorated with @retry(delay=5), so any exception (including a non-200
    status) causes the whole call to be retried after 5 seconds.
    """
    md5 = hashlib.md5()
    md5.update(url.encode(encoding='utf-8'))
    md5_url = md5.hexdigest()

    cache_path = os.path.join(CACHE_DIR, md5_url)

    # Serve from cache when allowed and present.
    if not recache:
        if os.path.exists(cache_path):
            with open(cache_path, "rb") as cf:
                return pickle.load(cf)

    rsp = requests.get(url)
    # Explicit raise instead of `assert`: assertions are stripped under
    # `python -O`, which would silently disable the retry-on-bad-status logic.
    if rsp.status_code != 200:
        raise requests.HTTPError("unexpected status %s for %s" % (rsp.status_code, url))

    with open(cache_path, "wb") as cf:
        pickle.dump(rsp, cf)
    return rsp


def clear_Genbank_16SrRNA_str(v):
    """Normalise a Genbank 16SrRNA accession string.

    Strips surrounding whitespace, then drops any version suffix after the
    first "." and any trailing annotation after the first space.
    """
    accession = v.strip()
    # str.split always yields at least one element, so indexing [0] is safe
    # even when the separator is absent.
    accession = accession.split(".")[0]
    accession = accession.split(" ")[0]
    return accession


def if_retry(if_, func):
    """Repeatedly invoke *func* and return the first result for which the
    retry predicate *if_* is falsy."""
    result = func()
    while if_(result):
        result = func()
    return result


def func_requests_ncbi_assembly(in_):
    """Worker process: warm the HTTP cache with NCBI Assembly esearch and
    esummary responses for every assembly id pulled from queue *in_*.

    The queue carries pickled ``{"id": ..., "strain": ...}`` dicts; the
    sentinel string "done" is re-queued on exit so sibling workers also stop.
    """
    print("PID {} started".format(multiprocessing.current_process().pid))
    while 1:
        row = in_.get()
        if row == "done":
            in_.put("done")
            break

        val = pickle.loads(row)["id"]

        url_esearch = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=assembly&term=%s&retmode=json" % val

        j = cache_requests(url_esearch)
        if j.status_code != 200:
            # Bug fix: if_retry expects a callable, but the original passed it
            # the *result* of cache_requests(url_esearch, True) -- a Response
            # object -- which raised TypeError when if_retry tried to call it.
            j = if_retry(lambda r: r.status_code != 200,
                         lambda: cache_requests(url_esearch, True))

        for uid in j.json()['esearchresult']['idlist']:
            url_esummary = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=assembly&id=%s&retmode=json" % uid
            rsp = cache_requests(url_esummary)

            if rsp.status_code != 200:
                if_retry(lambda r: r.status_code != 200,
                         lambda: cache_requests(url_esummary, True))


def requests_ncbi_assembly():
    """Fan out daemon worker processes that warm the HTTP cache for every
    not-yet-enriched NCBI_Assembly entry in the eLMSG collection."""
    print("Start request NCBI_ASSEMBLY")
    queue = multiprocessing.Queue()

    cursor = collection_elmsg.find({
        "NCBI_Assembly": {
            "$exists": 1,
            "$ne": None
        }
    }, {
        "NCBI_Assembly": 1
    })

    ignore_size = 0
    pool_size = 0

    for it in tqdm(cursor, total=cursor.count(), desc="Create Download NCBI_ASSEMBLY Pool"):

        for NCBI_Assembly in it["NCBI_Assembly"]:

            # Skip entries that were already enriched in a previous run
            # (enriched entries carry more than the original two keys).
            if len(NCBI_Assembly.keys()) > 2:
                ignore_size += 1
                continue

            pool_size += 1
            queue.put(pickle.dumps(NCBI_Assembly))

    # End-of-queue sentinel; each worker re-queues it on exit.
    queue.put("done")

    print("ignore size : %d" % ignore_size)

    # Fixed: the original had this zero-pool check twice; the second copy
    # (after the size prints) was dead code.
    if pool_size == 0:
        return

    print("pool size : %d" % pool_size)
    print("process size : %d" % DOWNLOAD_PROCESS_SIZE)

    requests_process = []
    for _ in range(DOWNLOAD_PROCESS_SIZE):
        p = multiprocessing.Process(target=func_requests_ncbi_assembly, args=(queue,))
        p.daemon = True
        requests_process.append(p)

    for p in requests_process:
        p.start()

    print("NCBI_ASSEMBLY  Downloading...")

    for p in requests_process:
        p.join()


def func_requests_genbank_16sr_rna(in_):
    """Worker process: warm the HTTP cache with nuccore esearch and efetch
    responses for every Genbank_16SrRNA accession pulled from queue *in_*.

    The queue carries pickled eLMSG documents; the sentinel string "done" is
    re-queued on exit so sibling workers also stop.
    """
    print("PID {} started".format(multiprocessing.current_process().pid))
    while 1:
        row = in_.get()
        if row == "done":
            in_.put("done")
            break

        row = pickle.loads(row)

        if "Genbank_16SrRNA" not in row:
            continue

        val = row["Genbank_16SrRNA"]

        url_esearch = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=nuccore&term=%s&retmode=json" % val

        j = cache_requests(url_esearch)
        if j.status_code != 200:
            # Bug fix: if_retry expects a callable, but the original passed it
            # the already-fetched Response object, which raised TypeError when
            # if_retry tried to call it.
            j = if_retry(lambda r: r.status_code != 200,
                         lambda: cache_requests(url_esearch, True))

        for uid in j.json()['esearchresult']['idlist']:
            url_efetch = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&id=%s&rettype=gb&retmode=xml" % uid
            rsp = cache_requests(url_efetch)

            if rsp.status_code != 200:
                if_retry(lambda r: r.status_code != 200,
                         lambda: cache_requests(url_efetch, True))


def requests_genbank_16sr_rna():
    """Fan out daemon worker processes that warm the HTTP cache for every
    eLMSG document carrying a string-valued Genbank_16SrRNA accession."""
    print("Start request GENBANK_16SR_RNA")
    queue = multiprocessing.Queue()
    cursor = collection_elmsg.find({
        "Genbank_16SrRNA": {
            "$exists": 1,
            "$ne": None
        }
    }, {
        "Genbank_16SrRNA": 1
    })
    ignore_size = 0
    pool_size = 0

    total = cursor.count()
    for it in tqdm(cursor, total=total, desc="Create Download GENBANK_16SR_RNA Pool"):
        # Bug fix: the original tested isinstance(it, str), but `it` is always
        # a MongoDB document (dict), so every row was ignored and the download
        # pool stayed empty (function was a no-op). Test the field value
        # instead, mirroring the check in parser().
        if not isinstance(it.get("Genbank_16SrRNA"), str):
            ignore_size += 1
            continue
        del it["_id"]
        pool_size += 1
        queue.put(pickle.dumps(it))

    # End-of-queue sentinel; each worker re-queues it on exit.
    queue.put("done")

    print("ignore size : %d" % ignore_size)
    if pool_size == 0:
        return

    print("pool size : %d" % pool_size)
    print("process size : %d" % DOWNLOAD_PROCESS_SIZE)

    requests_process = []
    for _ in range(DOWNLOAD_PROCESS_SIZE):
        p = multiprocessing.Process(target=func_requests_genbank_16sr_rna, args=(queue,))
        p.daemon = True
        requests_process.append(p)

    for p in requests_process:
        p.start()

    print("GENBANK_16SR_RNA  Downloading...")

    for p in requests_process:
        p.join()


def parser():
    """Parse the cached NCBI responses and write the results back to MongoDB.

    Three passes:
      1. For every string-valued Genbank_16SrRNA field, parse the cached
         nuccore efetch XML (GBSeq records) and upsert the extracted fields
         into collection_16SrRNA.
      2. For every not-yet-enriched NCBI_Assembly entry, parse the cached
         assembly esummary JSON and upsert it into collection_lmsg_gca.
      3. Re-read every eLMSG document and replace its Genbank_16SrRNA /
         NCBI_Assembly fields with the enriched records from passes 1-2.

    All HTTP access goes through cache_requests(), so no network traffic
    occurs if the download phases already warmed the cache.
    """
    print("Parse Starting...")
    # Pass 1: Genbank 16SrRNA XML -> collection_16SrRNA.
    cursor = collection_elmsg.find({
        "Genbank_16SrRNA": {
            "$exists": 1,
            "$ne": None
        }
    }, {
        "Genbank_16SrRNA": 1
    }, no_cursor_timeout=True)
    for row in tqdm(cursor, total=cursor.count(), desc="Parse Genbank_16SrRNA"):

        if not isinstance(row["Genbank_16SrRNA"],str):
            continue

        val = row["Genbank_16SrRNA"]

        j = cache_requests(
            "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=nuccore&term=%s&retmode=json" % val)
        j = j.json()

        for i in j['esearchresult']['idlist']:
            txt = cache_requests(
                "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&id=%s&rettype=gb&retmode=xml" % i)
            txt = txt.text

            soup = BeautifulSoup(txt, 'lxml-xml')

            # Map each <GBSeq> element to a flat dict; only non-empty fields
            # are kept.
            for gbseq in soup.find_all("GBSeq"):
                gbseq_dict = {}

                if gbseq.GBSeq_locus and len(gbseq.GBSeq_locus.text.strip()):
                    gbseq_dict["locus"] = gbseq.GBSeq_locus.text.strip()

                if gbseq.GBSeq_length and len(gbseq.GBSeq_length.text.strip()):
                    gbseq_dict["length"] = gbseq.GBSeq_length.text.strip()

                if gbseq.GBSeq_strandedness and len(gbseq.GBSeq_strandedness.text.strip()):
                    gbseq_dict["strandedness"] = gbseq.GBSeq_strandedness.text.strip()

                if gbseq.GBSeq_moltype and len(gbseq.GBSeq_moltype.text.strip()):
                    gbseq_dict["moltype"] = gbseq.GBSeq_moltype.text.strip()

                if gbseq.GBSeq_topology and len(gbseq.GBSeq_topology.text.strip()):
                    gbseq_dict["topology"] = gbseq.GBSeq_topology.text.strip()

                if gbseq.GBSeq_division and len(gbseq.GBSeq_division.text.strip()):
                    gbseq_dict["division"] = gbseq.GBSeq_division.text.strip()

                # Hyphenated tag names cannot be attribute-accessed, hence find().
                if gbseq.find("GBSeq_update-date") and len(gbseq.find("GBSeq_update-date").text.strip()):
                    gbseq_dict["gbseq_update_date"] = gbseq.find("GBSeq_update-date").text.strip()

                if gbseq.find("GBSeq_create-date") and len(gbseq.find("GBSeq_create-date").text.strip()):
                    gbseq_dict["gbseq_create_date"] = gbseq.find("GBSeq_create-date").text.strip()

                if gbseq.GBSeq_definition and len(gbseq.GBSeq_definition.text.strip()):
                    gbseq_dict["definition"] = gbseq.GBSeq_definition.text.strip()

                if gbseq.find("GBSeq_primary-accession") and len(
                        gbseq.find("GBSeq_primary-accession").text.strip()):
                    gbseq_dict["primary_accession"] = gbseq.find("GBSeq_primary-accession").text.strip()

                if gbseq.find("GBSeq_accession-version") and len(
                        gbseq.find("GBSeq_accession-version").text.strip()):
                    gbseq_dict["accession_version"] = gbseq.find("GBSeq_accession-version").text.strip()

                if gbseq.find("GBSeq_other-seqids"):
                    seqids = set()
                    for gbseqid in gbseq.find("GBSeq_other-seqids").find_all("gbseqid"):
                        seqids.add(gbseqid.text.strip())
                    if len(seqids):
                        gbseq_dict["other_seqids"] = list(seqids)

                if gbseq.GBSeq_source and len(gbseq.GBSeq_source.text.strip()):
                    gbseq_dict["source"] = gbseq.GBSeq_source.text.strip()

                if gbseq.GBSeq_organism and len(gbseq.GBSeq_organism.text.strip()):
                    gbseq_dict["organism"] = gbseq.GBSeq_organism.text.strip()

                if gbseq.GBSeq_taxonomy and len(gbseq.GBSeq_taxonomy.text.strip()):
                    gbseq_dict["taxonomy"] = gbseq.GBSeq_taxonomy.text.strip()

                # Literature references: nested list of dicts.
                if gbseq.GBSeq_references:
                    references = []
                    for gbreference in gbseq.GBSeq_references.find_all("GBReference"):
                        gbreference_dict = {}

                        if gbreference.GBReference_reference and len(
                                gbreference.GBReference_reference.text.strip()):
                            gbreference_dict["reference"] = gbreference.GBReference_reference.text.strip()

                        if gbreference.GBReference_position and len(
                                gbreference.GBReference_position.text.strip()):
                            gbreference_dict["position"] = gbreference.GBReference_position.text.strip()

                        if gbreference.GBReference_authors:
                            gbreference_dict['authors'] = []
                            for author in gbreference.GBReference_authors.find_all('GBAuthor'):
                                if len(author.text.strip()):
                                    gbreference_dict['authors'].append(author.text.strip())

                            # set() round-trip de-duplicates the author list.
                            gbreference_dict['authors'] = set(gbreference_dict['authors'])
                            gbreference_dict['authors'] = list(gbreference_dict['authors'])

                        if gbreference.GBReference_title and len(gbreference.GBReference_title.text.strip()):
                            gbreference_dict["title"] = gbreference.GBReference_title.text.strip()

                        if gbreference.GBReference_journal and len(gbreference.GBReference_journal.text.strip()):
                            gbreference_dict["journal"] = gbreference.GBReference_journal.text.strip()

                        if gbreference.GBReference_xref:
                            xrefs = []
                            for xref in gbreference.GBReference_xref.find_all("GBXref"):
                                xref_dict = {}
                                if xref.GBXref_dbname and len(xref.GBXref_dbname.text.strip()):
                                    xref_dict["db"] = xref.GBXref_dbname.text.strip()
                                if xref.GBXref_id and len(xref.GBXref_id.text.strip()):
                                    xref_dict["id"] = xref.GBXref_id.text.strip()

                                if len(xref_dict):
                                    xrefs.append(xref_dict)
                            if len(xrefs):
                                gbreference_dict["xref"] = xrefs

                        if gbreference.GBReference_pubmed and len(gbreference.GBReference_pubmed.text.strip()):
                            gbreference_dict["pubmed"] = gbreference.GBReference_pubmed.text.strip()

                        if len(gbreference_dict):
                            references.append(gbreference_dict)

                    if len(references):
                        gbseq_dict["references"] = references

                # Feature table: intervals and qualifiers per feature.
                if gbseq.find("GBSeq_feature-table"):
                    feature_table = []
                    for feature in gbseq.find("GBSeq_feature-table").find_all("GBFeature"):

                        feature_dict = {}

                        if feature.GBFeature_key and len(feature.GBFeature_key.text.strip()):
                            feature_dict["key"] = feature.GBFeature_key.text.strip()

                        if feature.GBFeature_location and len(feature.GBFeature_location.text.strip()):
                            feature_dict["location"] = feature.GBFeature_location.text.strip()

                        if feature.GBFeature_intervals:
                            intervals = []
                            for interval in feature.GBFeature_intervals.find_all("GBInterval"):
                                interval_dict = {}
                                if interval.GBInterval_from and len(interval.GBInterval_from.text.strip()):
                                    interval_dict['from'] = interval.GBInterval_from.text.strip()
                                if interval.GBInterval_to and len(interval.GBInterval_to.text.strip()):
                                    interval_dict['to'] = interval.GBInterval_to.text.strip()
                                if interval.GBInterval_accession and len(
                                        interval.GBInterval_accession.text.strip()):
                                    interval_dict['accession'] = interval.GBInterval_accession.text.strip()
                                if len(interval_dict):
                                    intervals.append(interval_dict)
                            if len(intervals):
                                feature_dict["intervals"] = intervals

                        if feature.GBFeature_quals:
                            quals = []
                            for qual in feature.GBFeature_quals.find_all("GBQualifier"):
                                qual_dict = {}
                                if qual.GBQualifier_name and len(qual.GBQualifier_name.text.strip()):
                                    qual_dict['name'] = qual.GBQualifier_name.text.strip()
                                if qual.GBQualifier_value and len(qual.GBQualifier_value.text.strip()):
                                    qual_dict['value'] = qual.GBQualifier_value.text.strip()

                                if len(qual_dict):
                                    quals.append(qual_dict)
                            if len(quals):
                                feature_dict['quals'] = quals

                        if feature.GBFeature_partial5:
                            feature_dict['partial5'] = feature.GBFeature_partial5.get("value")

                        if feature.GBFeature_partial3:
                            feature_dict['partial3'] = feature.GBFeature_partial3.get("value")

                        if len(feature_dict):
                            feature_table.append(feature_dict)

                    if len(feature_table):
                        gbseq_dict['feature_table'] = feature_table

                # if gbseq.gbseq_sequence and len(gbseq.gbseq_sequence.text.strip()):
                #     gbseq_dict["sequence"] = gbseq.gbseq_sequence.text.strip()

                # NOTE(review): keyed on the raw Genbank_16SrRNA value, but the
                # lookup in pass 3 first cleans it with
                # clear_Genbank_16SrRNA_str() -- confirm both forms match.
                # print(gbseq_dict)
                collection_16SrRNA.update_one({"primary_accession": val}, {"$set": gbseq_dict}, upsert=True)

    # Pass 2: NCBI Assembly esummary JSON -> collection_lmsg_gca.
    cursor = collection_elmsg.find({
        "NCBI_Assembly": {
            "$exists": 1,
            "$ne": None
        }
    }, {
        "NCBI_Assembly": 1
    }, no_cursor_timeout=True)
    for row in tqdm(cursor, total=cursor.count(), desc="Parse NCBI_Assembly"):

        for val in row["NCBI_Assembly"]:

            # More than two keys means the entry was already enriched -- skip.
            if len(val.keys()) > 2:
                continue

            val = val["id"]

            j = cache_requests(
                "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=assembly&term=%s&retmode=json" % val)
            j = j.json()

            for i in j['esearchresult']['idlist']:
                rsp = cache_requests(
                    "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=assembly&id=%s&retmode=json" % i)
                rsp = rsp.json()

                # Drop the uid index list, then strip empty-valued fields from
                # each result before upserting.
                del rsp['result']['uids']
                for result in rsp['result'].values():
                    del_key = []
                    for k, v in result.items():
                        if len(str(v)) == 0:
                            del_key.append(k)

                    for k in del_key:
                        del result[k]

                    collection_lmsg_gca.update_one({"synonym.genbank": val}, {"$set": result}, upsert=True)

    # Pass 3: fold the enriched records back into the eLMSG documents.
    for d in tqdm(collection_elmsg.find(no_cursor_timeout=True), total=collection_elmsg.count(), desc="Update ELMSG"):
        if "Genbank_16SrRNA" in d and isinstance(d["Genbank_16SrRNA"], str):

            Genbank_16SrRNA = clear_Genbank_16SrRNA_str(d["Genbank_16SrRNA"])

            _16sr = collection_16SrRNA.find_one({"primary_accession": Genbank_16SrRNA}
                                                , projection={"primary_accession": 1,
                                                              "organism": 1,
                                                              "gbseq_create_date": 1,
                                                              "length": 1,
                                                              "topology": 1,
                                                              "strandedness": 1,
                                                              "definition": 1,
                                                              "other_seqids": 1,
                                                              "_id": 0})

            if _16sr is not None:
                # Replace the accession string with the full parsed record.
                collection_elmsg.update_one({"_id": d["_id"]}
                                            , {
                                                "$set": {
                                                    "Genbank_16SrRNA": _16sr
                                                }})
            else:
                print("数据没找到 Genbank_16SrRNA：{}".format(d["Genbank_16SrRNA"]))

        if "NCBI_Assembly" in d and d["NCBI_Assembly"] is not None:
            new_NCBI_Assembly = []

            for old_assembly in d["NCBI_Assembly"]:

                # Already-enriched entries are carried over unchanged.
                if len(old_assembly.keys()) > 2:
                    new_NCBI_Assembly.append(old_assembly)
                    continue

                new_assembly = {
                    "id": old_assembly["id"],
                    "strain": old_assembly["strain"],
                }

                gca = collection_lmsg_gca.find_one({"synonym.genbank": old_assembly["id"]}
                                                   , projection={"speciesname": 1,
                                                                 "submissiondate": 1,
                                                                 "lastupdatedate": 1,
                                                                 "submitterorganization": 1,
                                                                 "coverage": 1,
                                                                 "assemblystatus": 1,
                                                                 "assemblyclass": 1,
                                                                 "assemblyname": 1,
                                                                 "gb_bioprojects": 1,
                                                                 "biosampleaccn": 1,
                                                                 "_id": 0
                                                                 })

                if gca is not None:
                    for k, v in gca.items():
                        new_assembly[k] = v
                else:
                    print("数据没找到 NCBI_Assembly：{}".format(d["NCBI_Assembly"]))

                new_NCBI_Assembly.append(new_assembly)

            collection_elmsg.update_one({"_id": d["_id"]}
                                        , {
                                            "$set": {
                                                "NCBI_Assembly": new_NCBI_Assembly
                                            }})


def create_structured_lineage():
    """Attach a ``StructuredLineage`` field (list of ancestor names) to every
    eLMSG document that does not have one yet, by following the
    ``Parent_LMSG_ID`` links up the taxonomy."""

    def collect_ancestor_names(node, names):
        # Record this node's name, then recurse to its parent when one exists
        # and can be found in the collection.
        names.add(node["Name"])

        if "Parent_LMSG_ID" not in node:
            return

        parent = collection_elmsg.find_one({"LMSG_ID": node["Parent_LMSG_ID"]},
                                           {"Name": 1, "Parent_LMSG_ID": 1})
        if parent is not None:
            collect_ancestor_names(parent, names)

    print("Creating MongoDB Index ...  Index name = Parent_LMSG_ID")
    collection_elmsg.create_index("Parent_LMSG_ID")

    cursor = collection_elmsg.find({"StructuredLineage": {"$exists": 0}}, {"Parent_LMSG_ID": 1, "Name": 1})

    print("Creating structured lineage, Waiting....")

    for doc in tqdm(cursor, total=cursor.count()):
        names = set()
        collect_ancestor_names(doc, names)

        collection_elmsg.update_one({"_id": doc["_id"]},
                                    {"$set": {"StructuredLineage": list(names)}})

    print("Creating structured lineage, Done!")


def find_no_data():
    """Report eLMSG documents whose NCBI_Assembly ids have no matching record
    in the lmsg_gca collection, writing the offenders to a TSV file."""
    lines = ["database\tid\trank\tname\tlineage"]

    cursor = collection_elmsg.find({
        "NCBI_Assembly": {
            "$exists": 1
        }
    }, no_cursor_timeout=True)

    for d in tqdm(cursor, total=collection_elmsg.count(), desc="Find NO DATA"):
        if "NCBI_Assembly" not in d:
            continue

        for assembly in d["NCBI_Assembly"]:
            matches = collection_lmsg_gca.count({"synonym.genbank": assembly["id"]})
            if matches != 0:
                continue
            lines.append("{}\t{}\t{}\t{}\t{}".format("NCBI_Assembly",
                                                     assembly["id"],
                                                     d["Rank"],
                                                     d["Name"],
                                                     d["Lineage"]))

    # More than just the header row means at least one id was unmatched.
    if len(lines) > 1:
        txt_name = "本次更新eLMSG有疑问的数据.txt"
        print("存在疑问数据，已写入文件 %s" % txt_name)
        write_lines(lines, txt_name)

def Yangxiao_pipeline(arg0:str,HOST:str,DB:str,COLL:str):
    """Run the full eLMSG update pipeline against the MongoDB at *HOST*.

    arg0 -- script name, used to derive the cache directory name
    DB   -- database expected to hold the "lmsg_gca" and "_16SrRNA" collections
    COLL -- name of the eLMSG collection inside DB
    """
    global CACHE_DIR, DOWNLOAD_PROCESS_SIZE, collection_lmsg_gca, collection_16SrRNA, collection_elmsg

    CACHE_DIR = "%s.cache_download" % arg0
    create_cache_dir()

    # One concurrent download worker per CPU core.
    DOWNLOAD_PROCESS_SIZE = multiprocessing.cpu_count()

    client = MongoClient(HOST)
    try:
        collection_lmsg_gca = client[DB]["lmsg_gca"]
        collection_16SrRNA = client[DB]["_16SrRNA"]
        collection_elmsg = client[DB][COLL]

        # Download phases first (warm the cache), then parse and enrich.
        requests_ncbi_assembly()
        requests_genbank_16sr_rna()
        create_structured_lineage()
        parser()
        # find_no_data()
    finally:
        client.close()

    remove_cache_dir()


if __name__ == '__main__':

    def usage():
        """Print a usage example showing the three mandatory options."""
        usage_hint = "%s --lmsg_gca=Pre_Data.lmsg_gca --16SrRNA=Pre_Data._16SrRNA --eLMSG=eLMSG.eLMSG_20180612" % \
                     sys.argv[0]
        print("Usage Example: ")
        print(usage_hint)

    # Each option value is "<database>.<collection>".
    lmsg_gca = _16SrRNA = eLMSG = None

    try:
        opts, args = getopt.getopt(sys.argv[1:], "", ["lmsg_gca=", '16SrRNA=', "eLMSG="])
        for name, value in opts:
            if name == "--lmsg_gca":
                lmsg_gca = value.split(".", 1)
            elif name == "--16SrRNA":
                _16SrRNA = value.split(".", 1)
            elif name == "--eLMSG":
                eLMSG = value.split(".", 1)
        # Explicit validation instead of `assert` (stripped under python -O):
        # all three options are mandatory and must contain a "." separator.
        if any(v is None or len(v) != 2 for v in (lmsg_gca, _16SrRNA, eLMSG)):
            raise ValueError("missing or malformed command line option")
    except (getopt.GetoptError, ValueError):
        # Narrowed from a bare `except:` so genuine bugs still surface.
        usage()
        sys.exit()

    CACHE_DIR = "%s.cache_download" % sys.argv[0]

    create_cache_dir()

    # Number of concurrent download worker processes.
    DOWNLOAD_PROCESS_SIZE = multiprocessing.cpu_count()

    client = MongoClient("10.188.188.22")
    try:
        collection_lmsg_gca = client[lmsg_gca[0]][lmsg_gca[1]]
        collection_16SrRNA = client[_16SrRNA[0]][_16SrRNA[1]]
        collection_elmsg = client[eLMSG[0]][eLMSG[1]]

        requests_ncbi_assembly()
        requests_genbank_16sr_rna()
        create_structured_lineage()
        parser()
        # find_no_data()
    finally:
        client.close()

    remove_cache_dir()
