#!/usr/bin/env python
# coding=utf-8
# __author__ = 'Yunchao Ling'

from tqdm import tqdm
from retrying import retry
import requests


def compare_mining_with_manual(mining_path: str, manual_path: str):
    """Compare the rsids found by literature mining with a manually curated list.

    The manual file holds one rsid per line; the mining file is a TSV with a
    header row whose first column is the rsid.  Prints and returns the three
    counts of the Venn diagram.

    :param mining_path: path to the mining TSV (header row is skipped)
    :param manual_path: path to the curated rsid list (one rsid per line)
    :return: tuple ``(manual_specific, mining_specific, intersection)``
    """
    with open(manual_path, "r") as infile_manual:
        set_manual = {line.rstrip("\n") for line in infile_manual}

    with open(mining_path, "r") as infile_mining:
        infile_mining.readline()  # skip header row
        set_mining = {line.rstrip("\n").split("\t")[0] for line in infile_mining}

    # Pure set arithmetic: duplicate rsid rows in the mining file no longer
    # inflate the intersection (the old per-line count could even drive
    # manual_specific negative).
    intersection = len(set_manual & set_mining)
    mining_specific = len(set_mining - set_manual)
    manual_specific = len(set_manual - set_mining)
    print("manual specific = %d" % manual_specific)
    print("mining specific = %d" % mining_specific)
    print("intersection = %d" % intersection)
    return manual_specific, mining_specific, intersection


def verify_rsid_by_length(mining_path: str):
    """Print every rsid in the mining TSV longer than 11 characters.

    The first line is treated as a header and skipped; after scanning, the
    total number of over-long rsids is printed on its own line.

    :param mining_path: path to the mining TSV (rsid in the first column)
    """
    suspicious = 0
    with open(mining_path, "r") as source:
        source.readline()  # skip header row
        for row in source:
            candidate = row.rstrip("\n").split("\t")[0]
            if len(candidate) > 11:
                print(candidate)
                suspicious += 1
    print(suspicious)


def verify_rsid_by_api(mining_path: str, output_path: str):
    """Filter the mining TSV, keeping only rows whose rsid passes verify_api.

    The header line is copied through verbatim; every following row is kept
    only when the NCBI API confirms its first-column rsid.  Output is flushed
    after every write so progress survives an interruption.

    :param mining_path: path to the input mining TSV
    :param output_path: path for the filtered TSV
    """
    with open(output_path, "w") as sink, open(mining_path, "r") as source:
        sink.write(source.readline())  # header passes through unchanged
        sink.flush()
        for row in tqdm(source):
            row = row.rstrip("\n")
            candidate = row.split("\t")[0]
            if verify_api(candidate):
                sink.write(row + "\n")
                sink.flush()


@retry
def verify_api(rsid: str):
    """Check an rsid against the NCBI Variation Services RefSNP API.

    :param rsid: rsid including the "rs" prefix (e.g. "rs9473558")
    :return: True for HTTP 200 (known rsid), False for 404 (unknown rsid)
    :raises IOError: on any other status code, so that ``@retry`` tries again
    """
    url = "https://api.ncbi.nlm.nih.gov/variation/v0/beta/refsnp/"
    # NOTE(review): hard-coded local proxy — assumes a proxy is listening on
    # 127.0.0.1:7890; confirm before running elsewhere.
    proxies = {'http': 'http://127.0.0.1:7890', 'https': 'http://127.0.0.1:7890'}
    # rsid[2:] strips the "rs" prefix: the endpoint expects the bare number.
    # timeout added so a stalled connection raises (and is retried) instead of
    # hanging the whole pipeline forever.
    r = requests.get(url + rsid[2:], stream=True, proxies=proxies, timeout=30)
    if r.status_code == 200:
        return True
    elif r.status_code == 404:
        print("Invalid rsid : %s" % rsid)  # fixed typo "Invaid"
        return False
    else:
        print("Timeout : %s" % rsid)
        raise IOError("Web Error.")


def _read_rsid_set(path: str) -> set:
    """Read a text file with one rsid per line into a set."""
    with open(path, "r") as infile:
        return {line.rstrip("\n") for line in infile}


def _parse_bracketed_list(field: str) -> list:
    """Parse a TSV field shaped like ``['a', 'b']`` into a list of bare strings.

    The field may additionally be wrapped in double quotes (CSV-style
    escaping), in which case one extra character is stripped per side.
    """
    if field.startswith("\""):
        inner = field[2:-2]  # strip quote + bracket on each side
    else:
        inner = field[1:-1]  # strip bracket on each side
    # Each item is a "'x'" fragment: drop the surrounding single quotes.
    return [item[1:-1] for item in inner.split(", ")]


def integrate_lists(mining_path: str, manual_path: str, manual2_path: str, output_path: str):
    """Merge the literature-mining list with two manually curated rsid lists.

    Writes one TSV row per mined rsid with its PMID and nutrition-term
    evidence plus an evidence rank, then appends rsids that appear only in
    the manual lists (with empty evidence columns).  Ranks:

    * 9 — rsid appears in the primary manual list
    * 7 — rsid appears in the secondary manual list
    * 3 — mined with more than one supporting PMID
    * 1 — mined with a single supporting PMID

    :param mining_path: mining TSV (header row; columns: rsid, freq,
        pmid_list, <unused>, term_list)
    :param manual_path: primary curated list, one rsid per line
    :param manual2_path: secondary curated list, one rsid per line
    :param output_path: destination TSV
    """
    set_manual = _read_rsid_set(manual_path)
    set_manual2 = _read_rsid_set(manual2_path)

    set_mining = set()
    with open(output_path, "w") as outfile:
        outfile.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (
            "variation", "variation_freq", "pmid_list", "pmid_count", "nutrition_list", "nutrition_count", "rank"))

        with open(mining_path, "r") as infile_mining:
            infile_mining.readline()  # skip header row
            for line in tqdm(infile_mining):
                splitline = line.rstrip("\n").split("\t")

                rsid = splitline[0]
                set_mining.add(rsid)

                variation_freq = int(splitline[1])

                # PMID evidence: parse the list field, join with "|".
                pmid_list = _parse_bracketed_list(splitline[2])
                pmid_str = "|".join(pmid_list)
                pmid_count = len(pmid_list)

                # Nutrition terms: same list format as the PMID field.
                nutrition_list = _parse_bracketed_list(splitline[4])
                nutrition_str = "|".join(nutrition_list)
                nutrition_count = len(nutrition_list)

                # Evidence score: manual curation trumps mining evidence.
                rank = 1
                if pmid_count > 1:
                    rank = 3
                if rsid in set_manual2:
                    rank = 7
                if rsid in set_manual:
                    rank = 9

                outfile.write("%s\t%d\t%s\t%d\t%s\t%d\t%d\n" % (
                    rsid, variation_freq, pmid_str, pmid_count, nutrition_str, nutrition_count, rank))

        # Append manually curated rsids that mining did not find; the primary
        # list (rank 9) is written first and shields duplicates from the
        # secondary list (rank 7).
        for item in set_manual:
            if item not in set_mining:
                set_mining.add(item)
                outfile.write("%s\t\t\t\t\t\t%d\n" % (item, 9))
        for item in set_manual2:
            if item not in set_mining:
                set_mining.add(item)
                outfile.write("%s\t\t\t\t\t\t%d\n" % (item, 7))


if __name__ == '__main__':
    # Pipeline entry point. Earlier stages (comparison, length check, API
    # verification) are kept below as commented-out invocations; only the
    # final list-integration step is currently active.
    # NOTE(review): all paths are machine-specific absolute paths — adjust
    # before running on another machine.
    # compare_mining_with_manual("/Users/genesis/Seafile/Work/营养知识图谱/literature_mining_list/literature_mining_list.txt",
    #                            "/Users/genesis/Seafile/Work/营养知识图谱/manual_curated_list/rsid_list.txt")
    integrate_lists("/Users/genesis/Seafile/Work/营养知识图谱/literature_mining_list/literature_mining_list_valid.txt",
                    "/Users/genesis/Seafile/Work/营养知识图谱/manual_curated_list/rsid_list.txt",
                    "/Users/genesis/Seafile/Work/营养知识图谱/manual_curated_list/rank2_rsid_list.txt",
                    "/Users/genesis/Seafile/Work/营养知识图谱/integrated_list_valid.tsv")
    # verify_rsid_by_length("/Users/genesis/Seafile/Work/营养知识图谱/literature_mining_list/literature_mining_list.txt")
    # verify_api("rs9473558")
    # verify_rsid_by_api("/Users/genesis/Seafile/Work/营养知识图谱/literature_mining_list/literature_mining_list.txt",
    #                    "/Users/genesis/Seafile/Work/营养知识图谱/literature_mining_list/literature_mining_list_valid.txt")