#!/usr/bin/env python
# coding=utf-8
# __author__ = 'Yunchao Ling'

from retrying import retry

def getTime():
    """Print the current local time formatted as 'YYYYMMDD HH:MM:SS'."""
    import time

    # Parenthesized single-argument print behaves identically on Python 2 and 3.
    print(time.strftime('%Y%m%d %H:%M:%S', time.localtime()))


def remove():
    """Delete the hard-coded scratch file D:/hahahaha.txt.

    Raises OSError if the file does not exist (os.remove does not tolerate
    missing files).
    """
    import os

    os.remove("D:/hahahaha.txt")


def writefile():
    """Write the literal payload 'lalala' to the scratch file D:/hahahaha.txt."""
    # `with` guarantees the handle is closed (and flushed) even if write() raises.
    # A bytes literal matches the binary mode; on Python 2 b"..." is identical
    # to the original str literal.
    with open("D:/hahahaha.txt", "wb") as out:
        out.write(b"lalala")


def getURL():
    """Fetch a PubMed-ID lookup URL and print the final URL after redirects.

    Network smoke test: requests follows redirects by default, so r.url is
    the resolved article location rather than the lookup URL itself.
    """
    import requests
    # url1 = 'http://www.ncbi.nlm.nih.gov/pmc/articles/PMC' + str(id) + '/pdf'
    url2 = "http://nar.oxfordjournals.org/cgi/pmidlookup?view=long&pmid=25414342"
    r = requests.get(url2)
    print(r.url)


def checkEpub(file):
    """Return 1 if the zip/epub archive *file* passes `unzip -t`, else 0.

    Replaces the Python-2-only (and long-deprecated) `commands` module with
    subprocess, and passes the filename as a separate argv element instead of
    concatenating it into a shell string — avoids shell injection and breakage
    on paths containing spaces.  (The parameter name shadows a builtin but is
    kept for backward compatibility with keyword callers.)
    """
    import subprocess

    proc = subprocess.Popen(["unzip", "-t", file],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, _ = proc.communicate()
    # `unzip -t` reports "No errors detected" when every member checks out.
    if output.find(b"No errors detected") != -1:
        return 1
    else:
        return 0


def hextest(start, end):
    """Print and return the 4-hex-digit codes from *start* to *end* inclusive.

    *start* and *end* are hexadecimal strings.  Each value is zero-padded to
    four hex digits and emitted as a "hi lo" byte pair (e.g. 0x102 -> "01 02").
    Returns the list of emitted strings — a new, backward-compatible return
    value (previous callers ignored the implicit None).
    """
    startint = int(start, 16)
    endint = int(end, 16)
    pairs = []
    for value in range(startint, endint + 1):
        text = "{0:0>4x}".format(value)
        pair = text[0:2] + " " + text[-2:]
        print(pair)
        pairs.append(pair)
    return pairs


def getPubmed(ids):
    """Download PubMed records for a comma-separated PMID list into an XML file.

    *ids* is e.g. "1,2,3"; the output file is named pubmed_<first>_<last>.xml
    after the first and last ids in the list.  Python 2 only (StringIO) and
    requires pycurl.
    """
    import pycurl
    import StringIO

    split_ids = ids.split(",")
    start = split_ids[0]
    end = split_ids[len(split_ids) - 1]

    # NCBI E-utilities efetch endpoint; POSTed so long id lists are accepted.
    url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
    buf = StringIO.StringIO()
    c = pycurl.Curl()
    c.setopt(pycurl.POST, 1)
    c.setopt(c.URL, url)
    c.setopt(pycurl.HTTPPOST, [('db', 'pubmed'), ('id', ids), ('retmode', 'xml')])
    c.setopt(c.WRITEFUNCTION, buf.write)
    c.perform()
    outfile = open("pubmed_" + start + "_" + end + ".xml", "w")
    outfile.write(buf.getvalue())
    outfile.close()
    buf.close()
    print "Pubmed records from " + start + " to " + end + " has been successfully downloaded."


def test1_9():
    """Count solutions to the 1-9 digit-placement arithmetic puzzle.

    Tries every permutation (i1..i9) of the digits 1-9 in

        i1 + 13 + i2/i3 + i4 + 12*i5 - i6 - 11 + i7*i8/i9 - 10 == 66

    (float division, as in the original), printing each solution as a
    comma-separated digit list and the total count.  Returns the count —
    a new, backward-compatible return value.  Replaces nine hand-rolled
    nested copy-and-remove loops with itertools.permutations.
    """
    import itertools

    count = 0
    for perm in itertools.permutations([1, 2, 3, 4, 5, 6, 7, 8, 9]):
        i1, i2, i3, i4, i5, i6, i7, i8, i9 = perm
        # Identical float expression (and evaluation order) to the original.
        result = float(i1) + 13.0 + float(i2) / float(i3) + float(i4) + 12.0 * float(
                i5) - float(i6) - 11.0 + float(i7) * float(i8) / float(i9) - 10.0
        if result == 66.0:
            print(",".join(str(d) for d in perm))
            count += 1
    print(count)
    return count


def testfda():
    """Re-encode the Drugs@FDA AppDoc dump line by line.

    Reads the file as bytes, drops byte sequences that are not valid UTF-8
    (decode with 'ignore', then re-encode — a Python-2 str round-trip),
    normalises line endings and writes the result to D:/ttttt.txt.
    """
    infile = open("C:/Users/Genesis/Desktop/drugsatfda/AppDoc.txt", "rb")
    outfile = open("D:/ttttt.txt", "w")
    # ht={}
    # for line in infile:
    # line=line.rstrip("\r\n")
    #     splitline=line.split("\t")
    #     length=len(splitline)
    #     if ht.has_key(length):
    #         ht[length]=ht[length]+1
    #     else:
    #         ht[length]=1
    # infile.close()
    # print ht
    for line in infile:
        # Strip anything that cannot survive a UTF-8 decode/encode round trip.
        line = line.decode('utf-8', 'ignore').encode('utf-8')
        # line.replace('\xA0', '\t')
        line = line.rstrip("\r\n")
        outfile.write(line + "\n")
        outfile.flush()
    infile.close()
    outfile.close()


def testre(strr):
    import re

    if re.match(r'^\d*$', strr):
        print "yes"
    else:
        print "no"


def check(name, nodes, checked):
    """Walk the graph in *nodes* from *name*, aborting on the first repeat.

    *nodes* maps a name to a ';'-separated string of child names; *checked*
    accumulates every visited name (as dict keys).  If a name is reached a
    second time (cycle or shared ancestor) it is printed and the process
    terminates via sys.exit().
    """
    import sys

    if name in checked:  # dict.has_key() is Python-2-only; `in` works everywhere
        print(name)
        sys.exit()
    else:
        checked[name] = 0
        if name in nodes:
            for item in nodes[name].split(";"):
                check(item, nodes, checked)


def genRE(text):
    """Build an anchored regex from the space-separated tokens of *text*.

    Each token is extended by a lazy non-space suffix, so an abbreviation
    like "J Biol" yields "^(J[^ ]*? Biol[^ ]*?)$".
    """
    tokens = text.split(" ")
    body = " ".join(token + "[^ ]*?" for token in tokens)
    return "^(" + body + ")$"


def useRE():
    """Resolve journal abbreviations to NLM ids.

    For each line of abbr_2.txt ("abbr|full title|pISSN|eISSN") an NLM id is
    looked up, in order, by print ISSN, electronic ISSN, lowercased full
    title, and finally by scanning Journal.tsv with exact Medline/ISO
    abbreviation comparison plus a regex built from the abbreviation
    (genRE).  Hits go to find_journals.txt with the id appended, misses to
    not_find_journals.txt.

    NOTE(review): the strategies are exclusive (if/elif on the input fields),
    so a record whose pISSN is present but unknown never falls through to
    the eISSN/title/regex strategies — confirm this is intended.
    Python 2 only (has_key, print statements).
    """
    import re

    # Load Issn & Name Dict
    issn_p = {}
    issn_e = {}
    names = {}
    infile2 = open("D:/data/if/Journal.tsv", "r")
    infile2.readline()
    for line in infile2:
        line = line.rstrip("\n")
        splitline = line.split("\t")
        name = splitline[1].lower()
        issn1 = splitline[3]
        issn2 = splitline[4]
        nlm_id = splitline[6]
        issn_p[issn1] = nlm_id
        issn_e[issn2] = nlm_id
        names[name] = nlm_id
    infile2.close()

    infile1 = open("D:/data/if/abbr_2.txt", "r")
    outfile1 = open("D:/data/if/find_journals.txt", "w")
    outfile2 = open("D:/data/if/not_find_journals.txt", "w")
    for thisline in infile1:
        line_nlm_id = ""
        thisline = thisline.rstrip()
        splitthisline = thisline.split("|")
        abbr = splitthisline[0]
        journal_full_title = splitthisline[1]
        issn1 = splitthisline[2]
        issn2 = splitthisline[3]
        # Strategy 1: print ISSN lookup.
        if issn1 != "":
            if issn_p.has_key(issn1):
                line_nlm_id = issn_p[issn1]
            elif issn_p.has_key(issn2):
                line_nlm_id = issn_p[issn2]
        # Strategy 2: electronic ISSN lookup (issn1 is "" on this branch).
        elif issn2 != "":
            if issn_e.has_key(issn1):
                line_nlm_id = issn_e[issn1]
            elif issn_e.has_key(issn2):
                line_nlm_id = issn_e[issn2]
        # Strategy 3: case-insensitive full-title lookup.
        elif journal_full_title != "":
            if names.has_key(journal_full_title.lower()):
                line_nlm_id = names[journal_full_title.lower()]
        # Strategy 4: scan Journal.tsv, matching the abbreviation exactly
        # against the Medline/ISO abbreviations or fuzzily (genRE regex)
        # against the title with common stop-words removed.
        else:
            regex = genRE(abbr)
            infile = open("D:/data/if/Journal.tsv", "r")
            infile.readline()
            # matches = 0
            for line in infile:
                line = line.rstrip("\n")
                splitline = line.split("\t")
                journal_title = splitline[1]
                title = journal_title.replace(" of ", " ")
                title = title.replace(" in ", " ")
                title = title.replace(" on ", " ")
                title = title.replace(" and ", " ")
                title = title.replace(" or ", " ")
                title = title.replace(" from ", " ")
                med_abbr = splitline[2].lower()
                iso_abbr = splitline[5].lower()
                nlm_id = splitline[6]
                # m = re.match(regex,title,re.I)
                # if m:
                # print journal_title+"|"+thisline
                #     matches+=1
                if abbr.lower() == med_abbr:
                    line_nlm_id = nlm_id
                    break
                elif abbr.lower() == iso_abbr:
                    line_nlm_id = nlm_id
                    break
                else:
                    m = re.match(regex, title, re.I)
                    if m:
                        # print journal_title+"|"+thisline
                        # print m.groups()[0]
                        line_nlm_id = nlm_id
                        # print regex
                        # print title
                        # print abbr+"|"+m.groups()[0]
                        break
            infile.close()
        if line_nlm_id == "":
            print thisline
            outfile2.write(thisline + "\n")
            outfile2.flush()
        else:
            print thisline + "|" + line_nlm_id
            outfile1.write(thisline + "|" + line_nlm_id + "\n")
            outfile1.flush()

    infile1.close()
    outfile1.close()
    outfile2.close()


def getJounalTitle():
    """Extract the unique journal names (pipe-separated field 1) from
    D:/data/if/ana.out and write one name per line to D:/data/if/abbr.txt.

    (The function name keeps the original 'Jounal' typo for backward
    compatibility with existing callers.)
    """
    seen = {}
    with open("D:/data/if/ana.out", "r") as infile:
        for line in infile:
            name = line.rstrip().split("|")[1]
            if name not in seen:  # has_key() is Python-2-only
                seen[name] = 0
    with open("D:/data/if/abbr.txt", "w") as outfile:
        for key in seen.keys():
            outfile.write(key + "\n")


def getJournalInfo():
    """Enrich journal abbreviations with title and ISSNs from MySQL.

    For each ISO abbreviation in abbr.txt, queries tb_dds_journal and writes
    "abbr|title|issn_print|issn_electronic" (empty fields for NULLs, "|||"
    when the abbreviation is unknown) to abbr_2.txt.  Python 2 only
    (reload/setdefaultencoding, print statements); requires MySQLdb.
    """
    import sys
    reload(sys)
    sys.setdefaultencoding('utf-8')
    import MySQLdb

    DATABASE_NAME = 'pmc_meta'
    HOST = '10.10.31.17'
    USER_NAME = 'pmc'
    PASSWORD = 'pmc'

    conn = MySQLdb.connect(host=HOST, user=USER_NAME, passwd=PASSWORD, db=DATABASE_NAME, charset="utf8")
    infile = open("D:/data/if/abbr.txt", "r")
    outfile = open("D:/data/if/abbr_2.txt", "w")
    for line in infile:
        line = line.rstrip()
        cursor = conn.cursor()
        sql = "select title,issn_print,issn_electronic from tb_dds_journal where isoabbreviation=%s"
        # NOTE(review): (line) is just the string, not a 1-tuple; MySQLdb
        # tolerates a scalar here, but DB-API style would be (line,) — confirm.
        params = (line)
        cursor.execute(sql, params)
        result = cursor.fetchone()
        if result != None:
            name = result[0]
            if name == None:
                name = ""
            issn1 = result[1]
            if issn1 == None:
                issn1 = ""
            issn2 = result[2]
            if issn2 == None:
                issn2 = ""
            name = name.decode('utf-8', 'ignore').encode('utf-8')
            print line + "|" + name + "|" + issn1 + "|" + issn2
            outfile.write(line + "|" + name + "|" + issn1 + "|" + issn2 + "\n")
            outfile.flush()
        else:
            outfile.write(line + "|||\n")
            outfile.flush()
        cursor.close()
    infile.close()
    conn.close()
    outfile.close()


def getSCI():
    """Split matched journals into SCI and non-SCI output files.

    Builds an NLM-id -> impact-factor map from sci_journal.tsv (columns 6, 7),
    then routes each line of find_journals.txt (pipe-separated, NLM id in
    field 4) to find_sci.txt (impact factor appended) or find_nonsci.txt.

    Fixes: find_journals.txt was never closed (leak); has_key() is
    Python-2-only.
    """
    impact_by_nlm = {}
    with open("D:/data/if/sci_journal.tsv", "r") as infile1:
        for line in infile1:
            splitline = line.rstrip("\n").split("\t")
            impact_by_nlm[splitline[6]] = splitline[7]

    with open("D:/data/if/find_sci.txt", "w") as outfile1, \
            open("D:/data/if/find_nonsci.txt", "w") as outfile2, \
            open("D:/data/if/find_journals.txt", "r") as infile2:
        for line in infile2:
            line = line.rstrip("\n")
            splitline = line.split("|")
            if splitline[4] in impact_by_nlm:
                outfile1.write(line + "|" + impact_by_nlm[splitline[4]] + "\n")
            else:
                outfile2.write(line + "|\n")


def fillJournal():
    """Backfill title/abbreviation/NLM-id columns of tb_dds_journal.

    Loads Journal.tsv into an ISSN -> (title, isoabbreviation, medline_ta,
    unique_nlm_id) map (both print and electronic ISSNs), then for every
    row of tb_dds_journal updates the record matched by print ISSN
    (preferred) or electronic ISSN.  Prints the total row count and the
    number of rows updated.  Python 2 only (reload/setdefaultencoding);
    requires MySQLdb.

    Fixes: the WHERE clause used to concatenate the ISSN into the SQL
    string, which breaks on quotes and is an injection risk — it is now a
    bound %s parameter; has_key() is Python-2-only.
    """
    import sys
    reload(sys)
    sys.setdefaultencoding('utf-8')
    import MySQLdb

    ht = {}
    infile = open("D:/data/if/Journal.tsv", "r")
    infile.readline()  # skip header row
    for line in infile:
        splitline = line.rstrip("\n").split("\t")
        journal_title = splitline[1]
        isoabbreviation = splitline[5]
        medline_ta = splitline[2]
        unique_nlm_id = splitline[6]
        pissn = splitline[3]
        eissn = splitline[4]
        record = (journal_title, isoabbreviation, medline_ta, unique_nlm_id)
        if pissn != "":
            ht[pissn] = record
        if eissn != "":
            ht[eissn] = record
    infile.close()

    DATABASE_NAME = 'pmc_meta'
    HOST = '10.10.31.17'
    USER_NAME = 'pmc'
    PASSWORD = 'pmc'

    conn = MySQLdb.connect(host=HOST, user=USER_NAME, passwd=PASSWORD, db=DATABASE_NAME, charset="utf8")
    cursor = conn.cursor()
    cursor.execute("SELECT issn_electronic,issn_print FROM tb_dds_journal")
    result = cursor.fetchall()
    print(len(result))
    count = 0
    # Fully parameterized UPDATE; the trailing WHERE column differs per branch.
    sql_update = ("UPDATE tb_dds_journal SET title=%s,isoabbreviation=%s,"
                  "Medline_TA=%s,Unique_Nlm_ID=%s WHERE ")
    for item in result:
        issn_e = item[0] if item[0] != None else ""
        issn_p = item[1] if item[1] != None else ""
        if issn_p in ht:
            cursor1 = conn.cursor()
            cursor1.execute(sql_update + "issn_print=%s", ht[issn_p] + (issn_p,))
            conn.commit()
            cursor1.close()
            count += 1
        elif issn_e in ht:
            cursor1 = conn.cursor()
            cursor1.execute(sql_update + "issn_electronic=%s", ht[issn_e] + (issn_e,))
            conn.commit()
            cursor1.close()
            count += 1
    cursor.close()
    conn.close()
    print(count)


def getJournalTypes():
    """Scrape journal type codes out of a saved HTML page.

    Each <p> in D:/journal_types.html carries an anchor whose "name"
    attribute is the abbreviation, a <strong> tag with the type code, and a
    trailing text node with the description; the three fields are written
    tab-separated to D:/data/if/journal_types.tsv.
    """
    from BeautifulSoup import BeautifulSoup

    out = open("D:/data/if/journal_types.tsv", "w")
    src = open("D:/journal_types.html", "r")
    soup = BeautifulSoup(src)
    src.close()
    for paragraph in soup.findAll("p"):
        abbreviation = paragraph.a["name"].strip().replace("&amp;", "&")
        type_code = paragraph.strong.string.strip().replace("&amp;", "&")
        description = paragraph.contents[-1].strip().replace("&amp;", "&")
        out.write(abbreviation + "\t" + type_code + "\t" + description + "\n")
        out.flush()
    out.close()


def compareJournal():
    """Compare the JCR 2014 journal list against Journal.tsv coverage.

    count1 = JCR journals whose ISO abbreviation is absent from Journal.tsv;
    count2 = how many of those are nonetheless present by ISSN (each such
    line is printed).  Both counts are printed at the end.
    """
    known_abbrs = {}
    known_issns = {}
    with open("D:/data/if/Journal.tsv", "r") as infile1:
        infile1.readline()  # header
        for line in infile1:
            splitline = line.rstrip("\n").split("\t")
            issnp = splitline[3]
            issne = splitline[4]
            known_abbrs[splitline[5]] = 0
            if issnp != "":
                known_issns[issnp] = 0
            if issne != "":
                known_issns[issne] = 0

    count1 = 0
    count2 = 0
    with open("D:/data/if/2014-detail.txt") as infile2:
        infile2.readline()  # header
        for line in infile2:
            line = line.rstrip()
            splitline = line.split("\t")
            isoabbr = splitline[1].strip()
            issn = splitline[3].strip()
            if isoabbr not in known_abbrs:  # has_key() is Python-2-only
                count1 += 1
                if issn in known_issns:
                    count2 += 1
                    print(line)
    print(count1)
    print(count2)


def integrateIF():
    """Merge the per-year impact-factor dumps (2002-2015) into one table.

    Each yearly file contributes (name, issn, impact factor) from columns
    1, 2 and 4; rows are keyed by "name|issn".  The output has one row per
    journal with a tab column per year, blank when that year lists no IF.
    """
    YEARS = range(2002, 2016)  # xrange() is Python-2-only
    table = {}
    for year in YEARS:
        with open("D:/data/if/IF-HZ/" + str(year) + ".txt", "r") as infile:
            for line in infile:
                splitline = line.rstrip().split("\t")
                name = splitline[1]
                issn = splitline[2]
                impact_factor = splitline[4].strip()
                key = name + "|" + issn
                per_year = table.setdefault(key, {})
                if impact_factor != "":
                    per_year[year] = impact_factor

    with open("D:/data/if/if_2002_2015.txt", "w") as outfile:
        for item in table.keys():
            splititem = item.split("|")
            row = splititem[0] + "\t" + splititem[1]
            for year in YEARS:
                row += "\t" + table[item].get(year, "")
            outfile.write(row + "\n")


def testset():
    """Split a sample SQL-ish value list and print the entries that are
    neither the empty literal '' nor NULL.

    Returns the kept entries as a list (new, backward-compatible return
    value).
    """
    a = "221616791,10650003,'D008457.2015','','Q000635.2015',NULL"
    b = a.split(",")
    print(b)
    kept = []
    for item in b:
        if (item != "''") and (item != "NULL"):
            print(item)
            kept.append(item)
    return kept


def testHex(start, end):
    for i in xrange(int(start, 16), int(end, 16) + 1):
        currentstr = "{0:0>2x}".format(i)
        print currentstr


def modifyfile(filepath):
    """Prefix every line of *filepath* with the PMC FTP base URL.

    The result is written, one URL per line, to *filepath*.modified.
    """
    with open(filepath, "r") as infile, open(filepath + ".modified", "w") as outfile:
        for line in infile:
            outfile.write("ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/" + line.rstrip() + "\n")


def testset():
    """Append three sample values to a list and print it.

    Bug fix: the original referenced an undefined global ``aaa`` and always
    raised NameError; the list is now created locally.
    """
    aaa = []
    aaa.append("1")
    aaa.append("1")
    aaa.append("2")
    print(aaa)


def analyze_jcr_unique():
    """Analyse category coverage of JCR_UNIQUE.txt (category in column 9).

    Pass 1 counts rows with exactly one category (``unique``) vs several
    (``multi``) and tallies each single category in ``cate``.  Pass 2 counts
    rows at least one of whose categories appears in ``cate`` (``includes``)
    and prints rows covered by none.  Summary figures are printed at the
    end.  Python 2 only (has_key, print statements).
    """
    unique = 0
    multi = 0
    cate = {}
    infile = open("D:/data/if/IntegrateJournal/JCR_UNIQUE.txt", "r")
    for line in infile:
        line = line.rstrip()
        splitline = line.split("\t")
        category = splitline[9]
        category = category.strip()
        if category != "":
            categories = category.split("|")
            if len(categories) == 1:
                unique += 1
                if cate.has_key(category):
                    cate[category] += 1
                else:
                    cate[category] = 1
            else:
                multi += 1
    infile.close()
    # Second pass: check every row against the single-category vocabulary.
    infile2 = open("D:/data/if/IntegrateJournal/JCR_UNIQUE.txt", "r")
    includes = 0
    for line in infile2:
        include = 0
        line = line.rstrip()
        splitline = line.split("\t")
        category = splitline[9]
        category = category.strip()
        if category != "":
            categories = category.split("|")
            for item in categories:
                if cate.has_key(item):
                    include = 1
        if include == 1:
            includes += 1
        else:
            print line
    infile2.close()

    print "UNIQUE CATEGORY: " + str(unique)
    print "MULTI CATEGORY: " + str(multi)
    print len(cate.keys())
    print includes


def pISSN_eISSN():
    """Print every journal document whose print and electronic ISSNs are equal.

    Scans the whole `journal` collection on the MongoDB server; documents
    missing either field are skipped.  (has_key() was Python-2-only.)
    """
    from pymongo import MongoClient

    SERVER = '10.10.31.13'
    PORT = 27017
    DB_NAME = "journal"
    COLLECTION_NAME = "journal"

    connection = MongoClient(SERVER, PORT)
    db = connection[DB_NAME]
    collection = db[COLLECTION_NAME]

    for result in collection.find():
        if "pISSN" in result and "eISSN" in result:
            if result["pISSN"] == result["eISSN"]:
                print(result)
    connection.close()


def searchISSN():
    """Look up missing ISSNs / NLM ids for JCR journal titles in MongoDB.

    For each input line (tab-separated: full title, ISO abbreviation) a
    case-insensitive exact-match $regex query is run against FullTitle,
    ISOAbbr and MedAbbr; pISSN/eISSN default to "****-****" and NlmId to ""
    when absent.  Results are written tab-separated to searched_issn.txt.

    Fixes: the regex option flag is "i" — the original passed "$i", which is
    not a valid MongoDB $options value; a query with no hit returns None and
    is now handled (defaults written) instead of raising on attribute access.
    """
    from pymongo import MongoClient

    SERVER = '10.10.31.13'
    PORT = 27017
    DB_NAME = "journal"
    COLLECTION_NAME = "journal"

    connection = MongoClient(SERVER, PORT)
    db = connection[DB_NAME]
    collection = db[COLLECTION_NAME]

    infile = open("D:/data/if/IntegrateJournal/JCR.txt.jcrmiss.jcr_title_unique", "r")
    outfile = open("D:/data/if/IntegrateJournal/searched_issn.txt", "w")
    for line in infile:
        splitline = line.rstrip().split("\t")
        FullTitle = splitline[0].strip()
        ISOAbbr = splitline[1].strip()
        ors = []
        if FullTitle != "":
            ors.append({"FullTitle": {"$regex": "^" + FullTitle + "$", "$options": "i"}})
        if ISOAbbr != "":
            ors.append({"ISOAbbr": {"$regex": "^" + ISOAbbr + "$", "$options": "i"}})
            ors.append({"MedAbbr": {"$regex": "^" + ISOAbbr + "$", "$options": "i"}})
        result = collection.find_one({"$or": ors})
        pISSN = "****-****"
        eISSN = "****-****"
        NlmId = ""
        if result is not None:
            pISSN = result.get("pISSN", pISSN)
            eISSN = result.get("eISSN", eISSN)
            NlmId = result.get("NlmId", NlmId)
        outfile.write(pISSN + "\t" + eISSN + "\t" + NlmId + "\n")
        outfile.flush()
    connection.close()
    infile.close()
    outfile.close()


def testFinalSeg():
    """Demonstrate the finalseg Chinese word segmenter on a sample sentence,
    first with default settings and then with new-word discovery enabled.
    The banner strings are user-facing Chinese labels ("default result" /
    "result with new-word discovery enabled").  Python 2 only.
    """
    import finalseg

    sentence_list = [
        "VCGDB: Vitual Chinese dynamic genome database."
    ]

    print u"=默认效果"

    for sentence in sentence_list:
        seg_list = finalseg.cut(sentence)
        print "/ ".join(seg_list)

    print u"\n=打开新词发现功能后的效果\n"

    for sentence in sentence_list:
        seg_list = finalseg.cut(sentence, find_new_word=True)
        print "/ ".join(seg_list)


def testsolr():
    """Query the ipaper_organization Solr core for name:"Hong Kong" and print
    each hit.  Network smoke test; requires the `solr` client package.
    """
    import solr

    s = solr.SolrConnection('http://10.10.31.13:8080/solr/ipaper_organization/')

    result = s.query("name:\"Hong Kong\"")
    for r in result:
        print r


def GetChinaOrganization():
    """Dump id/name/description of every tb_dds_organization row, with
    embedded newlines stripped, one tab-separated line each.

    Python 2 only (reload/setdefaultencoding, print statement); requires
    MySQLdb.  NOTE(review): PORT is defined but never passed to connect (and
    is a string); the local `id` shadows the builtin; rows with NULL name or
    description would raise on .replace — assumed not to occur.
    """
    import MySQLdb
    import sys
    reload(sys)
    sys.setdefaultencoding('utf-8')

    # MySQL Info
    DATABASE_NAME = 'pmc_meta'
    HOST = '10.10.31.17'
    PORT = '3306'
    USER_NAME = 'pmc'
    PASSWORD = 'pmc'
    CHAR_SET = 'utf8'

    conn = MySQLdb.connect(host=HOST, user=USER_NAME, passwd=PASSWORD, db=DATABASE_NAME, charset=CHAR_SET)
    cursor = conn.cursor()
    sql = "SELECT id,name,description FROM tb_dds_organization"
    cursor.execute(sql)

    for row in cursor.fetchall():
        id = str(row[0])
        name = row[1].replace("\n", "")
        des = row[2].replace("\n", "")

        print id + "\t" + name + "\t" + des
    cursor.close()
    conn.close()


def testreplace():
    """Demonstrate str.replace: print "aaabaaabaaab" with every "b" replaced
    by "c", and return the result (new, backward-compatible return value)."""
    a = "aaabaaabaaab"
    b = a.replace("b", "c")
    print(b)
    return b


def ModifyGSE():
    """Turn each GEO series accession number in D:/gse.txt into its FTP
    supplementary-archive URL and write the URLs to D:/gse_modified.txt.

    The series directory drops the last three digits of the accession
    (e.g. GSE12345 lives under GSE12nnn/).  NOTE(review): accessions of
    three digits or fewer produce an empty directory prefix — assumed not
    to occur in the input.
    """
    with open("D:/gse.txt", "r") as infile, open("D:/gse_modified.txt", "w") as outfile:
        for line in infile:
            gse = line.rstrip()
            url = ("ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE" + gse[:-3] +
                   "nnn/GSE" + gse + "/suppl/GSE" + gse + "_RAW.tar")
            outfile.write(url + "\n")


def testRE(line):
    import re

    # email_regex='a(.*?)a|b(.*?)b'
    # pattern = re.compile(email_regex,re.I)
    # match = pattern.findall(line)
    # for item in match:
    #     for item2 in item:
    #         if item2!="":
    #             print item2
    # filter_regex="aa|b b|P. R. China|dd"
    # filter_regex=filter_regex.replace(".","\.")
    # print filter_regex
    # items = re.sub(filter_regex,"",line,0,re.I)
    # for item in items:
    #     print item
    regex = "(aa|bb|cc|dd)"
    pattern = re.compile(regex, re.I)
    match = pattern.search(line)
    if match:
        print match.group()
    else:
        print match
        # for item in match:
        #     print item

        # match_regex="MD|CA|TX"
        # pattern = re.compile(match_regex)
        # m=re.search(pattern,line)
        # return m
        # print items


def Search_CaseSensitive(regex, line):
    """Return True when *regex* matches anywhere in *line* (case-sensitive)."""
    import re
    return re.compile(regex).search(line) is not None


def Search_CaseIgnore(regex, line):
    """Return True when *regex* matches anywhere in *line*, ignoring case."""
    import re
    return re.compile(regex, re.I).search(line) is not None


def testif(line):
    if line == "haha":
        return True
    else:
        return False


def Search_CaseSensitive(regex, line):
    """Return True when *regex* matches anywhere in *line* (case-sensitive).

    NOTE(review): byte-for-byte duplicate of the definition earlier in this
    module; this later definition is the one that wins at import time —
    consider deleting one copy.
    """
    import re
    pattern = re.compile(regex)
    m = re.search(pattern, line)
    if m:
        return True
    else:
        return False


def Search_CaseIgnore(regex, line):
    """Return True when *regex* matches anywhere in *line*, ignoring case.

    NOTE(review): byte-for-byte duplicate of the definition earlier in this
    module; this later definition is the one that wins at import time —
    consider deleting one copy.
    """
    import re
    pattern = re.compile(regex, re.I)
    m = re.search(pattern, line)
    if m:
        return True
    else:
        return False


def Search_CaseSensitive_Return(regex, line):
    """Return the text matched by *regex* in *line* (case-sensitive), or
    None when there is no match.

    Fixes: the original called m.group(2), which raises IndexError unless
    the caller's pattern happens to contain two capture groups, and it
    returned *line* on a miss even though the caller (RecognizeCountry)
    tests the result against None.  The pattern is now wrapped in a single
    group, mirroring Search_CaseIgnore_Return.
    """
    import re
    pattern = re.compile("(" + regex + ")")
    m = re.search(pattern, line)
    if m:
        return m.group(1)
    else:
        return None


def Search_CaseIgnore_Return(regex, line):
    """Return the text matched by *regex* in *line* (case-insensitive), or
    None when there is no match.  The whole pattern is wrapped in one group
    so the full matched text is returned regardless of groups in *regex*."""
    import re
    match = re.compile("(" + regex + ")", re.I).search(line)
    return match.group(1) if match else match


def Initialize_Country_Regex(filepath):
    """Load the country table and build the lookup structures.

    *filepath* is tab-separated; column 1 is the country name and column 3
    (optional) a '|'-separated list of abbreviations.  Returns a 4-tuple
    (country_regex, country_abbr_regex, country_ht, country_abbr_ht):
    the regexes are '|' alternations with '.' escaped, country_ht maps
    UPPERCASED name -> name, and country_abbr_ht maps abbreviation -> name.
    """
    name_parts = []
    abbr_parts = []
    country_ht = {}
    country_abbr_ht = {}
    with open(filepath, "r") as infile:
        for row in infile:
            fields = row.rstrip().split("\t")
            name = fields[1]
            name_parts.append(name)
            country_ht[name.upper()] = name
            if len(fields) > 3:
                abbr_parts.append(fields[3])
                for abbr in fields[3].split("|"):
                    country_abbr_ht[abbr] = name
    country_regex = "|".join(name_parts).replace(".", "\\.")
    country_abbr_regex = "|".join(abbr_parts).replace(".", "\\.")
    return country_regex, country_abbr_regex, country_ht, country_abbr_ht


def Initialize_State_Regex(filepath):
    """Load the state table and build two alternation regexes.

    Columns 1 and 3 of the tab-separated file feed state_regex (full
    names / long abbreviations), column 2 feeds state_abbr_regex; '.' is
    escaped in both.  Returns (state_regex, state_abbr_regex).
    """
    full_parts = []
    abbr_parts = []
    with open(filepath, "r") as infile:
        for row in infile:
            fields = row.rstrip().split("\t")
            full_parts.append(fields[1])
            full_parts.append(fields[3])
            abbr_parts.append(fields[2])
    state_regex = "|".join(full_parts).replace(".", "\\.")
    state_abbr_regex = "|".join(abbr_parts).replace(".", "\\.")
    return state_regex, state_abbr_regex


def Initialize_Other_Regex(filepath):
    """Load the extra-locations table and build the lookup structures.

    Column 1 of the tab-separated file is the location name, column 0 the
    value it maps to (a country), and column 2 (optional) a '|'-separated
    list of abbreviations.  Returns a 4-tuple mirroring
    Initialize_Country_Regex: (other_regex, other_abbr_regex, other_ht,
    other_abbr_ht), with other_ht keyed by UPPERCASED location name.
    """
    name_parts = []
    abbr_parts = []
    other_ht = {}
    other_abbr_ht = {}
    with open(filepath, "r") as infile:
        for row in infile:
            fields = row.rstrip().split("\t")
            name_parts.append(fields[1])
            other_ht[fields[1].upper()] = fields[0]
            if len(fields) > 2:
                abbr_parts.append(fields[2])
                for abbr in fields[2].split("|"):
                    other_abbr_ht[abbr] = fields[0]
    other_regex = "|".join(name_parts).replace(".", "\\.")
    other_abbr_regex = "|".join(abbr_parts).replace(".", "\\.")
    return other_regex, other_abbr_regex, other_ht, other_abbr_ht


def RecognizeCountry(org):
    """Identify the country mentioned in the affiliation string *org*.

    Loads the regex tables from D:/data/regex, then tries, in order:
    case-insensitive country name, case-sensitive country abbreviation,
    state name / state abbreviation (reported only, no country resolved),
    and finally other-location name / abbreviation mapped back to a
    country.  Results are printed; nothing is returned.

    NOTE(review): Search_CaseSensitive_Return as originally written never
    returns None (it returns *line* on a miss), so the `m2 != None` and
    `m4 != None` guards rely on that helper being fixed to return None.
    """
    regexpath = "D:/data/regex"
    country_regex, country_abbr_regex, country_ht, country_abbr_ht = Initialize_Country_Regex(
            regexpath + "/country.compact.tsv")
    state_regex, state_abbr_regex = Initialize_State_Regex(regexpath + "/state.tsv")
    other_regex, other_abbr_regex, other_ht, other_abbr_ht = Initialize_Other_Regex(regexpath + "/location.tsv")
    # print country_regex
    # print "================="
    # print country_abbr_regex
    # print "================="
    # print country_ht
    # print "================="
    # print country_abbr_ht
    # print "================="
    # print state_regex
    # print "================="
    # print state_abbr_regex
    # print "================="
    # print other_regex
    # print "================="
    # print other_ht

    m1 = Search_CaseIgnore_Return(country_regex, org)
    if m1 != None:
        country = country_ht[m1.upper()]
        print "Find Country: " + m1 + "\t" + country
    else:
        m2 = Search_CaseSensitive_Return(country_abbr_regex, org)
        if m2 != None:
            country = country_abbr_ht[m2]
            print "Find Country Abbr: " + m2 + "\t" + country
        elif Search_CaseIgnore(state_regex, org):
            print "Find State"
        elif Search_CaseSensitive(state_abbr_regex, org):
            print "Find State Abbr"
        else:
            m3 = Search_CaseIgnore_Return(other_regex, org)
            if m3 != None:
                country = other_ht[m3.upper()]
                print "Find Location: " + m3 + "\t" + country
            else:
                m4 = Search_CaseSensitive_Return(other_abbr_regex, org)
                if m4 != None:
                    country = other_abbr_ht[m4]
                    print "Find Location Abbr: " + m4 + "\t" + country


def removeStart(line):
    """Print *line* when it begins with one of the bracket/dagger/section
    marker characters, and return whether it did (new return value).

    Fixes: the original called line.startsWith(regex) — Python strings only
    have startswith (lowercase w), and startswith does not accept a regular
    expression, so the call always raised AttributeError.  re.match anchors
    at the start of the string, which is what the alternation was written
    for.
    """
    import re

    regex = "\(|\)|\*|‡|†|‡‡|∥|”|‖|–|§"

    if re.match(regex, line):
        print(line)
        return True
    return False


def TestPinyin():
    """Print every city name (tab-separated column 3 of D:/city.txt) that
    occurs at least twice, i.e. ambiguous entries."""
    counts = {}
    with open("D:/city.txt", "r") as infile:
        for line in infile:
            city = line.rstrip().split("\t")[3]
            # dict.get avoids the Python-2-only has_key() check.
            counts[city] = counts.get(city, 0) + 1
    for key in counts.keys():
        if counts[key] >= 2:
            print(key)


def TestMongoInsert():
    """Insert one fixed sample document (id/name/path/nofile/datein) into the
    test.test1 MongoDB collection.  Connectivity smoke test; requires
    pymongo with insert_one support (pymongo >= 3.0).
    """
    from pymongo import MongoClient
    import datetime

    DB_NAME = "test"
    COLLECTION_NAME = "test1"

    connection = MongoClient('192.168.8.253', 27017)
    db = connection[DB_NAME]
    collection = db[COLLECTION_NAME]

    collection.insert_one({"id": 2, "name": "haha2", "path": "haha2", "nofile": False,
                           "datein": datetime.datetime(2016, 1, 1, 0, 20, 20)})
    connection.close()


def TestJson():
    """Parse a deliberately truncated esearch JSON payload.

    The sample string is cut off mid-object, so json.loads raises ValueError
    and "Error" is printed; with a complete payload the result count would
    be printed instead.
    """
    import json
    stt = '{"header": {"type": "esearch","version": "0.3"},"esearchresult": {"count": "3782057","retmax": "20","retstart": "0",'
    try:
        s = json.loads(stt)
        print(s["esearchresult"]["count"])
    except ValueError:
        print("Error")

def TestDatetime():
    """Exercise datetime arithmetic against a MongoDB-stored timestamp.

    Fetches the document whose time_in equals 2016-01-01 00:00 from
    pubmed.metalist, then prints various timedelta .days values and
    comparison results between that stored timestamp and fixed reference
    datetimes.  Exploratory code; Python 2 only (print statements).
    """
    import datetime
    # time1=datetime.datetime.now()
    # print time1
    # time2=time1 - datetime.timedelta(days=3)
    # print time2
    # time3=datetime.datetime(time2.year,time2.month,time2.day,12)
    # print time3
    # time4=datetime.datetime.combine(time2.date(),datetime.time(12))
    # print time4

    from pymongo import MongoClient

    HOST = '192.168.8.253'
    PORT = 27017
    DB_NAME = 'pubmed'
    COLLECTION_NAME = 'metalist'

    connection = MongoClient(HOST, PORT)
    db = connection[DB_NAME]
    collection = db[COLLECTION_NAME]

    NOW=datetime.datetime.now()
    time1=datetime.datetime(2016,1,1)
    time5=datetime.datetime(2016,1,1,12)
    time6=datetime.datetime(2016,1,2,0)
    time7=datetime.datetime(2016,1,2,12)
    print NOW
    print time1

    # time2 is the timestamp actually stored in MongoDB for that document.
    result=collection.find_one({"time_in":time1})
    time2=result["time_in"]
    connection.close()
    print time2
    time3=NOW-time2
    print time3
    print time3.days
    time4=time1-time2
    print time4
    print time4.days
    print (time5-time2).days
    print (time6-time2).days
    print (time7-time2).days
    if NOW>time2:
        print True
    else:
        print False
    if time1>time2:
        print True
    else:
        print False
    if time5>time2:
        print True
    else:
        print False



    # time_now= datetime.datetime(2016,4,20,0,0,0)
    # print time_now
    # time2=datetime.datetime(2016,4,20,12,0,0)
    # if time2>=time_now:
    #     print True


def TestBeautifulSoup():
    """Print the PMID of every MedlineCitation in a downloaded PubMed XML
    dump, using bs4's XML parser.  PMID tags nested under other parents
    (e.g. references) are filtered out by checking the parent tag name.
    Python 2 only (print statement); requires bs4 with an XML parser (lxml).
    """
    from bs4 import BeautifulSoup
    # ht={}
    soup=BeautifulSoup(open(r"D:\data\pubmed_1_10000.xml"),"xml")
    pmids=soup.find_all("PMID")
    for pmid in pmids:
        parent_name=pmid.parent.name
        if parent_name=="MedlineCitation":
            print pmid.string
        # if ht.has_key(parent_name):
        #     ht[parent_name]+=1
        # else:
        #     ht[parent_name]=1
    # print ht




def TestFTP():
    """Download the remote file "org_type" over FTP if it exists.

    Connects and logs in with fixed credentials, checks for the file with
    NLST, and retrieves it in binary mode to D:/org_type3.  The commented
    lines are the matching upload/debug experiments.
    """
    import ftplib
    ftp=ftplib.FTP()
    # ftp.set_debuglevel(2)
    ftp.connect("fr.snowage.net","21")
    ftp.login("pmc_meta","pmc_meta")
    # fp=open("D:/org_type","rb")
    # ftp.storbinary("STOR org_type",fp)
    # ftp.dir()
    filelist=ftp.nlst("org_type")
    if len(filelist)!=0:
        outfile=open("D:/org_type3","wb")
        ftp.retrbinary("RETR org_type",outfile.write)
        outfile.close()
    # print filelist
    ftp.quit()

def TestBlankFile():
    """Write a small marker file to D:/test.out and print its size in bytes
    (expected 6: the five payload characters plus the newline)."""
    import os
    # `with` closes/flushes even on error; a bytes literal matches binary
    # mode (identical to the original str on Python 2).
    with open("D:/test.out", "wb") as outfile:
        outfile.write(b"test+\n")
    print(os.path.getsize("D:/test.out"))

def TestRevisedFile():
    """Look up each PMC id from a local esearch JSON dump in MongoDB.

    Reads the id list from D:/pmcid_list_20160120.json, prints the
    matching 'metalist' document for every id, and finally prints how
    many ids had a matching document.
    """
    from pymongo import MongoClient
    import json
    HOST = "192.168.8.253"
    PORT = 27017
    DB_NAME = "pmc"
    COLLECTION_NAME = "metalist"

    connection = MongoClient(HOST, PORT)
    db = connection[DB_NAME]
    collection = db[COLLECTION_NAME]

    # open() instead of the deprecated file() builtin (removed in Py3).
    f = open("D:/pmcid_list_20160120.json")
    s = json.load(f)
    f.close()
    count = 0
    for id in s["esearchresult"]["idlist"]:
        result = collection.find_one({"pmcid": int(id)})
        print(result)
        # BUG FIX: count was never incremented, so the final print
        # always showed 0 regardless of how many ids were found.
        if result is not None:
            count += 1
    connection.close()
    print(count)

def Get_PMID_List(nameString, dateStringStart, dateStringEnd):
    """Fetch a PubMed id list from NCBI esearch for a date range.

    nameString      -- label used only in the progress message
    dateStringStart -- 'mindate' value, e.g. "2016/01/18"
    dateStringEnd   -- 'maxdate' value, same format

    Returns the esearch response parsed from JSON (a dict). On any
    download/parsing failure it retries recursively until it succeeds.
    """
    import StringIO
    import pycurl
    import json
    try:
        url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
        buf = StringIO.StringIO()
        c = pycurl.Curl()
        c.setopt(pycurl.POST, 1)
        c.setopt(c.URL, url)
        c.setopt(pycurl.HTTPPOST,
                 [('db', 'pubmed'), ('mindate', dateStringStart), ('maxdate', dateStringEnd), ('retmode', "json"),
                  ('retmax', "100000")])
        c.setopt(c.WRITEFUNCTION, buf.write)
        c.perform()
        result = buf.getvalue()
        json_object = json.loads(result)
        buf.close()
        print("pmid_list_" + nameString + " has been successfully downloaded.")
        return json_object
    except Exception:
        # BUG FIX: the original recursed without `return`, so after any
        # retry the caller received None even when the retry succeeded.
        print("Download exception, try again ...")
        return Get_PMID_List(nameString, dateStringStart, dateStringEnd)

@retry
def Get_PMID_List2(nameString, dateStringStart, dateStringEnd):
    """Fetch a PubMed id list from NCBI esearch for a date range.

    Same request as Get_PMID_List, but failures are retried by the
    `retrying` decorator instead of hand-rolled recursion. Returns the
    esearch response parsed from JSON (a dict).
    """
    import StringIO
    import pycurl
    import json

    response_buf = StringIO.StringIO()
    curl = pycurl.Curl()
    curl.setopt(pycurl.POST, 1)
    curl.setopt(curl.URL, 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi')
    # retmax is set high enough to pull every id for a single day.
    curl.setopt(pycurl.HTTPPOST,
                [('db', 'pubmed'), ('mindate', dateStringStart), ('maxdate', dateStringEnd), ('retmode', "json"),
                 ('retmax', "100000")])
    curl.setopt(curl.WRITEFUNCTION, response_buf.write)
    curl.perform()
    parsed = json.loads(response_buf.getvalue())
    response_buf.close()
    print("pmid_list_" + nameString + " has been successfully downloaded.")
    return parsed

def Extract_PubMed_IDs_From_XML(filename):
    """Collect article-level PMIDs from a PubMed XML dump.

    Accumulates lines until a closing </PubmedArticle> or
    </PubmedBookArticle> tag, parses that one record with BeautifulSoup,
    and keeps numeric PMIDs whose parent is MedlineCitation or
    BookDocument. Returns the PMIDs as a list of strings.
    """
    from bs4 import BeautifulSoup

    result = []
    buffered = ""
    infile = open(filename, "r")
    for raw_line in infile:
        stripped = raw_line.rstrip()
        buffered += stripped
        if ("</PubmedArticle>" in stripped) or ("</PubmedBookArticle>" in stripped):
            record = BeautifulSoup(buffered, "xml")
            for pmid in record.find_all("PMID"):
                if pmid.parent.name in ("MedlineCitation", "BookDocument"):
                    text = pmid.string
                    if text.isdigit():
                        result.append(text)
            buffered = ""
    infile.close()
    return result

def Extract_PubMed_IDs_From_XML_2(filename):
    """Collect article-level PMIDs from a PubMed XML dump (fast path).

    Relies on the fixed indentation of the dump: article-level PMID tags
    always start at exactly 8 spaces. Each such line is parsed on its
    own, and numeric PMIDs are returned as a list of strings.
    """
    from bs4 import BeautifulSoup

    result = []
    infile = open(filename, "r")
    for raw_line in infile:
        stripped = raw_line.rstrip()
        if stripped.startswith("        <PMID"):
            tag = BeautifulSoup(stripped, "xml").find("PMID")
            text = tag.string
            if text.isdigit():
                result.append(text)
    infile.close()
    return result

def Extract_PubMed_IDs_From_XML_3(filename):
    """Collect article-level PMIDs from a PubMed XML dump via regex.

    Like Extract_PubMed_IDs_From_XML_2 but uses REtest (a regex) instead
    of an XML parser, relying on article-level PMID lines always starting
    at exactly 8 spaces. Returns numeric PMIDs as a list of strings.
    """
    infile = open(filename, "r")
    result = []
    for line in infile:
        line = line.rstrip()
        if line.startswith("        <PMID"):
            pmid_string = REtest(line)
            # BUG FIX: the None check must come BEFORE .isdigit() --
            # the original called .isdigit() first and raised
            # AttributeError whenever the regex did not match.
            if pmid_string is not None and pmid_string.isdigit():
                result.append(pmid_string)
    infile.close()
    return result

def REtest(line):
    """Return the text between the outermost tag pair in *line*.

    Matches case-insensitively; returns None when *line* contains no
    <...>text<...> structure.
    """
    import re

    match = re.search(r"<.*>(.*)<.*>", line, re.I)
    return match.group(1) if match else None

def Find_XML_By_PMID():
    """Dump the stored XML for every PMID listed in D:/mesh_error.txt.

    Looks each PMID up in the pubmed.metalist collection and writes
    'pmid<TAB>xml' lines to D:/mesh_error_xml.txt, flushing after each
    record so partial output survives a crash.
    """
    from pymongo import MongoClient
    HOST = "192.168.8.253"
    PORT = 27017
    DB_NAME = "pubmed"
    COLLECTION_NAME = "metalist"

    connection = MongoClient(HOST, PORT)
    collection = connection[DB_NAME][COLLECTION_NAME]

    infile = open("D:/mesh_error.txt", "r")
    outfile = open("D:/mesh_error_xml.txt", "w")
    for raw in infile:
        pmid = int(raw.rstrip())
        record = collection.find_one({"pmid": pmid})
        outfile.write(str(record["pmid"]) + "\t" + str(record["xml"]) + "\n")
        outfile.flush()
    outfile.close()
    infile.close()
    connection.close()

def GetRemoteFilelist():
    """Print the non-empty entries of a remote directory listing via SSH."""
    import paramiko

    client = paramiko.SSHClient()
    # Accept unknown host keys automatically (throwaway utility script).
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname="fr.snowage.net", port=22, username="dataadmin", password="data123")
    remote_path = "/home/dataadmin/missing_file"
    stdin, stdout, stderr = client.exec_command("ls " + remote_path)
    for raw in stdout:
        name = raw.rstrip()
        if name != "":
            print(name)
    client.close()

def TestSortDict():
    """Sort a dict by value, descending, and print the top entry."""
    dic = {'a': 31, 'bc': 5, 'c': 3, 'asd': 4, 'aa': 74, 'd': 0}
    # sorted() over (key, value) pairs; avoid shadowing the builtin `dict`.
    ranked = sorted(dic.items(), key=lambda pair: pair[1], reverse=True)
    print(ranked)
    print(ranked[0][0])
    print(ranked[0][1])

def findmissingcai():
    """Write lines of ena2.txt whose filename is absent from list.txt.

    The filename is taken as the basename of the second-to-last
    space-separated field of each ena2.txt line; matches against the set
    of names in list.txt, and misses go to ena2_miss.txt.
    """
    known = set()
    with open("D:/cai/list.txt", "r") as listing:
        for entry in listing:
            known.add(entry.rstrip("\n"))
    with open("D:/cai/ena2.txt", "r") as source, open("D:/cai/ena2_miss.txt", "w") as missing:
        for entry in source:
            entry = entry.rstrip("\n")
            fname = entry.split(" ")[-2].split("/")[-1]
            if fname not in known:
                missing.write(entry + "\n")

def GetOrg():
    """Dump the 'org' field of every Chinese organization record.

    Queries organization.organization for country == "China" and writes
    each record's 'org' value (when present) to D:/china_org.txt.
    """
    import sys
    default_encoding = 'utf-8'
    if sys.getdefaultencoding() != default_encoding:
        # Python 2-only hack: force implicit str/unicode conversion to UTF-8
        # so non-ASCII organization names can be written without errors.
        reload(sys)
        sys.setdefaultencoding(default_encoding)
    from pymongo import MongoClient
    HOST = "10.188.188.22"
    PORT = 27017
    DB_NAME = "organization"
    COLLECTION_NAME = "organization"

    connection = MongoClient(HOST, PORT)
    collection = connection[DB_NAME][COLLECTION_NAME]

    outfile = open("D:/china_org.txt", "w")
    for record in collection.find({"country": "China"}):
        if "org" in record:
            outfile.write(record["org"] + "\n")
            outfile.flush()
    connection.close()
    outfile.close()

def getTime():
    """Parse a fixed ISO-8601 'Z' timestamp with strptime and print it.

    NOTE(review): this shadows the earlier getTime() defined near the top
    of the file; whichever is defined last wins at import time.
    """
    import datetime

    parsed = datetime.datetime.strptime("2009-04-28T10:29:37Z", "%Y-%m-%dT%H:%M:%SZ")
    print(parsed)



if __name__ == "__main__":
    # Scratch entry point: each experiment in this file was run once and
    # then commented out; only the most recent one stays active.
    getTime()