# import requests
import pandas, json, os
# from bs4 import BeautifulSoup
from Bio import Entrez
from lxml import etree

# Create the cache directory for per-gene JSON results up front.
# makedirs(exist_ok=True) is race-free, unlike an exists() check
# followed by mkdir().
os.makedirs('result', exist_ok=True)

def processCSV(fileName):
    """Fetch NCBI Entrez gene commentary for every gene in *fileName* and
    render an HTML report.

    *fileName* is a tab-separated table with at least the columns
    'symbol' and 'Entrez.ID'.  For each gene the 'Representative
    Expression' Entrez commentary is cached as JSON under result/ (skipped
    when a cache file already exists), a per-gene XML fragment is written
    under trans/, a combined <fileName>.genes.xml index is produced, and
    report.xsl is applied to it to emit <fileName>.merge.html.

    Side effects: network calls to NCBI Entrez; files written under
    result/, trans/, and next to *fileName*.  Requires report.xsl in the
    working directory.
    """
    # Local import: used to escape text content before embedding it in XML.
    from xml.sax.saxutils import escape

    data = pandas.read_csv(fileName, sep='\t')
    Entrez.email = 'yue@stu.gxmu.edu.cn'  # NCBI requires a contact address
    genes = list(data['symbol'])
    EntrezIDs = list(data['Entrez.ID'])

    for geneIndex, gene in enumerate(genes):
        if not os.path.exists('result/'+gene+'.json'):
            print('Running '+str(geneIndex+1)+' / '+str(len(genes))+' job: '+gene)
            NCBIGeneID = EntrezIDs[geneIndex]
            print('NCBI Gene ID: '+str(NCBIGeneID))

            # Close the efetch handle explicitly instead of leaking it.
            handle = Entrez.efetch(db="gene", id=NCBIGeneID, retmode='xml')
            try:
                geneRecord = Entrez.read(handle)
            finally:
                handle.close()

            # BUG FIX: the original rewrote the cache file on every loop
            # iteration, so only the LAST comment survived — not
            # necessarily the 'Representative Expression' block that the
            # transform stage below expects.  Cache the matched comment
            # once; fall back to the last comment so a cache file is
            # always produced (matching the original's guarantee).
            expr = 'NA'
            cachedComment = None
            lastComment = None
            for comment in geneRecord[0]['Entrezgene_comments']:
                lastComment = comment
                if ('Gene-commentary_heading' in comment.keys()) and (comment['Gene-commentary_heading'] == 'Representative Expression'):
                    cachedComment = comment
                    for item in comment['Gene-commentary_comment']:
                        if item['Gene-commentary_label'] == 'Category':
                            expr = item['Gene-commentary_text']
                            print('Expression: '+expr+'\n')
            if cachedComment is None:
                cachedComment = lastComment

            with open('result/'+gene+'.json', 'w', encoding='UTF-8') as file:
                file.write(json.dumps(cachedComment, indent=4))
        else:
            print('Using cached XML file of {0} ( {1} / {2} ) \n'.format(gene, str(geneIndex+1), str(len(genes))))

    # Race-free equivalent of the exists()/mkdir() pair.
    os.makedirs('trans', exist_ok=True)

    with open(fileName+'.genes.xml', 'w', encoding='UTF-8') as f:
        f.write('<?xml version="1.0" encoding="UTF-8"?>')
        f.write('<?xml-stylesheet type="text/xsl" href="report.xsl"?>')
        f.write("<genes>")
        for geneIndex, gene in enumerate(genes):
            f.write('<gene>{0}</gene>'.format(gene))
            print('Transforming {0}/{1}: {2} ...'.format(geneIndex+1, len(genes), gene))
            with open('result/'+gene+'.json', 'r') as geneJSONFile:
                geneJSON = json.loads(geneJSONFile.read())
            # NOTE: renamed from 'geneXML' — the original shadowed the
            # Entrez record variable with this file handle.
            with open('trans/'+gene+'.xml', 'w') as geneXMLFile:
                geneXMLFile.write('<gene symbol="{0}" id="{1}">'.format(gene, EntrezIDs[geneIndex]))
                for comment in geneJSON['Gene-commentary_comment']:
                    if 'Gene-commentary_label' in comment.keys():
                        tag = comment['Gene-commentary_label'].replace(' ', '-')
                        # Escape '&', '<', '>' so free-text commentary
                        # cannot produce malformed XML.
                        geneXMLFile.write('<{0}>{1}</{0}>'.format(tag, escape(comment['Gene-commentary_text'])))
                geneXMLFile.write('</gene>')
        f.write("</genes>")

    # Apply the XSLT stylesheet to the combined gene index and write the
    # merged HTML report.
    with open(fileName+'.genes.xml', 'r', encoding="utf-8") as results:
        with open('report.xsl', 'r', encoding="utf-8") as xsl:
            print('正在合并……')
            xslt = etree.parse(xsl)
            xml = etree.parse(results)

            transform = etree.XSLT(xslt)
            merged = transform(xml)

            with open(fileName+'.merge.html', 'w', encoding="utf-8") as output:
                output.write(str(merged))
                print('合并成功！执行完毕。')

# Run the full fetch/transform/report pipeline for both TF tables.
for tsvFile in ('completeUTFs.tsv', 'completeDTFs.tsv'):
    processCSV(tsvFile)
