import json
import os
from time import sleep

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

from utils import Download
# --- Headless Chrome setup --------------------------------------------------
chrome_opt = Options()
chrome_opt.add_argument('--headless')              # run without a visible window
chrome_opt.add_argument('--disable-gpu')           # recommended together with --headless
chrome_opt.add_argument('--window-size=1366,768')  # window size affects the rendered layout
chrome_opt.add_argument("--no-sandbox")            # required when running as root / in containers

baseurl = "http://www.gnomad-sg.org/downloads"

# BUG FIX: chrome_opt was created but never passed to the driver, so every
# flag above (headless included) was silently ignored.
driver = webdriver.Chrome(options=chrome_opt)
try:
    driver.get(baseurl)
    sleep(5)  # crude wait for the JS-rendered download tables; TODO: use WebDriverWait
    soup = BeautifulSoup(driver.page_source, 'html.parser')
finally:
    # BUG FIX: the browser process was never released before.
    driver.quit()

# The two download columns on the page (index 0 = exomes, 1 = genomes).
# NOTE(review): this class name is build-specific and may break when the
# site is redeployed — confirm after any scrape failure.
div = soup.find_all('div', class_="downloadsPageStyles__Column-sc-1tywk31-3 VAolC")

# Previously recorded metadata (url / md5 / local path per chromosome).
with open('gnomad.json', 'r') as f:
    data_source = json.load(f)

def compere_exomes(div, data_source):
    """Compare the exomes download column against the cached metadata.

    For each chromosome row in ``div[0]``, compare the MD5 published on the
    page with the one stored in ``data_source``.  When it differs — or the
    chromosome is not recorded yet — download the file and refresh the full
    record.  ``data_source`` is mutated in place.

    Args:
        div: BeautifulSoup result set; ``div[0]`` is the exomes column.
        data_source: parsed ``gnomad.json``; ``variant[0]['subfiles']`` holds
            one ``{chrname: {...}}`` dict per list row.
    """
    exomes = div[0]
    ul = exomes.find_all('ul')[0]
    print(len(ul.find_all('li')))
    li = ul.find_all('li')[2:]  # first two <li> are header rows, not chromosomes
    exome_data = data_source['variant'][0]['subfiles']
    for i, item in enumerate(li):
        spans = item.find_all('span')
        chrname = spans[0].text
        url = spans[2].find_all('a')[0]['href']
        filename = url.split('/')[-1]
        path = filename.replace('bgz', 'gz')  # local copies are stored as .gz
        md5 = spans[1].text.split(u'\xa0')[-1]  # value follows a &nbsp; separator
        if chrname in exome_data[i] and md5 == exome_data[i][chrname]['md5']:
            print(chrname + '未改变')
        else:
            download = Download(url, fileName=path, dirName='gnomad')
            md5 = download.start()
            # BUG FIX: previously only ['md5'] was assigned, which raised
            # KeyError for a brand-new chromosome key and kept a stale
            # url/filename/path when the remote file moved.  Store the
            # complete record (same schema as the bootstrap code below).
            exome_data[i][chrname] = {
                'filename': filename, 'url': url, 'md5': md5, 'path': path,
            }
def compere_genomes(div, data_source):
    """Compare the genomes download column against the cached metadata.

    Mirror of :func:`compere_exomes` for ``div[1]`` /
    ``data_source['variant'][1]``.  When a published MD5 differs from the
    cached one — or the chromosome is not recorded yet — download the file
    and refresh the full record.  ``data_source`` is mutated in place.

    Args:
        div: BeautifulSoup result set; ``div[1]`` is the genomes column.
        data_source: parsed ``gnomad.json``; ``variant[1]['subfiles']`` holds
            one ``{chrname: {...}}`` dict per list row.
    """
    genomes = div[1]
    ul = genomes.find_all('ul')[0]
    li = ul.find_all('li')[3:]  # first three <li> are header rows, not chromosomes
    genome_data = data_source['variant'][1]['subfiles']
    for i, item in enumerate(li):
        spans = item.find_all('span')
        chrname = spans[0].text
        url = spans[2].find_all('a')[0]['href']
        filename = url.split('/')[-1]
        path = filename.replace('bgz', 'gz')  # local copies are stored as .gz
        md5 = spans[1].text.split(u'\xa0')[-1]  # value follows a &nbsp; separator
        if chrname in genome_data[i] and md5 == genome_data[i][chrname]['md5']:
            print(chrname + '未改变')
        else:
            download = Download(url, fileName=path, dirName='gnomad')
            md5 = download.start()
            # BUG FIX: previously only ['md5'] was assigned, which raised
            # KeyError for a brand-new chromosome key and kept a stale
            # url/filename/path when the remote file moved.  Store the
            # complete record (same schema as the bootstrap code below).
            genome_data[i][chrname] = {
                'filename': filename, 'url': url, 'md5': md5, 'path': path,
            }
        

# Run both comparisons (they mutate data_source in place) and persist the
# updated metadata.
compere_exomes(div, data_source)
compere_genomes(div, data_source)

# ROBUSTNESS FIX: write to a temporary file and rename it into place so a
# crash mid-dump cannot truncate gnomad.json, the only copy of the metadata.
tmp_path = 'gnomad.json.tmp'
with open(tmp_path, 'w') as f:
    json.dump(data_source, f)
os.replace(tmp_path, 'gnomad.json')  # atomic replace on POSIX and Windows



# One-off bootstrap: initial creation of gnomad.json (kept for reference)
# exomes=div[0]
# ul=exomes.find_all('ul')[0]
# li=ul.find_all('li')
# print(len(li))
# filelist_exomes=[]
# for i in range(2,26):
#     chrname=li[i].find_all('span')[0].text
#     a=li[i].find_all('span')[2]
#     url=a.find_all('a')[0]['href']
#     filename=url.split('/')[-1]
#     path=filename.replace('bgz','gz')
#     md5=li[i].find_all('span')[1].text
#     md5=md5.split(u'\xa0')[-1]
#     filelist_exomes.append({chrname:{'filename':filename,'url':url,'md5':md5,'path':path}})
    
# genomes=div[1]
# ul=genomes.find_all('ul')[0]
# li=ul.find_all('li')
# print(len(li))
# filelist_genomes=[]
# for i in range(3,26):
#     chrname=li[i].find_all('span')[0].text
#     a=li[i].find_all('span')[2]
#     url=a.find_all('a')[0]['href']
#     filename=url.split('/')[-1]
#     path=filename.replace('bgz','gz')
#     md5=li[i].find_all('span')[1].text
#     md5=md5.split(u'\xa0')[-1]
#     filelist_genomes.append({chrname:{'filename':filename,'url':url,'md5':md5,'path':path}})
# # print(filelist)
# with open('gnomad.json','w')as f:
#     json.dump({"baseurl": baseurl,"variant":[{'name':"exomes",'subfiles':filelist_exomes},{'name':"genomes",'subfiles':filelist_genomes}]},f)
