# -*- coding:utf-8 -*-

from bs4 import BeautifulSoup
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException


def get_allele_feq(browser, snp, wait=30):
    """Look up 1000 Genomes allele frequencies for *snp* in the CHB
    (Han Chinese in Beijing) population via the NCBI variation browser.

    Parameters:
        browser: a live Selenium WebDriver instance used to load the page.
        snp: rs-identifier string, e.g. "rs12345".
        wait: seconds to sleep while the JavaScript-heavy page renders
            (the page loads slowly; the original hard-coded 30 s).

    Returns:
        On success a tuple (snp, base1_feq, base2_feq), e.g.
        ("rs123", "T=0.1408", "C=0.8592"); on failure the single string
        "<snp>:can't find element".
    """
    browser.get(
        'https://www.ncbi.nlm.nih.gov/variation/tools/1000genomes/?q=%s' % snp)  # Load page
    # The results table is rendered client-side, so give the page time to
    # finish loading before reading its DOM.
    time.sleep(wait)

    # Feed Selenium's rendered page_source into BeautifulSoup so we can
    # parse the DOM with bs4 instead of Selenium element locators.
    bs = BeautifulSoup(browser.page_source, "lxml")
    try:
        # Navigate from the "CHB" population label up to its enclosing
        # cell, then across to the sibling cell holding the frequencies.
        race = bs.find(string="CHB")
        race_data = race.find_parent("div").find_parent(
            "div").find_next_sibling("div")
        # class_ avoids clashing with the Python keyword "class".
        race_feq = race_data.find("span", class_="gt-selected").find_all("li")
        base1_feq = race_feq[0].text  # e.g. "T=0.1408"
        base2_feq = race_feq[1].text  # e.g. "C=0.8592"
        return snp, base1_feq, base2_feq

    # BUG FIX: the original caught selenium's NoSuchElementException, but
    # nothing in the try block raises it -- BeautifulSoup's find() returns
    # None on a miss, so the chained calls raise AttributeError (and the
    # [0]/[1] indexing can raise IndexError). Those previously crashed the
    # whole run instead of being reported per-SNP.
    except (AttributeError, IndexError):
        return "%s:can't find element" % snp

def main():
    """Read SNP ids (one per line) from the input list, query each against
    the 1000 Genomes browser, and write tab-separated allele frequencies
    to the output file (also echoed to stdout)."""
    browser = webdriver.Chrome()  # start a local Chrome session
    try:
        # "with" guarantees both files are closed even if a query raises
        # (the original leaked the handles on any exception).
        with open("./4diseases_snps_1kCHB_allele_feq.list2", 'w') as fh, \
                open("./4diseases_snps.list.uniq2", 'r') as snps:
            for line in snps:
                snp = line.strip()
                response = get_allele_feq(browser, snp)
                time.sleep(1)
                # BUG FIX: on failure get_allele_feq returns a plain string,
                # and "\t".join() on a string interleaves a tab between every
                # character; only join when we actually got a tuple of fields.
                if isinstance(response, str):
                    record = response
                else:
                    record = "\t".join(response)
                fh.write(record)
                fh.write("\n")
                # Parenthesized print works under both Python 2 and 3;
                # the original bare "print ..." is Python 2-only syntax.
                print(record)
                time.sleep(1)  # throttle: be gentle with the NCBI server
    finally:
        browser.quit()  # always shut down the driver, even on error


# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()