from lxml import etree
from selenium import webdriver
import time
from urllib import parse

# b_set: every URL that has already been visited (global, shared by pa/bianli)
b_set = set()
# c_set: URLs newly discovered while the current frontier is being processed
c_set = set()

def pa(begin_url, openurl, textxpath, urlxpath, checkstr, savefile, wtime=10, pagecheckstr=None,Linuxenv=False,proxy=False,headless=False):
    '''
    Fetch one page with a Selenium-driven Chrome, harvest outgoing links into
    the global c_set, and append sufficiently long text fragments to savefile.

    begin_url     site root (kept for interface compatibility; unused here)
    openurl       URL actually opened in the browser
    textxpath     XPath selecting the text nodes to save from the page
    urlxpath      XPath selecting further detail-page URLs on the page
    checkstr      marker string proving the HTML finished loading (usually a
                  string that appears on every page of the site)
    savefile      path of the file the extracted text is appended to
    wtime         max seconds to wait for pagecheckstr before giving up
    pagecheckstr  marker used by the wait loop; defaults to checkstr
    Linuxenv      True when running on Linux; forces headless mode and uses
                  the local ./chromedriver binary
    proxy         True to route traffic through the configured HTTP proxy
    headless      True to run Chrome without a visible window
    '''
    global c_set

    if not pagecheckstr:
        pagecheckstr = checkstr

    chrome_options = webdriver.ChromeOptions()
    # Disable image loading to speed up page fetches.
    prefs = {'profile.default_content_setting_values': {'images': 2}}
    chrome_options.add_experimental_option('prefs', prefs)

    if Linuxenv:
        headless = True
    if headless:
        chrome_options.add_argument('--headless')
    if proxy:
        chrome_options.add_argument('--proxy-server=http://192.168.1.20:1081')
    chrome_options.add_argument('--no-sandbox')

    # BUG FIX: the original created a second driver unconditionally after the
    # Linux one, leaking a Chrome process.  Create exactly one instance.
    if Linuxenv:
        browser = webdriver.Chrome('./chromedriver', options=chrome_options)
    else:
        browser = webdriver.Chrome(options=chrome_options)

    try:
        browser.get(openurl)
        print('get ' + openurl)

        # Give the static page a moment to load, then collect outgoing links.
        time.sleep(1)
        html = browser.page_source
        textetr = etree.HTML(html)
        try:
            other_url_list = textetr.xpath(urlxpath)
            print("该页面url数量", len(set(other_url_list)))
            c_set = set.union(c_set, set(other_url_list))
        except Exception:
            print("null url")

        # Poll (up to wtime seconds) for the page marker before scraping text.
        gtime = 0
        while True:
            phtml = browser.page_source
            if pagecheckstr in phtml:
                break
            else:
                gtime += 1
                time.sleep(1)
            if gtime == wtime:
                print(openurl + ' timeout')
                break

        # Append every text fragment longer than 100 characters to the file.
        textlist = etree.HTML(phtml)
        with open(savefile, 'a', encoding='utf-8') as wf:
            for text in textlist.xpath(textxpath):
                if text.strip() and len(text.strip()) > 100:
                    wf.write(text.strip() + '\n')
        print(openurl + ' done!')
        # BUG FIX: the original printed len(a_set) here, but a_set is neither
        # a parameter nor a global of this function -- a latent NameError.
        print("c_set剩余", len(c_set))
    finally:
        # quit() (not close()) so the chromedriver process is terminated even
        # when an exception is raised above; close() only closes the window.
        browser.quit()


def quchong(c_set, b_set):
    '''
    Deduplicate freshly collected URLs against the visited ones.

    c_set: URLs gathered while the previous frontier was being processed.
    b_set: every URL that has already been opened.
    Returns the set of URLs that still need to be visited.
    '''
    return {url for url in c_set if url not in b_set}


def bianli(a_set, begin_url):
    '''
    Walk the frontier a_set: open each URL with pa() and record it in the
    global b_set of visited URLs.

    a_set      set of not-yet-visited URLs (consumed destructively via pop)
    begin_url  site root used both to resolve relative URLs and as the
               allowed-scope prefix
    '''
    global b_set
    while a_set:
        useurl = a_set.pop()
        # Skip stylesheet-like links (chars [-3:-1] == "cs", e.g. "*.css"),
        # but still mark them visited so they are not rediscovered forever.
        if useurl[-3:-1] == "cs":
            b_set.add(useurl)
            continue
        openurl = parse.urljoin(begin_url, useurl)

        # Stay inside the target site: compare the first 20 characters.
        if openurl[0:20] != begin_url[0:20]:
            print("跳出范围",openurl)
            b_set.add(useurl)
            continue
        pa("http://policy.mofcom.gov.cn",
           openurl,
           "//p/text()",
           "//@href",
           "京公网安备11040102700091号",
           "/data/caoli/pachong/ceshi.txt",
           Linuxenv=True,
           proxy=True)
        # BUG FIX: the original did set.union(b_set, useurl), which unioned
        # b_set with the individual CHARACTERS of the URL string, so visited
        # URLs were never recorded and would be crawled again.  Record the
        # whole URL instead.
        b_set.add(useurl)


# ---- entry point: seed the crawl from the site's front page ----
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--proxy-server=http://192.168.1.20:1081')
chrome_options.add_argument('--no-sandbox')
# BUG FIX: the original instantiated Chrome twice (leaking the first driver)
# and added --no-sandbox only AFTER the first instantiation, where it had no
# effect.  Build the options fully, then create a single driver.
browser = webdriver.Chrome(options=chrome_options)
browser.get("http://policy.mofcom.gov.cn")

# Collect every href on the front page as the initial URL frontier.
html = browser.page_source
textetr = etree.HTML(html)
first_url_list = textetr.xpath("//@href")
print("首页url数量", len(first_url_list))
c_set = set(first_url_list)
# BUG FIX: shut the seed browser down; the original leaked it.
browser.quit()

# Crawl loop: deduplicate the newly found URLs against the visited set,
# visit the resulting frontier, and repeat until nothing new appears.
while True:
    if len(c_set) == 0:
        print("finish")  # BUG FIX: was misspelled "finlsh"
        break
    else:
        a_set = quchong(c_set, b_set)
        c_set = set()
        bianli(a_set, "http://policy.mofcom.gov.cn")
