from selenium import webdriver
import time
from lxml import etree
from urllib import parse

def numpage(begin_url,checkstr,clickelementxpath,urlxpath,textxpath,savefile,pagecheckstr=None,Linuxenv=False,proxy=False,headless=False,wtime=10):
    """Crawl a paginated listing site with Selenium and append article text to a file.

    Repeatedly: parse the current listing page for detail-page URLs, open each
    one in a new tab, extract its text nodes with ``textxpath`` and append them
    to ``savefile``, then click the "next page" element and continue until the
    listing yields no new URLs or a page load times out.

    Parameters
    ----------
    begin_url : str
        Listing page to start from, e.g.
        'https://ent.ifeng.com/movie/special/67thcannes/zhuanfang/list_0/{}.shtml'.
        NOTE(review): a '{}' placeholder is NOT formatted here — callers must
        ``format`` the URL themselves before passing it in.
    checkstr : str
        Marker string used to decide that a listing page finished loading;
        pick a string that appears on every page of the site (e.g. 'REDAZIONE').
    clickelementxpath : str
        XPath of the "next page" element to click,
        e.g. '//*[@id="__layout"]/div/div[2]/div/div[2]/div[2]/div[3]/i[2]'.
    urlxpath : str
        XPath selecting detail-page URLs on the listing page,
        e.g. '//div[@class="col-center"]//div/a/@href'.
    textxpath : str
        XPath selecting the text nodes to save from a detail page,
        e.g. '//div[@class="articleContent clearfix js_article-content"]//p/text()'.
    savefile : str
        Output path; extracted text is appended line by line,
        e.g. 'C:\\\\caoli\\\\pycharm\\\\file\\\\ceshi.txt'.
    pagecheckstr : str, optional
        Marker string for detail pages; defaults to ``checkstr``.
    Linuxenv : bool
        True when running on Linux: forces headless mode and loads the
        chromedriver binary from './chromedriver'.
    proxy : bool
        When True, route traffic through http://192.168.1.20:1081.
    headless : bool
        When True, run Chrome without a visible window.
    wtime : int
        Seconds to wait for each page load before giving up.

    Returns
    -------
    None.  Returns early (after quitting the browser) when the first listing
    page never shows ``checkstr`` within ``wtime`` seconds.
    """
    if not pagecheckstr:
        pagecheckstr = checkstr

    chrome_options = webdriver.ChromeOptions()
    # Disable image loading to speed up page fetches.
    chrome_options.add_experimental_option(
        'prefs', {'profile.default_content_setting_values': {'images': 2}})
    if Linuxenv:
        headless = True
    if headless:
        chrome_options.add_argument('--headless')
    if proxy:
        chrome_options.add_argument('--proxy-server=http://192.168.1.20:1081')
    chrome_options.add_argument('--no-sandbox')
    if Linuxenv:
        # NOTE(review): the positional driver-path argument was removed in
        # Selenium >= 4.10; switch to a Service object if you upgrade.
        browser = webdriver.Chrome('./chromedriver', options=chrome_options)
    else:
        browser = webdriver.Chrome(options=chrome_options)

    try:
        browser.get(begin_url)
        print('get ' + begin_url)
        html = _wait_for_marker(browser, checkstr, wtime, begin_url)
        if html is None:
            # Original code returned here WITHOUT quitting, leaking the
            # Chrome process; the finally clause below now cleans up.
            return None

        old = set()
        while True:
            new = set(etree.HTML(html).xpath(urlxpath))
            # Visit only URLs not seen on the previous listing page.
            for purl in new - old:
                gurl = parse.urljoin(begin_url, purl)
                browser.execute_script("window.open('" + gurl + "');")
                time.sleep(0.5)
                handles = browser.window_handles
                # The tab just opened is the last handle (the original used
                # handles[1], which picks the wrong tab once extras pile up).
                browser.switch_to.window(handles[-1])
                phtml = _wait_for_marker(browser, pagecheckstr, wtime, gurl)
                if phtml is not None:
                    _save_text(phtml, textxpath, savefile)
                    print(gurl + ' done!')
                # Always close the tab and return to the listing window —
                # the original 'continue'd on timeout, leaving the dead tab
                # open and the driver focused on it, breaking every later
                # window switch and the next-button lookup.
                browser.close()
                browser.switch_to.window(handles[0])

            # Bring the "next page" element into view and click it.
            browser.execute_script('window.scrollBy(0,10000);')
            # find_element('xpath', ...) works on both Selenium 3 and 4;
            # find_element_by_xpath was removed in Selenium 4.3.
            nextbutton = browser.find_element('xpath', clickelementxpath)
            browser.execute_script('arguments[0].scrollIntoView();', nextbutton)
            browser.execute_script('window.scrollBy(0,-200);')
            nextbutton.click()

            html = _wait_for_marker(browser, checkstr, wtime, 'button click')
            if html is None:
                break
            old = new
            new = set(etree.HTML(html).xpath(urlxpath))
            if not new - old:
                # Clicking "next" produced no unseen URLs: last page reached.
                print('finish!')
                break
        print(begin_url + ' done!')
    finally:
        # Release the browser on every exit path (early return, break,
        # or an unexpected exception such as a missing next button).
        browser.quit()


def _wait_for_marker(browser, marker, wtime, label):
    """Poll ``browser.page_source`` once per second until ``marker`` appears.

    Returns the page HTML on success, or None after ``wtime`` polls
    (printing a timeout message tagged with ``label``).
    """
    for _ in range(wtime):
        html = browser.page_source
        if marker in html:
            return html
        time.sleep(1)
    html = browser.page_source
    if marker in html:
        return html
    print(label + ' timeout!')
    return None


def _save_text(phtml, textxpath, savefile):
    """Append the non-empty, stripped text nodes matched by ``textxpath`` in ``phtml`` to ``savefile``."""
    texts = etree.HTML(phtml).xpath(textxpath)
    with open(savefile, 'a', encoding='utf-8') as wf:
        for text in texts:
            stripped = text.strip()
            if stripped:
                wf.write(stripped + '\n')

# Usage examples:
#numpage('https://www.corriere.it/elezioni/','corriere','//*[@id="more_news_speciali"]','//section//section//section/article[1]/header/h3/a/@href','//p//text()','corriere.txt',headless=False)
#numpage("https://www.fanpage.it/","REDAZIONE","//div[@class='wrapper-correlati clearfix']//div/a/@href",
#        "//div[@class='col-center']//div/a/@href","//div[@class='articleContent clearfix js_article-content']//p/text()",
#        "C:\\caoli\\pycharm\\file\\ceshi.txt")
# NOTE(review): the call below was previously active but supplied only 2 of
# numpage's 6 required positional arguments (clickelementxpath, urlxpath,
# textxpath and savefile were missing), so running this module raised
# TypeError unconditionally.  Kept commented out until the missing XPath
# expressions and save path are filled in.
#numpage('https://ent.ifeng.com/movie/special/67thcannes/zhuanfang/list_0/{}.shtml',
#        '',
#        )