from selenium import webdriver
import time
from scrapy.selector import Selector

prefs = {"profile.managed_default_content_settings.images":2}
chrome_opt = webdriver.ChromeOptions()
chrome_opt.add_experimental_option("prefs",prefs)
browser = webdriver.Chrome(executable_path="G:\python\chromedriver.exe",chrome_options=chrome_opt)

browser.get("http://news.sina.com.cn/china/")
#detial_page = webdriver.Chrome(executable_path="G:\python\chromedriver.exe",chrome_options=chrome_opt)
#detial_page.get("http://news.sina.com.cn/o/2018-04-26/doc-ifztkpin6702119.shtml")
# 下拉滚动条，使浏览器加载出动态加载的内容，可能像这样要拉很多次，中间要适当的延时（跟网速也有关系）。
for i in range(3):
    for i in range(3):
        browser.execute_script("window.scrollTo(0,document.body.scrollHeight); var lenOfPage=document.body.scrollHeight; return lenOfPage;")
        time.sleep(3)

    t_selector = Selector(text=browser.page_source)
    urls = t_selector.css("#subShowContent1_news4 div h2 a::attr(href)").extract()
    one_page_urls_sum = 0
    print("当前页面地址为{0}".format(browser.current_url))
    print("----------------------")
    for url in urls:
        one_page_urls_sum += 1
        print(url)
    print("单页上的url数为{0}".format(one_page_urls_sum))
    print("----------------------")

    next_element = browser.find_element_by_css_selector("a[title='下一页']")
    if (next_element):
        next_element.click()


# t_selector = Selector(text=browser.page_source)
# urls = t_selector.css("#subShowContent1_news4 div h2 a::text").extract()
# #comment_num = page_selector.css("span[href='#url'] em a::text").extract()
#
# news_nodes = t_selector.css("#subShowContent1_news4 div h2 a::attr(href)").extract()


# 获取下一页的按钮

#print(comment_num)
browser.close()
#detial_page.close()