# coding=utf-8
from bs4 import BeautifulSoup
import time
import csv
from selenium import webdriver
# Launch Chrome via Selenium and open the Tencent News homepage.
# Use a raw string for the Windows path: the original non-raw literal
# contained invalid escape sequences ("\P", "\G", "\C", "\A"), which is a
# DeprecationWarning today and will become a SyntaxError in future Python.
driver = webdriver.Chrome(executable_path=r"C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe")
driver.get("https://news.qq.com")
# The page lazy-loads content via AJAX, so scroll down step by step to
# force everything to render before grabbing the HTML.
for step in range(1, 100):
    # Wait 2 seconds between scrolls to give the AJAX requests time to finish.
    time.sleep(2)
    driver.execute_script("window.scrollTo(window.scrollX, %d);" % (step * 200))

# After scrolling, page_source holds the fully rendered document.
html = driver.page_source
bsObj = BeautifulSoup(html, "lxml")

# Locate the featured ("jx") section and collect its <li> items: the sibling
# of the div.jx-tit header contains the article list.
jxtits = bsObj.find_all(
    "div", {"class": "jx-tit"})[0].find_next_sibling().find_all("li")
print("index", ",", "title", ",", "url")

# Renamed from `list` — the original shadowed the builtin.
rows = []
for i, jxtit in enumerate(jxtits):
    # Title: taken from the <img alt="..."> once the image has loaded;
    # before that, it sits in the lazyload placeholder <div>.
    try:
        text = jxtit.find_all("img")[0]["alt"]
    except (IndexError, KeyError):
        text = jxtit.find_all("div", {"class": "lazyload-placeholder"})[0].text
    try:
        url = jxtit.find_all("a")[0]["href"]
    except (IndexError, KeyError):
        # No link in this item: report it and skip, instead of silently
        # re-appending the url left over from the previous iteration
        # (a bug in the original: `url` stayed bound to its old value).
        print(jxtit)
        continue
    rows.append([text, url])

# Write the collected rows to a CSV. newline='' is required by the csv
# module on Windows (the original had a stray '' "" literal concatenation);
# explicit utf-8 prevents a UnicodeEncodeError on the Chinese header under
# a non-UTF-8 default locale.
with open('tx.csv', 'w', newline='', encoding='utf-8') as f:
    csv_writer = csv.writer(f)
    csv_writer.writerow(["index", "标题", "链接"])
    for count, (title, link) in enumerate(rows, start=1):
        csv_writer.writerow([count, title, link])
