import requests
from lxml import etree
from selenium import webdriver
import time
from selenium.webdriver.chrome.options import Options
import logging
import re

def save_url_and_title(url, title):
    """Append one "title,url" line to url_and_title.csv and log it.

    url   -- article URL (str)
    title -- article title (str); may contain non-ASCII (Chinese) text
    """
    content = title + ',' + url + '\n'
    logging.info(content)
    # Fix: titles are Chinese — without an explicit encoding, writing depends
    # on the platform's locale and can raise UnicodeEncodeError on Windows.
    with open('url_and_title.csv', 'a', encoding='utf-8') as f:
        f.write(content)

def save_list_url(driver, page_url, max_page, now_page):
    """Crawl the paginated news list under *page_url* and persist every
    article's (url, title) pair via save_url_and_title().

    driver   -- an already-started Selenium WebDriver
    page_url -- category root URL; assumed to end with '/' — TODO confirm
    max_page -- total page count; pass 0 to discover it from the first page
    now_page -- index of the last list page already processed (0 = none)
    """
    link_xpath = ('//*[@id="content"]/div[@class="content1"]'
                  '/div[@class="newsList"]/ul/li/a')
    if max_page == 0:
        # First visit: load the category index page to discover the count.
        driver.get(page_url)
        # Bug fix: the first page's articles were previously fetched into
        # two throwaway lists and never saved — save them here.
        _save_article_links(driver.find_elements_by_xpath(link_xpath))
        # The site embeds the total page count in a JS call, e.g.
        # createPageHTML(12, ...). Guard against the marker being absent.
        match = re.search(r'createPageHTML\((\d{1,3})', driver.page_source)
        if match is None:
            logging.warning('page count marker not found on %s', page_url)
            return
        max_page = int(match.group(1))
        print('最大页数:', max_page)
        save_list_url(driver, page_url, max_page, now_page)
    else:
        # Subsequent pages live at index_1.html .. index_{max_page-1}.html.
        while now_page != max_page - 1:
            now_page += 1
            now_page_url = page_url + 'index_' + str(now_page) + '.html'
            driver.get(now_page_url)
            # One XPath query is enough — the same <a> elements carry both
            # the href and the title text (the original queried twice).
            _save_article_links(driver.find_elements_by_xpath(link_xpath))

def _save_article_links(anchors):
    """Persist one (url, title) pair per <a> WebElement in *anchors*."""
    for a in anchors:
        article_url = a.get_attribute("href")
        title = a.text
        print(article_url, title)
        save_url_and_title(article_url, title)

# Every saved (title, url) pair is also recorded at INFO level in this log.
logging.basicConfig(level=logging.INFO, filename='url_and_title.log')

# Category list pages to crawl, as paths relative to base_url.
# Fix: '/kppdqxsj/kppdqxgc/' had a stray leading slash, inconsistent with
# every sibling entry — base_url + path produced a '//' double-slash URL.
list_url_path = [
    'kppdqxsj/kppdtqqh/', 'kppdqxsj/kppdqxgc/', 'kppdqxsj/kppdqhbh/',
    'kppdqxsj/kppdrgyxtq/', 'kppdqxsj/kppdldfh/', 'kppdqxsj/kppdhwsm/',
    'kppdmsgd/', 'kppdqxyr/kppdnyqx/', 'kppdqxyr/kppdshqx/',
    'kppdqxyr/kppdtyqx/', 'kppdqxyr/kppdjtqx/', 'kppdqxyr/kppdjsqx/',
    'kppdqxyr/kppdxyqx/', 'kppdkjzg/', 'kppdqxwq/kppdqwys/', 'kppdkpdt/',
]

base_url = 'http://www.cma.gov.cn/kppd/'

ops = Options()
# ops.add_argument('--proxy-server=http://112.87.69.76:9999')
driver = webdriver.Chrome(chrome_options=ops)
try:
    driver.get(base_url)
    time.sleep(2)
    for url_path in list_url_path:
        # 0 tells save_list_url to discover the category's page count itself.
        max_page = 0
        # Index of the last list page already processed (0 = none yet).
        now_page = 0
        url = base_url + url_path
        # Bug fix: this call was commented out, so the loop only slept and
        # the script crawled nothing.
        save_list_url(driver, url, max_page, now_page)
        time.sleep(2)  # be polite to the server between categories
finally:
    # Always shut the browser down, even if a page load raises — the
    # original leaked a Chrome process on every run.
    driver.quit()
