""""
武汉市政策法规-字段：标题、日期、内容
"""
import re
from urllib.parse import urljoin

import requests
from lxml import etree

from gzproject.gz_spider.gz_spider import db

def get_url(url, timeout=10):
    """Collect detail-page URLs from one listing page.

    Parameters
    ----------
    url : str
        Absolute URL of a listing page containing ``ul.info-list`` markup.
    timeout : float
        Seconds before the HTTP request is abandoned (new parameter,
        defaulted so existing callers are unaffected).

    Returns
    -------
    list[str]
        Absolute detail-page URLs. Relative hrefs are resolved against
        ``url``; ``<li>`` items without an href are skipped instead of
        producing empty strings.
    """
    res = requests.get(url, timeout=timeout)
    # Fail loudly on 4xx/5xx instead of silently parsing an error page.
    res.raise_for_status()
    page = etree.HTML(res.content.decode('utf-8'))
    hrefs = page.xpath('//ul[@class="info-list"]/li/a/@href')
    # urljoin keeps absolute hrefs as-is and resolves relative ones
    # (e.g. './202101/t...shtml') so callers can fetch them directly.
    return [urljoin(url, href) for href in hrefs if href]

def detail_data(url_list, timeout=10):
    """Fetch each detail page and print its article title.

    Parameters
    ----------
    url_list : list[str]
        Absolute URLs of article detail pages.
    timeout : float
        Per-request timeout in seconds (new parameter, defaulted for
        backward compatibility).
    """
    for url in url_list:
        res = requests.get(url, timeout=timeout)
        page = etree.HTML(res.content.decode('utf-8'))
        # xpath() returns a list of text nodes; join them so we print the
        # plain title text rather than a Python list repr like "['标题']".
        title = page.xpath('//div[@class="article"]/h2[1]/text()')
        print(''.join(title).strip())



if __name__ == '__main__':
    base_url = 'http://zrzyhgh.wuhan.gov.cn/zwgk_18/zcfgyjd/cxghl/'
    # Page 0 is the bare listing index; pages 1..4 are index_{i}.shtml.
    # (The original `if i <= 0: url = url` branch was a no-op and is removed.)
    for page in range(5):
        page_url = base_url if page == 0 else f'{base_url}index_{page}.shtml'
        detail_data(get_url(page_url))


# url_list = []
# for url in urls:
#     if url.startswith('http'):
#         url_list.append(url)
#
# for detail_url in url_list:
#     r = requests.get(detail_url)
#     data = r.content.decode('utf-8')
#     xpath_data = etree.HTML(data)
#     title = xpath_data.xpath('//p[@class="con-title"]//text()')
#     date = xpath_data.xpath('//span[@class="date"]/text()')
#     contents = xpath_data.xpath('//div[@class="main-txt"]//text()')
#     content = ''.join(contents).strip().replace(r'\n\t', '').replace(r' ', '')
#     db.db_insert(''.join(title),''.join(date),''.join(content))
