"""
杭州市规划文件-字段：标题、日期、内容、文件、文件名
"""
import os

import requests
import re
import urllib.request


from lxml import etree

from gzproject.gz_spider.gz_spider import db

# Fetch the document list page (column col1229368401, page 1) and pull every
# href attribute out of the raw HTML with a regex.
res = requests.get('http://ghzy.hangzhou.gov.cn/col/col1229368401/index.html?uid=6405289&pageNum=1')
data = res.content.decode('utf-8')
urls = re.findall('href="(.*?)" ', data)

# Keep only absolute links — relative hrefs on the list page are not the
# detail pages we want to visit.
url_list = [url for url in urls if url.startswith('http')]

# Visit each detail page, extract title/date/body text and attachment links,
# then download the attachments into hangzhou_files/<title>/.
for detail_url in url_list:
    r = requests.get(detail_url)
    data = r.content.decode('utf-8')
    xpath_data = etree.HTML(data)
    title = xpath_data.xpath('//p[@class="con-title"]//text()')
    date = xpath_data.xpath('//span[@class="date"]/text()')
    contents = xpath_data.xpath('//div[@class="main-txt"]//text()')
    # BUG FIX: the original called .replace(r'\n\t', ''), where the raw string
    # matches the literal four-character text "\n\t" — not real newline/tab
    # characters — so the cleanup removed nothing but spaces. Strip the actual
    # whitespace characters instead.
    content = ''.join(contents).strip().replace('\n', '').replace('\t', '').replace(' ', '')
    image_urls = xpath_data.xpath('//div[@class="main-txt"]//p/a/@href')
    image_name = xpath_data.xpath('//div[@class="main-txt"]//p/a/text()')
    # Attachment hrefs containing '/module' are site-relative; make them
    # absolute. Renamed from `urls`/`url` to avoid shadowing the outer list.
    file_urls = [f'http://ghzy.hangzhou.gov.cn{i}' for i in image_urls if '/module' in i]
    # db.db_insert(''.join(title), ''.join(date), content, ''.join(file_urls), ''.join(image_name))
    if file_urls:
        image_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'hangzhou_files')
        # makedirs(exist_ok=True) replaces the racy exists()-then-mkdir pattern.
        os.makedirs(image_path, exist_ok=True)

        # One sub-folder per document, named after the joined title text.
        category_path = os.path.join(image_path, ''.join(title))
        os.makedirs(category_path, exist_ok=True)

        for file_url in file_urls:
            # Derive the saved name from the URL's last path segment instead of
            # the brittle fixed slice url[76:120], which produced an empty
            # filename for shorter URLs; keep the slice only as a fallback.
            filename = file_url.rsplit('/', 1)[-1] or file_url[76:120]
            urllib.request.urlretrieve(url=file_url, filename=os.path.join(category_path, filename))
