import os
import re
import requests
import urllib.request

from lxml import etree

from gz_spider.gz_spider.db import db_insert


# 获取数据
def get_date(url):
    res = requests.get(url)
    data = res.content.decode('utf-8')
    xpath_data = etree.HTML(data)

    return xpath_data


# 首页数据
def list_item(url):
    url_list = []
    pdf_doc_list = []

    data = get_date(url)
    items = data.xpath('//ul[@class="list-group"]/li[@class="list-group-item"]')
    for item in items:
        is_href = item.xpath('./a/@href')[0]
        # 列表页如果有pdf 或者是 doc 就直接下载
        if is_href.endswith('.pdf'):
            image_name = item.xpath('./a/text()')
            pdf_doc_list.append((f'{image_name[0]}.pdf', is_href))
        elif is_href.endswith('.doc'):
            image_name = item.xpath('./a/text()')
            pdf_doc_list.append((f'{image_name[0]}.doc', is_href))
        elif 'pnr.sz.gov.cn' not in is_href:
            continue
        else:
            url_list.append(is_href)

    return url_list, pdf_doc_list


# 详情页数据
def detail(url_list, url):
    # 这里想要把首页的文件和详情页的文件一起发给 download_image 函数，所以再这里重新调用一次
    _, pdf_doc_list = list_item(url)
    detail_list = []
    data_list = []
    for url in url_list:
        data_dict = dict()
        data = get_date(url)
        data_dict['title'] = data.xpath('//h4//text()')
        des = data.xpath('//h5//text()')
        data_dict['date'] = re.findall('日期： \d+-\d+-\d+', des[0] if des else '')
        contents = data.xpath('//font[@id="Zoom"]//p//text()')

        data_list.append(data_dict)

        image_urls = data.xpath('//p[@id="appendix"]//a/@href')
        image_name = data.xpath('//p[@id="appendix"]//a/text()')
        detail_list.append(list(zip(image_urls, image_name, data_dict['title'])))

        title = data_dict['title'][0].replace('“', '') if data_dict['title'] else ''
        date = data_dict['date'][0] if data_dict['date'] else ''
        content = ''.join(contents).strip().replace(r'\n\t', '').replace(r' ', '') if contents else ''
        new_image_urls = ''
        if image_urls:
            for i in image_urls:
                new_image_urls += i

        new_image_name = ''
        if image_name:
            for j in image_name:
                new_image_name += j

        db_insert(title, content[16384] if len(content) > 16384 else content, new_image_urls, new_image_name)

    return pdf_doc_list, detail_list


# 下载文件
def download_image(file):
    # 创建一个文件夹
    image_path = os.path.join(os.path.join(os.path.dirname(os.path.dirname(__file__))), 'shenzhen_guihua')
    if not os.path.exists(image_path):
        os.makedirs(image_path)

    # 列表页文件
    url_list, detail_list = file
    for index_file in url_list:
        urllib.request.urlretrieve(index_file[1], filename=os.path.join(image_path, index_file[0]))

    for detail_file in detail_list:
        if detail_file:
            # 创建图片类别文件夹
            title_path = os.path.join(image_path, detail_file[0][2])
            if not os.path.exists(title_path):
                os.makedirs(title_path)
            # 下载文件
            urllib.request.urlretrieve(url=detail_file[0][0], filename=os.path.join(title_path, detail_file[0][1]))


if __name__ == '__main__':
    # Crawl the first two index pages; page 1 has no numeric suffix.
    # The duplicated if/else bodies of the original are collapsed — only
    # the URL differs between branches.
    for page in range(1, 3):
        if page == 1:
            url = 'http://pnr.sz.gov.cn/ywzy/ghzs/index.html'
        else:
            url = f'http://pnr.sz.gov.cn/ywzy/ghzs/index_{page}.html'
        url_list, _ = list_item(url)
        result = detail(url_list, url)
        download_image(result)
