#!/usr/bin/python
# -*- coding:UTF8 -*-

from lxml import etree
import requests
import re
import os

# 1. Fetch the landing page that lists the investor-relations modules.
new_url = 'https://www.jkl.com.cn/newsList.aspx?TypeId=10009'
user_agent = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36'}

listing_html = requests.get(url=new_url, headers=user_agent).text
# Parse the listing and collect each module's title and link.
tree = etree.HTML(listing_html)
module_names = tree.xpath('//div[@class="infoLis"]//a/text()')
module_hrefs = tree.xpath('//div[@class="infoLis"]//@href')
# Turn the site-relative hrefs into absolute URLs.
module_hrefs = ['https://www.jkl.com.cn/' + href for href in module_hrefs]
# Map module title -> absolute module URL for the download loop below.
dict1 = dict(zip(module_names, module_hrefs))
# 2. For each module: create its folder, page through its file list, and
#    download every attachment into that folder.
for info_name, info_href in dict1.items():
    # Normalise names that are illegal in Windows paths or were truncated
    # ("...") by the site's listing page.
    info_name = info_name.replace('/', '-')
    info_name = info_name.replace('披露...', '披露报表')
    info_name = info_name.replace('范运...', '范运作告知书')
    # Create the destination folder (exist_ok avoids the check-then-create race).
    info_path = 'D:/下载的软件/Python爬虫 第1季/第一季/测试数据/投资者关系/' + info_name
    os.makedirs(info_path, exist_ok=True)
    # Fetch the module's first page to locate the "last page" (尾页) link,
    # which encodes the total page count in its href.
    r = requests.get(url=info_href, headers=user_agent).text
    analytical_data = etree.HTML(r)
    tail_page = analytical_data.xpath('//a[text()="尾页"]/@href')
    if tail_page:
        # Raw string for the regex so '\d' is not an invalid escape.
        match = re.search(r'(\d+)', tail_page[0])
        page_count = int(match.group(1))
    else:
        # No pagination link: the module has a single page.
        page_count = 1
    # Walk every page of the module and download each listed file.
    for page in range(1, page_count + 1):
        params = {
            'current': page
        }
        r = requests.get(url=info_href, headers=user_agent, params=params).text
        analytical_data = etree.HTML(r)
        file_names = analytical_data.xpath('//div[@class="newsLis"]//li//a/text()')
        file_hrefs = analytical_data.xpath('//div[@class="newsLis"]//li//@href')
        file_names = [name.strip() for name in file_names]
        # Only prefix when every href is non-empty (some listings mix in
        # empty attributes — presumably anchors without targets; TODO confirm).
        if all(file_hrefs):
            file_hrefs = ['https://www.jkl.com.cn' + href for href in file_hrefs]
        dict2 = dict(zip(file_names, file_hrefs))
        for new_name, new_href in dict2.items():
            # BUG FIX: download the file's own URL (new_href). The original
            # fetched new_url (the module-listing page), so every saved
            # "file" contained the same HTML page instead of the document.
            content = requests.get(url=new_href, headers=user_agent).content
            new_name = new_name.replace('/', '-')
            extension = new_href.split('.')[-1]  # e.g. 'pdf'
            file_path = info_path + '/' + new_name + '.' + extension
            # Write the downloaded bytes into the module's folder.
            with open(file_path, 'wb') as file:
                file.write(content)
                print(file_path, '下载成功！！！')
