# -*- coding: utf-8 -*-
import os

import requests
from lxml import etree



def main():
    """Scrape the chinaz.com free-PPT listing page and download every
    template archive into ``./ppt/`` as ``<title>.rar``.

    Flow: fetch the listing page, extract each card's detail-page href and
    title, then for each card fetch the detail page, pull the first mirror
    link from the download box, and save its bytes to disk.
    """
    url = 'https://sc.chinaz.com/ppt/free.html'
    header = {
        'User-Agent': 'Mozilla/5.0(Windows NT 10.0;WOW64)AppleWebKit/537.36(KHTML, like Gecko) Chrome / 86.0.4240.198 Safari / 537.36'
    }
    src = requests.get(url=url, headers=header)
    # Force utf-8 so the Chinese titles used as filenames don't come out garbled.
    src.encoding = 'utf-8'
    tree = etree.HTML(src.text)

    # One entry per template card: detail-page link and human-readable title.
    wang_list = tree.xpath('//div[@class="bot-div"]/a/@href')
    name_list = tree.xpath('//div[@class="bot-div"]/a/@title')

    # The original crashed with FileNotFoundError when ./ppt was missing.
    os.makedirs('./ppt', exist_ok=True)

    for href, title in zip(wang_list, name_list):
        detail_url = 'https://sc.chinaz.com' + href
        detail = requests.get(url=detail_url, headers=header)
        detail.encoding = 'utf-8'
        # Use a separate variable so the listing-page tree is not clobbered.
        detail_tree = etree.HTML(detail.text)
        xiazai_list = detail_tree.xpath('//div[@class="download-url"]/a/@href')
        if not xiazai_list:
            # No mirror link found (layout change or removed item) — skip
            # instead of raising IndexError on xiazai_list[0].
            continue
        wenjian = requests.get(xiazai_list[0], headers=header).content
        # 'with' guarantees the handle is closed; the original leaked one
        # open file object per downloaded archive.
        with open('./ppt/' + title + '.rar', 'wb') as fp:
            fp.write(wenjian)

    print('保存成功')


# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()

