import requests
import pdfkit
from lxml import etree
from time import sleep


# A desktop-browser User-Agent so the server serves the full article page
# instead of rejecting an obvious script.
header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
    }
# Target WeChat article to mirror.
url = r'https://mp.weixin.qq.com/s/J33F859XEZy-TlB-JnHFQA'

# requests.get(url, headers=...) issues a GET request; the headers= keyword
# must be given explicitly, otherwise the header dict would be taken as the
# data payload (turning the call into a POST).
web_data = requests.get(url, headers=header)
web_data.encoding = "utf-8"  # decode .text as UTF-8
data = web_data.text
# Parse the raw bytes; lxml sniffs the encoding from the document itself.
root = etree.HTML(web_data.content)


# Select ol/li/div[@class="item"] regardless of where they sit in the document
# items = root.xpath('//ol/li/div[@class="item"]')

# wkhtmltopdf rendering options for pdfkit: A4 paper, zero margins, UTF-8.
options = {
    'page-size': 'A4',
    'margin-top': '0mm',
    'margin-right': '0mm',
    'margin-bottom': '0mm',
    'margin-left': '0mm',
    # 'orientation': 'Landscape',  # landscape layout
    'encoding': "UTF-8",
    'no-outline': None,
    # 'footer-right': '[page]'  # show page numbers in the footer
}


def url2html(url, file):
    """Download *url* and write the raw response body to *file*.

    Uses the module-level ``header`` dict so the request carries a browser
    User-Agent. The body is written as bytes (``'wb'``), so the server's
    original encoding is preserved verbatim.
    """
    r = requests.get(url, headers=header)
    # The 'with' statement closes the file automatically; the explicit
    # fh.close() the original had inside the block was redundant.
    with open(file, 'wb') as fh:
        fh.write(r.content)

# Each <blockquote> in the article holds the anchors pointing at the pages
# we want to mirror into d:/test/.
blocks = root.xpath('//blockquote')

for block in blocks:
    linksParts = block.xpath('.//a')
    for part in linksParts:
        value = part.xpath('string(.)')   # all text contained in the <a> tag
        hrefs = part.xpath('.//@href')
        if not hrefs:
            # Anchor without a link target — the original code would raise
            # IndexError here; skip it instead.
            continue
        print('d:/test/' + value + '.html')
        url2html(hrefs[0], 'd:/test/' + value + '.html')
        sleep(0.2)  # throttle requests so we don't hammer the server
        # print(part)




    # if(link[0:4] == 'http'):
    #     print(link[0:4])
    #     print(link)
    #     # 'd:/test/' + str(tag) +
    #     pdfkit.from_url(link, '0.pdf', options=options)
    #     exit(1)

# print(links.len())
# link = test.xpath('../a[@href]')
# print(test)
# ss = ('//a[@id="stockCode"]')

# pdfkit.from_url('http://google.com', 'out.pdf', options=options)