# Crawling requirements (stdlib)
import io
import urllib.error
import urllib.request

# Third-party: HTML parsing and Markdown output
from bs4 import BeautifulSoup
import markdown

# Crawl up to 50 joke pages by following each page's "next" link, appending
# every page's keywords (as a Markdown heading) and description to new.md.
BASE_URL = "https://xiaohua.zol.com.cn"
url = BASE_URL + "/detail1/1.html"

# Loop-invariant setup hoisted out of the loop: one opener with a browser-like
# User-Agent so the site does not reject the requests.
headers = ("User-Agent",
           "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]

pages_scraped = 0
while pages_scraped < 50:
    print(url)
    try:
        data = opener.open(url).read()
    except urllib.error.URLError:
        # Network/HTTP failure: stop rather than retry the same URL forever
        # (the original bare `except: continue` could spin indefinitely).
        break

    soup = BeautifulSoup(data, 'html.parser')
    next_link = soup.find(attrs={"class": "next"})
    keywords_tag = soup.find(attrs={"name": "keywords"})
    description_tag = soup.find(attrs={"name": "description"})
    if next_link is None or keywords_tag is None or description_tag is None:
        # Page lacks the expected structure — without a "next" link there is
        # no way to advance, so stop instead of looping on the same URL.
        break

    keywords = keywords_tag['content'].strip(',')
    description = description_tag['content']

    # Append as a Markdown section. `with` guarantees the handle is closed;
    # the original leaked one open file per page. "### " (with a space) is
    # required for the heading to render as Markdown.
    with open("new.md", "a+", encoding="utf-8") as f:
        f.write("### " + keywords)
        f.write('\n')
        f.write(description)
        f.write('\n')

    url = BASE_URL + next_link['href']
    pages_scraped += 1