# -*- coding: utf-8 -*-
import re
import urllib.error
import urllib.request
from bs4 import BeautifulSoup
import json
#111111111
# Base URL of the National Palace Museum open-data image-set pages.
Baseurl = "https://theme.npm.edu.tw/opendata/digitimagesets.aspx"

# Regex for the item name heading.
# NOTE(review): findName and findOther are defined but never used below —
# parsing is done with BeautifulSoup CSS selectors instead. Kept in case
# other code imports them; confirm before removing.
findName = re.compile(r'<h3>(.*)</h3>')  # compiled pattern object for the <h3> title
# Regex for the item's other attribute lines.
findOther = re.compile(r'<li><span>(.*)</li>')


# 爬取单个物品的详细内容
# Scrape the detail page of a single item and save its fields as JSON.
def getData(URL):
    """Fetch one item's detail page, parse its name and attribute list,
    and write the result to '<item-number>.json' in the working directory.

    URL -- absolute URL of the item detail page.
    Side effects: network request, file write, console output.
    """
    request = askURL(URL)
    html = urllib.request.urlopen(request).read().decode('utf-8')
    soup = BeautifulSoup(html, "html.parser")

    # First <h3> under .project-detail is the item name ("品名").
    headings = soup.select(".project-detail > h3")
    info = {}  # renamed from `map`, which shadowed the builtin
    info["品名"] = headings[0].text

    # Each <li> is "<label>：<value>"; split on the full-width colon.
    items = soup.select(".project-detail > ul > li")
    # First <li> carries the item number after a 7-char label prefix
    # (NOTE(review): offset 7 assumed from the page layout — verify).
    filename_num = items[0].text[7:]
    filename = filename_num + '.json'
    for li in items:
        text = li.text
        sep = text.index('：')
        info[text[:sep]] = text[sep + 1:]

    j = json.dumps(info, ensure_ascii=False)
    # Fixed: original used "文物编号：".format(...) with no placeholder,
    # so the number was silently dropped.
    print("文物编号：{}".format(filename_num), end=',')
    # `with` + explicit encoding: guarantees the handle is closed and the
    # Chinese text is written as UTF-8 regardless of platform default.
    with open(filename, "w", encoding="utf-8") as destfile:
        destfile.write(j)
    print("已经保存")



# 爬取网页
# Crawl one listing page and visit every item it links to.
def getPage(baseurl):
    """Fetch a listing page, extract each item's link, and call getData on it.

    baseurl -- absolute URL of the listing page (with paging query string).
    Side effects: network requests (one per listed item via getData).
    """
    request = askURL(baseurl)
    html = urllib.request.urlopen(request).read().decode('utf-8')
    # Stdlib parser for consistency with getData — avoids the extra
    # third-party lxml dependency the original pulled in here only.
    soup = BeautifulSoup(html, 'html.parser')
    links = soup.select('.painting-list > li > a')
    for a in links:
        # href starts with a relative prefix; the first 19 chars are dropped
        # and the remainder (the query string) is appended to the base URL
        # (NOTE(review): offset 19 assumed from the site's href format — verify).
        childUrl = Baseurl + a['href'][19:]
        getData(childUrl)


# 得到指定一个url的网页内容
# Build a Request object for the given URL.
def askURL(url):
    """Return a urllib Request for *url* that presents a desktop-browser
    User-Agent header, so the server treats the crawler like a browser."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \
                      Chrome/94.0.4606.71 Safari/537.36 Edg/94.0.992.38"
    }
    return urllib.request.Request(url=url, headers=headers)

if __name__ == "__main__":
    # Crawl listing pages 367..401, one getPage call per page.
    for page_no in range(367, 402):
        page_url = f"{Baseurl}?Key=^^3&pageNo={page_no}"
        print(page_url)
        getPage(page_url)
   