# -*- coding: utf-8 -*-
# @Time : From 4.16:15:30
# @Author : 熊凯
# @File : SpiderForProduct.py
# @Software: PyCharm

import urllib.request,urllib.error         #获取数据
from bs4 import BeautifulSoup              #解析数据
import re                                  #正则表达式匹配
import xlwt                                #将数据存入excel

# Pre-compiled regexes used to scrape NGV collection pages.
# The first three + the artist pattern match against a result "card" on the
# search page; the rest match against the artwork's detail page.
findProductLink = re.compile(r'<a class="rd-card rd-card--square feature" href="(.*?)">')  # detail-page URL
findImageLink = re.compile(r'data-img-src="//(.*?)"')  # thumbnail URL (protocol-relative, "//" stripped)
findProductTitle = re.compile(r'<span class="rd-card__title">(.*?)</span>', re.S)  # re.S: title may span lines
findProductTime = re.compile(r'<em>.*?<br/>\n</em>\n<span>(.*?)</span>')  # creation date from the <h1> header
findProductSize = re.compile(r'<dt>Measurements</dt>\n<dd>(.*?)</dd>')  # physical dimensions
findProductMedium = re.compile(r'<dt>Medium</dt>\n<dd>(.*?)</dd>')  # material/medium
findProductSort = re.compile(r'<dt>Department</dt>\n<dd><a href=".*?">(.*?)</a></dd>')  # classification
findProductLocality = re.compile(r'<dt>Place/s of Execution</dt>\n<dd>(.*?)</dd>')  # place of execution
findProductNumber = re.compile(r'<dt>Accession Number</dt>\n<dd>(.*?)</dd>')  # museum accession number
findProductArtist = re.compile(r'<span class="rd-card__info">(.*?)</span>')  # artist name from the card

def main():
    """Crawl 75 NGV collection search pages and save all rows to product.xls.

    Each page yields a list of 12-column rows (see saveData for the schema).
    """
    baseurl = "https://www.ngv.vic.gov.au/?type=collection&s=China&from="
    data = []
    for page in range(1, 76):
        # getData returns a LIST of rows for one page; extend (not append)
        # so `data` stays a flat list of rows. The original append() built a
        # list of page-lists, which made saveData's data[j] indexing fail.
        data.extend(getData(baseurl + str(page)))
    savepath = "product.xls"
    saveData(data, savepath)

def askURL(url):
    """Fetch *url* and return the body decoded as UTF-8.

    A browser User-Agent is sent so the site does not reject the request.
    On failure the HTTP code/reason (when present) are printed and an
    empty string is returned, so callers can parse the result unconditionally.
    """
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
    }

    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # `with` guarantees the response is closed; the original leaked the
        # socket/file handle on every call.
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html

def getData(baseurl):
    """Parse one search-results page and return a list of product rows.

    Each row starts as [detail URL, image URL, title], is extended with the
    detail-page fields by getMore(), and ends with the artist name.
    """
    datalist = []
    html = askURL(baseurl)
    soup = BeautifulSoup(html, "html.parser")
    for item in soup.find_all('a', class_="rd-card rd-card--square feature"):
        item = str(item)

        links = re.findall(findProductLink, item)
        images = re.findall(findImageLink, item)
        titles = re.findall(findProductTitle, item)
        artists = re.findall(findProductArtist, item)
        if not (links and images and titles and artists):
            # Skip malformed cards instead of crashing with IndexError on [0]
            # (the original indexed every findall result unconditionally).
            continue

        # HTML-escaped '&amp;' in the image URL: dropping 'amp;' leaves '&'.
        image_link = re.sub('amp;', "", images[0])

        data = [links[0], image_link, titles[0]]
        data = getMore(data, links[0])
        data.append(artists[0])

        datalist.append(data)

    return datalist

def _first_match(pattern, text):
    """Return the first capture of *pattern* in *text*, or ' ' when absent."""
    found = re.findall(pattern, text)
    return found[0] if found else ' '


def getMore(data, ProductLink):
    """Fetch the artwork detail page and append its metadata fields to *data*.

    Appends, in order: creation time, measurements, medium, department,
    place of execution, the literal museum name 'NGV', accession number,
    and a blank placeholder for the "information" column. Missing fields
    become ' ' so every row keeps the same column count.
    """
    html = askURL(ProductLink)
    soup = BeautifulSoup(html, "html.parser")

    # Creation time lives in the page's <h1> header.
    for item in soup.find_all("h1", style="text-transform: none"):
        data.append(_first_match(findProductTime, str(item)))

    # All remaining metadata lives in the <dl id="info"> definition list.
    # _first_match replaces the original six copies of a bare `except:`
    # (which silently swallowed every exception, even KeyboardInterrupt).
    for item in soup.find_all("dl", id="info"):
        text = str(item)
        data.append(_first_match(findProductSize, text))
        data.append(_first_match(findProductMedium, text))
        data.append(_first_match(findProductSort, text))
        data.append(_first_match(findProductLocality, text))
        data.append('NGV')  # museum column is constant
        data.append(_first_match(findProductNumber, text))
        data.append(' ')  # "information" column placeholder

    return data

def saveData(datalist, savepath):
    """Write *datalist* (a list of 12-column rows) to an .xls workbook.

    Row 0 is the header; each data row i is written at sheet row i+1.
    """
    print("save....")
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)
    sheet = book.add_sheet('作品信息', cell_overwrite_ok=True)
    col = ("Url", "Imgurl", "name", "time", "dimension", "medium", "classification", "region", "museum", "ID", "information", "Artist")
    for j, title in enumerate(col):
        sheet.write(0, j, title)
    # Iterate the actual data instead of the original hard-coded
    # range(0, 2050), which raised IndexError when fewer rows were scraped
    # and silently dropped rows when there were more.
    for i, row in enumerate(datalist, start=1):
        for j in range(len(col)):
            sheet.write(i, j, row[j])
    book.save(savepath)

if __name__ == "__main__":
    main()
    print("爬取完毕！")