import urllib.request, urllib.error  # fetch data
import xlwt  # store the data into an Excel file
import json

def truncate_string(s):
    """Return *s* capped at 700 characters, appending "..." when truncated."""
    return s if len(s) <= 700 else s[:700] + "..."

def spider():
    """Crawl the Harvard Art Museums browse endpoint for Chinese-art records.

    Fetches 61 pages of 100 records each, checkpoints every raw JSON page to
    'havard<page>.json', extracts the fields of interest from each record and
    re-saves the cumulative spreadsheet via saveData() after every page.
    """
    baseurl = "https://harvardartmuseums.org/browse?q=chinese&load_amount=100&offset="
    infoList = []
    for j in range(61):
        url = baseurl + str(100 * j)
        html = askURL(url)
        # Keep a raw on-disk copy of each page for later inspection.
        # Explicit UTF-8 avoids a platform-default-encoding write that the
        # original then re-read as UTF-8 (could fail on Windows).
        with open('havard' + str(j + 1) + '.json', "w", encoding='utf-8') as f:
            f.write(html)
        # Parse the response we already hold instead of re-reading the file.
        data = json.loads(html)
        records = data.get('records')
        for rec in records:
            info = [rec['url']]
            # Not every record carries an image; fall back to the literal 'None'.
            if 'images' in rec:
                info.append(rec['images'][0]['baseimageurl'])
            else:
                info.append('None')
            info.append(rec['title'])
            info.append(rec['period'])
            # Nullable free-text fields: substitute the literal 'None' and cap
            # the length so the value fits comfortably in an Excel cell.
            for field in ('dimensions', 'medium'):
                value = rec[field]
                info.append('None' if value is None else truncate_string(value))
            info.append(rec['division'])
            info.append(rec['culture'])
            info.append("Havard")
            info.append(rec['objectnumber'])
            # Consistency fix: a missing description is recorded as the string
            # 'None', matching the other nullable fields (the original
            # appended the raw None value here).
            description = rec['description']
            info.append('None' if description is None else truncate_string(description))
            # The first listed person is taken as the artist when present.
            if 'people' in rec:
                info.append(rec['people'][0]['displayname'])
            else:
                info.append("None")
            infoList.append(info)
        # Re-save the cumulative sheet after every page as a checkpoint.
        saveData(infoList, 'havard.xls', j)






def saveData(datalist, savepath, seq):
    """Write the scraped rows in *datalist* to an .xls workbook at *savepath*.

    A header row is written first, then one sheet row per 12-field record.
    *seq* (the page counter) is accepted for interface compatibility but
    not used.
    """
    print("save....")
    workbook = xlwt.Workbook(encoding="utf-8", style_compression=0)
    sheet = workbook.add_sheet('havard.xls', cell_overwrite_ok=True)
    headers = (
        "url", "Imgurl", "name", "time", "dimension", "medium",
        "classification", "region", "museum", "ID", "description", "Artist",
    )
    # Header row.
    for col_idx, header in enumerate(headers):
        sheet.write(0, col_idx, header)
    # Data rows start at row 1, directly below the header.
    for row_idx, row in enumerate(datalist, start=1):
        for col_idx in range(12):
            sheet.write(row_idx, col_idx, row[col_idx])
    workbook.save(savepath)


def askURL(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Sends a browser-like User-Agent header so the request is not rejected.
    On a URLError the status code and/or reason are printed and an empty
    string is returned.
    """
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36"
    }

    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # Context manager ensures the connection is closed even on a decode
        # error (the original leaked the response object).
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        # HTTPError carries a status code; plain URLError only a reason.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html


# Script entry point: run the scraper, then report completion.
if __name__ == "__main__":
    spider()
    print("爬取完毕！")  # "Crawling finished!"
