import urllib.request, urllib.error  # 获取数据
import xlwt  # 将数据存入excel
import json

def truncate_string(values, max_len=700):
    """Return the first string in *values*, truncated if it is too long.

    Args:
        values: A non-empty sequence whose first element is the string to
            truncate (the Solr field arrives as a one-element list).
        max_len: Maximum number of characters kept before appending "...".
            Defaults to 700 to preserve the original behavior.

    Returns:
        The first string unchanged if it fits, otherwise its first
        ``max_len`` characters followed by "...".
    """
    s = values[0]
    if len(s) > max_len:
        return s[:max_len] + "..."
    return s

def main():
    """Scrape 5 pages (24 records each) of Chinese-collection results from the
    Liverpool Museums Solr index and save them to ``liverpool.xls``.

    Each record becomes a 12-field row:
    url, image url, title, date collected, measurements, materials,
    item name, place made, museum name, accession number, description, maker.
    """
    baseurl = "https://index.liverpoolmuseums.org.uk/select?indent=on&spellcheck=on&q=tm_X3b_en_aggregated_field:(chinese)&fq=(ss_type:collection%20OR%20ss_type:artifact)&rows=24&start="
    infoList = []
    for page in range(5):
        # Solr paging: 24 rows per request, offset via the "start" parameter.
        url = baseurl + str(24 * page) + '&wt=json'
        html = askURL(url)
        data = json.loads(html)
        docs = data.get("response").get('docs')
        for doc in docs:
            info = []
            info.append("https://www.liverpoolmuseums.org.uk/" + doc.get('ss_alias'))
            img = doc.get('ss_field_media_image_url')
            if img is None:
                info.append('None')
            else:
                # Drop the first 9 characters of the stored path (presumably a
                # "public://" scheme prefix — TODO confirm against live data).
                info.append("https://images.liverpoolmuseums.org.uk/styles/focal_point_4_3/public/" + img[9:])
            info.append(doc.get('ss_title'))
            info.append(doc.get('tm_X3b_en_field_date_collected'))
            info.append(doc.get('tm_X3b_en_field_measurements'))
            info.append(doc.get('tm_X3b_en_field_materials'))
            info.append(doc.get('tm_X3b_en_field_itemname'))
            info.append(doc.get('tm_X3b_en_field_placemade'))
            info.append('Liverpool')
            info.append(doc.get('ss_field_number'))
            desc = doc.get('tm_X3b_en_field_description')
            if desc is None:
                info.append('None')
            else:
                description = truncate_string(desc)
                print(len(description))
                info.append(description)
            info.append(doc.get('tm_X3b_en_field_maker'))

            infoList.append(info)
        # Re-save the accumulated rows after every page so a partial
        # result survives a mid-run failure (matches original behavior).
        savepath = 'liverpool.xls'
        saveData(infoList, savepath, page)



def saveData(datalist, savepath, seq):
    """Write scraped rows to a single-sheet .xls workbook.

    Args:
        datalist: List of rows; each row is a 12-item list matching ``col``.
        savepath: Filesystem path the workbook is saved to.
        seq: Page index from the caller; accepted for interface
            compatibility but not used here.
    """
    print("save....")
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)
    # cell_overwrite_ok lets the same file be re-saved each page without
    # "cell already written" errors.
    sheet = book.add_sheet('liverpool.xls', cell_overwrite_ok=True)
    col = (
        "url", "Imgurl", "name", "time", "dimension", "medium",
        "classification", "region", "museum", "ID", "description", "Artist",
    )
    # Header row.
    for i, title in enumerate(col):
        sheet.write(0, i, title)

    # Data rows start at row 1; use len(col) rather than a magic 12 so the
    # column set has a single source of truth.
    for r, row in enumerate(datalist, start=1):
        for c in range(len(col)):
            sheet.write(r, c, row[c])

    book.save(savepath)


def askURL(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    On URLError/HTTPError the code and/or reason is printed and "" is
    returned (best-effort behavior preserved; the caller's json.loads
    will then raise on the empty string).
    """
    head = {
        # Browser-like UA so the request is not rejected as a bot.
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36"
    }

    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # Context manager ensures the connection is closed even if
        # read()/decode() raises — the original leaked the response object.
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html


# Script entry point: run the scrape, then report completion.
if __name__ == "__main__":
    main()
    print("爬取完毕！")  # "Scraping finished!"
