# # s = '文物圖檔編號：K1B018133N000000000PAD'
# # print(s.index('：'))


# # arr = [
# #     '文物圖檔編號：K1B018133N000000000PAD',
# #     '朝代：明',
# #     '類別：陶瓷器',
# #     '功能：盛裝器,飲器',
# #     '質材：礦物/陶瓷/',
# # ]

# # map = {}

# # for it in arr:
# #     # print(it.index('：'))
# #     map[it[:it.index('：')]] = it[it.index('：') + 1:]

# # print(map)

# # Author: litianyu
# # Description: 通过 Python 基于 bs4 的网络爬虫。

# import urllib.request
# from bs4 import BeautifulSoup
# import json

# BaseUrl = 'https://theme.npm.edu.tw/opendata/DigitImageSets.aspx'


# def handle_request(url):
#     headers = {
#         'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) \
#          AppleWebKit/537.36 (KHTML, like Gecko)'
#     }
#     request = urllib.request.Request(url=url, headers=headers)
#     return request


# def wirte_to_file(url):
#     request = handle_request(url)

#     # 发送请求等待相应
#     content = urllib.request.urlopen(request).read().decode('utf-8')
#     # 解析内容
#     soup = BeautifulSoup(content, 'lxml')
#     # 获取对应的部分
#     ul = soup.select('.project-detail > ul > li')

#     map = {}

#     fileName = ul[0].text[7:] + '.json'

#     for li in ul:
#         map[li.text[:li.text.index('：')]] = li.text[li.text.index('：') + 1:]

#     # dict 转化为 json
#     j = json.dumps(map, ensure_ascii=False)

#     # 写入文件
#     file = open(fileName, "w")
#     file.write(j)
#     file.close()


# def get_link(baseUrl):
#     request = handle_request(baseUrl)
#     # 发送请求等待相应
#     content = urllib.request.urlopen(request).read().decode('utf-8')
#     # 解析内容
#     soup = BeautifulSoup(content, 'lxml')
#     # 获取对应的部分
#     ul = soup.select('.painting-list > li > a')
#     for li in ul:
#         childUrl = BaseUrl + li['href'][19:]
#         wirte_to_file(childUrl)


# # last page url is 2331
# if __name__ == '__main__':
#     # baseUrl = 'https://theme.npm.edu.tw/opendata/DigitImageSets.aspx'
#     for i in range(1071, 1500):
#         # print(baseUrl + '?pageNo=' + str(i))
#         get_link(BaseUrl + '?pageNo=' + str(i))
#         print(BaseUrl + '?pageNo=' + str(i))

import concurrent
import concurrent.futures
import json

import requests
from bs4 import BeautifulSoup
from tenacity import retry, stop_after_attempt

# Landing page of the National Palace Museum open-data digital image sets.
BaseUrl = 'https://theme.npm.edu.tw/opendata/DigitImageSets.aspx'
# Default request headers: a desktop-browser User-Agent so requests are not
# rejected as an obvious bot.
Headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) \
       AppleWebKit/537.36 (KHTML, like Gecko)'
}
# Local proxy endpoints.  NOTE(review): currently unused — no request below
# passes proxies=Proxies; confirm whether proxying was intended.
Proxies = {
    "http": "http://localhost:204",
    "https": "http://localhost:204"
}


# session = requests.Session()


@retry(stop=stop_after_attempt(3))
def get_html_content(url: str, headers: dict) -> bytes:
    """Fetch *url* and return the response body as UTF-8 encoded bytes.

    Retried up to 3 times on any exception (tenacity decorator).

    Args:
        url: Absolute URL to fetch.
        headers: HTTP request headers to send.

    Returns:
        The decoded response text re-encoded as UTF-8 bytes — suitable for
        handing straight to BeautifulSoup.  (The original annotation said
        ``str`` but the function has always returned bytes.)
    """
    # Context manager closes the session even when the GET raises; the
    # original leaked one session per failed attempt under retry.
    with requests.Session() as session:
        response = session.get(url=url, headers=headers)
        return response.text.encode("utf-8")


def wirte_to_file(url: str, referer: str) -> None:
    """Scrape one artifact detail page and save its fields as a JSON file.

    The file is named after the artifact's document number — the text of the
    first ``<li>`` with its 7-character "文物圖檔編號：" label stripped.

    Args:
        url: Detail-page URL to scrape.
        referer: Value for the "Referer" request header (the listing page).
    """
    # Copy the shared headers before adding the Referer.  The original code
    # aliased the module-level Headers dict and .update()d it in place,
    # silently polluting every later request with a stale Referer.
    data_headers = dict(Headers)
    data_headers["Referer"] = referer

    content = get_html_content(url=url, headers=data_headers)
    soup = BeautifulSoup(content, 'lxml')

    # "品名" (item name) comes from the page heading; the remaining fields
    # are "label：value" pairs in the detail list.
    heading = soup.select(".project-detail > h3")
    fields = {"品名": heading[0].text}  # renamed from `map` (shadowed builtin)

    items = soup.select('.project-detail > ul > li')
    document_number = items[0].text[7:]
    file_name = document_number + '.json'

    for li in items:
        text = li.text
        sep = text.index('：')  # full-width colon separates label from value
        fields[text[:sep]] = text[sep + 1:]

    payload = json.dumps(fields, ensure_ascii=False)

    print("文物圖檔編號: {}".format(document_number), end=",")
    # Explicit UTF-8 + context manager; the original relied on the platform
    # default encoding and a manual close() that was skipped on error.
    with open(file_name, "w", encoding="utf-8") as dest:
        dest.write(payload)
    print("已保存")


def get_link(baseurl: str) -> None:
    """Scrape one listing page and persist every linked detail page.

    Args:
        baseurl: URL of a paginated listing page; also sent on as the
            Referer for each detail-page request.
    """
    page_html = get_html_content(url=baseurl, headers=Headers)
    print("Analyzing html content of {}".format(baseurl))
    # Each anchor under the painting list points at a detail page; the first
    # 19 characters of its href are dropped before re-rooting on BaseUrl.
    anchors = BeautifulSoup(page_html, 'lxml').select('.painting-list > li > a')
    for anchor in anchors:
        wirte_to_file(BaseUrl + anchor['href'][19:], baseurl)


# last page url is 2331
if __name__ == '__main__':
    try:
        # Five worker threads; each scrapes one listing page at a time.
        with concurrent.futures.ThreadPoolExecutor(5) as executor:
            for pageno in range(8, 43):
                page_url = BaseUrl + '?Key=^^9&pageNo=' + str(pageno)
                # Log exactly the URL being submitted.  The original printed
                # '?Key=^^3...' while submitting '?Key=^^9...', so the log
                # did not match what was actually scraped.
                print(page_url)
                executor.submit(get_link, page_url)
    except KeyboardInterrupt:
        print("Aborted")
        exit(0)
