#coding=utf-8
import requests
from fontTools.ttLib import TTFont
import base64
import json
import re
from bs4 import BeautifulSoup

# Request headers: desktop Chrome UA plus a logged-in glidedsky.com session
# cookie — the puzzle pages require authentication to render.
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36",
    'Cookie':'_ga=GA1.2.682296989.1618206060; footprints=eyJpdiI6IlNTdWI0WGFTZzIxeGtpVVdwbTVXbWc9PSIsInZhbHVlIjoiOGRIa0NlK3laN1lwRmZKcHp0cjFGR2FpNzdHR05DaTlNXC9oK2JmUE5ONHR3RkxUendsNmhmQnBldXRubHlFVEQiLCJtYWMiOiIxNDFlNTM4NTRjZTFiYWRkOGY2MDE3OGEwNzNlZjdjNDNjMGNhN2M5MTcwODk5ZTk0YmNlOTNhMTFhN2E3NTM4In0%3D; remember_web_59ba36addc2b2f9401580f014c7f58ea4e30989d=eyJpdiI6InNHYjAyNHdYSTdjcXRzaEpRekNaVkE9PSIsInZhbHVlIjoibytFNldBZUdOVXFlTW5aMU1LV2FFNUw3NEpWTW83eDBRM0VOeXoxNmJlYzVnc0I3VThTcWs2RHB0OHE5QXFSZEtaWWZOOTVXdTJVekN1enFxdzlYUWZTUTM3cFZmcnI2blQ4S0w1djN2bkdQVVczN2x0TXFXbXhTVFwvUlwvOFwvMnZ5M0VhdzVIZGRLbENINWJqVDdEMFFPTDlTaUxTb25DUnJvY29hdWw5bFpZPSIsIm1hYyI6ImEwZGUxZjkxYWE5MGRiOTYzNWQ4OTgxNGJhNzZkNDk2N2IxNGE3NjFiYTE5MGY3NWU3MTM5OGFmYTVhM2IyZDEifQ%3D%3D; Hm_lvt_020fbaad6104bcddd1db12d6b78812f6=1619003048,1619082407,1619678949,1620817874; XSRF-TOKEN=eyJpdiI6Ik83eGtPWHUxY2U5eFZIUUVzWlwveU53PT0iLCJ2YWx1ZSI6Imh0bGhuZ0tRNHdEckxzSk9JRVUyMmc3VmlPR3lEWlBTYThQVzk5RzAwd2VrRGpYeGszYTNUN3lHSTR2NG9Zb0MiLCJtYWMiOiIxYmE1NTk0ZTI0NzEzNTc2NDgxN2I4M2Y0MjQ5MGQ4NDAyMWNiMjk3M2RjNGExMjVkMDVlZjRiOWRlZGI2YzExIn0%3D; glidedsky_session=eyJpdiI6IkQraG9OSjNJN0VqME5XTmZmaE9mQUE9PSIsInZhbHVlIjoiYzdcL0NNMTBPMWMra3VrM3JCTzRET2t3SktnVTlcL21UUEZwWlFCeW5QbDd4d3NjQ0pzcjk2Z0c3Wlp2dVhFd2tRIiwibWFjIjoiYzkwZmVkZDg4OTY5MTViM2U5NGE0MTgwZmFjY2IyMzBkOWM2ODlkNmI2YjMzMmU0ODY2OTc5M2U4MTc1NWZjMiJ9; Hm_lpvt_020fbaad6104bcddd1db12d6b78812f6=1620817969'
}
# Running total of every number scraped across all pages.
# NOTE(review): `sum` shadows the builtin of the same name; renaming it would
# also require touching download() and the __main__ block, so it is kept.
sum = 0

# Maps fontTools glyph names to the digit each name denotes; ".notdef" is the
# font's placeholder glyph and gets the sentinel -1.
number_map = {
    ".notdef": -1,
    "zero": 0,
    "one": 1,
    "two": 2,
    "three": 3,
    "four": 4,
    "five": 5,
    "six": 6,
    "seven": 7,
    "eight": 8,
    "nine": 9
}


# 爬取
def download(response):
    """Decode one puzzle page's obfuscated digit font and add every number
    on the page to the global running total.

    The site embeds a base64 TTF font whose glyph order is shuffled, so the
    digit rendered on screen differs from the character present in the HTML.
    We rebuild the {html_digit: displayed_digit} mapping from the font's
    glyph order and translate each number before summing it.

    :param response: requests.Response for one puzzle page (HTML with an
        inline @font-face whose src is a base64 data URI)
    """
    global sum
    html_data = response.text

    # Pull the base64-encoded font out of the inline @font-face CSS.
    # (raw string so the escaped ')' in the pattern is unambiguous)
    font_base = re.findall(r"base64,(.*?)\) format", html_data)[0]
    # Decode and persist the font so fontTools can open it from disk.
    with open("字体文件.ttf", mode="wb") as f:
        f.write(base64.b64decode(font_base))

    font = TTFont('字体文件.ttf')
    # Dump an XML copy for manual inspection (debug aid kept from original).
    font.saveXML("font.xml")

    # getGlyphOrder() yields glyph names (".notdef", "zero", ..., "nine");
    # getGlyphID(name) - 1 is the digit actually rendered for that name.
    # number_map[name] is the digit the HTML character claims to be.
    #
    # BUG FIX: the original code iterated dicts.keys() while pop()-ing and
    # re-inserting entries; the live iterator then visits the freshly
    # inserted int keys and number_map[<int>] raises KeyError. Build the
    # final mapping in a single pass instead.
    dicts = {
        number_map[name]: font.getGlyphID(name) - 1
        for name in font.getGlyphOrder()
    }

    # Parse the page and translate every number in the results grid.
    data = BeautifulSoup(html_data, "lxml")
    numbers = data.find(class_="row").find_all(class_="col-md-1")
    for num in numbers:
        # Tag.text already returns the node's text — no need to re-parse
        # the tag with BeautifulSoup as the original did.
        num_temp = num.text.strip()
        # Map each obfuscated character to its real digit and re-join.
        # (Works for any number of digits, not just the 3 the original
        # hard-coded.)
        real = "".join(str(dicts[int(ch)]) for ch in num_temp)
        sum += int(real)


# Entry point: crawl all 1000 puzzle pages and print the grand total.
if __name__ == '__main__':
    base_url = "http://glidedsky.com/level/web/crawler-font-puzzle-1?page="
    for page in range(1, 1001):
        # Progress indicator for the current page.
        print("正在爬取第" + str(page) + "页")
        page_url = base_url + str(page)
        resp = requests.get(url=page_url, headers=headers)
        download(resp)

    # Final sum accumulated by download() across every page.
    print(sum)
