import base64
import io
import re

from fontTools.ttLib import TTFont
import requests
from lxml import etree

# # 第1步：下载58同城页面数据，将其存储为html页面，这样能够在后面的代码编写过程中不用每次都下载，而是从html文件中打开，这样既省去了58对我们的ip屏蔽风险，又快捷
# # 准备58同城的URL，以及user-agent
# url = "https://sz.58.com/chuzu/"
# headers = {
#     "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
#     "cookie": "f=n; commontopbar_new_city_info=4%7C%E6%B7%B1%E5%9C%B3%7Csz; commontopbar_ipcity=shuyang%7C%E6%B2%AD%E9%98%B3%7C0; id58=c5/nfF+sjol8pq7MDA3LAg==; 58tj_uuid=7666a956-cce2-4bed-b9a4-f2f784bbc006; als=0; wmda_uuid=c04c9142f2390c313366e6e032aad65c; wmda_new_uuid=1; wmda_visited_projects=%3B11187958619315; xxzl_deviceid=gFMnBfptKZ%2B485OUft22Lcw587ELWVXHvg3vCfVDhfW4FequzR2ONQRNkK3nCBvZ; f=n; new_uv=3"
# }
#
# # 爬取页面
# r = requests.get(url=url, headers=headers)
#
# # 获取页面响应数据
# r.encoding = "utf-8"
# print(r.text)
# # 保存到html文件
# with open("58.html", "w") as f:
#     f.write(r.text)


# Step 1: read the previously saved page from disk. The site serves UTF-8
# (the commented download code above sets r.encoding = "utf-8"), so decode
# explicitly instead of relying on the platform default encoding — without
# this, the read fails/garbles on locales like Windows cp936.
with open("a-58.html", "r", encoding="utf-8") as f:
    html_content = f.read()

# Step 2: extract the embedded base64 font data and parse the glyph mapping.
# NOTE: the pattern must be a raw string — the original "base64,(.*?)'\)"
# contains the invalid escape sequence "\)" (SyntaxWarning since Python 3.12).
font_match = re.search(r"base64,(.*?)'\)", html_content)
if font_match is None:
    # Fail loudly with a clear message instead of an opaque AttributeError.
    raise ValueError("embedded base64 font data not found in page")
base64_str = font_match.group(1)
# Decode the base64 payload into the raw binary TTF data.
base64_bin = base64.b64decode(base64_str)
# Load the font directly from memory — no temp file needed.
font = TTFont(io.BytesIO(base64_bin))
# Best available cmap: maps code points to glyph names like "glyph00002".
cmap = font.get("cmap").getBestCmap()
# Build {'0x9476': real_digit}: the number inside the glyph name is
# off by one relative to the digit actually displayed, hence the -1.
ttf_dict = {
    hex(code): int(re.search(r"\d+", glyph_name).group(0)) - 1
    for code, glyph_name in cmap.items()
}
print("解析后：", ttf_dict)

# Step 3: substitute every obfuscated glyph entity in the document
# with the real digit it stands for.
for code, digit in ttf_dict.items():
    # hex() keys look like "0x9476"; the page encodes them as "&#x9476;".
    entity = "&#x" + code[2:] + ";"
    html_content = html_content.replace(entity, str(digit))

# print(html_content)

# Step 4: pull the listing title and price out of each house cell via XPath.
doc = etree.HTML(html_content)
for cell in doc.xpath("//li[@class='house-cell']"):
    title = cell.xpath(".//h2//a/text()")[0].strip()
    price = cell.xpath(".//div[@class='money']//b/text()")[0].strip()
    print(title, price)

# 后续操作提示：运行此程序，上面的代码已经能够在终端提取了页面的数据，如果想要继续操作下一页，只需要让上述的代码重复（但别忘记修改URL为下一页的）
# 当然，如果想要存储到csv、MongoDB只需要编写代码存储即可
