from myHeaders import headers
import requests
import json
import time


class MISpider:
    """Download note snippets from the Xiaomi cloud (i.mi.com) notes API
    and write them to a local text file.

    Authentication comes from the ``headers`` dict imported from
    ``myHeaders``; it must carry a valid login cookie — otherwise the
    response JSON lacks the expected keys and a ``KeyError`` propagates.
    """

    def __init__(self, folder_id="32808691578586560", outfile="Notes.txt"):
        """
        :param folder_id: keep only notes whose ``folderId`` equals this id
            (the default preserves the original hard-coded folder); pass
            ``None`` to keep every note.
        :param outfile: path of the text file the snippets are written to.
        """
        self.url = 'https://i.mi.com/note/full/page/?ts={}'
        self.folder_id = folder_id
        self.outfile = outfile

    def _get(self):
        # The API expects the ``ts`` query parameter to be a 13-digit
        # millisecond timestamp.
        ts = time.time_ns() // 1_000_000
        response = requests.get(url=self.url.format(ts), headers=headers)
        self._parse(response.text)

    def _parse(self, html):
        """Extract note snippets from the JSON response body and save them.

        Raises ``KeyError`` when the payload misses ``data``/``entries`` —
        the usual symptom of an expired cookie.
        """
        result_dict = json.loads(html)
        entries = result_dict["data"]["entries"]
        snippets = [
            each['snippet'] + "\n\n"
            for each in entries
            if self.folder_id is None or each['folderId'] == self.folder_id
        ]
        self._save(snippets)

    def _save(self, content: list):
        # newline="" keeps the snippets' own line endings untouched.
        with open(self.outfile, "w", newline="", encoding="utf-8") as file:
            file.writelines(content)

    def run(self):
        """Fetch, parse and persist the notes in one call."""
        self._get()


if __name__ == '__main__':
    try:
        spider = MISpider()
        spider.run()
    # Catch KeyError directly instead of the original broad
    # ``except Exception`` + isinstance check, which silently swallowed
    # every other failure (network errors, JSON decode errors, ...).
    # A KeyError here means the response JSON lacked the expected keys,
    # which in practice means the login cookie has expired.
    except KeyError:
        print("请更新cookie!!!")
        print("请更新cookie!!!")
        print("请更新cookie!!!")

"""为什么我要写这么长，为什么我非要用 class，我也不知道，其实可以不用 class，更短小精悍"""
