import httpx
from datetime import datetime
import os


class CollectCultureInfo:
    """Fetch the newest posts of one fixed Weibo account and download attached images.

    Workflow: ``run()`` -> ``get_json()`` (HTTP fetch into ``self.json_data``)
    -> ``parse_json()`` (print each post, download pictures into
    ``../data/<timestamp>/``).
    """

    def __init__(self):
        # Timeline endpoint for a single hard-coded account (uid=2031569683, page 1).
        self.url = "https://weibo.com/ajax/statuses/mymblog?uid=2031569683&page=1"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0",
            "Referer": "https://weibo.com"
        }
        # NOTE(review): session cookies are hard-coded credentials; they expire
        # (see ALF/SUB) and should live in config/environment, not source code.
        self.cookies = {
            "SINAGLOBAL": "1841074107512.084.1588840469173",
            "SCF": "Aka_eNvclJdZgkVwd7SGgIqFfOITZ7i0JuePJPe6RVbO2qaSBfh4sekjwOszJZG2cVQzIii-O2jiEbWyCBhjlzY.",
            "UOR": ",,www.baidu.com",
            "XSRF-TOKEN": "tB3y4HVx1OCtVUXA8BPVXYyf",
            "ALF": "1711248118",
            "SUB": "_2A25I3HWnDeRhGeNM6FQV8inJyTiIHXVrkPdvrDV8PUJbkNAGLULXkW1NTjM0TXSQGgIyzqsHffLj-H4r-uU4_koz",
            "SUBP": "0033WrSXqPxfM725Ws9jqgMF55529P9D9WWTzTuhPe1EuL_gCS91a6On5JpX5KMhUgL.Fo-Ee0qXeoMfeoB2dJLoIp-LxK-LBo5L12q7Uo5LxK-L1K2L1hqt",
            "_s_tentry": "weibo.com",
            "Apache": "5644763397929.464.1708656146562",
            "ULV": "1708656146638:32:2:1:5644763397929.464.1708656146562:1707666667195",
            "WBPSESS": "TnSyL_Lt2vm-x5WYYkobCxMZSiOwteZ8a27WeBLP2hGkXUZtVqgpOLz1yFQ7Q4L2qZsgZBPStAb9OjtPVJ4Ct39bBWA8yuzMdw2cGfUEkLOSaP8S-M6yD6BG3EoQ9NZwsCQ85NNMtNuLZBpBJC02Tw=="
        }
        # Raw JSON payload of the last successful fetch ({} until get_json succeeds).
        self.json_data = {}
        # NOTE(review): verify=False disables TLS certificate checking —
        # confirm this is intentional; it exposes the session cookies to MITM.
        self.client = httpx.Client(http2=True, verify=False)

    def get_json(self):
        """Fetch the timeline JSON and cache it in ``self.json_data``.

        Best-effort like the original, but errors are now reported instead of
        being silently swallowed by a bare ``except: pass`` (which also hid
        programming bugs). On failure ``self.json_data`` is left unchanged.
        """
        try:
            resp = self.client.get(url=self.url, headers=self.headers,
                                   cookies=self.cookies, timeout=5)
            # Treat 4xx/5xx as failures instead of JSON-decoding an error page.
            resp.raise_for_status()
            self.json_data = resp.json()
        except (httpx.HTTPError, ValueError) as exc:  # ValueError covers JSON decode errors
            print(f"get_json failed: {exc!r}")

    def parse_json(self):
        """Walk the cached feed: print each post and download its pictures.

        Returns:
            list[tuple[str, str]]: ``(formatted_time, text_raw)`` per post in
            feed order. (The original returned None; callers ignoring the
            return value are unaffected.)

        Raises:
            KeyError: if ``self.json_data`` lacks the expected
            ``data -> list`` structure (e.g. ``get_json`` never succeeded).
        """
        fmt_in = "%a %b %d %H:%M:%S %z %Y"   # Weibo created_at, e.g. "Fri Feb 23 10:00:00 +0800 2024"
        fmt_out = "%Y-%m-%d %H:%M:%S"        # human-readable display time
        fmt_name = "%Y-%m-%d-%H%M%S"         # filesystem-safe folder name

        posts = []
        for item in self.json_data['data']['list']:
            # Publication time of the post.
            dt = datetime.strptime(item['created_at'], fmt_in)
            formatted_time = dt.strftime(fmt_out)
            formatted_name = dt.strftime(fmt_name)
            # Post body and optional picture map (presumably {pic_id: info}).
            text_raw = item.get('text_raw')
            pic_infos = item.get('pic_infos')

            # Print each post exactly once (the original re-printed the text
            # for every attached picture).
            print(formatted_time, text_raw, sep='\n')
            posts.append((formatted_time, text_raw))

            if pic_infos:
                # One folder per post. makedirs(exist_ok=True) is race-free and
                # creates missing parents (os.mkdir raised if ../data was absent).
                os.makedirs(f"../data/{formatted_name}", exist_ok=True)
                for info in pic_infos.values():
                    self.get_image(info['original']['url'], formatted_name)
        return posts

    def get_image(self, url, formatted_name):
        """Download one picture into ``../data/<formatted_name>/``.

        Args:
            url: direct image URL; its basename becomes the local file name.
            formatted_name: per-post folder name produced by ``parse_json``.
        """
        resp = self.client.get(url, headers=self.headers, cookies=self.cookies)
        pic_name = url.split('/')[-1]
        # 'wb' (not the original 'ab'): append mode duplicated the image bytes
        # and corrupted the file whenever the script was re-run.
        with open(f'../data/{formatted_name}/{pic_name}', 'wb') as f:
            print(f'正在下载图片{pic_name}\n\n')
            f.write(resp.content)

    def run(self):
        """Entry point: fetch the feed, then parse it and download images."""
        self.get_json()
        self.parse_json()


if __name__ == '__main__':
    # Script entry point: build the collector and run one fetch/parse cycle.
    CollectCultureInfo().run()
