import httpx
import time

# Shared accumulator for crawled diet-detail URLs. DietCrawler.parse_data
# appends to it, so results grow across crawler instances and repeated runs.
DIET_DETAIL_LIST = []


class DietCrawler:
    """Fetch one page of diet-content listings and collect per-item detail URLs.

    Each item's detail URL is appended to the module-level DIET_DETAIL_LIST,
    so results accumulate across crawler instances.
    """

    def __init__(self, url):
        """Prepare a crawler for a single listing-page URL.

        Args:
            url: The listing endpoint to fetch (one page of results).
        """
        self.url = url
        # Mimic the WeChat mini-program browser UA this API is served to.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 MicroMessenger/7.0.20.1781(0x6700143B) NetType/WIFI MiniProgramEnv/Windows WindowsWechat/WMPF WindowsWechat(0x6309092b) XWEB/9053',
        }
        # NOTE(review): verify=False disables TLS certificate verification.
        # Kept for compatibility with this endpoint, but it is insecure —
        # confirm whether the certificate chain can be trusted instead.
        self.session = httpx.Client(http2=True, verify=False)
        self.json_data = {}

    def get_data(self):
        """Fetch the listing page and store its parsed JSON payload.

        Raises:
            httpx.HTTPStatusError: If the server responds with a 4xx/5xx status.
            httpx.TimeoutException: If the request exceeds the timeout.
        """
        # Timeout prevents a stalled server from hanging the crawl forever.
        response = self.session.get(url=self.url, headers=self.headers, timeout=10.0)
        # Fail loudly on an HTTP error page instead of surfacing an opaque
        # JSONDecodeError later in parse_data().
        response.raise_for_status()
        self.json_data = response.json()

    def parse_data(self):
        """Build a detail URL for every listed item and record it globally."""
        for item in self.json_data['data']['list']:
            diet_id = item['id']
            diet_detail_url = f"https://wlfw.changsha.gov.cn/api/service/content/contentcate/{diet_id}"
            DIET_DETAIL_LIST.append(diet_detail_url)

    def run(self):
        """Fetch then parse; always close the HTTP client to avoid leaking sockets."""
        try:
            self.get_data()
            self.parse_data()
        finally:
            # The original never closed the client; each instance leaked a
            # connection pool. Each crawler is single-use, so close it here.
            self.session.close()


def main(pages=3, delay=1.5):
    """Crawl listing pages and return the accumulated diet-detail URLs.

    Args:
        pages: Number of listing pages to crawl, starting at page 1
            (default 3, matching the original behavior).
        delay: Seconds to sleep before each page request, as a simple
            rate limit (default 1.5, matching the original behavior).

    Returns:
        The module-level DIET_DETAIL_LIST, which accumulates URLs across
        calls and across crawler instances.
    """
    for page in range(1, pages + 1):
        # One crawler instance per listing page.
        url = f"https://wlfw.changsha.gov.cn/api/service/content/contentcate/search?limit=5&page={page}"
        spider = DietCrawler(url)
        # Throttle so we do not hammer the API.
        time.sleep(delay)
        # Run the crawl: fetch the page and collect detail URLs.
        spider.run()
    # Accumulated diet-detail URL list.
    return DIET_DETAIL_LIST


if __name__ == '__main__':

    # Reuse main() instead of duplicating its crawl loop — the previous
    # inline copy also skipped main()'s per-request rate limiting.
    results = main()
    # Print the collected diet-detail URLs and how many were found.
    print(results, len(results))
