import requests
import re
import time

# Base URL of the site being scraped.
# NOTE(review): `url` is not referenced by the functions below (they
# hard-code their own URLs) — looks dead; confirm before removing.
url = "http://ask.ci123.com/"
# Present as the Baidu image spider on every request (some sites serve
# crawlers different content or rate-limit ordinary user agents).
headers = {
    'user-agent': 'Baiduspider-image+(+http://www.baidu.com/search/spider.htm)'
}


def get_detail(text):
    """Extract question entries from a category listing page.

    Parses *text* (the raw HTML of one listing page) and returns a list of
    tuples ``(question_id, title, answer_count, author, posted_time)``.

    The list is also printed, preserving the original behavior; the added
    return value makes the parsed data actually usable by callers (the
    original returned ``None``, so the extracted tuples were lost).
    """
    # NOTE(review): inside a character class "." is a literal dot, so
    # "[.\s]*" matches only dots and whitespace between <li> and <a> —
    # presumably plain whitespace was intended; confirm against real markup.
    pattern = re.compile(r'<li>[.\s]*<a href="/questions/show/(\d+)/" title="(.*?)" class="list_title" target="_blank" >.*?</a>\s*<span class="list_asw">(\d+)<font>.*?</font></span>\s*<a class="list_author"  href="/users/show/\d+" title=".*?">(.*?)</a>\s*<span class="list_time">(.*?)</span>\s*</li>')
    data = pattern.findall(text)
    print(data)
    # Persisting the data (e.g. to a database) is intentionally not implemented.
    return data


def get_list(cate):
    """Crawl every listing page of category *cate* and parse each page.

    Fetches the category's first page, reads the page number out of the
    "last page" pagination link to learn the total page count, then fetches
    pages 1..total inclusive, handing each page's HTML to ``get_detail``.

    Raises ValueError when the pagination link cannot be found.
    """
    res = requests.get(
        f"http://ask.ci123.com/categories/show/{cate}", headers=headers)

    pattern = re.compile(
        r'<a class="list_last_page" href="/categories/show/\d+/all\?p=(\d+)"', re.S)
    match = pattern.search(res.text)
    if match is None:
        # Fail with a clear message instead of an opaque AttributeError
        # on None when the page layout changes or the request fails.
        raise ValueError(f"could not determine page count for category {cate}")
    total = int(match.group(1))
    # range() excludes the end value: use total + 1 so the last page is
    # crawled too (the original range(1, total) silently skipped it).
    for page in range(1, total + 1):
        print(f"http://ask.ci123.com/categories/show/{cate}/all?p={page}")
        res = requests.get(
            f"http://ask.ci123.com/categories/show/{cate}/all?p={page}", headers=headers)

        time.sleep(0.2)  # throttle so we don't hammer the server
        # Hand the listing-page HTML to the extraction function.
        get_detail(res.text)


# Discover every category on the home page and crawl each of them.
def get_category():
    """Find all category ids on the front page and crawl each category."""
    front_page = requests.get("http://ask.ci123.com/", headers=headers)
    category_pattern = re.compile(
        r'<li><a href="/categories/show/(\d+)">', re.S)
    category_ids = category_pattern.findall(front_page.text)
    print(f"获取到的分类ID如下:", category_ids)
    for category_id in category_ids:
        get_list(category_id)
        time.sleep(1)  # pause between categories


if __name__ == "__main__":
    # Entry point: crawl every category discovered on the home page.
    get_category()

