import requests
from bs4 import BeautifulSoup
import time


# First-level listing URL pattern: https://cs.anjuke.com/community/p1/ (p<n> selects the page)

# Statically scrape the detail-page links (e.g. "https://cs.anjuke.com/community/view/255088") from each listing page


class GetxiangqingList:
    """Scrape one paginated anjuke.com community listing page and collect the
    per-community detail-page links found on it.

    Usage: construct with the full page URL (pagination suffix included,
    e.g. ".../community/yuelu/p3") and call :meth:`run`.
    """

    def __init__(self, url_with_p):
        # Full listing-page URL with the "p<n>" pagination suffix already appended.
        self.url = url_with_p

    def get_html(self, url, headers, cookies):
        """Fetch *url* and return its decoded HTML text, or ``None`` on failure.

        The short sleep throttles consecutive requests so the site is less
        likely to rate-limit or invalidate the session cookies.
        """
        try:
            time.sleep(0.5)
            # timeout so a dead connection cannot hang the crawl forever
            resp = requests.get(url=url, headers=headers, cookies=cookies,
                                timeout=10)
            resp.encoding = 'utf-8'
            return resp.text
        except requests.RequestException as exc:
            # Catch only network/HTTP errors (the old bare `except: pass`
            # also hid programming bugs); report and signal failure.
            print(f'request failed for {url}: {exc}')
            return None

    def parse_data(self, html):
        """Return the ``<a class="li-row">`` anchors in *html*.

        Returns an empty list when *html* is ``None``/empty (failed fetch),
        instead of crashing inside BeautifulSoup.
        """
        if not html:
            return []
        soup = BeautifulSoup(html, 'html.parser')
        return soup.find_all("a", class_="li-row")

    def save_data(self, links, Xurl_link):
        """Append each anchor's ``href`` to *Xurl_link*, echoing it to stdout.

        Anchors without an ``href`` attribute are skipped so ``None`` never
        ends up in the collected link list.
        """
        for link in links:
            href = link.get('href')
            if href:
                Xurl_link.append(href)
                print(href)

    def run(self):
        """Fetch ``self.url`` and return the list of detail-page hrefs on it."""
        headers = {
            "authority": "cs.anjuke.com",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "cache-control": "no-cache",
            "pragma": "no-cache",
            "referer": "https://login.anjuke.com/",
            "sec-ch-ua": "\"Not_A Brand\";v=\"8\", \"Chromium\";v=\"120\", \"Microsoft Edge\";v=\"120\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "same-origin",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0"
        }
        # NOTE(review): session cookies below are account-specific and will
        # expire; refresh them from a logged-in browser session when the
        # crawl starts returning empty pages.
        cookies = {
            "sessid": "3AA21FBC-03DD-0600-039E-2D42C2F29F17",
            "aQQ_ajkguid": "9CECC00C-33E9-DCBF-B2ED-DF9EA2EBB6A7",
            "ctid": "27",
            "twe": "2",
            "id58": "CrIclWWhP59L1z6sLQYNAg==",
            "obtain_by": "2",
            "ajk_member_verify": "W5dL2pBhDCiir8akZBDo66rAU%2Bv0ns7yeYLQQhwQQ0U%3D",
            "ajk_member_verify2": "MjgwNjQwODk2fFVKYm5TcmJ8MQ%3D%3D",
            "ajk-appVersion": "",
            "fzq_h": "344bdb6b2653855c66f15c465e632a6c_1705066453652_34ca799228f4422d8d38124a06f4a1e8_3740786969",
            "ajk_member_id": "280640896",
            "ajkAuthTicket": "TT=fe650ee3783fa06fe5ba6e6243fb53d7&TS=1705066470600&PBODY=Lly6oPqbBHl_k2C3jcEKhJZYXsuwjr0MgVqbw1OXe5SXhlmdUthfZYUSuytax0KHLoYECTGHNfnpNhriO2vyeXz9RUVJYPk4URyAv-4UIxEG8q-sFKCdemTnHcw6D_ja4XjetJ0bGttsZPafd6NSjIda7NWYrIt3k_N0XX8P2w0&VER=2&CUID=z0xo8z93-p5iDJq5J3SnqaS2r3T3qS61",
            "fzq_js_anjuke_xiaoqu_pc": "7430b688a18b2314c47631289b10a687_1705066664461_25",
            "xxzl_cid": "d71dd6dc33c84bd19340f93a64c3470b",
            "xxzl_deviceid": "bnNnzFn1rNJNAyVjS3IS6Bc44muBh++I8YMgxvFR33vRRkETlizHoKTKLFAzcNgB"
        }
        html = self.get_html(self.url, headers, cookies)
        links = self.parse_data(html)
        Xurl_link = []
        self.save_data(links, Xurl_link)
        return Xurl_link


def main1():
    """Crawl every configured district listing, page by page, and return the
    accumulated list of community detail-page URLs."""
    # Per-district base URL plus the number of listing pages to walk.
    nlist = [
        {
            'url': "https://cs.anjuke.com/community/yuelu/",
            'max_page': '44'
        },
        {
            'url': "https://cs.anjuke.com/community/yuhuah/",
            'max_page': '50'
        },
        {
            'url': "https://cs.anjuke.com/community/tianxin/",
            'max_page': '35'
        },
        {
            'url': "https://cs.anjuke.com/community/kaifu/",
            'max_page': '36'
        },
        {
            'url': "https://cs.anjuke.com/community/furong/",
            'max_page': '34'
        },
        {
            'url': "https://cs.anjuke.com/community/xingshac/",
            'max_page': '15'
        },
        {
            'url': "https://cs.anjuke.com/community/wangchenga/",
            'max_page': '17'
        },
        {
            'url': "https://cs.anjuke.com/community/ningxiang/",
            'max_page': '13'
        },
        {
            'url': "https://cs.anjuke.com/community/liuyang/",
            'max_page': '24'
        },
        {
            'url': "https://cs.anjuke.com/community/xiangsha/",
            'max_page': '7'
        },
    ]

    data = []
    for entry in nlist:
        base_url = entry['url']
        last_page = int(entry['max_page'])
        for page_no in range(1, last_page + 1):
            # Extra throttle on top of the per-request sleep inside the crawler.
            time.sleep(0.5)
            page_links = GetxiangqingList(f"{base_url}p{page_no}").run()
            data.extend(page_links)
            print(base_url, '当前正在爬', page_no)

    return data


if __name__ == '__main__':
    # Script entry point: run the full crawl. main1() returns the collected
    # links, but the result is discarded here (it is printed during crawling).
    main1()




