import requests
from parsel import Selector

# Module-level accumulator: every detail-page URL collected by all
# FiguresCrawler instances is appended here (see parse_html / main).
TOTAL_URL_LIST = []


class FiguresCrawler:
    """Fetch one listing page and collect the detail-page URLs it links to.

    Collected absolute URLs are appended to the module-level TOTAL_URL_LIST.
    """

    def __init__(self, url):
        # Cookies are currently unused; kept for future authenticated requests.
        self.cookie = {}
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
        }
        self.url = url   # listing page to scrape
        self.html = ''   # raw HTML, filled in by get_html()

    def get_html(self):
        """Download self.url into self.html; leave it empty on failure.

        Narrow except: only network/HTTP errors are treated as best-effort;
        the original bare ``except: pass`` also swallowed KeyboardInterrupt.
        """
        try:
            # Timeout prevents one hung request from stalling the whole crawl.
            response = requests.get(url=self.url, headers=self.headers, timeout=10)
            response.encoding = 'utf-8'
            self.html = response.text
        except requests.RequestException as exc:
            # Best-effort crawl: report and continue instead of failing silently.
            print(f'request failed for {self.url}: {exc}')

    def parse_html(self):
        """Extract detail-page hrefs from self.html into TOTAL_URL_LIST.

        A static, valid XPath on a Selector cannot raise, so the original
        bare try/except (and the unneeded ``global`` — extend mutates in
        place, it does not rebind) have been removed.
        """
        selector = Selector(text=self.html)
        hrefs = selector.xpath(
            '//div[@id="pagelist"]/div/div[@class="dis_content"]/p/a/@href'
        ).getall()
        # Hrefs are site-relative; prefix the host to make them absolute.
        TOTAL_URL_LIST.extend(
            f"https://people.isgoodgood.cn{item}" for item in hrefs
        )

    def run(self):
        """Fetch the page, then parse it — one call per listing page."""
        self.get_html()
        self.parse_html()


def main(last_page=31):
    """Crawl listing pages 1..last_page and return the accumulated URLs.

    Args:
        last_page: Number of the final listing page to fetch. Defaults to 31,
            the page count previously hard-coded as ``range(1, 32)``.

    Returns:
        The module-level TOTAL_URL_LIST containing all collected
        detail-page URLs.
    """
    for page in range(1, last_page + 1):
        print(f'【正在采集第{page}页】')
        url = f'https://people.isgoodgood.cn/hunanren/changsha/page-{page}.html'
        FiguresCrawler(url).run()

    return TOTAL_URL_LIST


if __name__ == '__main__':
    # Run the full crawl through main(). Previously this guard duplicated
    # main()'s logic for a single hard-coded page (page 2), leaving main()
    # entirely unused and the collected list incomplete.
    print(main())
