import scrapy
from lxml import etree


class InformationSpider(scrapy.Spider):
    """Crawl Baidu Tieba's college-forum index.

    Pipeline: forum index (parse1) -> per-school forum page (parse2) ->
    thread page count (parse3) -> individual thread pages (parse4).
    School name, thread title and author are threaded through ``meta``.
    """

    name = "information"
    allowed_domains = ["baidu.com"]

    def start_requests(self):
        """Seed the crawl with the college-forum index page."""
        urls = ['https://tieba.baidu.com/t/f/?class=college']
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse1)

    def parse1(self, response):
        """Parse the index page and request each school's forum page."""
        result = etree.HTML(response.text)
        school_list = result.xpath("//a[@class='each_topic_entrance_item']")
        for school in school_list:
            school_name = school.xpath("./text()")[0]
            # Hrefs are protocol-relative ("//tieba..."); prepend the scheme.
            # (Was an f-string with no placeholder — plain literal suffices.)
            school_url = "https:" + school.xpath("./@href")[0]
            print(school_name, school_url)
            yield scrapy.Request(url=school_url, callback=self.parse2, meta={"school_name": school_name})
            # NOTE(review): deliberately stops after the first school —
            # looks like a debug limiter; remove to crawl every forum.
            break

    def parse2(self, response):
        """Parse a school forum page and request each thread's detail page."""
        school_name = response.meta['school_name']
        result = etree.HTML(response.text)
        detail_list = result.xpath("//div[@class='module_item']/ul/li[@class='thread_item']")

        for detail in detail_list:
            # Narrowed from a bare ``except:`` wrapping the whole loop:
            # only the ``[0]`` lookups can fail here (empty xpath result),
            # and a bare except in a generator also swallows GeneratorExit.
            try:
                detail_name = detail.xpath(".//div[@class='thread_item_middle']/a[@class='thread_title']/text()")[0]
                author = detail.xpath(".//div[@class='thread_item_right']/div[1]/span[@class='member_name']/text()")[0]
                detail_url = 'https:' + \
                             detail.xpath(".//div[@class='thread_item_middle']/a[@class='thread_title']/@href")[0]
            except IndexError:
                continue  # malformed thread item — skip it, keep the rest
            yield scrapy.Request(url=detail_url, callback=self.parse3,
                                 meta={"detail_name": detail_name, "author": author, 'school_name': school_name})

    def parse3(self, response):
        """Read the thread's page count and request every page of it."""
        school_name = response.meta['school_name']
        detail_name = response.meta['detail_name']
        author = response.meta['author']
        print(author, detail_name)
        result = etree.HTML(response.text)
        try:
            # Page count is rendered as text, e.g. "3".
            page_num_detail = result.xpath("//div[@id='thread_theme_7']//span[@class='red'][2]/text()")[0]
            # BUGFIX: ``range()`` was called on the raw string, raising a
            # TypeError that the old bare ``except: pass`` silently ate —
            # so parse4 was never scheduled. Convert to int first.
            page_count = int(page_num_detail)
        except (IndexError, ValueError):
            return  # page counter missing or unparseable — no pagination
        for page in range(page_count):
            # NOTE(review): Tieba's ``pn`` looks 1-based; range() starts at 0
            # (original behavior kept) — confirm against the live site.
            every_page_url = response.url + '?pn=' + str(page)
            print(every_page_url)
            yield scrapy.Request(url=every_page_url, callback=self.parse4,
                                 meta={"detail_name": detail_name, "author": author, 'school_name': school_name,
                                       "page_num_detail": page_num_detail})

    def parse4(self, response):
        """Final callback for a single thread page (extraction still TODO)."""
        response_url = response.url
        print(response_url)
        # Redirect responses normally never reach callbacks (RedirectMiddleware
        # follows them); this only fires if redirects are disabled via meta.
        if response.status == 302 or response.status == 301:
            redirect_url = response.headers.get('Location').decode('utf-8')
            print("Redirected to:", redirect_url)
