import scrapy


class LoginSpider(scrapy.Spider):
    """Log into gushiwen.cn with pre-captured session cookies and scrape
    the user's collection page.

    The cookies below are a raw ``Cookie`` request-header string copied
    from a logged-in browser session; they will expire and must be
    refreshed manually.
    """

    name = "login"
    allowed_domains = ["gushiwen.cn"]
    start_urls = ["https://so.gushiwen.cn/user/collect.aspx"]

    def start_requests(self):
        """Yield the initial request with the session cookies attached.

        The header string is parsed into the dict form Scrapy's
        ``Request(cookies=...)`` expects. ``split('=', 1)`` is used so a
        cookie *value* that itself contains '=' (typical for encoded
        session tokens) is kept intact — plain ``split('=')`` would
        silently truncate it.
        """
        raw = 'ticketStr=203526853%7cgQEe8DwAAAAAAAAAAS5odHRwOi8vd2VpeGluLnFxLmNvbS9xLzAyVlVXeVFxbGVkN2kxREVKd2hDMUgAAgTo4HhmAwQAjScA; login=flase; Hm_lvt_890cbddf95d987c60c4ff95fa719c319=1719197877,1719224982; ASP.NET_SessionId=qymnpko1luiamzv1gprdfura; Hm_lvt_9007fab6814e892d3020a64454da5a55=1719225083; codeyzgswso=2e8ca3a713ee9eab; gsw2017user=6139785%7c657A5BC8BC87D6EF54FEDA6380CC0956%7c2000%2f1%2f1%7c2000%2f1%2f1; login=flase; wxopenid=defoaltid; gswZhanghao=thou_lee%40outlook.com; gswEmail=thou_lee%40outlook.com; wsEmail=thou_lee%40outlook.com; Hm_lpvt_9007fab6814e892d3020a64454da5a55=1719227862; Hm_lpvt_890cbddf95d987c60c4ff95fa719c319=1719228705'
        # NOTE: the header contains 'login' twice; dict construction keeps
        # the last occurrence, matching the original comprehension.
        cookies = dict(pair.split('=', 1) for pair in raw.split('; '))
        yield scrapy.Request(self.start_urls[0], cookies=cookies)

    def parse(self, response):
        """Log whether the login succeeded, then yield one item per entry.

        Login is detected by the presence of '退出登录' ("log out") in the
        page body, which only appears for an authenticated session.

        Yields:
            dict: ``{'intro': ..., 'name': ...}`` per collected entry;
            either field may be ``None`` when the expected node is absent.
        """
        if '退出登录' in response.text:
            self.logger.info("登录成功")
        else:
            self.logger.error("登录失败")

        poetries = response.xpath('//*[@id="mainSearch"]/div[2]/div/div')
        for poetry in poetries:
            # .get() returns None when the node is missing; guard the
            # strip() so a malformed row doesn't abort the whole parse
            # with an AttributeError.
            name = poetry.xpath('a/span/text()').get()
            yield {
                'intro': poetry.xpath('a/text()').get(),
                'name': name.strip(' - ') if name is not None else None,
            }
