import scrapy


class Git1Spider(scrapy.Spider):
    """Log in to GitHub by POSTing the login form, then verify the session.

    Flow: GET /login -> parse CSRF token from the form -> POST /session ->
    request the user's profile page -> print its <title> to confirm login.
    """

    name = "git1"
    allowed_domains = ["github.com"]
    start_urls = ["https://github.com/login"]

    def parse(self, response):
        """Extract the login form's hidden fields and submit the session POST.

        Args:
            response: the rendered https://github.com/login page.

        Yields:
            A FormRequest to https://github.com/session handled by after_login.
        """
        # BUG FIX: the original referenced `.extract_first` without calling it
        # (token was a bound method, not a string) and used an invalid XPath
        # predicate `[name="..."]/@values`; the attribute tests must be
        # `@name` and `@value`.
        token = response.xpath(
            '//input[@name="authenticity_token"]/@value'
        ).extract_first()
        # BUG FIX: form field names/values below were corrupted with stray
        # spaces ('webauthn - support', ' true', ...) and a garbled
        # 'return_to' key, so GitHub would never receive the real fields.
        # SECURITY NOTE(review): credentials are hard-coded in source;
        # move them to environment variables or spider arguments.
        # NOTE(review): 'timestamp', 'timestamp_secret' and the
        # 'required_field_*' name rotate per page load — replaying captured
        # constants may be rejected; consider reading them from the form too.
        post_data = {
            'commit': 'Sign in',
            'authenticity_token': token,
            'add_account': '',
            'login': 'lifeprolong',
            'password': 'qweljw041013',
            'webauthn-conditional': 'undefined',
            'javascript-support': 'true',
            'webauthn-support': 'supported',
            'webauthn-iuvpaa-support': 'supported',
            'return_to': 'https://github.com/login',
            'allow_signup': '',
            'client_id': '',
            'integration': '',
            'required_field_d2dd': '',
            'timestamp': '1730162656242',
            'timestamp_secret': '5761598be2e34e49657eaaa5ab2d55cc23260de652ff29722b25637dbaac29ee',
        }
        yield scrapy.FormRequest(
            url='https://github.com/session',
            callback=self.after_login,
            formdata=post_data,
        )

    def after_login(self, response):
        """After the session POST, fetch the logged-in user's profile page."""
        yield scrapy.Request(
            'https://github.com/lifeprolong',
            callback=self.check_login,
        )

    def check_login(self, response):
        """Print the profile page title; a username in it confirms login."""
        print(response.xpath('/html/head/title/text()').extract_first())