# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

# Approach 1: fetch pages while logged in by passing a pre-captured cookie dict
# class GitSpider(scrapy.Spider):
#     name = 'git'
#     allowed_domains = ['github.com']
#     start_urls = ['http://github.com/']
#
#     # cookie字符串化
#     cookies_str = '_ga=GA1.2.18690797.1545696515; _octo=GH1.1.2016878472.1545696515; tz=Asia%2FShanghai; has_recent_activity=1; _gat=1; user_session=NcILYQdtkHmpGzWE4FP00xFOE-0vJWKhuaw2v9yXQ78hoJ9n; __Host-user_session_same_site=NcILYQdtkHmpGzWE4FP00xFOE-0vJWKhuaw2v9yXQ78hoJ9n; logged_in=yes; dotcom_user=Fly744055970; _gh_sess=bTNieTVHQ3dMVkNsSlVCMzVhaWFpNFhtOWhkT0lWWENTUjE0YjlKcTlheGxkUDNnbS9MZENzKzZmRTVZZWYza296SzdiSFVQSFJKN3BlRkQ3Z0tDek85cEFXWENNQzFqSWJjT3BJdlRoRTZqdVgwR2ZEMW9hSXpoVlFvdDN2SThrMW9NRnZXRDhaRUEwdDlZVlphUXFzOXVtWkttL2xjaTJGbUE4cWYxZHV5QmVCMVZndXU4QXZJTm1FQWxYT3didGxVeWxaOXU0WE90NXBuWUZ4OWw4Zz09LS1veVdpYkRlajNGZTQ2WDgrUWZxSWFRPT0%3D--8f9fbe9a2f6b82a9e5d132765e2b28a5b44137d8'
#     # 字典推导式
#     cookies_dict = {i.split('=')[0]: i.split('=')[1] for i in cookies_str.split('; ')}
#
#     # 重写 父类 获取request 添加cookie
#     def start_requests(self):
#         # 遍历url列表 callback是回调函数
#         for url in self.start_urls:
#             yield scrapy.Request(url, cookies=self.cookies_dict, callback=self.parse_item)
#
#
#     def parse_item(self, response):
#         with open('01.html', 'wb') as f:
#             f.write(response.body)


#  requests — three ways to authenticate:
#  1. headers = {'Cookie':""}   (raw cookie header)
#  2. cookies = {}              (cookie dict)
#  3. log in via code with requests.session()

# class GithubSpider(scrapy.Spider):
#     name = 'git2'
#     allowed_domains = ['github.com']
#     # 登录url
#     start_urls = ['https://github.com/login']
#
#     def parse(self, response):
#
#         login_url = 'https://github.com/session'
#
#         # 登陆的参数
#         form_data = {
#             'commit': 'Sign in',
#             'utf8': '✓',
#             'authenticity_token': response.xpath('//*[@id="login"]/form/input[2]/@value').extract_first(),
#             'login': '2223626581s@gmail.com',
#             'password': 'stylove110',
#         }
#
#         yield scrapy.FormRequest(login_url, formdata=form_data,callback=self.parse_item)
#
#     def parse_item(self, response):
#         with open('02.html', 'wb') as f:
#             f.write(response.body)


# scrapy — three ways to authenticate:
# 1. cookies = {} passed to Request
# 2. log in via code; scrapy persists cookies automatically
# 3. from_response() auto-parses the form's hidden fields and action url

class GithubSpider(scrapy.Spider):
    """Log in to GitHub by submitting the login form via FormRequest.from_response.

    Scrapy parses the form at the start URL (picking up hidden fields such as
    the authenticity token automatically), posts the credentials, and then the
    authenticated response body is dumped to disk for inspection.
    """

    name = 'git3'
    allowed_domains = ['github.com']
    # Entry point: the login page whose form we will submit.
    start_urls = ['https://github.com/login']

    def parse(self, response):
        """Submit the login form found on the login page.

        ``from_response`` locates the form via ``formxpath``, merges in the
        hidden inputs (CSRF token etc.), and issues the POST to the form's
        action URL.
        """
        # SECURITY NOTE(review): credentials are hardcoded in source —
        # move them to environment variables or spider settings.
        yield scrapy.FormRequest.from_response(
            response,
            formxpath='//*[@id="login"]/form',
            formdata={
                'login': '2223626581s@gmail.com',
                'password': 'stylove110',
            },
            callback=self.parse_item,
        )

    def parse_item(self, response):
        """Persist the post-login response body so the result can be inspected."""
        with open('03.html', 'wb') as out:
            out.write(response.body)
























