# -*- coding: utf-8 -*-
import scrapy
import json,re
from urllib import parse
############################### See the tutorial here: https://www.jianshu.com/p/7a1b8c144d83
class ZhihuSpider(scrapy.Spider):
    """Spider that logs into the cg.zhangsiming.com admin panel, then
    crawls every link reachable from the task index page.

    Flow: start_requests() POSTs the login form -> check_login()
    re-issues start_urls -> parse() fans out to every href on the page
    -> parse_question() extracts and prints each page's panel title.
    """

    name = 'zhang'
    allowed_domains = ['cg.zhangsiming.com']
    start_urls = ['http://cg.zhangsiming.com/admin.php?c=task&m=index']
    # Headers sent with the login request: HOST pins the virtual host,
    # User-Agent masquerades as desktop Chrome.
    header = {
        "HOST": "cg.zhangsiming.com",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
    }

    def parse(self, response):
        """Collect every href on the page, resolve it to an absolute URL,
        and schedule it for parse_question()."""
        all_urls = response.css("a::attr(href)").extract()
        # urljoin turns relative hrefs into absolute URLs against the
        # page's own URL.
        all_urls = [parse.urljoin(response.url, url) for url in all_urls]
        for url in all_urls:
            yield scrapy.Request(url, callback=self.parse_question)

    def parse_question(self, response):
        """Extract the page's panel title and print URL + title."""
        title = response.xpath('//h3[@class="panel-title"]/text()').extract_first()
        url = response.url
        print(url)
        print(title)

    def start_requests(self):
        """Entry point: submit the login form before crawling start_urls.

        NOTE(review): credentials are hard-coded here — consider moving
        them to Scrapy settings or environment variables.
        """
        post_url = "http://cg.zhangsiming.com/admin.php?c=login"
        post_data = {
            'do_login': 'true',
            'username': 'admin',
            'passwd': '111111',
        }

        # FormRequest submits the credentials as a form-encoded POST body.
        return [scrapy.FormRequest(
            url=post_url,
            formdata=post_data,
            headers=self.header,
            callback=self.check_login,
        )]

    def check_login(self, response):
        """After the login POST returns, kick off the real crawl.

        TODO: verify the login actually succeeded (e.g. inspect the
        response body or status) before scheduling the crawl.
        """
        for url in self.start_urls:
            # dont_filter=True bypasses Scrapy's duplicate-request filter.
            yield scrapy.Request(url, dont_filter=True)
