#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: 登录爬虫
Desc : 模拟登录https://github.com后将自己的issue全部爬出来
tips：使用chrome调试post表单的时候勾选Preserve log和Disable cache
"""
import logging
import re
import sys
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.http import Request, FormRequest, HtmlResponse




class SHQHMSpider(CrawlSpider):
    """Login spider for miaotianxia.com.

    Fetches the login page, submits the login form (keeping the session in
    Scrapy's cookiejar #1), then crawls the buy-product listing pages; the
    Rule below extracts the per-item links and hands them to parse_page.
    """
    name = "miaotianxia111"
    allowed_domains = ["miaotianxia.com"]
    start_urls = []

    rules = (
        # Message list links.
        Rule(LinkExtractor(
                           restrict_xpaths='/html/body/div[4]/div[2]/div[3]/div/ul/li[2]/a'),
             callback='parse_page'),
        # Next page. If callback is None, follow defaults to True; otherwise it defaults to False.
        #Rule(LinkExtractor(restrict_xpaths='//a[@class="next_page"]')),
    )

    # Switch: crawl ALL published buy-request pages? False = only the first
    # couple of pages, True = everything (slow; when the data is correct a
    # single full pass is enough). by cq
    GetthisAll = False

    # Build the start URL list once. Previously the identical loop body was
    # duplicated in both branches of an if/else with only the upper bound
    # differing; this produces the exact same URLs (1000 pages at most in
    # full mode, 2 pages otherwise).
    for num in range(1, 1000 if GetthisAll else 3):
        start_urls.append("https://www.miaotianxia.com/product/buy?p=" + str(num))

    # Headers for the login POST — mimics a real Chrome session so the
    # server accepts the form submission.
    post_login_headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Type": "application/x-www-form-urlencoded",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36",
        "Referer": "https://user.miaotianxia.com/",
    }

    def start_requests(self):
        """Override the spider entry point: fetch the login page first so
        post_login can submit the form. The request opens cookiejar #1,
        which every later request carries along via meta."""
        return [Request("https://user.miaotianxia.com/public/login",
                        meta={'cookiejar': 1}, callback=self.post_login)]

    def post_login(self, response):
        """Submit the login form found on the login page.

        FormRequest.from_response is a Scrapy helper that pre-fills any
        hidden form fields (e.g. an authenticity token) from *response*,
        so they need not be scraped manually. On completion after_login
        is invoked with the post-login response.
        """
        beforereferer = "https://www.miaotianxia.com/product/buy?p=1"
        return [FormRequest.from_response(response,
                                          url='https://user.miaotianxia.com/public/login',
                                          meta={'cookiejar': response.meta['cookiejar']},
                                          headers=self.post_login_headers,  # note: custom headers matter here
                                          formdata={
                                              'utf8': '✓',
                                              'username': 'fakeyoulife',
                                              'password': ' fakeyoulife',
                                              'referer': beforereferer
                                          },
                                          callback=self.after_login,
                                          dont_filter=True
                                          )]

    def after_login(self, response):
        """After a successful login, emit the initial listing requests.

        The Rules defined on the class handle link extraction from these
        pages, so plain Requests (carrying the cookiejar) are enough here.
        """
        for url in self.start_urls:
            yield Request(url, meta={'cookiejar': response.meta['cookiejar']})

    def parse_page(self, response):
        """Callback for pages matched by the LinkExtractor rule above."""
        # print() with a single argument behaves identically on Python 2;
        # the original `print x` statement form is a SyntaxError on Python 3.
        print(response.url)
        issue_title = response.xpath(
            '/html/body/div[4]/div[2]/div[2]/div/table/tbody/tr[1]/td[2]/a/text()').extract_first()
        # NOTE(review): extract_first() returns None when the XPath matches
        # nothing; printing None is harmless, so no guard is added.
        print(issue_title)

    def _requests_to_follow(self, response):
        """Override of CrawlSpider._requests_to_follow whose only change is
        propagating the 'cookiejar' meta key into every follow-up request,
        so rule-generated requests stay inside the logged-in session."""
        if not isinstance(response, HtmlResponse):
            return
        seen = set()
        for n, rule in enumerate(self._rules):
            links = [l for l in rule.link_extractor.extract_links(response) if l not in seen]
            if links and rule.process_links:
                links = rule.process_links(links)
            for link in links:
                seen.add(link)
                r = Request(url=link.url, callback=self._response_downloaded)
                # The cookiejar entry below is the rewritten part.
                r.meta.update(rule=n, link_text=link.text, cookiejar=response.meta['cookiejar'])
                yield rule.process_request(r)
