#coding=utf-8

from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapy.linkextractors import LinkExtractor
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.spiders import Rule,CrawlSpider
from zhihuSpider.items import ZhihuspiderItem
from scrapy.http import Request, FormRequest
import Queue
from scrapy.http.cookies import CookieJar
import os
import time
import random

# Login credentials for the zhihu.com account.
# SECURITY NOTE(review): these were hardcoded in source control, which leaks
# the account. They can now be supplied via ZHIHU_EMAIL / ZHIHU_PASSWORD
# environment variables; the original literals remain as defaults so
# existing behavior is unchanged when the variables are not set.
email = os.environ.get('ZHIHU_EMAIL', 'lck5602@126.com')
password = os.environ.get('ZHIHU_PASSWORD', '19891109#')

class ZhihuMessageSpider(CrawlSpider):
    name = 'zhihu_message_spider2'

    allowde_domains = ['zhihu.com']
    start_urls = [
        'https://www.zhihu.com',
        'https://www.zhihu.com/people/e-miao-de-nai-ba',
        'https://www.zhihu.com/people/xiaxiaozheng'
    ]
    url_queue = Queue.Queue()

    zhihu_url = "https://www.zhihu.com"
    login_url = "https://www.zhihu.com/login/email"
    domain = "https://www.zhihu.com"

    rules = (
        Rule(SgmlLinkExtractor(allow=('/people/.*',), )),
        # Rule(SgmlLinkExtractor(allow=('https://www.zhihu.com/people',), )),
        Rule(SgmlLinkExtractor(allow=(r'www\.zhihu\.com/people/',)), callback='end_login', follow=True),
    )

    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip,deflate",
        "Accept-Language": "en-US,en;q=0.8,zh-TW;q=0.6,zh;q=0.4",
        "Connection": "keep-alive",
        "Content-Type": " application/x-www-form-urlencoded; charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36",
        "Referer": "http://www.zhihu.com/"
    }

    # 重写了爬虫类的方法, 实现了自定义请求, 运行成功后会调用callback回调函数
    def start_requests(self):
        yield Request(
            url=self.zhihu_url,
            headers=self.headers,
            meta={
                # "proxy": proxy,
                "cookiejar": 1
            },
            callback=self.request_captcha
        )

    def request_captcha(self, response):
        # 获取_xsrf值
        _xsrf = response.css('input[name="_xsrf"]::attr(value)').extract()[0]
        # 获得验证码的地址
        captcha_url = "http://www.zhihu.com/captcha.gif?r=" + str(time.time() * 1000)
        # 准备下载验证码
        # 获取请求
        yield Request(
            url=captcha_url,
            headers=self.headers,
            meta={
                # "proxy": proxy,
                "cookiejar": response.meta["cookiejar"],
                "_xsrf": _xsrf
            },
            callback=self.download_captcha
        )

    def download_captcha(self, response):
        # 下载验证码
        with open("captcha.gif", "wb") as fp:
            fp.write(response.body)
        # 打开验证码
        os.system('open captcha.gif')
        # 输入验证码
        print "请输入验证码:\n"
        captcha = raw_input()
        # 输入账号和密码
        yield FormRequest(
            url=self.login_url,
            headers=self.headers,
            formdata={
                "email": email,
                "password": password,
                "_xsrf": response.meta["_xsrf"],
                "remember_me": "true",
                "captcha": captcha
            },
            meta={
                # "proxy": proxy,
                "cookiejar": response.meta["cookiejar"],
            },
            callback=self.parse_item
        )

    def parse_item(self, response):
        '''
            现在已经登录,请求www.zhihu.com的页面
        '''
        for url in self.start_urls:
            yield Request(url = url,
                                 headers=self.headers,
                                 meta={
                                     # "proxy": proxy,
                                     "cookiejar": response.meta["cookiejar"],
                                     "from": {"sign": "else", "data": {}},
                                 },
                                 callback=self.end_login,
                                 dont_filter=True)

    def end_login(self, response):
        # 至此已经登录
        a = random.randint(1,1000)
        with open("base%d.html"%a, "wb") as fp:
            fp.write(response.body)
