# -*- coding: utf-8 -*-
import re
import time
import json
import scrapy
import urlparse
from PIL import Image
from scrapy.loader import ItemLoader
from ArticleSpider.items import ZhihuQuestionItem, ZhihuAnswerItem


class ZhihuSpider(scrapy.Spider):
    """Crawl zhihu.com: log in with phone number + captcha, then follow
    question links from the front page and parse each question page into a
    ZhihuQuestionItem.
    """

    name = 'zhihu'
    allowed_domains = ['www.zhihu.com']
    start_urls = ['http://www.zhihu.com/']
    # Pretend to be desktop Chrome; zhihu rejects the default scrapy UA.
    agent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36"
    header = {
        "User-Agent": agent
    }

    # Compiled once at class creation. Matches ".../zhihu.com/question/<id>...",
    # capturing the canonical question URL (group 1) and the numeric question
    # id (group 2). Raw string + escaped dot: the original "(.*zhihu.com/..."
    # let "." match any character.
    question_url_re = re.compile(r"(.*zhihu\.com/question/(\d+)).*")

    def parse(self, response):
        """Extract every anchor href on the page, resolve it against the
        current URL, and schedule question pages for parse_question.

        Non-question URLs are simply dropped (the original also never
        followed them further).
        """
        all_urls = response.css("a::attr(href)").extract()
        all_urls = [urlparse.urljoin(response.url, url) for url in all_urls]
        for url in all_urls:
            # Keep only absolute https links; relative fragments and
            # javascript: pseudo-links are discarded here.
            if not url.startswith("https"):
                continue
            match_obj = self.question_url_re.match(url)
            if match_obj:
                request_url = match_obj.group(1)
                yield scrapy.Request(request_url, headers=self.header, callback=self.parse_question)

    def parse_question(self, response):
        """Populate a ZhihuQuestionItem from a question page and yield it."""
        match_obj = self.question_url_re.match(response.url)
        # Bug fix: question_id was left unbound (NameError on the
        # add_value call below) whenever the URL failed to match.
        question_id = match_obj.group(2) if match_obj else ""
        item_loader = ItemLoader(item=ZhihuQuestionItem(), response=response)
        item_loader.add_css("title", "h1.QuestionHeader-title::text")
        item_loader.add_css("content", ".QuestionHeader-title")
        item_loader.add_value("url", response.url)
        item_loader.add_value("zhihu_id", question_id)
        item_loader.add_css("answer_num", ".QuestionMainAction::text")
        item_loader.add_css("comments_num", ".QuestionHeader-Comment button::text")
        item_loader.add_css("watch_user_num", ".NumberBoard-value::text")
        item_loader.add_css("topics", ".QuestionHeader-topics .Popover::text")
        # Bug fix: the original ended with `pass`, so the loaded item was
        # never emitted to the pipeline.
        question_item = item_loader.load_item()
        yield question_item

    def start_requests(self):
        """Entry point: fetch the sign-in page first so login can run
        before any crawling starts."""
        return [scrapy.Request("http://www.zhihu.com/#signin", headers=self.header, callback=self.login)]

    def login(self, response):
        """Scrape the _xsrf token from the sign-in page, then request a
        captcha image, carrying the pending form data along in meta."""
        match_obj = re.match(r'.*name="_xsrf" value="(.*?)"', response.text, re.DOTALL)
        xsrf = match_obj.group(1) if match_obj else ""

        if xsrf:
            # NOTE(review): credentials are hard-coded; move them to
            # settings or environment variables before committing.
            post_data = {
                "_xsrf": xsrf,
                "password": 'zy5945',
                "phone_num": '18516636360',
            }
            # Millisecond timestamp in the query string defeats caching of
            # the captcha image.
            checkcode_url = "https://www.zhihu.com/captcha.gif?r=" + str(int(time.time() * 1000)) + "&type=login&lang=en"
            yield scrapy.Request(checkcode_url, headers=self.header, meta={"post_data": post_data}, callback=self.login_after_captcha)

    def login_after_captcha(self, response):
        """Save the captcha image to disk, show it to the operator for
        transcription, then POST the login form."""
        post_url = "https://www.zhihu.com/login/phone_num"
        # The with-block closes the file; the original's extra f.close()
        # after the block was redundant and has been removed.
        with open("checkcode.gif", "wb") as f:
            f.write(response.body)
        try:
            im = Image.open("checkcode.gif")
            im.show()
            im.close()
        except IOError:
            # Best effort only: if the image cannot be decoded or displayed,
            # the operator can still open checkcode.gif by hand.
            pass
        post_data = response.meta.get("post_data")
        post_data["captcha"] = raw_input("checkcode:")
        return [scrapy.FormRequest(
            url=post_url,
            formdata=post_data,
            headers=self.header,
            callback=self.check_login,
        )]

    def check_login(self, response):
        """Check the login JSON response; on success, kick off the real
        crawl over start_urls."""
        text_json = json.loads(response.text)
        print(text_json)  # parenthesized form is valid in both py2 and py3
        # Bug fix: json.loads returns unicode under Python 2, so comparing
        # against the utf-8 byte-string literal was always False; the u""
        # prefix makes the comparison correct (and is a no-op on Python 3).
        if text_json.get("msg") == u"登录成功":
            for url in self.start_urls:
                yield scrapy.Request(url, dont_filter=True, headers=self.header)

