# -*- coding: utf-8 -*-

import json
import re
import datetime

# Current time:
# datetime.datetime.now()

# Convert a Unix timestamp (seconds) to a datetime:
# datetime.datetime.fromtimestamp(sec)

# Python 3 URL utilities
from urllib import parse

# Python 2 equivalent:
# import urlparse

import scrapy
from scrapy.loader import ItemLoader

from ArticleSpider.items import RubyChinaQuestionItem,RubyChinaAnswerItem
from ArticleSpider.util.agent import random_user_agent


class IxueaSpider(scrapy.Spider):
    """Crawl ruby-china.org and extract question and answer items.

    Flow: start_requests -> prepare_login (grab CSRF token) ->
    login (captcha + sign-in POST) -> checkLogin -> parse (link
    discovery) -> parse_question (item extraction).
    """

    name = 'ixuea'
    allowed_domains = ['ruby-china.org']
    # start_urls = ['https://ruby-china.org/']
    start_urls = ['https://ruby-china.org/topics/1366']

    # NOTE(review): credentials hard-coded in source — move them to
    # Scrapy settings or environment variables before sharing/deploying.
    username = "lifengsoft"
    password = "RPQ13698275312n"

    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
    }

    # Cookies carry the logged-in session across requests.
    custom_settings = {
        "COOKIES_ENABLED": True
    }

    # Question-detail URL matcher, e.g.
    #   https://ruby-china.org/topics/37629#reply-347612
    #   https://ruby-china.org/topics/37629
    # Group 1 is the canonical question URL with any "#reply-..." anchor
    # or trailing path stripped; group 2 is the numeric question id.
    # (Fixed: the old non-raw pattern "($|/)" could not match the "#"
    # anchor variant at all, so those links were re-crawled as plain pages.)
    _question_re = re.compile(r"(.*ruby-china\.org/topics/(\d+))(?:$|[/#]).*")

    def parse(self, response):
        """Follow every link on the page.

        Links that look like question-detail pages are routed to
        :meth:`parse_question` (with the question id in ``meta``);
        everything else is re-crawled with this method.

        :param response: any crawled HTML page
        """
        hrefs = response.css("a::attr(href)").extract()

        # Resolve relative links against the current URL, keep https only.
        urls = [parse.urljoin(response.url, href) for href in hrefs]
        urls = [url for url in urls if url.startswith("https")]

        for url in urls:
            # Rotate the User-Agent on every request to look less bot-like.
            self.headers["User-Agent"] = random_user_agent()

            match_result = self._question_re.match(url)
            if match_result:
                question_url = match_result.group(1)
                question_id = match_result.group(2)
                yield scrapy.Request(question_url,
                                     meta={"id": question_id},
                                     headers=self.headers,
                                     callback=self.parse_question)
            else:
                # Not a question page: keep following links.
                yield scrapy.Request(url, headers=self.headers,
                                     callback=self.parse)

    def parse_question(self, response):
        """Extract one question item plus every answer item on the page.

        The question id arrives via ``response.meta["id"]`` (set by
        :meth:`parse`). Answers are parsed inline here because Ruby China
        renders all replies on the question page.
        """
        question_id = response.meta.get("id", "")

        # ItemLoader proxies the "read from response, fill the Item" logic.
        item_loader = ItemLoader(item=RubyChinaQuestionItem(),
                                 response=response)
        if question_id:
            # Guard: int("") would raise if the meta id were ever missing.
            item_loader.add_value("id", int(question_id))
        item_loader.add_css("title", ".media-body .title::text")
        item_loader.add_css("content", "#main .card-body")
        item_loader.add_value("url", response.url)
        item_loader.add_css("view_counts", ".media-body .info::text")

        # Run the queued extraction operations.
        item = item_loader.load_item()

        for reply in response.css(".items .reply"):
            answer_item = RubyChinaAnswerItem()
            answer_item["id"] = reply.css("::attr(data-id)").extract_first()
            answer_item["content"] = reply.css(".infos .markdown").extract_first()
            answer_item["question_id"] = question_id
            answer_item["author_id"] = reply.css(".infos .user-name::text").extract_first()
            yield answer_item

        yield item

    def parse_answer(self, response):
        """Legacy answer parser (replies are now handled in parse_question).

        Fixed: the original combined "::attr(data-id)::text" — an invalid
        selector — and yielded Selector objects instead of extracted strings.
        """
        for reply in response.css(".items .reply"):
            answer_item = RubyChinaAnswerItem()
            answer_item["id"] = reply.css("::attr(data-id)").extract_first()
            answer_item["content"] = reply.css(".infos .markdown").extract_first()
            yield answer_item

        # Ruby China replies are not paginated on the pages seen so far,
        # so there is no "next page" request here.

    def start_requests(self):
        """Entry point: fetch the sign-in page first so we can log in."""
        return [scrapy.Request("https://ruby-china.org/account/sign_in",
                               headers=self.headers,
                               callback=self.prepare_login)]

    def prepare_login(self, response):
        """Pull the CSRF token from the sign-in page, then fetch the captcha.

        The captcha must be fetched through the same Scrapy session (so the
        cookies line up) — not via a standalone requests/session call — and
        the actual login POST is deferred to the captcha callback.
        """
        csrf = response.css('meta[name=csrf-token]::attr(content)').extract_first()

        cap_url = "https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1547455970626&di=81c90b6c569594978acb0b988e8c1535&imgtype=0&src=http%3A%2F%2Fwww.xiaobaixitong.com%2Fd%2Ffile%2Fhelp%2F2018-08-06%2Ff15ce5d652d8da38e9e0e384f35b39d7.png"
        yield scrapy.Request(cap_url, headers=self.headers,
                             meta={"csrf": csrf}, callback=self.login)

    def login(self, response):
        """Save the captcha image, ask the operator to type it, then POST
        the sign-in form with the CSRF token attached.

        Bug fixed: ``cap = input(...)`` was indented inside the ``except``
        block, so it only ran when *displaying* the image failed; on the
        normal path ``cap`` was unbound and building ``form_data`` raised
        NameError. The prompt now runs unconditionally.
        """
        with open("capt.jpg", "wb") as f:
            f.write(response.body)

        # Best effort: preview the captcha if PIL/display are available.
        from PIL import Image
        try:
            im = Image.open("capt.jpg")
            im.show()
            im.close()
        except Exception:
            pass

        # Always ask for the captcha, whether or not the preview worked.
        # (Python 2 would use raw_input here.)
        cap = input("输入验证码\n")

        form_data = {
            "user[login]": self.username,
            "user[password]": self.password,
            "user[remember_me]": "1",
            "cap": cap
        }

        self.headers['x-csrf-token'] = response.meta.get("csrf", "")

        return [scrapy.FormRequest(
            url="https://ruby-china.org/account/sign_in",
            formdata=form_data,
            headers=self.headers,
            callback=self.checkLogin
        )]

    def checkLogin(self, response):
        """Verify the login POST and, on HTTP 200, start the real crawl.

        ``dont_filter=True`` because the sign-in flow may already have
        touched the start URLs and they would otherwise be deduplicated.
        """
        if response.status == 200:
            print("登录成功")
            for url in self.start_urls:
                yield scrapy.Request(url, headers=self.headers,
                                     dont_filter=True)