# -*- coding:utf-8 -*-
# PyCharm file

'''
Main spider: crawls Zhihu using cookie-based login.
@author: zhangjian
@date: 2016-1-2
'''
import log
import requests
import tmp

try:
    import scrapy
    from scrapy.selector import Selector
except ImportError:
    log.logger.warning("import scrapy module exception.")
from scrapy.http import Request, FormRequest
from zhihu_spider.items.detailItems import AboutUrls
import json
from bs4 import BeautifulSoup

host = 'https://www.zhihu.com'

class mainSpider(scrapy.spiders.Spider):
    """Zhihu crawler that authenticates with pre-fetched cookies, then walks a
    user's follower/followee lists (via Zhihu's ajax endpoints) and yields the
    "about"-page URL of every discovered user as an ``AboutUrls`` item."""

    name = 'aragog'
    allowed_domains = ["zhihu.com"]
    start_urls = ['https://www.zhihu.com/people/ZhangJianForV']

    def __init__(self):
        """
        Set up cookies, browser-like headers, login form data and entry URLs.
        :return:
        """
        super(mainSpider, self).__init__()
        self.cookies = {}
        # Email/password login endpoint (was assigned twice in the original).
        self.loginUrl = 'http://www.zhihu.com/login/email'
        # Browser-like headers so Zhihu serves the normal HTML pages.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36',
            'Host': 'www.zhihu.com',
            'Origin': 'http://www.zhihu.com',
            'Connection': 'keep-alive',
            'Referer': 'http://www.zhihu.com/',
            'Content-Type': 'application/x-www-form-urlencoded',
            'X-Requested-With': 'XMLHttpRequest'
        }
        # Login form payload.
        # NOTE(review): credentials are hard-coded in source; move them to a
        # config file or environment variables before publishing this code.
        self.data = {
            "email": 'zhangjian12424@gmail.com',
            "password": 'youknow123',
            "rememberme": "true"
        }
        self.homePage = 'https://www.zhihu.com/people/n1trox'
        self.session = requests.session()
        self.followers = 'https://www.zhihu.com/people/ZhangJianForV/followees'
        self.detail = 'https://www.zhihu.com/people/ZhangJianForV/about'

    def start_requests(self):
        """
        Override start_requests() so the first request carries the cookies
        obtained by tmp.main() (cookie-based login instead of form login).
        :return: generator of scrapy.Request
        """
        self.cookies = tmp.main()
        yield scrapy.Request(url=self.homePage,
                             meta={'cookiejar': self.homePage},
                             headers=self.headers,
                             cookies=self.cookies,
                             callback=self.after_login,
                             dont_filter=True)

    def after_login(self, response):
        """
        Extract the profile link from the landing page and request it, e.g.
        'https://www.zhihu.com/people/ZhangJianForV'
         ==>
        request handled by parse_user.
        :param response: profile landing page
        :return: generator of scrapy.Request
        """
        selector = Selector(response)
        names = selector.xpath(
            "//a[@class='zm-profile-header-user-detail zg-link-litblue-normal']/@href").extract()
        print(names)
        # Guard: an empty result usually means the cookies did not log us in.
        if not names:
            log.logger.warning("after_login: no profile link found; login may have failed.")
            return
        yield scrapy.Request(url=str(host + names[0]),
                             cookies=self.cookies,
                             headers=self.headers,
                             callback=self.parse_user,
                             dont_filter=True)

    def parse_user(self, response):
        """
        Read follower/followee counts plus the _xsrf token and hash_id from a
        profile page, then POST to Zhihu's ajax list endpoints, one request
        per page of 20 users.
        :param response: a user's profile page
        :return: generator of FormRequest
        """
        selector = Selector(response)

        followees_num = ''.join(
                selector.xpath("//div[@class='zm-profile-side-following zg-clear']/a[1]/strong/text()").extract())
        followers_num = ''.join(
                selector.xpath("//div[@class='zm-profile-side-following zg-clear']/a[2]/strong/text()").extract())
        followees_link = host + \
                         selector.xpath(
                                 "//div[@class='zm-profile-side-following zg-clear']/a[1]/@href").extract()[
                             0]
        followers_link = host + \
                         selector.xpath(
                                 "//div[@class='zm-profile-side-following zg-clear']/a[2]/@href").extract()[
                             0]

        # CSRF token and the opaque per-user id required by the ajax endpoints.
        _xsrf = ''.join(selector.xpath('//input[@name="_xsrf"]/@value').extract())
        hash_id = ''.join(
                selector.xpath('//div[@class="zm-profile-header-op-btns clearfix"]/button/@data-id').extract())

        # Per-request headers with the proper Referer. Copy self.headers:
        # plain assignment would alias the same dict, so the second Referer
        # assignment would clobber the first (the original bug).
        headers_followee = dict(self.headers)
        headers_followee['Referer'] = followees_link

        headers_follower = dict(self.headers)
        headers_follower['Referer'] = followers_link

        # followers: one ajax POST per page of 20 entries
        num = int(followers_num) if followers_num else 0
        page_num = num // 20 + (1 if num % 20 else 0)
        for i in range(page_num):
            params = json.dumps({"hash_id": str(hash_id), "order_by": "created", "offset": i * 20})
            payload = {"method": "next", "params": params, "_xsrf": str(_xsrf)}
            yield FormRequest(url='https://www.zhihu.com/node/ProfileFollowersListV2',
                              headers=headers_follower,
                              formdata=payload,
                              cookies=self.cookies,
                              callback=self.parse_ajax,
                              dont_filter=True
                              )

        # followees: same pagination scheme
        num = int(followees_num) if followees_num else 0
        page_num = num // 20 + (1 if num % 20 else 0)
        for i in range(page_num):
            params = json.dumps({"hash_id": hash_id, "order_by": "created", "offset": i * 20})
            payload = {"method": "next", "params": params, "_xsrf": _xsrf}
            yield FormRequest(url='https://www.zhihu.com/node/ProfileFolloweesListV2',
                              headers=headers_followee,
                              formdata=payload,
                              cookies=self.cookies,
                              callback=self.parse_ajax,
                              dont_filter=True)

    def parse_ajax(self, response):
        """
        Parse the ajax JSON response: each entry of "msg" is an HTML snippet
        for one user; extract the profile link and yield it as an item.
        :param response: ajax response whose body is JSON with a "msg" list
        :return: generator of AboutUrls items
        """
        msg = json.loads(response.body)["msg"]
        for html in msg:
            aboutUrl = AboutUrls()
            soup = BeautifulSoup(html)
            h2 = soup.find('h2')
            aboutUrl["aboutUrl"] = user_url = h2.a['href'] + "/about"
            print(user_url)
            yield aboutUrl
