# -*- coding:utf-8 -*-
# PyCharm file
# Extract detailed user information from the fetched "about" page details

'''
 Main program: log in with cookies and crawl
 @author:zhangjian
 @date:2016-1-2
'''
import log
import requests
import tmp

try:
    import scrapy
    from scrapy.selector import Selector
except ImportError:
    log.logger.warning("import scrapy module exception.")
from scrapy.http import Request, FormRequest
from zhihu_spider.items.detailItems import ZhihuSpiderItem
import json
from bs4 import BeautifulSoup
import sys

host = 'https://www.zhihu.com'


def get_all_qurl_list(q_path=r'f://n1trox.txt'):
    """Read profile URLs, one per line, from a text file.

    :param q_path: path of the URL list file; defaults to the original
        hard-coded location so existing callers are unchanged.
    :return: list of lines with surrounding whitespace stripped.
    """
    q = []
    # ``with`` guarantees the handle is closed (the original leaked it),
    # and iterating the file object directly replaces the Python-2-only
    # ``xreadlines()`` while keeping line-by-line behaviour.
    with open(q_path, 'r') as q_file:
        for line in q_file:
            log.logger.info("line ," + line)
            q.append(line.strip())
    return q


class mainSpider(scrapy.spiders.Spider):
    """Crawl zhihu.com user 'about' pages and extract profile details.

    The spider reuses cookies produced by ``tmp.main()`` to appear logged
    in, reads the list of profile URLs from disk and yields one
    ``ZhihuSpiderItem`` per profile page.
    """
    name = 'aragog_detail'
    allowed_domains = ["zhihu.com"]
    start_urls = ['https://www.zhihu.com/']

    def __init__(self, *args, **kwargs):
        """
        Set up session state: cookies, request headers and login form data.

        ``*args``/``**kwargs`` are forwarded to the Scrapy ``Spider`` base
        class (the original never called ``super().__init__``).
        :return:
        """
        super(mainSpider, self).__init__(*args, **kwargs)
        self.cookies = {}
        # Login endpoint (the original assigned this twice with the same
        # value; the duplicate was removed).
        self.loginUrl = "http://www.zhihu.com/login/email"
        # Browser-like headers so zhihu.com does not reject the requests.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36',
            'Host': 'www.zhihu.com',
            'Origin': 'http://www.zhihu.com',
            'Connection': 'keep-alive',
            'Referer': 'http://www.zhihu.com/',
            'Content-Type': 'application/x-www-form-urlencoded',
            'X-Requested-With': 'XMLHttpRequest'
        }
        # Login form payload.
        # NOTE(review): credentials are hard-coded in source; they should be
        # moved to configuration / environment variables.
        self.data = {
            "email": 'zhangjian12424@gmail.com',
            "password": '',
            "rememberme": "true"
        }
        self.homePage = 'https://www.zhihu.com/people/jixin'
        self.session = requests.session()
        self.followers = 'https://www.zhihu.com/people/ZhangJianForV/followees'
        self.detail = 'https://www.zhihu.com/people/ZhangJianForV/about'

    def start_requests(self):
        '''
        Override start_requests() so the initial request carries the
        login cookies obtained from ``tmp.main()``.
        :return:
        '''
        self.logger.info("begin aragog_detail start_requests")
        cookies = tmp.main()
        self.cookies = cookies
        yield scrapy.Request(url=self.homePage,
                             meta={'cookiejar': self.homePage},
                             headers=self.headers,
                             cookies=self.cookies,
                             callback=self.after_login,
                             dont_filter=True)

    def after_login(self, response):
        '''
        After the login request succeeds, schedule one request per
        profile URL read from disk, e.g.
        'https://www.zhihu.com/people/ZhangJianForV/'
         ==>
        'https://www.zhihu.com/people/ZhangJianForV/about/'
        :param response
        :return:
        '''
        self.logger.info("begin aragog_detail after_login")
        for url in get_all_qurl_list():
            yield scrapy.Request(url=url,
                                 headers=self.headers,
                                 cookies=self.cookies,
                                 dont_filter=True,
                                 callback=self.parse)

    def parse(self, response):
        """
        get zhihu_user detail information
        :param response:
        :return:
        """
        self.logger.info("begin aragog_detail parse")
        selector = Selector(response)
        zhihu_item = ZhihuSpiderItem()

        # The profile URL ends with '/about', so the user id is the
        # second-to-last path segment.
        zhihu_item['zhi_id'] = response.url.split('/')[-2]
        zhihu_item["nick"] = ''.join(selector.xpath("//a[@class='name']/text()").extract())

        zhihu_item["location"] = ''.join(
                response.xpath("//span[@class='info-wrap']/span[@class='location item']/@title").extract())
        # NOTE: the field name 'bussiness' is misspelled but kept — it is
        # part of the item schema consumed downstream.
        zhihu_item["bussiness"] = ''.join(
                response.xpath("//span[@class='info-wrap']/span[@class='business item']/@title").extract())

        # Gender is encoded in the icon's CSS class (e.g. 'icon-profile-male'),
        # so take the suffix after the last '-'.
        zhihu_item["sex"] = ''.join(response.xpath("//span[@class='info-wrap']/span[@class='item gender']/i/@class").extract()).split('-')[
            -1]
        zhihu_item["vote"] = ''.join(
                response.xpath("//div[@class='zm-profile-module-desc']/span[2]/strong/text()").extract())
        zhihu_item["thanks"] = ''.join(
                response.xpath("//div[@class='zm-profile-module-desc']/span[3]/strong/text()").extract())
        zhihu_item["fav"] = ''.join(
                response.xpath("//div[@class='zm-profile-module-desc']/span[4]/strong/text()").extract())
        zhihu_item["share"] = ''.join(
                response.xpath("//div[@class='zm-profile-module-desc']/span[5]/strong/text()").extract())

        zhihu_item["employment"] = ''.join(
                response.xpath("//span[@class='info-wrap']/span[@class='employment item']/@title").extract())
        zhihu_item["position"] = ''.join(
                response.xpath("//span[@class='info-wrap']/span[@class='position item']/@title").extract())

        zhihu_item["edu"] = ''.join(response.xpath("//span[@class='education item']/@title").extract())
        zhihu_item["edu_extra"] = ''.join(response.xpath("//span[@class='education-extra item']/@title").extract())
        zhihu_item["followees"] = ''.join(
                selector.xpath("//div[@class='zm-profile-side-following zg-clear']/a[1]/strong/text()").extract())
        # NOTE: 'followeers' is misspelled but kept — part of the item schema.
        zhihu_item["followeers"] = ''.join(
                selector.xpath("//div[@class='zm-profile-side-following zg-clear']/a[2]/strong/text()").extract())
        zhihu_item["column"] = ''.join(
                selector.xpath("//div[@class='zu-main-sidebar']/div[3]/div/div[1]/a/strong/text()").extract())
        zhihu_item["topics"] = ''.join(selector.xpath(
                "//div[@class='zu-main-sidebar']/div[4]/div/div[1]/a/strong/text()").extract())
        zhihu_item["weibo"] = ''.join(selector.xpath("//div[@class='weibo-wrap']/a/@href").extract())

        zhihu_item["topics_list"] = ''.join(response.xpath("//div[@id='zh-profile-following-topic']/a/img/@alt").extract())

        yield zhihu_item
