# -*- coding:utf-8 -*-
# Created by Jin(jinzhencheng@outlook.com) at 2018/8/3 11:30

from scrapy import Request
from scrapy import Spider
from datetime import datetime
from bd_spider.items import BaiJiaHao

from bd_spider.config import SpiderConfig
from bd_spider.config import GenerationConfig
from bd_spider.util import logger
from bd_spider.action import simulation
from bd_spider.dal import category_dal
from lxml import etree
from random import choice

import time

class BaiJiaHaoSpider(Spider):
    """Spider that searches Baidu once per category keyword and follows the
    result links to BaiJiaHao author-profile pages, yielding one
    ``BaiJiaHao`` item per author found."""

    name = SpiderConfig.BAIJIAHAO_SPIDER_NAME

    def __init__(self, *args, **kwargs):
        super(BaiJiaHaoSpider, self).__init__(*args, **kwargs)
        # Private logger; name-mangled so subclasses do not clash with it.
        self.__logger = logger.get_logger()

    def start_requests(self):
        """Issue one Baidu search request per category, in random order.

        Bug fix: the original removed items from ``category_list`` while
        iterating it with ``enumerate``, which skips every other element,
        and removed the *enumerated* item rather than the randomly chosen
        one, so some keywords repeated while others were never requested.
        Draining a copy guarantees each category is requested exactly once.
        """
        pending = list(category_dal.list_category())
        while pending:
            category = choice(pending)
            pending.remove(category)
            keywords = category.name
            # NOTE(review): a literal space separates the prefix and the
            # keywords — confirm REQUEST_URL_PREFIX expects that separator.
            url = "%s %s" % (SpiderConfig.REQUEST_URL_PREFIX, keywords)
            yield Request(url=url, meta={"keywords": keywords})

    def parse(self, response):
        """Parse one Baidu result page.

        Follows every article link (``c-container`` blocks) into
        :meth:`parse_profile`, then follows the last pager anchor as the
        next result page. Meta (the search keywords) is propagated.
        """
        for sel in response.xpath("//div[contains(@class, 'c-container')]"):
            article_url = sel.xpath("./h3/a/@href")[0].extract()
            yield Request(url=article_url, meta=response.meta, callback=self.parse_profile)
        try:
            # NOTE(review): time.sleep blocks Scrapy's reactor; prefer the
            # DOWNLOAD_DELAY setting for throttling.
            time.sleep(SpiderConfig.REQUEST_WAIT_TIME)
            # IndexError here (no pager / last page) ends pagination via the
            # except below, which only logs.
            next_page_suffix = response.xpath("//div[@id='page']/a[last()]/@href")[0].extract()
            next_url = "http://www.baidu.com" + next_page_suffix
            yield Request(url=next_url, meta=response.meta, callback=self.parse)
        except Exception as e:  # fixed py2-only `except Exception, e` syntax
            self.__logger.error("Xpath parse elements error, detail: %s", e)

    def parse_profile(self, response):
        """Extract the author's app_id from an article page, fetch the
        author home page via the ``simulation`` browser helper, and yield a
        populated ``BaiJiaHao`` item.

        Two page layouts are handled: VIP-badged profiles (avatar carries a
        ``vip`` class) and plain profiles, which use different selectors.
        """
        try:
            self.__logger.info("Crawl ---- start crawl info from the website, keywords: %s" % response.meta["keywords"])
            if "404" not in response.url:
                # data-extralog looks like "...:<app_id>;..." — slice between
                # the last ':' and the last ';'.
                base_info = response.xpath("//div[@id='content-container']/@data-extralog")[0].extract()
                app_id = base_info[base_info.rindex(':') + 1: base_info.rindex(';')]
                url = SpiderConfig.author_home_url(app_id)
                simulation.request(url)
                final_url, source = simulation.response()
                time.sleep(GenerationConfig.DEFAULT_WAIT_TIME)
                if "404" not in final_url:
                    item = BaiJiaHao()
                    item["add_time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    item["is_valid"] = GenerationConfig.DEFAULT_IS_VALID
                    item["category"] = response.meta["keywords"]
                    item["home_url"] = url
                    item["app_id"] = app_id
                    page = etree.HTML(source)
                    # VIP (authenticated) profile layout, e.g.
                    # 'https://baijiahao.baidu.com/u?app_id=3014'.
                    if page.xpath("//div[@class='avatar']/div[contains(@class, 'vip')]"):
                        name = page.xpath("//div[@class='name']/text()")[0]
                        fans = page.xpath("//div[@class='fans']/text()")[0]
                        follow = page.xpath("//div[@class='follow']/text()")[0]
                        remark_sel = page.xpath("//div[@class='panel']/div[1]/text()")
                        if remark_sel:
                            item["remark"] = remark_sel[0]
                        description_sel = page.xpath("//div[@class='panel']/div[2]/text()")
                        if description_sel:
                            item["description"] = description_sel[0]
                        item["name"] = name
                        item["follow"] = follow
                        item["fans"] = fans
                        yield item
                    else:
                        # Plain (non-VIP) profile layout.
                        remark_sel = page.xpath("//div[contains(@class, 'authentication-item')]/descendant-or-self::text()")
                        if remark_sel:
                            item["remark"] = " ".join(remark_sel)
                        description_sel = page.xpath("//div[@class='sign']/text()")
                        if description_sel:
                            item["description"] = description_sel[0]
                        follow_sel = page.xpath("//div[@class='follow']/span[@class='num']/text()")
                        if follow_sel:
                            item["follow"] = " ".join(follow_sel)
                        fans_sel = page.xpath("//div[@class='fans']/span[@class='num']/text()")
                        if fans_sel:
                            item["fans"] = " ".join(fans_sel)
                        author_sel = page.xpath("//div[@class='name-item']/div[@class='name']/text()")
                        if author_sel:
                            item["name"] = author_sel[0]
                        yield item
        except Exception as e:  # fixed py2-only syntax
            # Bug fix: original mixed a '{}' placeholder with the '%'
            # operator ("...detail:{}" % e.message), which raises TypeError
            # inside the error handler itself. Use lazy %-style logging.
            self.__logger.error("An Exception happened when crawl info from website, detail: %s", e)




