# -*- coding: UTF-8 -*-
"""
Created on 2018年5月5日
@author: Leo
"""

# Standard library
import hashlib
from datetime import datetime
from collections import OrderedDict

# Third-party libraries
import scrapy


class SegmentFaultSpider(scrapy.Spider):
    """Search segmentfault.com ("思否") for articles matching a keyword and
    scrape metadata (title, link, author, publish date, tags, view/bookmark
    counts) from every article detail page.

    Spider arguments (pass via ``scrapy crawl SegmentFault -a key=value``):
        text: the search keyword (required; KeyError if missing).
        total_page: number of search-result pages to crawl (required).
    """

    name = "SegmentFault"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Required spider arguments — fail fast with KeyError if absent.
        self.search_text = kwargs['text']
        self.page = kwargs['total_page']

        # One search-result URL per requested page, built up front.
        self.start_urls = [
            "https://segmentfault.com/search?q={}&type=article&page={}".format(
                self.search_text, page)
            for page in range(1, int(self.page) + 1)
        ]

        # Kept for backward compatibility; per-item dicts are now created
        # locally inside parse_outside_data().
        self._data_od = OrderedDict()

        # Browser-like request headers.  The HTTP/2 pseudo-headers
        # (":authority", ":method", ":path", ":scheme") that were copied
        # here from browser dev-tools have been removed: pseudo-headers are
        # not real header fields and must never be sent in an HTTP/1.x
        # request (servers may reject them).
        self.header = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "referer": "https://segmentfault.com/search?q={}".format(self.search_text),
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.3964.2 Safari/537.36"
        }

    def start_requests(self):
        """Issue one GET request per pre-built search-result page URL."""
        for req_url in self.start_urls:
            yield scrapy.Request(url=req_url,
                                 method="GET",
                                 callback=self.parse_outside_data,
                                 headers=self.header)

    def parse_outside_data(self, response):
        """Extract title and link of each article on a search-result page,
        then follow the link, passing the partial item along in ``meta``."""
        sections = response.xpath('//div[@class="col-md-8 main search-result"]/section')
        for section in sections:
            # Build each item locally instead of on ``self`` so concurrent
            # callbacks can never observe another request's half-built item.
            data = OrderedDict()
            data['title'] = section.xpath('string(h2/a)').extract_first()
            data['href'] = "https://segmentfault.com{}".format(
                section.xpath('string(h2/a/@href)').extract_first())
            yield scrapy.Request(url=data['href'],
                                 method="GET",
                                 callback=self.parse,
                                 meta={'data': data},
                                 headers=self.header)

    @staticmethod
    def _normalize_date(raw):
        """Convert a raw publish-date string such as "2018年5月5日 发布"
        into a ``datetime``.

        Strips the "发布" ("published") suffix, turns the Chinese
        year/month/day markers into dashes, and assumes year 2018 when the
        page omits the year (site behaviour when this spider was written —
        TODO confirm against live pages).
        """
        cleaned = raw.replace("发布", "")
        if "年" not in cleaned:
            cleaned = "2018-{}".format(cleaned.replace("月", "-").replace("日", ""))
        else:
            cleaned = cleaned.replace("年", "-").replace("月", "-").replace("日", "")
        return datetime.strptime(cleaned, "%Y-%m-%d")

    def parse(self, response):
        """Scrape metadata from an article detail page and assemble the item.

        Raises IndexError if neither page layout yields an author/date line.
        """
        data = response.meta['data']
        blog_info = response.xpath('//div[@class="content__tech blog-type-common blog-type-0-before"]')
        # Author/date lines for the "type 0" layout; fall back to the
        # "type 1" layout when none are found.
        date_lines = [
            d.replace("\n", "").strip()
            for d in blog_info.xpath('div[@class="article__author"]/text()').extract()
        ]
        if not date_lines:
            blog_info_1 = response.xpath('//div[@class="content__tech blog-type-common blog-type-1-before"]')
            date_lines = [
                d.replace("\n", "").strip()
                for d in blog_info_1.xpath('div[@class="article__author"]/text()').extract()
            ]
        data['createDate'] = self._normalize_date(date_lines[-1])
        data['authorId'] = blog_info.xpath('string(div[@class="article__author"]/a/strong)').extract_first()
        data['content'] = ""
        data['tag'] = ",".join(
            li.xpath('string(a)').extract_first().strip()
            for li in blog_info.xpath('ul/li'))
        data['visitorsIds'] = []
        data['visitorsNum'] = 0
        # Article type (atype): 0 = learning-path recommended article,
        # 1 = knowledge-point recommended article,
        # 2 = knowledge-point must-read article (teacher-published).
        data['atype'] = 1
        data['knowledgePointId'] = ""
        data['knowledgePointName'] = ""
        data['sectionName'] = ""
        data['courseName'] = ""
        # "思否" is SegmentFault's Chinese name.
        data['origin'] = "思否"
        try:
            # Prefer gb2312 so MD5s stay compatible with existing records;
            # fall back to UTF-8 for titles gb2312 cannot encode.
            data['titleMD5'] = \
                hashlib.md5(data['title'].encode(encoding='gb2312')).hexdigest()
        except UnicodeEncodeError:
            data['titleMD5'] = \
                hashlib.md5(data['title'].encode(encoding='utf-8')).hexdigest()
        data['contentMD5'] = ""
        data['recommendedId'] = ""
        # View counter may be missing/empty on some pages — default to "0"
        # instead of crashing on None/"".  "1.2k"-style counts -> integer.
        visitor_num = response.xpath('string(//strong[@class="no-stress"])').extract_first() or "0"
        if "k" in visitor_num:
            visitor_num = int(float(visitor_num.replace("k", "")) * 1000)
        data['aViewsCount'] = int(visitor_num)
        data['aCollectionCount'] = response.xpath('string(//span[@id="mainBookmarkNum"])').extract_first()
        data['searchName'] = self.search_text
        data['remark'] = ""
        # Debug output only; switch to the yield below to feed item pipelines.
        print(dict(data))
        # yield data

