# -*- coding: UTF-8 -*-
"""
Created on 2018年5月4日
@author: Leo
"""

# 系统库
import json
import hashlib
from collections import OrderedDict
from datetime import datetime
from urllib.parse import quote

# 第三方库
import scrapy


class JuejinSpider(scrapy.Spider):
    """Search juejin.im for a keyword and yield one ordered item per article.

    Crawl flow:
      1. ``__init__`` pre-builds one search-API URL per requested result page.
      2. ``parse_data_list`` extracts the entry ids from a search page and
         batches them into a single detail request.
      3. ``parse`` turns each detail entry into an ``OrderedDict`` item.
    """

    name = "Juejin"

    def __init__(self, **kwargs):
        """Initialize from crawl arguments.

        Required kwargs (raises ``KeyError`` if missing):
            text:       the search keyword.
            total_page: how many result pages to fetch.
        """
        self.search_text = kwargs['text']
        self.page = kwargs['total_page']

        # Percent-encode the keyword so non-ASCII (e.g. Chinese) queries and
        # reserved characters ('&', '=', ...) form a valid URL query string.
        encoded_query = quote(self.search_text)
        search_api = ("https://search-merger-ms.juejin.im/v1/search"
                      "?query={}&page={}&raw_result=false&src=web")
        # One pre-built search URL per result page, pages numbered from 1.
        self.start_urls = [
            search_api.format(encoded_query, page)
            for page in range(1, int(self.page) + 1)
        ]

        super().__init__(**kwargs)

    # Request layer
    def start_requests(self):
        """Issue one GET request per pre-built search-page URL."""
        self.logger.debug("start urls: %s", self.start_urls)
        for req_url in self.start_urls:
            yield scrapy.Request(url=req_url, method="GET", callback=self.parse_data_list)

    def parse_data_list(self, response):
        """Parse a JSON search-result page and request the entry details.

        The detail API accepts a '|'-separated batch of entry ids, so all
        results of one search page are fetched with a single request.
        """
        json_res = json.loads(response.body.decode(encoding='utf-8'))
        id_list = [data['objectId'] for data in json_res['d']]
        if not id_list:
            # Nothing found on this page — skip the useless detail request.
            return
        detail_url = \
            "https://timeline-merger-ms.juejin.im/v1/get_entry_by_ids?src=web&entryIds={}".format("|".join(id_list))
        yield scrapy.Request(url=detail_url, method="GET", callback=self.parse)

    def parse(self, response):
        """Parse the batched detail response and yield one item per entry."""
        json_res = json.loads(response.body.decode(encoding='utf-8'))
        for d in json_res['d']['entrylist']:
            # Build each item in a local dict (key order preserved) instead of
            # an instance attribute, so items never share mutable state.
            item = OrderedDict()
            item['title'] = d['title']
            item['href'] = d['originalUrl']
            # Only the date part of the ISO timestamp ("YYYY-MM-DDT...") is kept.
            item['createDate'] = datetime.strptime(d['createdAt'].split('T')[0], "%Y-%m-%d")
            item['authorId'] = d['user']['username']
            item['content'] = ''
            item['tag'] = ",".join([dd['title'] for dd in d['tags']])
            item['visitorsIds'] = []
            item['visitorsNum'] = 0
            # Article type (atype) has three values:
            #   0: learning-path recommended article
            #   1: knowledge-point recommended article
            #   2: knowledge-point must-read article (published by a teacher)
            item['atype'] = 1
            item['knowledgePointId'] = ""
            item['knowledgePointName'] = ""
            item['sectionName'] = ""
            item['courseName'] = ""
            item['origin'] = "掘金"
            # Prefer a gb2312-based MD5; fall back to utf-8 for titles that
            # contain characters outside the gb2312 repertoire.
            try:
                item['titleMD5'] = hashlib.md5(d['title'].encode(encoding='gb2312')).hexdigest()
            except UnicodeEncodeError:
                item['titleMD5'] = hashlib.md5(d['title'].encode(encoding='utf-8')).hexdigest()
            item['contentMD5'] = ""
            item['recommendedId'] = ""
            item['aViewsCount'] = d['viewsCount']
            item['aCollectionCount'] = d['collectionCount']
            item['searchName'] = self.search_text
            item['remark'] = ""
            yield item
