# -*- coding: UTF-8 -*-
"""
Created on 2018年5月4日
@author: Leo
"""

# Standard library
import re
import json
import hashlib
from datetime import datetime
from collections import OrderedDict

# Third-party
import scrapy


class JianShuSpider(scrapy.Spider):
    """Spider that searches jianshu.com and yields one OrderedDict per hit.

    Flow: GET each HTML search page to read its CSRF token, then POST to the
    JSON search endpoint for the same page and parse the ``entries`` array.

    Required constructor kwargs (e.g. ``scrapy crawl Jianshu -a text=... -a total_page=...``):
        text:       search keyword.
        total_page: number of result pages to fetch (int or numeric string).
    """
    name = "Jianshu"

    def __init__(self, **kwargs):
        # Required arguments — a missing key raises KeyError immediately,
        # which is clearer than failing later mid-crawl.
        self.search_text = kwargs['text']
        self.page = kwargs['total_page']

        self.url_prefix = "https://www.jianshu.com/search"

        # For each page, build both the JSON endpoint (POST target) and the
        # HTML page that carries the CSRF token required by that POST.
        # start_urls[n] and get_token_url[n] refer to the same result page.
        self.start_urls = []
        self.get_token_url = []
        for page in range(1, int(self.page) + 1):
            self.start_urls.append(
                "{}/do?q={}&type=note&page={}&order_by=default".format(
                    self.url_prefix, self.search_text, page))
            self.get_token_url.append(
                "{}?q={}&page={}&type=note".format(
                    self.url_prefix, self.search_text, page))

        super().__init__(**kwargs)

    # Request layer
    def start_requests(self):
        """Fetch every HTML search page first; parse_token reads its CSRF token."""
        self.logger.debug("token URLs: %s", self.get_token_url)
        for n, token_url in enumerate(self.get_token_url):
            # meta['n'] links this token page back to its JSON endpoint.
            yield scrapy.Request(url=token_url,
                                 method="GET",
                                 callback=self.parse_token,
                                 meta={"n": n})

    def parse_token(self, response):
        """Extract the page's CSRF token and POST to the matching JSON endpoint."""
        n = response.meta['n']

        # Token lives in <meta name="csrf-token" content="..."> in the HTML head.
        csrf_token = response.xpath(
            'string(//html/head/meta[@name="csrf-token"]/@content)').extract_first()

        header = {
            "Accept": "application/json",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.8",
            "Connection": "keep-alive",
            "Content-Length": "0",
            # NOTE(review): hard-coded session cookie from a browser capture —
            # will expire; consider moving it to settings or cookie middleware.
            "Cookie": "_ga=GA1.2.519452658.1501645416; read_mode=day; default_font=font2; signin_redirect=https%3A%2F%2Fwww.jianshu.com%2Fsearch%3Fq%3DHTML%26page%3D1%26type%3Dnote; Hm_lvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1524453845,1525340754,1525410101,1525415263; Hm_lpvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1525416391; locale=zh-CN; _m7e_session=003009c2329fac633597708035812838; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2216169b9dcfa310-0a6724d9839a7c-4a541326-2764800-16169b9dcfc3f4%22%2C%22%24device_id%22%3A%2216169b9dcfa310-0a6724d9839a7c-4a541326-2764800-16169b9dcfc3f4%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22http%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D-Ti1tD681btYFA3uW73cJLkeXO_ND3ZF-LnqsbrmMe6wW5wMYus7yZzc4rWey_90%26wd%3D%26eqid%3Dc7e752180001fca3000000065aebfd5c%22%2C%22%24latest_referrer_host%22%3A%22www.baidu.com%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%7D",
            "Host": "www.jianshu.com",
            "Origin": "https://www.jianshu.com",
            "Referer": "https://www.jianshu.com/search?q={}&page={}&type=note".format(self.search_text, self.page),
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.3964.2 Safari/537.36",
            "X-CSRF-Token": csrf_token
        }

        yield scrapy.Request(url=self.start_urls[n],
                             method="POST",
                             callback=self.parse,
                             headers=header)

    def parse(self, response):
        """Parse the JSON search response and yield one OrderedDict per article."""
        json_res = json.loads(response.body.decode("UTF-8"))
        articles = json_res['entries']

        # Matched keywords come back highlighted with HTML tags in the title;
        # a title with no tag presumably did not match in the title, so it is
        # skipped (preserves the original filtering behavior).
        tag_re = re.compile('<[^>]*>')

        for article in articles:
            if not tag_re.search(article['title']):
                continue
            item = OrderedDict()
            item['title'] = tag_re.sub('', article['title'])
            item['href'] = "http://www.jianshu.com/p/{}".format(article['slug'])
            # Keep only the date part of the ISO-8601 timestamp.
            item['createDate'] = datetime.strptime(article['first_shared_at'].split("T")[0], "%Y-%m-%d")
            item['authorId'] = article['user']['nickname']
            item['content'] = ''
            item['tag'] = self.search_text
            item['visitorsIds'] = []
            item['visitorsNum'] = 0
            # Article types (atype):
            #   0: learning-path recommended article
            #   1: knowledge-point recommended article
            #   2: knowledge-point must-read article (published by teachers)
            item['atype'] = 1
            item['knowledgePointId'] = ""
            item['knowledgePointName'] = ""
            item['sectionName'] = ""
            item['courseName'] = ""
            item['origin'] = "简书"
            # Prefer a gb2312-based MD5; titles with characters outside
            # gb2312 fall back to utf-8 (matches the downstream convention).
            try:
                item['titleMD5'] = \
                    hashlib.md5(item['title'].encode(encoding='gb2312')).hexdigest()
            except UnicodeEncodeError:
                item['titleMD5'] = \
                    hashlib.md5(item['title'].encode(encoding='utf-8')).hexdigest()
            item['contentMD5'] = ""
            item['recommendedId'] = ""
            item['aViewsCount'] = article['views_count']
            item['aCollectionCount'] = article['likes_count']
            item['searchName'] = self.search_text
            item['remark'] = ""
            yield item
