# -*- coding: utf-8 -*-
import time,random
import scrapy,re
import json,urllib.parse

# Pool of desktop Chrome User-Agent strings. SohuSpider picks one at random
# for every outgoing request (headers={"User-Agent": random.choice(chrome)}),
# presumably to make the traffic look less uniform — a common anti-blocking
# measure, though these UA versions are quite dated.
chrome = [
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36" ,
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36",
]

class SohuSpider(scrapy.Spider):
    """Scrape article listings from a fixed set of Jianshu channels.

    Flow: ``start_requests`` seeds one request per channel URL,
    ``parse`` extracts article links from the channel listing page, and
    ``detail_parse`` pulls per-article metadata (publish time, author,
    view/comment counts) and yields the final item dict.

    NOTE(review): the class is named ``SohuSpider`` but ``name`` is
    ``jianshu_channel`` and all URLs are jianshu.com — the class name
    looks like a copy-paste leftover; kept as-is so external references
    don't break.
    """
    name = 'jianshu_channel'

    def start_requests(self):
        """Seed one request per channel; the channel id travels in meta."""
        # Maps our internal site_classify_id -> Jianshu channel URL.
        urls = {
            '470':"https://www.jianshu.com/c/e50258a6a44b",  # entertainment
            '471': 'https://www.jianshu.com/c/3041305e9e0b',
            '472': 'https://www.jianshu.com/c/5cc4460bb4a0',
            '473': 'https://www.jianshu.com/c/93d58e9169cb',
            '474': 'https://www.jianshu.com/c/un8RTj',
            '475': 'https://www.jianshu.com/c/f6b4ca4bb891',
            '476': 'https://www.jianshu.com/c/70b8514fb442',
            '478': 'https://www.jianshu.com/c/4cba94111ae4',
            '479': 'https://www.jianshu.com/c/GQ5FAs',
            '480': 'https://www.jianshu.com/c/c3db005e8f80',
            '481': 'https://www.jianshu.com/c/0856231c8e98',
            '482': 'https://www.jianshu.com/c/qqfxgN',
            '483': 'https://www.jianshu.com/c/5c53e46ed869',
            '484': 'https://www.jianshu.com/c/8b493334965e',
            '485': 'https://www.jianshu.com/c/YZRvCb',
            '486': 'https://www.jianshu.com/c/Jgq3Wc',
            '487': 'https://www.jianshu.com/c/5AUzod',
            '488': 'https://www.jianshu.com/c/b591324d6443',
            '489': 'https://www.jianshu.com/c/cc58f5ea95df',
            '490': 'https://www.jianshu.com/c/f47874e6e993',
            '491': 'https://www.jianshu.com/c/7d98bc59c940',
        }
        for cate, url in urls.items():
            item = {'site_classify_id': cate, 'url': url}
            # callback=self.parse is Scrapy's default; made explicit for clarity.
            yield scrapy.Request(url, callback=self.parse, meta={'item': item},
                                 headers={"User-Agent": random.choice(chrome)})

    def parse(self, response):
        """Extract article links from a channel listing page and follow each."""
        cate = response.meta['item']['site_classify_id']
        for anchor in response.xpath("//div[@class='content']/a"):
            href = anchor.xpath('./@href').extract_first()
            if not href:
                # FIX: extract_first() can return None; urljoin(base, None)
                # would raise TypeError. Skip anchors without an href.
                continue
            item = {
                'url': urllib.parse.urljoin(response.url, href),
                'title': anchor.xpath('./text()').extract_first(),
                'spider_time': time.strftime("%Y-%m-%d %X", time.localtime()),
                'site_id': "104",  # site category
                'site_classify_id': cate,
            }
            yield scrapy.Request(item['url'], callback=self.detail_parse,
                                 meta={'item': item},
                                 headers={"User-Agent": random.choice(chrome)})

    def detail_parse(self, response):
        """Fill in per-article metadata and yield the finished item.

        browse_count / comment_count come from regexes over the raw page
        source (embedded JSON), so they are set only when the pattern
        matches.
        """
        item = response.meta['item']
        publish_time = response.xpath(
            "//span[@class='publish-time']/text()").extract_first()
        # FIX: extract_first() may return None when the span is missing;
        # calling .replace on it crashed and lost the item.
        if publish_time:
            # Page shows e.g. "2018.01.02 12:34"; normalize to "2018-01-02 12:34:00".
            item['publish_time'] = publish_time.replace('.', '-') + ":00"
        item['author'] = response.xpath(
            "//span[@class='name']/a/text()").extract_first()
        views_count = re.findall(r'views_count":([^,]+)', response.text)
        if views_count:
            item['browse_count'] = views_count[0]
        comment_count = re.findall(r'comments_count":([^,]+)', response.text)
        if comment_count:
            item['comment_count'] = comment_count[0]
        # FIX: yield was nested inside the comment_count branch, silently
        # dropping every item whose page lacked a comments_count match.
        yield item

