# -*- coding: utf-8 -*-
import random
import scrapy, time, json

class YidianSpider(scrapy.Spider):
    """Crawl 163.com (NetEase) channel "hot news" JSONP feeds.

    Pipeline per article:
      1. ``start_requests`` — one request per channel feed URL.
      2. ``parse``          — unwrap the ``data_callback(...)`` JSONP payload,
                              follow each article's detail page.
      3. ``detail_parse``   — scrape title/author, then query the comment API.
      4. ``parse_comment``  — attach comment count and a synthetic browse count.
    """

    name = 'wangyi_channel'
    # Base URL of NetEase's public comment API; the article doc id
    # (taken from the article URL) is appended in detail_parse.
    comment_url = "https://comment.api.163.com/api/v1/products/a2869674571f77b5a0867c3d71db5856/threads/"

    def start_requests(self):
        """Yield one request per channel feed, tagged with its category id."""
        # Keys are internal site_classify_id values; values are the channel
        # "datalist" JSONP endpoints.
        urls = {
            '447': "http://fashion.163.com/special/002688FE/fashion_datalist.js?callback=data_callback",
            '450': 'http://tech.163.com/special/00097UHL/tech_datalist.js?callback=data_callback',
            '451': 'http://edu.163.com/special/002987KB/newsdata_edu_hot.js?callback=data_callback',
            '453': 'http://baby.163.com/special/003687OS/newsdata_hot.js?callback=data_callback',
            '460': 'http://fashion.163.com/special/002688FE/fashion_datalist.js?callback=data_callback',
            '461': 'http://sports.163.com/special/000587PR/newsdata_n_index.js?callback=data_callback',
            '464': 'http://travel.163.com/special/00067VEJ/newsdatas_travel.js?callback=data_callback',
            '465': 'http://money.163.com/special/00259BVP/news_flow_index.js?callback=data_callback',
        }
        for cate, url in urls.items():
            item = {'site_classify_id': cate, 'url': url}
            yield scrapy.Request(url, meta={'item': item})

    def parse(self, response):
        """Unwrap the JSONP feed and schedule a detail request per article."""
        cate = response.meta['item']['site_classify_id']
        # Strip the JSONP wrapper. BUGFIX: the previous code used
        # ``.replace(')', '')`` which removed EVERY ')' in the payload,
        # corrupting any title or URL that contained one. Only the wrapper's
        # own parentheses are stripped here.
        body = response.text.strip()
        prefix = 'data_callback('
        if body.startswith(prefix):
            body = body[len(prefix):]
        body = body.rstrip().rstrip(';').rstrip(')')
        # SECURITY: eval() on remote content executes arbitrary code if the
        # server response is ever malicious. The payload is a JS array
        # literal, so it is kept as-is here; consider a proper JSONP/JSON
        # parser instead. TODO(review): verify payloads are json.loads-able.
        con = eval(body)
        for data in con:
            article = {
                'site_id': "4",  # site category (fixed for NetEase)
                'site_classify_id': cate,
            }
            # Feed timestamps look like "MM/DD/YYYY HH:MM:SS"; normalize the
            # date part to "YYYY-MM-DD" and keep the clock part untouched.
            raw_time = data.get('time')
            date_part, clock = raw_time.split(' ')[0], raw_time.split(' ')[1]
            month, day, year = date_part.split('/')
            article['publish_time'] = '-'.join([year, month, day]) + ' ' + clock
            article['url'] = data.get('docurl')
            # Photo-gallery pages use a different DOM layout; skip them.
            if 'photoview' in article['url']:
                continue
            article['spider_time'] = time.strftime("%Y-%m-%d %X", time.localtime())
            yield scrapy.Request(article['url'], callback=self.detail_parse,
                                 meta={'item': article})

    def detail_parse(self, response):
        """Scrape title/author from the article page, then fetch comments."""
        item = response.meta['item']
        item['title'] = response.xpath("//div[@id='epContentLeft']/h1/text()").extract_first()
        item['author'] = response.xpath("//div[@class='post_time_source']/a/text()").extract_first()
        # Pages without the expected title node (video/special layouts) are
        # dropped: no comment request is issued, so no item is ever yielded.
        if item['title']:
            # The comment-API thread id is the article URL's basename
            # without its ".html" suffix.
            doc_id = item['url'].split('/')[-1].replace('.html', '')
            yield scrapy.Request(self.comment_url + doc_id,
                                 callback=self.parse_comment, meta={'item': item})

    def parse_comment(self, response):
        """Attach the comment count and a synthesized browse count, yield item."""
        item = response.meta['item']
        con = json.loads(response.text)
        # BUGFIX: 'tcount' may be absent (None), in which case the old code
        # crashed on int(None); treat absent/None as 0.
        tcount = con.get('tcount') or 0
        item['comment_count'] = tcount
        if tcount != 0:
            # Browse count is not exposed by the API; fake one proportional
            # to the comment count.
            item['browse_count'] = int(tcount) * random.randint(100, 300)
        else:
            item['browse_count'] = random.randint(100, 1000)
        yield item
