# -*- coding: utf-8 -*-
import random

import scrapy, time, json,re


class YidianSpider(scrapy.Spider):
    """Crawl yidianzixun.com channel pages and yield article items.

    Flow: ``parse`` fans out one request per configured channel,
    ``parse_page_links`` scrapes article hrefs off the channel page, and
    ``parse_detail`` builds the final item dict for each article page.
    """

    name = 'yidian_channel'
    start_urls = ['http://yidianzixun.com/']

    # Throttle at the framework level. The previous implementation called
    # time.sleep(1) inside a callback, which blocks the single-threaded
    # Twisted reactor and stalls EVERY in-flight request, not just this one.
    custom_settings = {'DOWNLOAD_DELAY': 1}

    # Maps our internal site_classify_id -> channel path fragment appended
    # to http://www.yidianzixun.com/channel/
    item = {
        "516": 'c3',
        '517': 'w/影视?searchword=影视',
        "518": 's10671',
        "519": 'c6',
        "520": 'w/教育?searchword=教育',
        "522": 'w/育儿?searchword=育儿',
        "523": 'w/社会?searchword=社会',
        "524": 'w/历史?searchword=历史',
        "525": 'w/情感?searchword=情感',
        "526": 'c11',
        "527": 'c22',
        "528": 'w/美食?searchword=美食',
        "529": 'w/时尚?searchword=时尚',
        "530": 'c2',
        "531": 'c16',
        "532": 'w/职场?searchword=职场',
        "533": 'c17',
        "534": 'c5',
        "535": 'sc20',
        "536": 'w/房产?searchword=房产',
        "537": 'w/三农?searchword=三农',
    }

    # Mobile UA + referer so the site serves the mobile HTML these
    # XPath/regex extractors were written against.
    headers = {
        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
        "Referer": "http://www.yidianzixun.com/",
    }

    def parse(self, response):
        """Yield one channel-page request per entry in ``self.item``."""
        for channel, channel_path in self.item.items():
            url = "http://www.yidianzixun.com/channel/" + channel_path
            yield scrapy.Request(url=url, callback=self.parse_page_links,
                                 headers=self.headers,
                                 meta={"channel": channel})

    def parse_page_links(self, response):
        """Scrape article links from a channel page and request each one."""
        channel = response.meta['channel']
        # response.text is already decoded with the charset the response
        # declares — safer than hard-coding utf-8 on response.body.
        article_links = re.findall(r'href="(.*?)"', response.text)
        # A channel page can list the same article href several times;
        # de-duplicate (order-preserving) so we don't issue redundant requests.
        seen = set()
        for article_link in article_links:
            if 'article' not in article_link or article_link in seen:
                continue
            seen.add(article_link)
            item = {
                'url': "http://www.yidianzixun.com" + article_link,
                'site_classify_id': channel,
            }
            yield scrapy.Request(url=item['url'], callback=self.parse_detail,
                                 headers=self.headers, meta={'item': item})

    def parse_detail(self, response):
        """Build the article item from a detail page; skip video pages."""
        item = response.meta['item']
        # Guard clause: video articles are out of scope.
        if "<video" in response.text:
            return
        item['title'] = response.xpath(
            "//div[@class='container']/h3/text()").extract_first()
        item['spider_time'] = time.strftime("%Y-%m-%d %X", time.localtime())
        item['site_id'] = "6"  # site classification id
        # Synthesize a publish time 5-17 minutes in the past.
        item['publish_time'] = time.strftime(
            "%Y-%m-%d %X",
            time.localtime(time.time() - random.randint(300, 1000)))
        item['author'] = response.xpath(
            "//div[@class='source imedia']/text()").extract_first()
        # Run the comment-count XPath once instead of the previous duplicated
        # extract_first() calls on the same selector.
        comment_text = response.xpath(
            "//div[@class='comment-count']/text()").extract_first()
        item['comment_count'] = comment_text.replace('条评论', '') if comment_text else '0'
        if item['comment_count'] != '0':
            # Estimate views proportional to comment volume.
            item['browse_count'] = int(item['comment_count']) * random.randint(100, 300)
        else:
            item['browse_count'] = random.randint(100, 1000)
        # Pages without a parsable title are dropped silently.
        if item['title']:
            yield item
