#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: xaoyaoyao
@contact: xaoyaoyao@aliyun.com
@file: techweb_spider.py
@time: 2018/08/25
"""

import random

from scrapy.http import Request
from scrapy.spiders import CrawlSpider
from scrapy.linkextractors import LinkExtractor
from article_spider.items import ArticleItem

class TechwebSpider(CrawlSpider):
    """Crawl TechWeb category listing pages and scrape individual articles.

    ``parse`` extracts article detail links from each listing page in
    ``start_urls`` and schedules them for ``parse_detail``, which yields one
    populated :class:`ArticleItem` per article page.

    NOTE(review): this spider overrides ``parse`` and defines no ``rules``;
    ``CrawlSpider`` reserves ``parse`` for its rule-following machinery, so
    subclassing plain ``scrapy.spiders.Spider`` would be more appropriate —
    confirm nothing else depends on the CrawlSpider base before changing.
    """
    name = 'techweb'
    allowed_domains = ['techweb.com.cn']
    start_urls = [
        'http://www.techweb.com.cn/news/',
        'http://people.techweb.com.cn/',
        'http://www.techweb.com.cn/internet/',
        'http://www.techweb.com.cn/it/',
        'http://www.techweb.com.cn/tele/',
        'http://www.techweb.com.cn/finance/',
        'http://www.techweb.com.cn/yuanchuang/',
        'http://www.techweb.com.cn/mobile',
        'http://mi.techweb.com.cn/',
        'http://mo.techweb.com.cn/',
        'http://app.techweb.com.cn/',
        'http://www.techweb.com.cn/smarthome/',
        'http://www.techweb.com.cn/shoujiyouxi/',
        'http://www.techweb.com.cn/guide/',
        'http://www.techweb.com.cn/onlinegame/',
        'http://www.techweb.com.cn/esports/',
        'http://www.techweb.com.cn/onlinegamenews/#wp',
        'http://www.techweb.com.cn/onlinegameguide/#wp',
        'http://www.techweb.com.cn/onlinegamerim/#wp',
        'http://www.techweb.com.cn/onlinegameesports/#wp',
        'http://www.techweb.com.cn/gamenews/#wp',
        'http://www.techweb.com.cn/gameguide/#wp',
        'http://www.techweb.com.cn/gamereview/#wp',
        'http://www.techweb.com.cn/gamerim/#wp',
        'http://www.techweb.com.cn/gameesports/#wp',
    ]

    def parse(self, response):
        """Extract article links from a listing page and schedule detail requests.

        Yields one :class:`scrapy.http.Request` per article link found under
        the ``list_con`` container, routed to :meth:`parse_detail`.
        """
        self.logger.info('This is an item page! %s', response.url)
        # Pagination is intentionally disabled: the daily crawl only needs
        # the first page of each listing, so next-page links are not followed.
        try:
            extractor = LinkExtractor(
                restrict_xpaths='//div[@class="list_con"]/div[@class="picture_text"]/div[@class="text"]/a')
            # extract_links returns [] when nothing matches, so no guard needed.
            for detail_link in extractor.extract_links(response):
                yield Request(url=detail_link.url, callback=self.parse_detail)
        except Exception as e:
            self.logger.error("the link detail error. The msg %s", str(e))

    def parse_detail(self, response):
        """Scrape a single article page into an :class:`ArticleItem`.

        Missing fields fall back to ``''`` (``None`` for the date) so the
        item is always yielded even for partially-matching pages.
        """
        article_item = ArticleItem()
        current_url = response.url
        self.logger.info('This is an item page! %s', current_url)
        article_item['url'] = current_url
        # Title: first <h1> text under the main content column, '' if absent.
        rt_title = response.css('div.content div.main_c h1::text').extract()
        article_item['title'] = rt_title[0] if rt_title else ''
        # Content: one line per <p>, stripping full-width spaces (U+3000)
        # used as Chinese paragraph indentation. Single join instead of the
        # original quadratic += / whole-string replace per iteration —
        # the resulting string is identical.
        rt_content = response.css('div.main_c #content p::text').extract()
        article_item['content'] = ''.join(
            str(c_text).replace('　', '') + '\n' for c_text in rt_content)
        # Author: first matching span text, '' if absent.
        rt_author = response.css('div.article_info div.infos span.author::text').extract()
        article_item['author'] = rt_author[0] if rt_author else ''
        # Publication date: raw text as shown on the page, None if absent.
        rt_date = response.css('div.article_info div.infos span.time::text').extract()
        _date = rt_date[0] if rt_date else None
        # Category: second breadcrumb entry (the first is the site root).
        # Bug fix: the original indexed [1] after checking only len > 0,
        # raising IndexError on a single-entry breadcrumb.
        rt_type = response.css('div.breadnav a::text').extract()
        article_item['type'] = rt_type[1] if len(rt_type) > 1 else ''
        # NOTE(review): trailing space in the source name preserved from the
        # original — confirm whether it is intentional before removing.
        article_item['name'] = 'TechWeb '
        article_item['date'] = _date
        # Grade is a random placeholder score in [88, 95].
        article_item['grade'] = random.randint(88, 95)
        article_item['domain'] = 'techweb.com.cn'
        self.logger.info('article_item >> %s', article_item)
        yield article_item