# -*- coding: utf-8 -*-
"""
爬取策略：
1.get请求ajax
方案一：
页数：40条
时间：2855
图片: 6222
并发量：16(并发数不够)
延时时间：0
处理时间：131 item/min

方案二：
页数：40条
时间：3465
图片: 6286
并发量：32(98)
延时时间：0
处理时间：110 item/min
"""
import time
import json
import re
import scrapy
from scrapy_redis.spiders import RedisSpider

from tuniu.Tools.extract__words import Words
from tuniu.items import TuniuItem
import logging


class TuniuSpider(RedisSpider):
    """Distributed spider for tuniu.com travel notes.

    Seed URLs are pushed to the ``tuniu:start_urls`` Redis list, e.g.:
    http://trips.tuniu.com/travels/index/ajax-list?sortType=2&page=1&limit=10
    """
    name = 'tuniu'
    redis_key = 'tuniu:start_urls'

    def __init__(self, *args, **kwargs):
        # Allow crawl domain(s) to be supplied at launch time:
        #   scrapy crawl tuniu -a domain=tuniu.com,trips.tuniu.com
        domain = kwargs.pop('domain', '')
        self.allowed_domains = list(filter(None, domain.split(',')))
        super(TuniuSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        """Parse one page of the ajax listing.

        Yields a detail-page Request per row (partially-filled item travels
        in ``meta['basic']``) and a Request for the next listing page until
        the API returns an empty row set.
        """
        # Current page number is embedded in the request URL (?page=N).
        page_match = re.search(r'page=(\d+)', response.url)
        if page_match is None:
            self.logger.error('no page parameter in listing url: %s', response.url)
            return
        now_page = int(page_match.group(1))

        # Decode and parse the ajax JSON payload; bail out (with a log line,
        # not a silent print) if the payload is malformed or reshaped.
        body = response.body.decode('UTF-8', errors='ignore')
        try:
            rows = json.loads(body)['data']['rows']
        except (ValueError, KeyError, TypeError) as exc:
            self.logger.error('failed to parse listing %s: %s', response.url, exc)
            return

        for row in rows:
            try:
                item = TuniuItem()
                item['title'] = row['name'].strip()
                item['views'] = row['viewCount']
                item['like'] = row['likeCount']
                item['link'] = 'http://www.tuniu.com/trips/' + str(row['id'])
                item['summary'] = row['summary'].strip()
            except (KeyError, AttributeError) as exc:
                # Skip a malformed row instead of aborting the whole page.
                self.logger.warning('skipping malformed row on %s: %s', response.url, exc)
                continue
            yield scrapy.Request(item['link'], callback=self.parse_detail, meta={'basic': item})

        # Pagination: an empty row list marks the end of the listing.
        # NOTE(review): sortType=1 here but the seed URL example uses
        # sortType=2 — confirm the mismatch is intentional.
        if rows:
            next_url = ('http://trips.tuniu.com/travels/index/ajax-list'
                        '?sortType=1&page={0}&limit=10'.format(now_page + 1))
            yield scrapy.Request(next_url, callback=self.parse)

    def parse_detail(self, response):
        """Fill detail-page fields on the item carried in ``meta['basic']`` and yield it."""
        item = response.meta['basic']
        extractor = Words()
        # Publication date appears as YYYY-MM-DD inside the user-detail
        # paragraph; the node can be absent, so guard before pattern-matching
        # (passing None to get_pattern_first was the likely detail-parse failure).
        user_detail = response.xpath('//p[@class="user-detail"]/text()').extract_first()
        item['created_time'] = (
            extractor.get_pattern_first(r'\d{4}-\d{2}-\d{2}', user_detail)
            if user_detail else ''
        )
        item['desc'] = '\n'.join(
            response.xpath('//div[@class="sdk-trips-text-content"]/text()').extract())
        item['image'] = '\n'.join(
            response.xpath('//img[@class="product-img"]/@data-src').extract())
        addr = response.xpath('//div[@class="poi-title"]/text()').extract_first()
        item['addr'] = addr.strip() if addr else ''
        # Scrape timestamp; time.ctime() is locale-independent wall-clock text.
        item['time'] = time.ctime()
        # logging.warn is deprecated; use the spider's own logger, and demote
        # full-item dumps from WARNING to DEBUG.
        self.logger.debug('scraped item: %s', item)

        yield item
