#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2017-04-06 12:13:10
# Project: fetch_jd_com

from pyspider.libs.base_handler import *

class Handler(BaseHandler):
    """Crawler for m.jd.com: walks activity/listing pages and scrapes
    product detail pages for name and price.

    Scheduling/caching is driven by pyspider decorators:
    - ``@every``   re-seeds the crawl daily,
    - ``@config(age=...)`` treats an index page as fresh for 10 days,
    - ``@config(priority=2)`` crawls detail pages before index pages.
    """

    crawl_config = {
        # 'headers': {
        #     'User-Agent': 'GoogleBot',
        #     'Cookie': 'xxxx'
        # },
        # 'proxy': 'localhost:8080'
    }

    @every(minutes=24 * 60)
    def on_start(self):
        """Entry point, run once a day: seed the crawl with the mobile homepage."""
        self.crawl('https://m.jd.com', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        """Dispatch every absolute link on the page.

        - activity/mall pages are re-crawled as index pages,
        - ware pages go to :meth:`detail_page`,
        - other product-ish links are only logged (not crawled).
        """
        for anchor in response.doc('a[href^="http"]').items():
            link = anchor.attr.href
            if '/mall/active/' in link:
                self.crawl(link, callback=self.index_page)
            elif '/ware/' in link:
                self.crawl(link, callback=self.detail_page)
            elif 'product' in link:
                print(link)

    @config(priority=2)
    def detail_page(self, response):
        """Extract the product name and price from a detail page.

        Returns a dict with keys ``name`` (page title, site suffix stripped)
        and ``price`` (float when parseable, otherwise the raw cleaned text).
        """
        # The price lives under different CSS classes depending on the
        # promotion type (regular / seckill / PLUS member); take the first
        # selector that yields non-empty text.
        price_text = (response.doc('.yang-pic-price').text()
                      or response.doc('.seckill-price').text()
                      or response.doc('.plus-member-price-text').text()
                      or response.doc('.plus-jd-price-text').text())
        # Strip whitespace and the currency-entity remnant before parsing.
        # NOTE(review): '&yen' (no trailing ';') matches the original code;
        # confirm the page really emits the entity un-decoded.
        price_text = price_text.replace(' ', '').replace('&yen', '')
        try:
            # Only a conversion failure is expected here; the original bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit.
            price = float(price_text)
        except ValueError:
            # Not parseable (empty or localized text): keep the raw string.
            price = price_text

        return {
            "name": response.doc('title').text().replace('-京东', ''),
            "price": price
        }

    # def on_result(self, result):
    #     print(result)
    #     # to save result

