#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2018-10-19 14:39:07
# Project: m_jd_com

import json
import html
import time
import random
import requests
from pyspider.libs.base_handler import *
from pyquery import PyQuery as pq

from src.mysqldb import SQL
from src.headers_switch import HeadersSelector
from src.utils import download_img
from src.utils import md5
from src.utils import oss
from src.utils import get_proxy

class Handler(BaseHandler):
    """Pyspider crawler for JD mobile product pages (item.m.jd.com).

    Every 5 minutes ``on_start`` schedules one JS-rendered fetch per
    product id in ``GOODS_IDS``; ``detail_page`` extracts the product
    fields and mirrors all images to OSS; ``on_result`` upserts the
    record into MySQL keyed by the page url.
    """

    crawl_config = {
        'itag': 'v0.0.6'
    }

    # Raw cookie header string in 'k1=v1; k2=v2' form; empty means the
    # crawler runs anonymously.
    cookies = ''

    # JD product ids to crawl — the numeric part of
    # https://item.m.jd.com/product/<id>.html
    GOODS_IDS = [
        '1060848',
        '1069555',
        '1135611',
        '1140623',
        '1195551',
        '1443618',
        '1593516',
        '2854388',
        '3035227',
        '3749089',
        '4052525',
        '4279806',
        '4331151',
        '4331155',
        '4595003',
        '4853850',
        '4992809',
        '5001213',
        '5089237',
        '5089253',
        '5089267',
        '5089273',
        '5114935',
        '5159242',
        '5225346',
        '5464265',
        '5544038',
        '5544068',
        '6055066',
        '6072622',
        '6325084',
        '6384536',
        '6455001',
        '6494556',
        '6558346',
        '6600258',
        '6703015',
        '6733026',
        '6946627',
        '6946635',
        '6949475',
        '7154067',
        '7225398',
        '7296396',
        '7333907',
        '7348367',
        '7429917',
        '7437768',
        '7479804',
        '7643003',
        '7651927',
        '7652151',
        '1014314835',
        '1424211542',
        '1528966665',
        '1756380782',
        '12157971534',
        '13975710992',
        '16610969975',
        '17017465829',
        '17139854836',
        '18536333736',
        '21145091102',
        '26040604707',
        '26690365578',
        '28327659312',
        '28360461108',
        '28711624206',
        '30447515374',
    ]

    def get_cookies(self):
        """Parse ``self.cookies`` ('k1=v1; k2=v2') into a dict.

        Returns an empty dict when no cookie string is configured.
        (The original version returned unconditionally before the loop,
        leaving the parser as dead code.)
        """
        cookie = {}
        if not self.cookies:
            return cookie
        for line in self.cookies.split(';'):
            # maxsplit=1 so cookie values may themselves contain '='.
            key, value = line.split('=', 1)
            cookie[key.strip()] = value.strip()
        return cookie

    @every(minutes=5)
    def on_start(self):
        """Schedule a JS-rendered crawl of every product page."""
        # NOTE(review): this mutates the class-level crawl_config shared
        # by all tasks; confirm a single rotating proxy is intended.
        self.crawl_config['proxy'] = get_proxy()
        header_slt = HeadersSelector()
        headers = header_slt.select_header()  # pick a fresh header set
        headers['Referer'] = 'https://m.jd.com/'
        # Click the second tab under #detailTab after page load so the
        # lazy description block (#commDesc) is rendered before scraping.
        js_script = """
            function() {
                setTimeout(function() {$($('#detailTab .J_ping')[1]).click()}, 1000);
            }
        """
        for gid in self.GOODS_IDS:
            save = {
                'gid': gid,
            }
            url = 'https://item.m.jd.com/product/%s.html' % (gid, )
            self.crawl(url, fetch_type='js', js_script=js_script,
                       callback=self.detail_page, save=save, headers=headers,
                       connect_timeout=5000, timeout=20000)

    @config(priority=2)
    def detail_page(self, response):
        """Extract product fields from a rendered detail page.

        Mirrors gallery images, inline detail images, and the shop logo
        to OSS, then returns the record dict consumed by ``on_result``.
        """
        header_slt = HeadersSelector()
        headers = header_slt.select_header()  # pick a fresh header set
        headers['Referer'] = 'https://m.jd.com/'
        doc = response.doc
        gid = response.save.get('gid', '0')

        # BUG FIX: gid is a string, so the original '%d' placeholder
        # raised TypeError on every page; '%s' formats it correctly.
        subpath = 'goods_jd/gid_%s/%s'
        pic_list = ([doc('#firstImg').attr('src')] +
                    [img.attr('back_src') for img in doc('.pic_list li img').items()])
        # Mirror gallery images to OSS, skipping entries with no src.
        pic_list2 = [oss(subpath % (gid, 'pic_list'), img, headers=headers)
                     for img in pic_list if img is not None]

        # Rewrite lazy-loaded detail images to point at their OSS copies.
        detail = doc('#commDesc').html()
        ddoc = pq(detail)
        for img in ddoc('img').items():
            img_url = oss(subpath % (gid, 'detail'), img.attr('item_init_src'), headers=headers)
            img.attr('src', img_url)
            img.remove_attr('item_init_src')

        shop_logo = oss(subpath % (gid, 'shop_logo'), doc('#shopLogo').attr('src'), headers=headers)
        detail2 = doc('#detail2').html()

        goods = {
            "url": response.url,
            "title": doc('title').text(),
            "pic_list": json.dumps(pic_list2),
            "price_sale": doc('#priceSale').text(),
            "eval_rate": doc('#evalRate').text(),
            "shop_logo": shop_logo,
            "shop_name": doc('#shopInfo .name ._n').text(),
            "shop_fans": [item for item in doc('#shopBaseInfo .tab_item .num').items()][0].text(),
            "detail": html.escape(ddoc.html()),
            "specification": html.escape(detail2),
        }
        return goods

    def on_result(self, result):
        """Persist a scraped record; rows are keyed by ``url``."""
        print(result)
        # .get() avoids a KeyError when a result dict lacks 'url'.
        if not result or not result.get('url'):
            return
        sql = SQL()
        # NOTE(review): string-built WHERE clause is SQL-injectable if a
        # url ever contains a quote; prefer a parameterized query if the
        # SQL helper supports one.
        where = "`url`='%s'" % (result['url'], )
        sql.update('jjb_goods_jd', where, result)
