# -*- coding: utf-8 -*-

import scrapy
from ..items import JdItem
from bs4 import BeautifulSoup
import json


class JdSpider(scrapy.Spider):
    """Crawl JD.com search results per keyword and emit one JdItem per product.

    Flow:
      start_requests -> parse            (read total page count for a keyword)
      parse          -> parse_and_next   (first half of each result page)
      parse_and_next -> parse_content    (lazy-loaded second half, if present)
      both           -> parse_detail_comment (authoritative comment count)
    """

    name = 'jd_detail'
    # The spider hits search.jd.com and club.jd.com; the original value
    # 'www.jd.com' would have let OffsiteMiddleware drop those requests
    # were it not for dont_filter=True on every one of them.
    allowed_domains = ["jd.com"]
    search_url = 'https://search.jd.com/Search?keyword={key}&enc=utf-8'
    search_url1 = 'https://search.jd.com/Search?keyword={key}&enc=utf-8&page={page}'
    search_url2 = 'https://search.jd.com/s_new.php?keyword={key}&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page={page}' \
                  '&s=3721&click=0'
    search_url3 = 'https://search.jd.com/s_new.php?keyword={key}&enc=utf-8&page={page}&s=26&scrolling=y&pos=30' \
                  '&tpl=3_L&show_items={goods_items}'
    # JSON endpoint whose productCommentSummary.commentCount is the
    # authoritative comment total for one goods id.
    search_detail_comment_number_url = 'http://club.jd.com/productpage/p-{goods_id}-s-0-t-3-p-0.html'

    # search.jd.com rejects requests without a same-site Referer.
    headers = {
        'Referer': 'https://search.jd.com'
    }

    def start_requests(self):
        """Issue one first-page search request per hard-coded keyword."""
        keys = ['面膜', '洗面奶', '花露水', '化妆棉', '眼霜', '杏仁', '饼干', '薯片', '麦片', '牛肉干', '巧克力', '红酒',
                '伏特加', '金酒', '茅台', '啤酒', '牛奶', '鸡蛋', '猪肋骨', '鱼', '土鸡', '牛排', '苹果', '橙子', '速冻水饺',
                'NFC果汁', '玉米油', '米', '盐', '糖', '酱油', '醋', '卷纸', '抽纸', '沐浴露', '洗发水', '洗洁精', '洗衣粉',
                '香皂', '保鲜膜', '保鲜袋', '卫生巾', '牙膏', '牙刷', '毛巾']
        for key in keys:
            yield scrapy.Request(url=self.search_url.format(key=key),
                                 callback=self.parse,
                                 headers=self.headers,
                                 meta={'key': key})

    def parse(self, response):
        """Read the total page count and fan out over every result page.

        JD numbers its AJAX half-pages 1,3,5,... for the first halves, so
        logical page ``i`` maps to request page ``2*i - 1``; meta['page']
        carries the matching even number for the second-half request.
        """
        key_word = response.meta['key']
        bs = BeautifulSoup(response.text, 'lxml')
        total_page_num = bs.find('span', class_='fp-text').find('i').get_text()
        print(total_page_num)
        for i in range(1, int(total_page_num) + 1):
            yield scrapy.Request(url=self.search_url2.format(key=key_word, page=str(2 * i - 1)),
                                 callback=self.parse_and_next,
                                 headers=self.headers,
                                 meta={'key': key_word, 'page': 2 * i},
                                 dont_filter=True)

    def parse_and_next(self, response):
        """Handle a first-half page: request the lazy-loaded second half
        (when the first half is full, i.e. 30 items) and schedule a
        comment-count request for every product on this half."""
        key_word = response.meta['key']
        page = response.meta['page']
        soup = BeautifulSoup(response.text, 'lxml')
        # A full first half (30 items) implies a second half exists at the
        # even page number currently held in meta['page'].
        if len(soup.find_all('li', class_='gl-item')) == 30:
            yield scrapy.Request(url=self.search_url2.format(key=key_word, page=str(page)),
                                 callback=self.parse_content,
                                 headers=self.headers,
                                 meta={'key': key_word, 'page': page + 1},
                                 dont_filter=True)
        # meta['page'] is one ahead of the logical page this response covers.
        for request in self._goods_requests(soup, key_word, page - 1):
            yield request

    def parse_content(self, response):
        """Handle a second-half page: schedule comment-count requests only."""
        soup = BeautifulSoup(response.text, 'lxml')
        for request in self._goods_requests(soup, response.meta['key'],
                                            response.meta['page'] - 1):
            yield request

    def _goods_requests(self, soup, key_word, page):
        """Yield one comment-count request per product node in *soup*.

        Shared by parse_and_next/parse_content (the original duplicated
        this loop verbatim in both callbacks).
        """
        for node in soup.find_all('li', class_='gl-item', attrs={'data-sku': True}):
            comment_text = node.find(class_='p-commit').find('a').get_text()
            comment_text = comment_text.replace('+', '')
            if u'万' in comment_text:
                # Listing shows e.g. '1.2万' for 12000 comments.
                comment_number = 10000 * float(comment_text.replace(u'万', ''))
            else:
                comment_number = comment_text
            goods_id = node.attrs['data-sku']
            yield scrapy.Request(url=self.search_detail_comment_number_url.format(goods_id=str(goods_id)),
                                 callback=self.parse_detail_comment,
                                 meta={'key': key_word, 'page': page,
                                       'goods_id': goods_id,
                                       'comment_number': int(comment_number)},
                                 dont_filter=True)

    def parse_detail_comment(self, response):
        """Build the final item, preferring the detail endpoint's comment
        count and falling back to the listing-page count on bad JSON."""
        key_word = response.meta['key']
        page = response.meta['page']
        goods_id = response.meta['goods_id']
        comment_number = response.meta['comment_number']
        try:
            res = json.loads(response.text)
            res_comment_number = int(res['productCommentSummary']['commentCount'])
        except (ValueError, KeyError, TypeError):
            # Malformed/blocked response: keep the listing-page count.
            res_comment_number = comment_number
        item = JdItem()
        item['comment_number'] = comment_number
        item['comment_number_detail'] = res_comment_number
        item['goods_id'] = goods_id
        item['category'] = key_word
        item['goods_page'] = str(page)
        # BUG FIX: the original compared item['comment_number'] against
        # comment_number — the very value just assigned to it — so the
        # branch never fired. The intent is to flag products whose
        # detail-page count is LOWER than the listing-page count.
        if res_comment_number < comment_number:
            print(item['goods_id'], comment_number, res_comment_number)
        yield item
