# Scraper for JD Wanxiang (京东万象)

# Some JSON payloads from the JD Wanxiang API are not standard JSON and
# cannot be parsed with Python's built-in json module, hence demjson.
import demjson

import bs4
from bs4 import BeautifulSoup

from ._base import *


class SpiderJdwx(SpiderBase):
    """Scraper for the JD Wanxiang API marketplace at wx.jdcloud.com.

    Walks the marketplace listing pages and extracts per-app metadata
    (title, tags, pricing, rating, shop, view/buy/favorite/comment counts)
    from the rendered HTML plus two JSON-ish side endpoints.
    """

    def __init__(self):
        super().__init__()
        # Base URL every relative path scraped from the site is joined onto.
        self.source_url = 'https://wx.jdcloud.com'

    def get_app_list(self, url='market', page=1):
        """Return (url_list, pages) for one listing page.

        url_list -- relative detail-page URLs of every app on the page
        pages    -- total number of listing pages, read from the
                    "尾页" (last page) pager link
        """
        r = requests.get(f'{self.source_url}/{url}/2_0/{page}')
        soup = BeautifulSoup(r.text, 'lxml')
        listing = soup.find(id='con-listTab')
        # Skip NavigableString whitespace nodes between the <li> entries.
        url_list = [item.a['href'] for item in listing.ul.contents
                    if isinstance(item, bs4.element.Tag)]
        # The pager is the first <div> child of the listing container; the
        # "last page" link's text is the total page count.
        pager = [item for item in listing.contents if item.name == 'div'][0]
        pages = int(pager.find(title='尾页').string)
        return url_list, pages

    def _get_total_count(self, path):
        """Fetch a paginated side endpoint and return its 'totalCount' as int.

        The endpoint's payload sits inside an <input value="..."> attribute
        and is not strictly valid JSON, so demjson is used instead of the
        stdlib json module.
        """
        r = requests.get(self.source_url + path)
        payload = BeautifulSoup(r.text, 'lxml').input['value']
        return int(demjson.decode(payload)['totalCount'])

    def get_app_info(self, url):
        """Scrape one app detail page (relative *url*) into a metadata dict.

        Returns a dict with keys: url, title, tags, intro, price, rating,
        label, label_url, shop, shop_url, view, buy, favorite, comment.
        """
        r = requests.get(self.source_url + url)
        soup = BeautifulSoup(r.text, 'lxml')
        info_dict = {
            'url': url,
            'title': '',
            'tags': [],
            'intro': '',
            'price': [],
            'rating': 0.0,
            'label': '',
            'label_url': '',
            'shop': '',
            'shop_url': '',
            'view': 0,
            'buy': 0,
            'favorite': 0,
            'comment': 0
        }
        # app_info[0] = stats bar (views/favorites), app_info[1] = detail pane.
        app_info = [item for item in soup.find(class_='wrap').div.div.div.contents
                    if isinstance(item, bs4.element.Tag)]
        detail = app_info[1]
        header = detail.find(class_='detail-title-new')
        info_dict['title'] = str(header.h1.string)
        info_dict['tags'] = [str(item.string) for item in header.contents
                             if item.name == 'span']
        info_dict['intro'] = str(detail.find(class_='new-intro').string).strip()
        # Pair each price figure with its billing-period label, e.g.
        # [('次/月', 9.9), ...]; whitespace inside the label is stripped.
        price_ul = detail.find(class_='new-price-show').ul
        price_num = [float(x.span.string)
                     for x in price_ul.find_all('span', class_='price-num')]
        price_times = [x.p.get_text().replace(' ', '').replace('\n', '')
                       for x in price_ul.find_all('div', class_='price-times')]
        info_dict['price'] = list(zip(price_times, price_num))
        info_list = detail.find(class_='info-list')
        # Rating text looks like "4.5 分"; drop the unit character.
        info_dict['rating'] = float(
            info_list.find(class_='dark-color').string.replace('分', '').strip())
        temp_info_list = [x for x in info_list.contents
                          if isinstance(x, bs4.element.Tag)]
        try:
            info_dict['label'] = str(temp_info_list[1].find('a').string)
        except AttributeError:
            # No <a> tag: the category label is plain text without a link.
            info_dict['label'] = str(temp_info_list[1].string)
        else:
            info_dict['label_url'] = str(temp_info_list[1].find('a')['href'])
        info_dict['shop'] = str(temp_info_list[2].find('a').string)
        info_dict['shop_url'] = str(temp_info_list[2].find('a')['href'])

        # View/favorite counts are rendered as "…(123)"; extract the number.
        info_dict['view'] = int(app_info[0].ul.li.string.split('(')[1].split(')')[0])
        info_dict['favorite'] = int(
            app_info[0].find(class_='collectNum').string.split('(')[1].split(')')[0])
        # The app id and data type are hidden inputs on the detail page and
        # parameterize the order/comment count endpoints.
        data_id = soup.find(id='id')['value']
        data_type = soup.find(id='dataType')['value']
        info_dict['buy'] = self._get_total_count(
            f'/order/orderPageList?pageNow=1&id={data_id}&type={data_type}')
        info_dict['comment'] = self._get_total_count(
            f'/comment/cpagelist?pageNow=1&dataId={data_id}&dataType={data_type}&showDefault=0')
        return info_dict
