# -*- coding: utf-8 -*-
from datetime import datetime
import json
import re

import requests
import scrapy
from bs4 import BeautifulSoup

from ..items import MyjdtestItem


class JdtestSpider(scrapy.Spider):
    """Crawl JD.com mobile-phone listings brand by brand.

    For each brand code in ``brandict`` the spider walks every listing page,
    follows each product link to its detail page, and emits one
    ``MyjdtestItem`` per product.  Price and comment counts come from JD's
    public JSON endpoints (``p.3.cn`` / ``club.jd.com``).
    """

    name = 'JdTest'
    allowed_domains = ['jd.com']
    # Current 1-based listing page within the brand being crawled.
    page = 1
    # Brand index -> JD "exbrand" filter code used in the listing URL.
    brandict = {
        0: '5F14026',   # Apple
        1: '5F16975',   # K-Touch
        2: '5F18374',   # Xiaomi
        3: '5F8557',    # Huawei
        4: '5F2032',    # OPPO
        5: '5F25591',   # vivo
        6: '5F15127',   # Samsung
        7: '5F438621',  # realme
        8: '5F21011',   # Nokia
        9: '5F8557',    # ZTE -- NOTE(review): same code as Huawei (3); confirm intended
        10: '5F180213',  # Newman
    }
    # Index of the brand currently being crawled (key into brandict).
    bid = 0
    base_url = 'https://list.jd.com/list.html?cat=9987,653,655&ev=exbrand%' + brandict[bid] + '&page=' + str(
        page) + '&sort=sort_rank_asc&trans=1&JL=6_0_0#J_main'
    start_urls = [base_url]
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
    }

    # (parameter label, item field) pairs filled by _fill_specs; every one is
    # optional and defaults to '其他' (= "other") when the label is absent.
    _SPEC_FIELDS = (
        ('CPU型号', 'd_cpuType'),
        ('运行内存', 'd_ram'),
        ('存储', 'd_rom'),
        ('后摄主摄像素', 'd_bcamera'),
        ('前摄主摄像素', 'd_fcamera'),
        ('主屏幕尺寸（英寸）', 'd_mainScreen'),
        ('分辨率', 'd_resolution'),
        ('操作系统', 'd_os'),
    )

    def _list_url(self):
        """Build the listing URL for the current brand (bid) and page."""
        return ('https://list.jd.com/list.html?cat=9987,653,655&ev=exbrand%'
                + self.brandict[self.bid] + '&page=' + str(self.page)
                + '&sort=sort_rank_asc&trans=1&JL=6_0_0#J_main')

    def parse(self, response):
        """Parse one listing page.

        Yields a request per product detail page, then either advances to the
        next listing page of the same brand or moves on to the next brand.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        total = 0  # total page count for the current brand; 0 when unknown
        if soup:
            for shop in soup.find_all('li', class_='gl-item'):
                detailpage = 'http:' + shop.a['href']
                yield scrapy.Request(detailpage, headers=self.headers,
                                     callback=self.parse_detail)
            # Pagination widget text looks like "共12页..."; extract the count.
            # Guarded: some pages render without the p-skip span.
            skips = soup.find_all('span', class_='p-skip')
            if skips:
                total = int(re.findall(r'.*共(\d+).*', skips[0].text)[0])

        if self.page < total:
            self.page += 1
        else:
            # Current brand exhausted: restart pagination on the next brand.
            self.bid += 1
            self.page = 1
        # Guard AFTER the increment so the last brand does not trigger a
        # KeyError when bid runs past the end of brandict.
        if self.bid < len(self.brandict):
            yield scrapy.Request(self._list_url(), headers=self.headers,
                                 callback=self.parse)

    @staticmethod
    def _extract_store(soup):
        """Return the shop name, or '无' (= "none") when not found.

        JD pages vary in which 'mt' block carries the store link; probe them
        in the original priority order (index 1, then 0, then 2), tolerating
        missing blocks and missing <h3>/<a> children.
        """
        blocks = soup.find_all('div', class_='mt')
        for idx in (1, 0, 2):
            try:
                link = blocks[idx].h3.a
            except (IndexError, AttributeError):
                continue
            if link:
                return link['title']
        return '无'

    @classmethod
    def _fill_specs(cls, item, text):
        """Fill the optional spec fields of *item* from the parameter text."""
        for label, key in cls._SPEC_FIELDS:
            found = re.findall('.*' + re.escape(label) + '：(.*)', text)
            item[key] = found[0] if found else '其他'

    def parse_detail(self, response):
        """Parse a product detail page into a MyjdtestItem.

        Scrapes title/brand/number/image/store from the HTML and fetches the
        price and comment count from JD's JSON side-channel endpoints.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        item = MyjdtestItem()
        if not soup:
            return None
        # Product title (strip surrounding whitespace only).
        item['p_title'] = re.sub(r'^(\s+)|(\s+)$', '',
                                 soup.find_all('div', 'sku-name')[0].text)
        # Brand, taken from the "品牌：" line of the brand parameter list.
        item['p_brand'] = re.findall(
            r'.*品牌：[\s](.*).*',
            soup.find_all('ul', id='parameter-brand')[0].li.text)[0]
        # Product number ("商品编号") from the general parameter list; it also
        # keys the price, comment, and canonical-URL lookups below.
        params = soup.find_all('ul', class_=['parameter2', 'p-parameter-list'])[-1].text
        p_number = re.findall(r'.*商品编号：(\d+).*', params)[0]
        item['p_number'] = p_number
        # Main product image: JD serves the URL either in 'src' or (lazy-load)
        # in 'data-origin'; check the attributes directly.
        img = soup.find_all('div', id='spec-n1')[0].img
        if img is not None:
            if img.has_attr('src'):
                item['p_img'] = 'https:' + img['src']
            elif img.has_attr('data-origin'):
                item['p_img'] = 'https:' + img['data-origin']
        # Price comes from a separate JSON endpoint keyed by sku id.
        price_url = 'https://p.3.cn/prices/get?skuid=J_' + p_number
        price_con = requests.get(price_url, headers=self.headers).text
        item['p_price'] = json.loads(price_con)[0]['p']
        # Canonical product link.
        item['p_href'] = 'https://item.jd.com/' + p_number + '.html'
        item['p_plate'] = '京东'
        item['p_store'] = self._extract_store(soup)
        # Comment count from the product-page JSON endpoint.
        comment_url = ('https://club.jd.com/productpage/p-' + p_number
                       + '-s-0-t-3-p-0.html')
        comment_con = requests.get(comment_url, headers=self.headers).text
        item['p_comment_num'] = json.loads(
            comment_con)['productCommentSummary']['commentCount']
        # Scrape date.
        item['p_updateTime'] = datetime.now().strftime('%Y-%m-%d')
        item['d_number'] = p_number
        item['d_plate'] = '京东'
        # Gross weight ("商品毛重") is treated as always present, like before.
        item['d_weight'] = re.findall(r'.*商品毛重：(\d+\.?\d*[\w]*).*', params)[0]
        self._fill_specs(item, params)
        return item

    # Backward-compatible alias for the historical misspelling.
    parse_deatil = parse_detail
