import scrapy
import os
import json
import re
import math
import time

from utils.timestamp import create_timestamp
from utils.actuator import actuator
from utils.conf_read import Conf_Read

from pp.common import SU_NING_DB
from pp.db_model import ShopModel, ProductModel
from pp.items import Product as Product_Item, Product_List as Product_Item_List
from pp.log import create_log_filename


def get_conf_path(file_name):
  """Return the absolute path of *file_name* located next to this module."""
  return os.path.join(os.path.dirname(__file__), file_name)


def create_product_url(shop_id, page_no = 0, page_size = 48):
  """Build the JSONP listing-page URL for one shop.

  ``page_no`` is 0-based; ``page_size`` defaults to the platform's
  48 items per page. The empty ``callback=`` keeps the JSONP wrapper
  minimal.
  """
  base = 'https://csearch.suning.com/emall/brandquery/brandstoreQuery.jsonp'
  return f'{base}?btc={shop_id}&cp={page_no}&ps={page_size}&callback='


def create_product_img_url(product_id, shop_id, shop_type = 1):
  """Build the batch main-picture URL for one product.

  For ``shop_type == 1`` the shop segment is the fixed placeholder
  ``'00000000'`` instead of the real shop id.
  """
  effective_shop = '00000000' if shop_type == 1 else shop_id
  return (
    'https://shop.suning.com/mainpicture/mpBatchCallback/batchGetByLocation/'
    f'00{effective_shop}-{product_id}-0-1.json'
  )


class Product_Spider(scrapy.Spider):
  """Crawl the product catalogue (and main images) for a range of shops.

  Flow:
    1. ``start_requests`` reads the last processed shop id from the YAML
       config, asks the operator for a new upper bound on stdin, and
       requests page 0 of every shop in ``(current_shop_id, target_shop_id]``.
    2. ``parse_product_total_count`` reads the shop's total product count
       and fans out one request per listing page.
    3. ``parse_product_list`` converts each page into ``ProductModel`` rows
       and additionally requests each product's main image.
    4. ``parse_product_img`` fills in ``product_img`` on the model.
  """

  name = 'product'

  # YAML config persisting the last processed shop id ('current_shop_id').
  _conf_read = Conf_Read(get_conf_path('product_setting.yaml'))
  _db = SU_NING_DB

  custom_settings = {
    'LOG_FILE': create_log_filename('product'),
    'LOG_LEVEL': 'INFO',
    # The paging service responds slowly and answers bursts with HTTP 403,
    # so throttle requests. FIX: the key previously contained a trailing
    # space ('DOWNLOAD_DELAY ') and was therefore silently ignored by
    # Scrapy — the delay never actually applied.
    'DOWNLOAD_DELAY': 1,
    'HTTPERROR_ALLOWED_CODES': [403],
    'ITEM_PIPELINES': {
      'pp.pipelines.ProductSavePipeline': 300,
    },
  }

  def start_requests(self):
    """Prompt for a target shop id and seed one page-0 request per shop.

    The new upper bound is stored back into the in-memory config and is
    persisted when the spider closes (see ``closed``).
    """
    current_shop_id = self._conf_read.conf.get('current_shop_id')
    print(f'目标店铺Id: [{current_shop_id}]')
    target_shop_id = input()
    self._conf_read.conf['current_shop_id'] = target_shop_id

    session = self._db.DBSession()
    try:
      shop_list = session.query(ShopModel).filter(
        ShopModel.id > current_shop_id,
        ShopModel.id <= target_shop_id
      ).all()

      self.logger.info(f'<--------{current_shop_id} / {target_shop_id}--------------->')
      self.logger.info(f'拉取店铺数量: {len(shop_list)}')

      for shop in shop_list:
        yield scrapy.Request(
          url=create_product_url(shop.shop_id),
          callback=self.parse_product_total_count,
          dont_filter=True,
          cb_kwargs={'shop': shop}
        )
        # Extra pacing on top of DOWNLOAD_DELAY; blocks the reactor, but
        # only while start requests are being pulled.
        time.sleep(1)
    finally:
      # Close the session even if the generator is abandoned early;
      # the original leaked it in that case. Detached shop instances are
      # still used in callbacks afterwards, as before.
      session.close()

  def closed(self, status):
    """Persist the last processed shop id when the spider closes."""
    print('cache: ', self._conf_read.conf)
    self._conf_read.updateConf()

  async def parse_product_total_count(self, response, shop):
    """Read the shop's total product count and fan out one request per page.

    The response is JSONP; the payload sits inside 'callback( ... )'.
    """
    res_str = re.findall(r'\((.*)\)', str(response.body, 'utf-8'), re.S)
    # actuator(...) returns (ok_flag, result) — falsy flag means the
    # json.loads call raised (same convention as the sibling parsers).
    ok, res_json = actuator(json.loads)(res_str[0])

    if not ok:
      self.logger.error(f'产品总数json解析失败: {res_json} / {shop.shop_id}')
      return

    totalGoodsCount = res_json.get('totalGoodsCount', 0)
    self.logger.info(f'{shop.shop_name} : {totalGoodsCount}')
    print(f'{shop.shop_name} : {totalGoodsCount}')

    if totalGoodsCount == 0:
      self.logger.info(f'{shop.shop_name}[{shop.shop_id}]-未找到商品')
      return

    # Platform pages at 48 items starting from page 0.
    page_size = 48
    page_count = math.ceil(totalGoodsCount / page_size)

    for page_no in range(page_count):
      yield scrapy.Request(
        url=create_product_url(
          shop_id=shop.shop_id,
          page_no=page_no,
          page_size=page_size
        ),
        callback=self.parse_product_list,
        dont_filter=True,
        cb_kwargs={'shop': shop}
      )

  def parse_product_list(self, response, shop):
    """Turn one listing page into ProductModel rows and request images.

    Yields one image request per product plus a single Product_Item_List
    carrying the whole page for the save pipeline.
    """
    # NOTE(review): this endpoint's JSONP ends with ');' while the
    # total-count endpoint's does not — the stricter regex here looks
    # intentional; confirm before unifying.
    res_str = re.findall(r'\((.*)\);', str(response.body, 'utf-8'), re.S)
    not_err, res_json = actuator(json.loads)(res_str[0])

    if not not_err:
      self.logger.error(f'产品列表json解析失败: {res_json} / {response.url} / {shop.shop_name}')
      self.logger.info('<--------------->')
      self.logger.info(res_str[0])
      self.logger.info('<--------------->')
      return

    good_list = res_json.get('goodList', [])
    product_list = []

    for good in good_list:
      product = ProductModel(
        product_name = good.get('title', ''),
        # 'commidityUrl' [sic] is the platform's own field name.
        product_url = 'https:{}'.format(good.get('commidityUrl', '')),
        product_id = good.get('catentryId', ''),
        product_price = good.get('price', 0),
        product_classify = '',
        product_desc = '',
        product_screenshot = '',
        product_img = '',

        shop_id = shop.shop_id,

        create_time = create_timestamp(),
      )

      yield scrapy.Request(
        url=create_product_img_url(product.product_id, shop.shop_id, shop.shop_type),
        callback=self.parse_product_img,
        cb_kwargs={ 'product': product }
      )

      product_list.append(product)

    yield Product_Item_List(list = product_list)

  def parse_product_img(self, response, product):
    """Fill ``product.product_img`` from the batch main-picture response.

    NOTE(review): this mutates the shared ProductModel instance instead of
    yielding an item; if the Product_Item_List is processed by the pipeline
    before this response arrives, the row may be saved without its image —
    verify the pipeline's timing assumptions.
    """
    res_str = re.findall(r'mpBatchCallback\((.*)\)', str(response.body, 'utf-8'), re.S)
    not_err, res_json = actuator(json.loads)(res_str[0])

    if not not_err:
      self.logger.error(f'产品图片json解析失败: {res_json}')
      return

    if len(res_json) == 0:
      self.logger.info(f'未找到产品图片: {product}')
      return

    img_json = res_json[0]

    product.product_img = 'https://{domain}{img_path}'.format(
      domain = img_json.get('domain', ''),
      img_path = img_json.get('pictureUrl', ''),
    )