import scrapy


from os import path
from datetime import datetime

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from pp.items import SHOP_STATUS, Pipe_Pk
from pp.log import create_log_filename
from pp.common import SU_NING_DB
from pp.db_model import ShopModel


# Absolute directory where shop screenshots are written.
# NOTE(review): 'pictrues' is a typo for 'pictures', kept because both this
# constant's name and the on-disk directory use that spelling.
PICTRUES_PATH = path.abspath(path.join(path.dirname(__file__), '../pictrues/shop')) 

def get_filename(shop_id):
  """Build a screenshot file name of the form '<shop_id>_<timestamp>.png'."""
  stamp = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')
  return '{}_{}.png'.format(shop_id, stamp)


def get_file_path(shop_id):
  """Return the absolute path where the screenshot for *shop_id* is saved."""
  filename = get_filename(shop_id)
  return path.join(PICTRUES_PATH, filename)

# Headless Chrome configuration sized to a mobile (iPhone X-like) viewport.
# NOTE(review): the name 'chorme_options' is a typo for 'chrome_options', but
# it is referenced elsewhere in this module, so the name is kept.
chorme_options = webdriver.chrome.options.Options()
# BUGFIX: the value used to be '375, 812' (space after the comma); Chrome's
# documented switch format is 'width,height' with no whitespace.
chorme_options.add_argument('--window-size=375,812')
chorme_options.add_argument('--headless')


def load_shop_list(db):
  """Load the list of shops to crawl from the database.

  The interactive prompt for a target shop id is preserved, but its value is
  currently unused: the query returns every ShopModel row (the id-range
  filter was disabled by the original author).

  NOTE(review): the session is deliberately left open — the returned
  ShopModel rows are read and mutated later by the spider, and closing the
  session here would detach them. TODO: confirm the session lifecycle
  against pp.common.SU_NING_DB.
  """
  target_shop_id = input('请输入目标店铺ID: ')  # currently unused; see docstring
  session = db.DBSession()
  shop_list = session.query(ShopModel).all()
  return shop_list


class Shop_Pictrue(scrapy.Spider):
  """Spider that visits each shop's mobile detail page, marks shops that no
  longer exist as INVALID, and saves a full-page screenshot of valid shops
  via headless Chrome, yielding each shop row to the update pipeline.
  """

  name = 'shop-pictrue'
  custom_settings = {
    'LOG_FILE': create_log_filename('shop_screen_shot'),
    'LOG_LEVEL': 'INFO',
    # BUGFIX: the key used to be 'DOWNLOAD_DELAY ' (trailing space), so
    # Scrapy silently ignored the delay.
    'DOWNLOAD_DELAY': 2,
    'HTTPERROR_ALLOWED_CODES': [403],
    'ITEM_PIPELINES': {
      'pp.pipelines.ShopUpdatePipeline': 300
    }
  }

  # Shared database handle used to load the shop list.
  _db = SU_NING_DB

  def __init__(self):
    # NOTE(review): 'chrome_options=' is deprecated (removed in Selenium 4);
    # kept because this module targets the Selenium 3 API (see the
    # find_element_by_class_name call below).
    self.browser = webdriver.Chrome(chrome_options=chorme_options)
    super().__init__()

  def close(self, reason=None):
    # BUGFIX: Scrapy invokes close(reason) on shutdown; the old zero-arg
    # signature raised TypeError there. 'reason' defaults to None so direct
    # no-argument calls keep working. Quit Chrome to avoid leaking processes.
    self.browser.quit()

  def start_requests(self):
    """Yield one request per shop loaded from the database."""
    shop_list = load_shop_list(self._db)
    self.logger.info(f'拉取店铺数量: {len(shop_list)}')

    for shop in shop_list:
      # Mobile shop detail page.
      url = 'https://shop.m.suning.com/shopdetail/{shop_id}.html'.format(shop_id=shop.shop_id)
      yield scrapy.Request(
        url=url,
        callback=self.parse,
        dont_filter=True,
        cb_kwargs={'shop': shop}
      )

  def parse(self, response, shop):
    """Check whether the shop page is valid; screenshot valid shops.

    Either way the shop row is wrapped in a Pipe_Pk and yielded so the
    update pipeline persists the new status.
    """
    url = response.url

    # Wrap the shop row for the downstream pipeline.
    pk = Pipe_Pk(body=shop)

    # A '.no-protips' element on the page means the shop no longer exists.
    page_error = response.xpath("//div[@class='no-protips']").get()
    if page_error is not None:
      self.logger.info(
        f'店铺[{shop.shop_name}]失效: {shop.shop_id} | {url}'
      )
      shop.status = SHOP_STATUS['INVALID']
      yield pk
      return

    print('-----------------')
    print(f'BINGO: {shop.shop_name} | {url}')
    print('-----------------')

    try:
      filename = get_file_path(shop.shop_id)
      self.browser.get(url)

      # Wait until the main content wrapper becomes visible and treat that
      # as "page loaded"; there is no reliable signal that images or other
      # assets have finished loading.
      WebDriverWait(self.browser, 15, 0.5).until(
        EC.visibility_of(
          self.browser.find_element_by_class_name('shop-detail-main-wrap')
        )
      )
      self.browser.get_screenshot_as_file(filename)
      print('SUCCESS !!!', filename)
      # BUGFIX: this log line used to print the literal '(unknown)' instead
      # of the saved file path.
      self.logger.info(
        f'店铺截图保存成功: {shop.shop_name} | {shop.shop_id} | {filename}'
      )

      shop.status = SHOP_STATUS['VALID']
      shop.screen_shot = filename

      yield pk
    except Exception as e:
      # Broad catch keeps the crawl going when a single screenshot fails.
      self.logger.error(
        f'获取店铺截图失败: {shop.shop_name} | {shop.shop_id} | {e}'
      )
