#!/usr/bin/python3
# -*- coding: utf-8 -*-
import csv
import random
import re
import time
from time import sleep

import demjson
import eventlet
from lxml import etree
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait

from wushi.utils import dbutil
from wushi.utils.dbutil import connectDB
from wushi.utils.logutil import LogUtil
from wushi.vipspider import spider_config
from wushi.vipspider.spider_config import THREAD_POOL_SIZE, SPIDER_SIZE, PROXY, USER_AGENT_LIST, DRIVER, CHANNEL

"""
爬取数据-支持多个颜色的商品
直接写入DB，并通过DB进行幂等， key: productId + color

#selenium.webdriver.chrome.options.Options
用于设置chrome浏览器，目前使用过的参数：
1、阻止chrome弹窗的出现：'profile.default_content_setting_values' :{'notifications' : 2}
2、不加载图片："profile.managed_default_content_settings.images":2
3、无头浏览器，--headless
"""

# NOTE(review): the logger name "xiaohongshu" does not match this vip.com
# spider — presumably copied from another spider module; confirm the intended
# log name before changing it (it may drive log-file routing in LogUtil).
logger = LogUtil("xiaohongshu", __name__)


class Vip_Product_Spider():
    """Spider that scrapes product data (including multi-color products) from vip.com.

    Rows are written directly to the DB; idempotency is enforced via the DB
    key (product_id + details_link), see product_detail_in_db().

    Chrome is configured headless, with notification pop-ups blocked and image
    loading disabled, to speed up crawling (see __init__).
    """

    def __init__(self, index_shading):
        """Create a headless Chrome driver with a random proxy / user agent.

        :param index_shading: shard index of this spider instance; used by
            running_shading_data() to partition brands across concurrent spiders
        """
        proxy = random.choice(PROXY)
        user_agent = random.choice(USER_AGENT_LIST)

        # Chrome options: skip image loading and block notification pop-ups.
        option = ChromeOptions()
        option.add_experimental_option('prefs', {'profile.managed_default_content_settings.images': 2,
                                                 'profile.default_content_setting_values': {'notifications': 2}
                                                 })
        option.add_argument('--headless')
        option.add_argument('--disable-gpu')  # headless browser setup
        option.add_argument('--user-agent=%s' % user_agent)
        # required to run as root on CentOS
        option.add_argument('--no-sandbox')
        option.add_argument('--disable-dev-shm-usage')
        # route traffic through the chosen proxy
        option.add_argument('--proxy-server=%s' % proxy)
        self.bro = webdriver.Chrome(executable_path=DRIVER, options=option)
        # Chrome DevTools Protocol: evaluates the given script in every frame
        # upon creation (before the frame's own scripts run), hiding
        # window.navigator.webdriver before any page code can read it.
        self.bro.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
                Object.defineProperty(navigator, 'webdriver', {
                  get: () => undefined
                })
              """
        })  # may stop working with newer Chrome versions
        # implicit wait of 5 seconds for element lookups
        self.bro.implicitly_wait(5)

        # shard index for hash-based brand sharding
        self.index_shading = index_shading

        self.connect_db = connectDB('mmd_db', '192.168.6.76', 'root', 'root')

    def destroy(self):
        """Release the browser and the DB connection."""
        self.bro.quit()
        self.connect_db.close()

    def running(self, csv_file, start_brand_url):
        '''
        Load brands and download the products of each one, optionally resuming
        from a given brand.

        :param csv_file: unused here; brands are loaded from the DB
        :param start_brand_url: brand URL to resume from. Note: a "brand" on
            VIP is really a sale session and may contain several brands!
        :return: None
        '''
        start = False
        # step 1: load the brands
        brands = self.load_brand_from_db()
        for brand in brands:
            if not start_brand_url:
                # step 2: load the product list of this brand
                self.load_product_list(brand)
            else:
                # skip brands until the resume point is reached
                if not start and (start_brand_url not in brand):
                    continue
                start = True
                self.load_product_list(brand)

        self.destroy()

    def running_shading_data(self, start_brand_id=None):
        '''
        Load the brands, shard them across spider instances, and crawl only
        this instance's shard.

        :param start_brand_id: brand id to resume crawling from (optional)
        :return: None
        '''
        brands_urls = self.load_brand_from_db(start_brand_id)
        for brand in brands_urls:
            # hash-based sharding: this instance handles only the rows whose
            # hash maps to its shard index
            if SPIDER_SIZE == 1 or (hash(brand) % SPIDER_SIZE == self.index_shading):
                self.update_brand_status(brand[0], 1)  # mark as in progress
                self.load_product_list(brand[0])
                self.update_brand_status(brand[0], 3)  # mark as done
        self.destroy()

    def load_brand(self, csv_file):
        """
        Read brand info from a csv file and return the product-list URLs.

        Expected row shape:
        ['杉杉FIRS男装专场', '', 'https://...jpg', 'https://list.vip.com/100862479.html']

        :param csv_file: path of the brand csv file
        :return: list of product-list URLs (4th column of each row)
        """
        brands = []
        with open(csv_file, "r", encoding="utf-8") as f:
            for row in csv.reader(f):
                try:
                    brands.append(row[3])
                except IndexError:
                    # short / empty rows are skipped
                    continue
        return brands

    def load_brand_from_db(self, start_brand_id=None):
        """
        Read the pending brands (status = 0) from the DB.

        BUGFIX: running() calls this method without an argument, which used to
        raise TypeError; start_brand_id now defaults to None, and the id filter
        is only added when a resume id is given (previously a None value would
        have been interpolated into the SQL as the literal `None`).

        :param start_brand_id: brand id to resume from, or None for all
        :return: rows of (product_list_url,)
        """
        # NOTE(review): string-built SQL; acceptable for internal data but a
        # parameterized query would be safer.
        sql = 'select product_list_url from bs_spider_brand where status = 0'
        if start_brand_id is not None:
            sql += ' and id >= %s' % start_brand_id
        sql += ' order by id asc '
        return dbutil.exeSelectSql(sql, self.connect_db)

    def load_product_list(self, url):
        """
        Load a brand's product list page, scroll to the bottom, parse every
        product, and follow pagination until the last page.

        :param url: product list page URL
        :return: None
        """
        logger.info('======>begin loading brand product :{}'.format(url))
        self.bro.get(url)
        try:
            WebDriverWait(self.bro, 5).until(
                expected_conditions.visibility_of_element_located((By.ID, 'J_wrap_pro_add')))
        except Exception:
            try:
                # the products may already be offline; log the page tip and bail out
                tips = self.bro.find_element_by_xpath('/html/body/div[2]/div[1]/div[2]/p')
                if tips and '非常遗憾' in tips.text:
                    logger.info('======> the page tip:{}'.format(tips.text))
            except Exception as err:
                logger.info('======> loading the product list error {}!!!!', err)
            return

        old_brandIndex = 0
        scroll_down = True
        while scroll_down:
            sleep(1)
            # scroll one screen down; scroll_down turns False at the page bottom
            scroll_down, old_brandIndex = self.scroll_down_page(old_brandIndex)
            if not scroll_down:
                # bottom reached: parse the data, then try to page forward
                self.parser_data_product(url)
                if self.goto_next_page():
                    scroll_down = True
                    old_brandIndex = 0
        logger.info('======>finish loading brand product :{}'.format(url))

    def scroll_down_page(self, old_brandIndex):
        '''
        Scroll the current page down one step, until the bottom is reached
        (i.e. no new product divs appear).

        :param old_brandIndex: number of product divs seen before this scroll
        :return: (more_to_scroll, new_div_count)
        '''
        result = False
        html = etree.HTML(self.bro.page_source)
        div_list = html.xpath('//*[@id="J_wrap_pro_add"]/div')
        count = len(div_list)
        # first pass, or more content was lazy-loaded -> keep scrolling
        if old_brandIndex == 0 or count > old_brandIndex:
            logger.info('======>parse the page from {} to {}'.format(old_brandIndex, count))
            result = True
        else:
            logger.warn('======>the end of the page!!!')
            return result, old_brandIndex
        # scroll down one screen (issued twice to trigger lazy loading)
        self.bro.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        self.bro.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        return result, count

    def goto_next_page(self) -> bool:
        """
        Click "next page" to paginate.

        :return: True when a next page exists and was clicked, else False
        """
        try:
            # parse with etree only to log the current page number
            html = etree.HTML(self.bro.page_source)
            current_page = html.xpath('//*[@id="J-pagingWrap"]/span[2]/text()')[0]
            logger.info('======>current page {}'.format(current_page))

            # must be a selenium element reference (not an lxml node) so that
            # click / JS actions can be executed on it
            page_list = self.bro.find_element_by_xpath('//*[@id="J-pagingWrap"]/span[2]/following-sibling::a')
            if page_list:
                # a next-page link still exists: click it via JS and wait
                self.bro.execute_script("arguments[0].click();", page_list)
                sleep(2)
                return True
            return False
        except Exception:
            logger.warn('======>the last page ')
            return False

    def parser_data_product(self, brand_url) -> bool:
        """
        Parse the basic attributes of every product on the current list page
        and persist them (plus every color variant) to the DB.

        :param brand_url: URL of the brand (product list) page being parsed
        :return: True when at least one product was stored successfully
        """
        result = False
        brand_id = None
        product_index = 0
        html = etree.HTML(self.bro.page_source)
        div_list = html.xpath('//*[@id="J_wrap_pro_add"]/div')

        for div in div_list:
            product_index += 1
            product_details = []
            dic = {}
            product_details.append(dic)

            try:
                dic["product_id"] = div.attrib.get('data-product-id')
            except Exception:
                continue

            # each field falls back to "" when the xpath finds nothing
            try:
                dic["title"] = str(div.xpath('.//a/div[2]/div[2]/text()')[0])
            except Exception:
                dic["title"] = ""
            try:
                dic["sale_price"] = str(div.xpath('.//a/div[2]/div[1]/div/div[2]/text()')[0])
            except Exception:
                dic["sale_price"] = ""
            try:
                dic["sale_price_origin"] = str(div.xpath('.//a/div[2]/div[1]/div/div[3]/text()')[0])
            except Exception:
                dic["sale_price_origin"] = ""
            try:
                dic["thumnail_img_link"] = "https:" + div.xpath('.//a/div[1]/div[1]/img/@src')[0]
            except Exception:
                dic["thumnail_img_link"] = ""
            try:
                dic["details_link"] = "https:" + div.xpath('.//a/@href')[0]
                # already stored in the DB -> skip re-crawling it
                if self.product_detail_in_db(dic["product_id"], dic["details_link"]):
                    continue
            except Exception:
                dic["details_link"] = ""
                continue

            # the brand id is parsed once, from the first product's detail URL
            if not brand_id:
                if dic['details_link']:
                    brand_id = self.parse_brand_id(dic['details_link'])
                    if not brand_id:
                        logger.error('======>the brand id is null !!!!')
                        continue
            dic["brand_id"] = brand_id
            dic['brand_url'] = brand_url
            try:
                # load the detail page; returns the detail URLs of the other colors
                other_color_detail_urls = self.load_product_detail(dic['details_link'], dic, True)
                if other_color_detail_urls and len(other_color_detail_urls) > 1:
                    for color, other_color_detail_url in other_color_detail_urls.items():
                        # crawl a color variant only when (product_id, url) is not in the DB yet
                        if not self.product_detail_in_db(dic["product_id"], other_color_detail_url):
                            dic_tmp = dic.copy()
                            product_details.append(dic_tmp)
                            dic_tmp['details_link'] = other_color_detail_url
                            dic_tmp['color'] = color
                            # only this color's page is needed, hence check_other_color=False
                            self.load_product_detail(dic_tmp['details_link'], dic_tmp, False)
                if product_details:
                    self.writedata_2_db(product_details)
                result = True
                logger.info('======>get the product success index:{} , {}'.format(product_index, dic))
            except Exception:
                logger.error('======> get the product detail error !!!! {}'.format(dic["product_id"]))
        return result

    def load_product_detail(self, product_detail_url: str, dic: dict, check_other_color=False) -> {}:
        """
        Parse a product detail page in a new window: brand name, image links,
        property table and color; the results are written into `dic` in place.

        :param product_detail_url: URL of the detail page
        :param dic: product dict to fill in place
        :param check_other_color: whether to collect the other colors' URLs
        :return: {color_name: detail_url} for the other colors. VIP generates
            a separate detail page per color, so every non-default color page
            must be visited separately (only the image links and color name
            are needed there).
        """
        other_color_detail_url = {}
        # open the detail page in a new window and switch to it
        js = 'window.open("{}");'.format(product_detail_url)
        self.bro.execute_script(js)
        windows = self.bro.window_handles
        self.bro.switch_to.window(windows[-1])

        WebDriverWait(self.bro, 5).until(
            expected_conditions.visibility_of_element_located((By.ID, 'J_dc_Detail')))
        product_detail = etree.HTML(self.bro.page_source)

        # step 1: brand name
        try:
            brand_name = product_detail.xpath('//*[@id="J_detail_info_mation"]/div/a/text()')[0]
            dic['brand_name'] = str(brand_name)
        except Exception:
            dic['brand_name'] = ''

        # step 2: image links
        img_list = product_detail.xpath('//*[@id="J-mer-ImgReview"]/div[1]//a/@href')
        imgs = []
        for img in img_list:
            try:
                imgs.append('https:' + img)
            except Exception:
                continue
        dic['imgs_link'] = str(imgs)

        # step 3: property table; each row holds up to two key/value pairs
        properties_html_tr = product_detail.xpath('//*[@id="J_dc_Detail"]/div[2]/div[1]/table/tbody/tr')
        properties = {}
        for property_tr in properties_html_tr:
            try:
                key1 = property_tr.xpath('.//th[1]/text()')[0]
                key1 = key1.replace('：', '')
            except Exception:
                key1 = None
            try:
                value1 = property_tr.xpath('.//td[1]/text()')[0]
            except Exception:
                value1 = None
            try:
                key2 = property_tr.xpath('.//th[2]/text()')[0]
                key2 = key2.replace('：', '')
            except Exception:
                key2 = None
            try:
                value2 = property_tr.xpath('.//td[2]/text()')[0]
            except Exception:
                # BUGFIX: this handler used to reset value1 instead of value2,
                # leaving value2 stale (or unbound on the first row)
                value2 = None
            if key1:
                properties[key1] = value1
            if key2:
                properties[key2] = value2
        dic['properties'] = demjson.encode(properties, encoding='utf-8')

        # step 4: colors; the entry matching the current page becomes
        # dic['color'], the rest are returned for separate crawling
        try:
            colors = self.bro.find_elements_by_xpath('//*[@id="J_detail_color"]/dl/dd/ul/li/a')
            for color in colors:
                href = color.get_attribute('href')
                if (href in dic['details_link']) and not dic.get('color'):
                    dic['color'] = color.text
                elif check_other_color:
                    other_color_detail_url[color.text] = href
        except Exception as error:
            logger.error('======>get the detail error {}', dic, error)

        # close this window and switch back to the product list window
        self.bro.close()
        windows = self.bro.window_handles
        self.bro.switch_to.window(windows[-1])
        return other_color_detail_url

    def parse_brand_id(self, product_list_url):
        """
        Extract the brand id from a product detail URL.

        e.g. https://detail.vip.com/detail-1710613538-6919163397591892034.html
        -> '1710613538'

        :param product_list_url: product detail URL
        :return: brand id string, or None when the URL does not match
        """
        pattern = r'^.+-(\d+?)-(\d+).html$'
        matched = re.match(pattern, product_list_url)
        if matched:
            # group(1) is the brand id, group(2) the product id
            return matched.group(1)
        return None

    def update_brand_status(self, brand_url, status):
        """
        Update the crawl status of a brand (1 = in progress, 3 = done).

        NOTE(review): string-built SQL; fine for internal data, but a
        parameterized query would be safer.
        """
        sql = "update mmd_db.spider_brand_men set status = %d where product_list_url= '%s'" % (status, brand_url)
        dbutil.exeSql(sql, self.connect_db)

    def writedata_2_db(self, prodcut_details):
        """Batch-insert the product dicts into mmd_db.spider_product."""
        sql = """
        insert into mmd_db.spider_product(product_id,title,channel,sale_price,sale_price_origin,thumnail_img_link,
        details_link,brand_id,brand_name,imgs_link,color,properties,status,created_time,updated_time) 
        values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
        """
        # one timestamp (ms) for the whole batch
        now_ms = time.time() * 1000
        datas = []
        for product in prodcut_details:
            datas.append((product['product_id'], product['title'], CHANNEL, product['sale_price'],
                          product['sale_price_origin'], product['thumnail_img_link'], product['details_link'],
                          product['brand_id'], product['brand_name'], product['imgs_link'], product['color'],
                          product['properties'], 0, now_ms, now_ms))
        dbutil.exemanySql(datas, sql, self.connect_db)

    def product_detail_in_db(self, product_id, other_color_detail_url):
        """
        Check whether (product_id, details_link) is already stored in the DB.

        BUGFIX: the query result is now returned; the method previously always
        returned None, so the idempotency checks in parser_data_product never
        skipped anything.

        :return: the (truthy) result rows when the product already exists
        """
        # NOTE(review): string-built SQL; a parameterized query would be safer.
        sql = '''
          select * from mmd_db.spider_product where product_id='{}' and details_link='{}'
        '''.format(product_id, other_color_detail_url)
        return dbutil.exeSelectSql(sql, self.connect_db)


def start(brand_file, start_brand_url):
    '''
    Crawl product data with a single spider instance (synchronous).

    :param brand_file: csv file containing the brand list
    :param start_brand_url: brand URL to resume crawling from (may be falsy
        to crawl every brand)
    :return: None
    '''
    # BUGFIX: removed a stray no-op '' statement that preceded this function,
    # and renamed the local `Spider` (which shadowed class-naming style).
    spider = Vip_Product_Spider(1)
    spider.running(brand_file, start_brand_url)

def start_async(brand_file, start_brand_id):
    '''
    Run several spider instances concurrently on a green-thread pool,
    one instance per configured proxy.

    :param brand_file: csv file containing the brand list (brands themselves
        are loaded from the DB by each spider)
    :param start_brand_id: brand id to resume crawling from
    :return: None
    '''
    eventlet.monkey_patch(socket=True, select=True)
    pool = eventlet.GreenPool(THREAD_POOL_SIZE)
    for shard, _proxy in enumerate(spider_config.PROXY):
        worker = Vip_Product_Spider(shard)
        pool.spawn(worker.running_shading_data, start_brand_id)

    pool.waitall()

if __name__ == "__main__":
    # channel = "women_cloth"
    # channel = "men_cloth"

    # Spider.running(Vip_Product_Spider.womenClothURL)
    # Spider.parse_brand_id('https://detail.vip.com/detail-1710613538-6919163397591892034.html')

    # start(Vip_Product_Spider.manClothFile, '101052817')
    # spider = Vip_Product_Spider(1)
    # spider.update_brand_status('https://list.vip.com/100983102.html',3)
    start_async(Vip_Product_Spider.manClothFile,2777)


