from copy import deepcopy

import xlwt
from selenium import webdriver


class Lipstick(object):
    """Scrape lipstick listings from vip.com with Selenium and save them to Excel.

    Workflow: open the lipstick search page, collect every brand link,
    walk each brand's (paginated) listing pages, open every product's
    detail page, and append one row per product to ``Lipstick_update.xls``.
    """

    def __init__(self):
        # Launch Chrome and open the search results for keyword "口红" (lipstick,
        # URL-encoded in self.url).
        self.driver = webdriver.Chrome()
        self.url = 'http://category.vip.com/suggest.php?keyword=%E5%8F%A3%E7%BA%A2'
        self.driver.set_window_size(1920, 1080)
        self.driver.get(self.url)

    def initail_work_book(self):
        """Create the workbook/worksheet and write the header row.

        NOTE(review): the method name keeps the original 'initail' typo so
        external callers are not broken.
        """
        # UTF-8 encoding so the Chinese headers/content are stored correctly.
        self.workbook = xlwt.Workbook(encoding='utf-8')
        self.worksheet = self.workbook.add_sheet('Lipstick')
        # Column headers: brand, brand link, title, product link, image,
        # price, description, detail table.
        headers = ('品牌', '品牌链接', '口红标题', '口红链接',
                   '口红图片', '口红价格', '口红描述', '口红详情')
        for col, title in enumerate(headers):
            self.worksheet.write(0, col, label=title)
        # Next row index to write product data into (row 0 is the header).
        self.excel_x = 1

    def parse_all_brand(self):
        """Expand the brand filter and return [{'brand': ..., 'brand_link': ...}, ...]."""
        # Click the "more" toggle so the full brand list is rendered first.
        self.driver.find_element_by_class_name('c-filter-group-button-text').click()
        li_list = self.driver.find_elements_by_xpath(
            '//ul[@class="c-filter-data-list  c-filter-brand-list J-brand-filter-data-list J-filter-data-list"]/li/a')
        brand_list = []
        for li in li_list:
            brand_list.append({
                'brand': li.find_element_by_xpath('./img').get_attribute('alt'),
                'brand_link': li.get_attribute('href'),
            })
        return brand_list

    def save_data(self, lip_item):
        """Scrape the detail table from the current page, then write one Excel row.

        Assumes the driver is already on the product's detail page
        (``parse_detail`` navigates there before calling this).
        """
        try:
            lip_item['lipstick_detail'] = self.driver.find_element_by_xpath(
                '//table[@class="dc-table fst"]').text
        except Exception:
            # Fix: this lookup used to be unguarded, so one page without the
            # detail table aborted the whole crawl; record a placeholder like
            # every other field does.
            lip_item['lipstick_detail'] = '口红详情获取失败'
        columns = ('brand', 'brand_link', 'lipstick_title', 'lipstick_link',
                   'lipstick_img', 'lipstick_price', 'lipstick_desc', 'lipstick_detail')
        for col, key in enumerate(columns):
            self.worksheet.write(self.excel_x, col, label=lip_item[key])
        # Save after every row so a crash mid-crawl loses no collected data.
        self.workbook.save('Lipstick_update.xls')
        print('已抓取%d组数据' % self.excel_x)
        self.excel_x += 1

    def parse_detail(self, one_page_item):
        """Visit each item's detail page, fill title/img/price/desc, and save the row."""
        for lip_item in one_page_item:
            self.driver.get(lip_item["lipstick_link"])
            try:
                lip_item['lipstick_title'] = self.driver.find_element_by_xpath(
                    '//p[@class="pib-title-detail"]').get_attribute('title')
            except Exception:  # narrowed from bare except (no longer eats KeyboardInterrupt)
                lip_item['lipstick_title'] = '口红标题获取失败'
            try:
                lip_item['lipstick_img'] = self.driver.find_element_by_xpath(
                    '//div[@class="zoomWindow"]//img').get_attribute('src')
            except Exception:
                lip_item['lipstick_img'] = '口红图片获取失败'
            # The price lives in different elements depending on the page
            # template; try each selector in turn instead of nesting try/except.
            price_xpaths = ('//span[@class="sp-price"]',
                            '//em[@class="J-price"]',
                            '//del[@class="J-mPrice"]')
            for xpath in price_xpaths:
                try:
                    lip_item['lipstick_price'] = self.driver.find_element_by_xpath(xpath).text
                    break
                except Exception:
                    continue
            else:
                # Fix: the last price selector used to be unguarded, so one odd
                # page crashed the whole crawl; record a placeholder instead.
                lip_item['lipstick_price'] = '口红价格获取失败'
            try:
                lip_item['lipstick_desc'] = self.driver.find_element_by_xpath(
                    './/span[@class="goods-description-title"]').text
            except Exception:
                lip_item['lipstick_desc'] = "口红描述获取失败"
            self.save_data(lip_item)

    def get_all_lipitems(self, brand_list):
        """For each brand, scrape its first listing page, then follow pagination."""
        for data in brand_list:
            brand_page_url = data['brand_link']
            self.driver.get(brand_page_url)
            # Scroll to the bottom so lazily-loaded product cards are rendered.
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            div_list = self.driver.find_elements_by_xpath(
                '//section[@id="J_searchCatList"]/div[position()>1]')
            one_page_item = []
            brand_dict = deepcopy(data)
            for div in div_list:
                # One dict per product, carrying the brand info plus its
                # detail-page link.
                item = deepcopy(data)
                item['lipstick_link'] = div.find_element_by_xpath(
                    './/h4[@class="goods-info goods-title-info"]/a').get_attribute('href')
                one_page_item.append(item)
            # Visit every collected detail page and persist the rows.
            self.parse_detail(one_page_item)
            # parse_detail navigated away; return to the brand's first listing
            # page so pagination starts from the beginning.
            self.driver.get(brand_page_url)
            self.get_next_page_all_items(brand_dict)

    def get_next_page_all_items(self, data):
        """Follow the 'next page' link for one brand until pagination runs out."""
        while True:
            try:
                next_page_link = self.driver.find_element_by_xpath(
                    './/div[@id="J_pagingCt"]/span[position()>1]/following-sibling::a[1]'
                ).get_attribute('href')
            except Exception:  # no further page link -> done with this brand
                break
            self.driver.get(next_page_link)
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            div_list = self.driver.find_elements_by_xpath(
                '//section[@id="J_searchCatList"]/div[position()>1]')
            one_page_items = []
            for div in div_list:
                # Fix: the original rebound `data` inside this loop, so later
                # iterations deep-copied an already-modified copy; use a fresh
                # per-item name and always copy the pristine brand dict.
                item = deepcopy(data)
                item['lipstick_link'] = div.find_element_by_xpath(
                    './/h4[@class="goods-info goods-title-info"]/a').get_attribute('href')
                one_page_items.append(item)
            self.parse_detail(one_page_items)
            # parse_detail navigated away; return to the current listing page
            # so the next-page lookup advances relative to it.
            self.driver.get(next_page_link)

    def start_operate(self, driver):
        """Run the full crawl: init workbook, collect brands, scrape everything.

        ``driver`` is accepted for interface compatibility but unused; the
        instance already owns ``self.driver``.
        """
        self.initail_work_book()
        brand_list = self.parse_all_brand()
        # Fix: dropped the unused `link_urls =` binding (the method returns None).
        self.get_all_lipitems(brand_list)

    def __del__(self):
        """Best-effort browser shutdown.

        quit() alone ends the session and closes every window, so the
        redundant close() call was removed; failures (e.g. __init__ never
        created the driver) are reported, not raised.
        """
        try:
            self.driver.quit()
        except Exception:
            print('Process terminated')

    def run_spider(self):
        """Public entry point used by the ``__main__`` block."""
        self.start_operate(self.driver)


if __name__ == '__main__':
    # Build the scraper (this launches Chrome) and kick off the full crawl.
    spider = Lipstick()
    spider.run_spider()
