# -*- coding: utf-8 -*-

import os
import stat
import urllib.request
from time import sleep

import requests
from lxml import etree
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


class taobao_infos:
    """Scrape product listings and gallery images from a Taobao shop.

    Drives a headless Chrome via Selenium to log in through the linked
    Weibo account, walks the shop's paginated search results, and
    downloads each item's thumbnail gallery with plain HTTP requests.

    NOTE(review): relies on the module-level globals ``chromedriver_path``,
    ``weibo_username`` and ``weibo_password`` being defined before use
    (they are assigned in the ``__main__`` section of this file).
    """

    def __init__(self):
        """Start a headless Chrome instance and prepare the login URL."""
        url = 'https://login.taobao.com/member/login.jhtml'
        self.url = url
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
        }
        options = webdriver.ChromeOptions()
        # options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})  # skip image loading to speed up page fetches
        # Important: strip the "enable-automation" switch so sites are less
        # likely to detect that the browser is driven by Selenium.
        options.add_experimental_option(
            'excludeSwitches', ['enable-automation'])
        options.add_argument("headless")
        self.browser = webdriver.Chrome(
            options=options, executable_path=chromedriver_path)
        self.wait = WebDriverWait(self.browser, 10)  # explicit-wait timeout: 10s

    def login(self):
        """Log in to Taobao via the Weibo login shortcut.

        Uses the module-level ``weibo_username`` / ``weibo_password``
        globals. Blocks until the Taobao member nickname is visible,
        which is taken as proof that the login succeeded; raises
        ``TimeoutException`` otherwise.
        """
        # Open the Taobao login page.
        self.browser.get(self.url)

        # Implicit wait: poll up to 30s for each element lookup below.
        self.browser.implicitly_wait(30)

        # Click the Weibo-login banner.
        self.browser.find_element(By.XPATH, '//*[@class="weibo-login"]').click()

        # Type the Weibo account name.
        self.browser.implicitly_wait(30)
        self.browser.find_element(By.NAME, 'username').send_keys(weibo_username)
        sleep(1)
        # Type the Weibo password.
        self.browser.implicitly_wait(30)
        self.browser.find_element(By.NAME, 'password').send_keys(weibo_password)
        sleep(1)
        # Click the login confirmation button.
        self.browser.implicitly_wait(30)
        self.browser.find_element(
            By.XPATH, '//*[@class="btn_tip"]/a/span').click()
        sleep(1)
        # Only the presence of the Taobao member nickname proves the login
        # actually went through.
        taobao_name = self.wait.until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, '.site-nav-bd > ul.site-nav-bd-l > li#J_SiteNavLogin > div.site-nav-menu-hd > div.site-nav-user > a.site-nav-login-info-nick ')))
        print("````````````````````````````登录成功````````````````")
        print(taobao_name.text)

    def crawl_good_data(self, url):
        """Crawl one search-result page: fetch every item, then recurse
        into the next page until pagination runs out.

        :param url: absolute URL of a shop search-result page.
        """
        self.browser.get(url)

        # Wait until at least one product link is present, then visit each.
        items = self.wait.until(lambda d: d.find_elements(
            By.XPATH, "//div[@class='item4line1']//a[@class='item-name J_TGoldData']"))
        for item in items:
            href = item.get_attribute("href")
            self.get_detail(href)

        # The explicit wait raises TimeoutException when the "next page"
        # link never appears (i.e. we are on the last page). Treat that as
        # the normal end of pagination instead of crashing -- the original
        # `if next_page:` guard was dead code because wait.until() never
        # returns a falsy value.
        try:
            next_links = self.wait.until(lambda d: d.find_elements(
                By.XPATH, '//div[@class="pagination"]//a[@class="J_SearchAsync next"]'))
        except TimeoutException:
            return
        for link in next_links:
            next_url = link.get_attribute('href')
            self.crawl_good_data(next_url)

    def get_detail(self, url):
        """Fetch one product page over HTTP and download its gallery images.

        :param url: absolute URL of a product detail page.
        """
        res = self.commonGet(url)
        title = res.xpath('//h3[@class="tb-main-title"]')[0].text.strip()
        imgs = res.xpath('//ul[@id="J_UlThumb"]//li//img/@data-src')

        for index, src in enumerate(imgs):
            # Drop the last 10 characters to strip the thumbnail size
            # suffix and get the full-size image URL -- assumes the suffix
            # (e.g. "_60x60.jpg") is always exactly 10 chars; TODO confirm.
            img_url = 'https:' + src[:-10]
            self.save_img(title, img_url, str(index + 1))

    def save_img(self, title, url, index):
        """Download one image into ``D:/img/<title>/<index><suffix>``.

        :param title: product title, used as the folder name.
        :param url:   absolute image URL.
        :param index: 1-based image number, already a string.
        """
        file_path = 'D:/img/' + title
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() + os.makedirs() pair.
        os.makedirs(file_path, exist_ok=True)
        # File extension taken from the URL (e.g. ".jpg").
        file_suffix = os.path.splitext(url)[1]
        filename = '{}{}{}{}'.format(
            file_path, os.sep, index, file_suffix)
        # Download the image and save it into the folder.
        urllib.request.urlretrieve(url, filename=filename)
        print('下載成功---'+ title+ '的第   '+index+'   张图片')
        print('----------------------------------------')

    def commonGet(self, url):
        """GET *url* with the spoofed browser headers and return the parsed
        lxml document.

        NOTE(review): forces gbk decoding -- presumably because these Taobao
        pages are gbk-encoded; verify if other pages are fetched through here.
        """
        res = requests.get(url, headers=self.headers)
        res.encoding = "gbk"
        response = etree.HTML(res.text)
        return response


if __name__ == "__main__":

    # 改成你的chromedriver的完整路径地址
    chromedriver_path = "C:/Program Files (x86)/Google/Chrome/Application/chromedriver.exe"
    weibo_username = "18953989303"  # 改成你的微博账号
    weibo_password = "zhulei123"  # 改成你的微博密码
    url = 'https://51tennis.taobao.com/search.htm?spm=a1z10.5-c-s.0.0.1fc4377b1OdWL0&search=y'
    a = taobao_infos()
    a.login()  # 登录
    a.crawl_good_data(url)  # 爬取天猫商品数据
