import time

from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

from .MongoDB import MyMongo


class TbSpider:
    """Scrape men's coat listings from Taobao search results and save them to MongoDB.

    Relies on module-level globals created in the ``__main__`` block:
    ``brower`` (a Selenium WebDriver) and ``my`` (a MyMongo wrapper).
    """

    def login(self, p):
        """Open Taobao, wait for the user to scan the login QR code, then crawl pages.

        p: number of search-result pages to scrape after login.
        """
        brower.get("https://www.taobao.com")
        time.sleep(2)  # let the landing page render before probing for the login link
        # find_elements (plural) returns [] when the link is absent instead of raising,
        # so this branch is skipped cleanly if a session is already logged in.
        # (The original find_element_by_link_text call raised NoSuchElementException
        # instead of evaluating falsy, making the `if` check ineffective.)
        login_links = brower.find_elements(By.LINK_TEXT, "亲，请登录")
        if login_links:
            login_links[0].click()
            print('请在15秒内完成扫码')
            try:
                # The 'site-nav-user' element only appears once the user is logged in.
                locator_user = (By.CLASS_NAME, 'site-nav-user')
                WebDriverWait(brower, 15, 1).until(EC.presence_of_element_located(locator_user))
                print('登录成功')
            except TimeoutException:
                print('扫码超时')
        for page in range(p):
            self.search(page)

    def search(self, page):
        """Run the search on the first page, or jump to page ``page + 1``, then parse items.

        page: zero-based page index; 0 submits the search form, others use the pager.
        """
        if page + 1 == 1:  # first page: submit the search form
            brower.find_element(By.ID, "q").send_keys("男外套")
            brower.find_element(By.CLASS_NAME, "search-button").click()
            print('第1页')
        else:  # later pages: jump via the last entry of the pager widget
            try:
                locator = (By.XPATH, '//div[@class="wraper"]')
                WebDriverWait(brower, 10).until(EC.presence_of_element_located(locator))
                brower.find_element(By.XPATH, '//ul[@class="items"]/li[last()]').click()
                print(f'跳至第{page+1}页')
                time.sleep(2)  # give the next result page time to load
            except WebDriverException:
                # Best-effort paging, as in the original: report and fall through to
                # parsing whatever page is currently loaded.
                print('翻页出错')
        # Parse the result grid: title and price nodes come in matching document
        # order, so zip() pairs them item by item (extras on either side are dropped).
        titles = brower.find_elements(By.CSS_SELECTOR, 'div[class="row row-2 title"]')
        prices = brower.find_elements(By.CSS_SELECTOR, 'div[class="price g_price g_price-highlight"]')
        for title_el, price_el in zip(titles, prices):
            self.save_mongo(title_el.text, price_el.text)

    def save_mongo(self, tit, pri):
        """Insert one scraped item into MongoDB via the global ``my`` handle."""
        my.insert({'name': tit,
                   'price': pri})

if __name__ == '__main__':
    # Raw string so the Windows path backslashes can never be misread as escape
    # sequences (the original plain string only worked because \G and \C happen
    # not to be escapes).
    brower = webdriver.Chrome(executable_path=r'C:\Google\Chrome\chromedriver.exe')
    tbspider = TbSpider()
    my = MyMongo('taobao_db', 'clothes_man')
    p = 5  # number of result pages to crawl
    tbspider.login(p)
    # Finally, dump the stored records for inspection.
    my.find(flag=False)