from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from pyquery import PyQuery as pq
from config import *
import re
import os
import time
import json
import pymongo

# Module-level MongoDB handles shared by the spider.
# Connection URL / database name come from config.py (star-imported above).
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]

class TaobaoSpider():
    """Scrape Taobao search results for a configured keyword and save
    each product record into MongoDB.

    Flow: run() -> get_selenium() (search + page 1) -> next_page() for
    every remaining page, each page parsed by get_data_list().
    """

    def __init__(self):
        # Browser and wait are created in get_selenium(); use None (not '')
        # so any accidental use before setup fails loudly.
        self.browser = None
        self.wait = None
        self.key_word = KEY_WORD  # search keyword from config.py

    def parse_url(self, url):
        """Generic URL parser (placeholder, not implemented yet)."""
        pass

    def get_selenium(self, max_retries=3):
        """Open taobao.com, search for the keyword, scrape page 1 and
        return the text of the total-pages element (e.g. '共 100 页，').

        max_retries: retries on TimeoutException before re-raising —
        the original recursed without bound, risking RecursionError.
        """
        try:
            self.browser = browser = webdriver.Chrome()
            browser.get('http://www.taobao.com/')
            # Explicit wait: poll up to 10s for each expected condition.
            self.wait = wait = WebDriverWait(browser, 10)
            # 'search_box' instead of 'input' — avoid shadowing the builtin.
            search_box = wait.until(
                EC.presence_of_element_located((By.ID, "q"))
            )
            submit = wait.until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, ".btn-search"))
            )
            search_box.send_keys(self.key_word)
            submit.click()
            total = wait.until(
                EC.presence_of_element_located(
                    (By.CSS_SELECTOR, '.m-page .total'))
            )
            self.get_data_list()  # scrape page 1 before returning
            return total.text
        except TimeoutException:
            print('超时出错！！！')
            if max_retries <= 0:
                raise  # give up instead of recursing forever
            return self.get_selenium(max_retries - 1)

    def next_page(self, page_num, max_retries=3):
        """Jump to `page_num` via the page-number input box and scrape it.

        max_retries: bounded retries on TimeoutException (was unbounded
        recursion in the original).
        """
        wait = self.wait
        try:
            page_input = wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, ".m-page .input"))
            )
            submit = wait.until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, ".m-page .btn"))
            )
            page_input.clear()
            page_input.send_keys(page_num)
            submit.click()
            # Confirm navigation: the highlighted page marker must show page_num.
            wait.until(
                EC.text_to_be_present_in_element(
                    (By.CSS_SELECTOR, '.m-page li.item.active > span.num'),
                    str(page_num))
            )
            self.get_data_list()
        except TimeoutException:
            print('获取下一页出错！！！')
            if max_retries > 0:
                self.next_page(page_num, max_retries - 1)

    def get_data_list(self):
        """Parse the current results page and store every product in MongoDB."""
        wait = self.wait
        browser = self.browser
        wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist .items .item'))
        )
        html = browser.page_source
        # Neutralize the xmlns attribute so pyquery/lxml does not treat the
        # document as namespaced, which would break plain CSS selectors.
        html = html.replace("xmlns", 'data-id')
        doc = pq(html)
        doc.remove_namespaces()
        items = doc('#mainsrp-itemlist .items .item')
        for item in items.items():
            img_src = item('.pic-box img.img').attr('src')
            produce = {
                # Guard: lazy-loaded images may lack a src attribute, in
                # which case attr() returns None and '+' would raise.
                'img_src': ('https:' + img_src) if img_src else '',
                'price': item('.price').text(),
                'deal': item('.deal-cnt').text()[:-3],  # drop trailing '人付款'
                'title': item('.title').text(),
                'shop': item('.shop .shopname .dsrs+span').text(),
                'location': item('.location').text()
            }
            self.save_mongodb(produce)

    def save_local(self):
        """Save results to a local file (placeholder, not implemented yet)."""
        pass

    def save_mongodb(self, result):
        """Insert one product dict into the configured MongoDB collection."""
        try:
            # insert_one() replaces Collection.insert(), which is
            # deprecated and removed entirely in pymongo 4.x.
            if db[MONGO_TABLE].insert_one(result):
                print('保存mogodb 成功！！！', result)
        except Exception:
            print('储存到mongodb失败！', result)

    def run(self):
        """Main flow: search, read the total page count, iterate all pages."""
        try:
            # 1. Use selenium to search Taobao for the keyword; page 1 is
            #    scraped inside get_selenium().
            total_text = self.get_selenium()
            match = re.search(r'\d+', total_text)
            if match is None:
                # Don't crash with AttributeError when the page layout changes.
                print('无法解析总页数:', total_text)
                return
            total = match.group()
            print(total)

            # 2-5. Each remaining page is fetched, parsed and saved to
            # MongoDB by next_page() -> get_data_list() -> save_mongodb().
            for i in range(2, int(total) + 1):
                time.sleep(2)  # throttle page turns to avoid being blocked
                self.next_page(i)
        except Exception as e:
            # Surface the actual error instead of hiding it behind a
            # fixed message.
            print('出错了!!', e)
        finally:
            # quit() shuts down the driver process and all windows;
            # close() would only close the current window and leak the
            # chromedriver. Guard against get_selenium() never starting.
            if self.browser:
                self.browser.quit()

            


if __name__ == '__main__':
    # Script entry point: run the spider end-to-end.
    spider = TaobaoSpider()
    spider.run()
