import datetime
import time

from bs4 import BeautifulSoup
from selenium import webdriver
# Initial configuration; adjust the browser-driver location for your own environment.
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from pymongo import MongoClient

# Proxy server (Abuyun HTTP tunnel endpoint).
proxyHost = "http-pro.abuyun.com"
proxyPort = "9010"

# Proxy tunnel credentials.
# NOTE(review): credentials are hard-coded in source — move to env vars / config.
proxyUser = "H70B99V9Z752485P"
proxyPass = "67CA67814D3A9E21"

# PhantomJS service arguments that route all traffic through the proxy.
service_args = [
    "--proxy-type=http",
    "--proxy=%(host)s:%(port)s" % {
        "host": proxyHost,
        "port": proxyPort,
    },
    "--proxy-auth=%(user)s:%(pass)s" % {
        "user": proxyUser,
        "pass": proxyPass,
    },
]

# PhantomJS desired capabilities.
dcap = dict(DesiredCapabilities.PHANTOMJS)
# Spoof a desktop Chrome user-agent so the site serves the normal page.
dcap["phantomjs.page.settings.userAgent"] = (
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0")
dcap["phantomjs.page.settings.loadImages"] = False  # skip image downloads to speed up page loads

# MongoDB connection settings.
# NOTE(review): database credentials are also hard-coded — same concern as above.
mongodb_ip = '172.29.13.121'
mongodb_port = 27017
mongodb_lib = 'spider'
mongodb_user = 'spider'
mongodb_pwd = 'spiderQWwe741'


def search_by_keyword():
    """Search Tmall for '手机' (phones) and collect the brand filter links.

    Drives a PhantomJS browser to tmall.com, submits the search, expands
    the "more" dropdown in the brand filter form, and scrapes every brand
    link from the expanded list.

    Returns:
        list[dict]: one dict per brand with keys
            'logo' -- brand name text,
            'url'  -- absolute search URL for that brand.
    """
    # Proxy variant: webdriver.PhantomJS(desired_capabilities=dcap, service_args=service_args)
    driver = webdriver.PhantomJS(desired_capabilities=dcap)
    try:
        driver.implicitly_wait(20)
        driver.set_page_load_timeout(20)
        driver.get('https://www.tmall.com')
        time.sleep(10)
        # 1. Search for '手机'.
        driver.find_element_by_name('q').send_keys('手机')
        driver.find_element_by_name('q').send_keys(Keys.ENTER)
        time.sleep(10)
        # 2. Click "more" to expose the full brand link list.
        driver.find_element_by_css_selector('form#J_NavAttrsForm a.ui-more-drop-l').click()
        time.sleep(10)
        html = driver.page_source
    finally:
        # Always release the browser process, even when a step times out
        # (the original leaked the PhantomJS process on any exception).
        driver.quit()
    soup = BeautifulSoup(html, 'lxml')
    anchors = soup.select('form#J_NavAttrsForm ul.row-1 li a')
    # Build the result without shadowing the builtins `list`/`dict`.
    return [
        {
            'logo': a.text,
            'url': 'https://list.tmall.com/search_product.htm' + a.get('href'),
        }
        for a in anchors
    ]


def search_by_url(logo, url):
    """Crawl every result page of a Tmall brand search and store the items.

    Args:
        logo: brand name; used in log output and passed to extract().
        url: first-page URL of the brand's search results.

    Side effects:
        Inserts scraped items into the `tmall_phone` MongoDB collection
        via extract(); writes one screenshot file per page turn.
    """
    db = MongoClient(mongodb_ip, mongodb_port).get_database(mongodb_lib)
    db.authenticate(mongodb_user, mongodb_pwd)
    collection = db.tmall_phone
    driver = webdriver.PhantomJS(desired_capabilities=dcap, service_args=service_args)
    try:
        driver.implicitly_wait(20)
        driver.set_page_load_timeout(20)
        driver.get(url)
        time.sleep(10)
        # Switch to the small-image layout so each page holds more items,
        # reducing the number of page turns.
        driver.find_element_by_link_text('小图').click()
        time.sleep(20)
        html = driver.page_source
        extract(logo=logo, html=html, collection=collection)
        soup = BeautifulSoup(html, 'lxml')
        page_total = soup.select_one('b.ui-page-s-len').text
        print(datetime.datetime.now(), logo, '总页数字段:', page_total)
        # The pager renders either "current/total" or just "total".
        if '/' in page_total:
            page_total = int(page_total.split('/')[1])
        else:
            page_total = int(page_total)
        print(datetime.datetime.now(), logo, '总页数:', page_total)

        # Page through the remaining results via the jump-to-page box.
        for i in range(2, page_total + 1):
            print(datetime.datetime.now(), logo, '翻页，第%d页' % (i))
            driver.find_element_by_name('jumpto').clear()
            time.sleep(1)
            driver.find_element_by_name('jumpto').send_keys(i)
            driver.find_element_by_name('jumpto').submit()
            time.sleep(20)
            driver.get_screenshot_as_file('tmall' + str(i) + '.png')
            html = driver.page_source
            extract(logo=logo, html=html, collection=collection)
    finally:
        # Quit even when a selector lookup or page load raises, so
        # PhantomJS processes do not accumulate (the original only quit
        # on the fully-successful path).
        driver.quit()


def extract(logo, html, collection):
    """Parse one Tmall results page and bulk-insert the items into MongoDB.

    Args:
        logo: brand name; used only in log output here.
        html: full page source of a search results page.
        collection: pymongo collection receiving the parsed documents.

    Items whose expected nodes are missing raise AttributeError/TypeError
    from BeautifulSoup lookups and are skipped individually.
    """
    soup = BeautifulSoup(html, 'lxml')
    count = 0
    list_data = []
    for item in soup.select('div#J_ItemList div.productMain'):
        try:
            title = item.select_one('div.productTitle').text.replace('\n', '')
            url = 'https:' + item.select_one('div.productTitle a')['href']
            price = item.select_one('em.proSell-price').text
            shop = item.select_one('a.productShop-name').text.replace('\n', '')
            shop_url = 'https://list.tmall.com/' + item.select_one('a.productShop-name')['href']
            status = item.select_one('p.productStatus').text.replace('\n', '').replace(' ', '')
            limited = item.select_one('div.product-limited').text.replace('\n', '').replace(' ', '').replace('\\n', '')
            now = time.localtime()
            single_data = {
                'title': title,
                'url': url,
                'price': price,
                'shop': shop,
                'shop_url': shop_url,
                'status': status,
                'limited': limited,
                # Crawl timestamp truncated to the minute.
                'crawler_time': datetime.datetime(now[0], now[1], now[2], now[3], now[4]),
            }
            list_data.append(single_data)
            count += 1
            print(datetime.datetime.now(),
                  '名称:%s,价格：%s，链接：%s,店铺：%s，店铺链接：%s,状态：%s ,限制：%s' % (title, price, url, shop, shop_url, status, limited))
        except AttributeError:
            # A required node was absent for this item; skip it.
            print(logo, 'AttributeError')
        except TypeError:
            # A lookup returned None where a tag/attr was expected; skip.
            print(logo, 'TypeError')
    if list_data:
        # Collection.insert() is deprecated in pymongo 3 and removed in
        # pymongo 4; insert_many() is the supported bulk insert and
        # behaves identically for a list of documents.
        collection.insert_many(list_data)

    print(datetime.datetime.now(), logo, '数量', count)


if __name__ == '__main__':
    # Script entry point: crawl the Apple ('苹果') brand listing directly.
    apple_url = ('https://list.tmall.com/search_product.htm?brand=30111'
                 '&q=%CA%D6%BB%FA&sort=s&style=g&search_condition=23'
                 '&from=sn__brand-qp&spm=875.7931836/B.a2227oh.d100#J_crumbs')
    search_by_url('苹果', apple_url)
