import re

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import ElementNotSelectableException, TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
import time
from pyquery import PyQuery as pq
from config import *
import re
import pymysql
import requests
# Use Chrome so the run is visible for debugging; a headless driver such as
# PhantomJS would be faster if speed matters.
browser = webdriver.Chrome()
# Maximize the window, otherwise some page elements are not rendered/clickable.
browser.maximize_window()
# Shared explicit wait: up to 10 s for every wait.until(...) in this module.
wait = WebDriverWait(browser, 10)


# Log in with account and password.
def login_by_password(url):
    """Open the Taobao login page and submit the account/password form.

    The password is read from a local ``password.txt`` file; the username is
    hard-coded.  Drives the module-level ``browser``; returns None.  Errors
    are printed, not raised.
    """
    browser.get(url)
    time.sleep(2)  # wait for the page JS to finish loading
    # The with-statement closes the file automatically; the original's
    # explicit f.close() inside the block was redundant.
    with open('password.txt', 'r') as f:
        password_text = f.read()
    try:
        # Switch from the QR-code panel to the static (password) login form.
        password_button = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_QRCodeLogin > div.login-links > a.forget-pwd.J_Quick2Static'))
        )
        password_button.click()
        username = browser.find_element_by_id('TPL_username_1')
        password = browser.find_element_by_id('TPL_password_1')
        username.send_keys('13788819348')
        password.send_keys(password_text)
        # Solve the slider captcha before submitting.
        verify()
        J_Submit = browser.find_element_by_class_name('J_Submit')
        J_Submit.click()
    except TimeoutException:
        # BUG FIX: wait.until raises TimeoutException when the button never
        # becomes clickable; the original caught only
        # ElementNotSelectableException, which wait.until does not raise,
        # so the timeout escaped uncaught.
        print('不可点击')
    except ElementNotSelectableException:
        print('不可点击')


# Slider verification.
def verify():
    """Drag Taobao's login slider captcha, retrying up to 10 times.

    Returns 1 on success and -1 when all attempts fail.
    """
    # Wait for the slider widget and the rest of the page JS to load.
    time.sleep(3)
    for attempt in range(10):
        # BUG FIX: the original printed format(i + 1) *after* i += 1, so the
        # first attempt was reported as "第2次尝试".
        print('第{}次尝试'.format(attempt + 1))
        try:
            # Locate the slider handle.
            source = browser.find_element_by_xpath('//*[@id="nc_1__scale_text"]/span')
            # Drag it 400 px to the right.
            ActionChains(browser).drag_and_drop_by_offset(source, 400, 0).perform()
            # Give the JS verification time to run; acting too soon errors out.
            time.sleep(2)
            text = browser.find_element_by_xpath("//div[@id='nc_1__scale_text']/span")
            # Observed outcomes: success ("请在下方输入验证码" / "请点击图"),
            # no response ("请按住滑块拖动"), failure ("哎呀，失败了，请刷新").
            if text.text.startswith(u'请在下方') or text.text.startswith(u'请点击'):
                print('成功滑动')
                return 1
            # "请按住" (no response): fall through and retry.  BUG FIX: the
            # original used `continue` here, which skipped `i += 1` and could
            # loop forever; the for-loop always advances the counter.
        except Exception as e:
            # Click the "refresh" link to reset the slider, then retry.
            browser.find_element_by_xpath('//*[@id="nocaptcha"]/div/span/a').click()
            print(e)
    print("淘宝有问题")
    return -1


# Log in to Taobao by scanning the QR code.
def login_by_QRCode(url):
    """Open the login page and switch to the QR-code panel.

    The actual scan is done manually; we just sleep long enough for it.
    """
    browser.get(url)
    time.sleep(5)
    # The Taobao login page sometimes needs a refresh to render properly.
    browser.refresh()
    time.sleep(5)
    try:
        # BUG FIX: WebElement has no context_click() method (that lives on
        # ActionChains), so the original call always raised AttributeError
        # and printed '访问出错'.  A plain click switches to the QR panel.
        browser.find_element_by_css_selector('#J_Static2Quick').click()
        time.sleep(10)  # sleep 10 s for the manual QR scan
    except Exception as e:
        print(e)
        print('访问出错')


# Search the food results page.
def search():
    """Type KEYWORD into the search box, submit, scrape page 1 and return
    the pager's "total pages" text (e.g. '共 100 页,').

    Retries in a loop on TimeoutException.  BUG FIX: the original recursed
    (`return search()`) on every timeout, which grows the call stack without
    bound when the page keeps timing out.
    """
    while True:
        print('开始')
        try:
            input_content = wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, '#q'))
            )
            print('获取input')
            input_content.clear()
            print("clear")
            input_content.send_keys(KEYWORD)  # KEYWORD comes from config
            print("keyword")
            input_content.send_keys(Keys.ENTER)
            print("enter")
            # Waiting on '.total' via wait.until timed out in practice,
            # so fetch it directly.
            total_page = browser.find_element_by_css_selector('.total')
            print(total_page.text)
            get_products()
            print('get product')
            return total_page.text
        except TimeoutException:
            print("search error")
            # loop back and retry


def next_page(page_num):
    """Navigate to search-result page *page_num* via the pager's page-number
    input box, wait until the pager highlights that page, then scrape it.

    Retries in a loop on TimeoutException.  BUG FIX: the original recursed
    (`return next_page(page_num)`), growing the stack on repeated timeouts.
    """
    while True:
        try:
            input_page_num = wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > input'))
            )
            input_page_num.clear()
            input_page_num.send_keys(str(page_num))
            input_page_num.send_keys(Keys.ENTER)
            # The active pager item showing page_num confirms navigation.
            wait.until(
                EC.text_to_be_present_in_element(
                    (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > ul > li.item.active > span'), str(page_num)
                )
            )
            get_products()
            return
        except TimeoutException:
            continue  # loop back and retry


def get_products():
    """Parse the current result page with PyQuery and persist each product.

    NOTE(review): the loop breaks after the first item, so only one product
    per page is saved — this looks like a debugging leftover; remove the
    break to store every product on the page.
    """
    # Seeing the first item element proves the result page rendered.
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.item:nth-child(1)')))
    doc = pq(browser.page_source)
    # Generator over every matched result item.  (The original also checked
    # `items is None`, but .items() always returns a generator, never None.)
    items = doc('#mainsrp-itemlist .m-itemlist .items .item').items()
    time.sleep(3)
    for item in items:
        src = item.find('.pic .img').attr('src')
        product = {
            # BUG FIX: attr('src') returns None for lazy-loaded images, and
            # 'http:' + None raised TypeError; guard with an empty fallback.
            'image': 'http:' + src if src else '',
            'price': item.find('.price').text(),
            # strip the trailing "人付款" suffix
            'deal': item.find('.deal-cnt').text()[:-3],
            'title': item.find('.title').text(),
            'shop': item.find('.shop').text(),
            'location': item.find('.location').text()
        }
        print(product)
        save_information(product)
        break


def text_to_num(total_page_text):
    """Extract the first run of digits from *total_page_text* (e.g. the
    pager label '共 100 页,') and return it as an int.

    Returns -1 when the text contains no digits.
    """
    match = re.search(r'\d+', total_page_text)
    # A \d+ match can only consist of digit characters, so the original's
    # extra isdigit() re-check (and its dead -1 branch) was redundant.
    return int(match.group(0)) if match else -1


# Store the product info in MySQL (could also be a txt file).
def save_information(product):
    """Insert one product dict into the food_information table.

    Skips rows whose image URL exceeds the 255-char column limit.
    Failures are printed and rolled back, never raised.
    """
    # Check the length limit before opening a DB connection at all.
    if len(product['image']) > 255:
        print('image url too long')
        return
    # SECURITY FIX: the original interpolated scraped text straight into the
    # SQL string, so any quote in a title/shop name broke the statement and
    # allowed SQL injection.  %s placeholders let the driver escape values.
    sql = ('insert into food_information(image, price, deal, title, shop, location) '
           'values(%s, %s, %s, %s, %s, %s)')
    db = pymysql.connect(host='localhost', user='root', password='root', database='test')
    try:
        with db.cursor() as cursor:
            cursor.execute(sql, (product["image"], product["price"], product["deal"],
                                 product["title"], product["shop"], product["location"]))
        db.commit()
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        print('sql execute fail')
        db.rollback()
    finally:
        # Always release the connection, even after a failed insert.
        db.close()


def main():
    """Log in via QR code, run the search, then page through the results."""
    try:
        # Taobao login page, redirecting to the food ("美食") search results.
        # NOTE: refreshing the login page is flaky (handled in login_by_QRCode).
        url = 'https://login.taobao.com/member/login.jhtml?redirectURL=http%3A%2F%2Fs.taobao.com%2Fsearch%3Fq%3D%25E7%25BE%258E%25E9%25A3%259F%26imgfile%3D%26commend%3Dall%26ssid%3Ds5-e%26search_type%3Ditem%26sourceId%3Dtb.index%26spm%3Da21bo.2017.201856-taobao-item.2%26ie%3Dutf8%26initiative_id%3Dtbindexz_20170306'
        login_by_QRCode(url)
        total_page_text = search()
        total_page_num = text_to_num(total_page_text)
        print(total_page_num)
        # BUG FIX: text_to_num returns -1 on failure, and -1 is truthy, so
        # the original `if total_page_num:` accepted the failure sentinel.
        # (range(2, 0) happened to be empty, but the guard was still wrong.)
        if total_page_num > 0:
            for i in range(2, total_page_num + 1):
                next_page(i)
                break  # NOTE(review): debug leftover? only page 2 is fetched
    except Exception as e:
        print("出错啦")
        # Print the actual error instead of silently discarding it.
        print(e)
    finally:
        # BUG FIX: close() only closes the window and can leave the driver
        # process running; quit() shuts the whole driver session down.
        browser.quit()

# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
