import ast
import json
import re
import time
from urllib.parse import quote

from bs4 import BeautifulSoup
from pyquery import PyQuery as pq
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
# Module-level Selenium session shared by get_items/index_page/log below.
# NOTE: launching Chrome is a side effect that happens on import.
browser = webdriver.Chrome()
# Explicit waits against this session time out after 10 seconds.
wait = WebDriverWait(browser, 10)

def find_links():
    """Hover over every top-level category on the Taobao home page and
    collect (category name, link) pairs from the fly-out service panel.

    Side effects: opens a dedicated Chrome window (intentionally left
    open on return) and writes one dict repr per line to 'zhonglei.txt',
    the format that read_txt() parses back.

    Returns:
        list[dict]: dicts with keys '种类' (category name) and
        '链接' (href).
    """
    # A fresh browser instance, independent of the module-level one.
    browser = webdriver.Chrome()
    browser.get('https://www.taobao.com/')
    time.sleep(1)
    # Hover over each category entry so the lazy fly-out panel renders
    # its links into the DOM before we snapshot page_source.
    articles = browser.find_elements(By.CLASS_NAME, 'J_Cat')  # Selenium-4 safe API
    for article in articles:
        ActionChains(browser).move_to_element(article).perform()
        time.sleep(2)
    soup = BeautifulSoup(browser.page_source, 'lxml')
    links = []
    for panel in soup.find_all(attrs={'class': 'service-panel'}):
        for anchor in panel.select('p a'):
            links.append({
                '种类': anchor.string,
                '链接': anchor.attrs['href'],
            })
    print(links)
    # NOTE(review): written with the platform default encoding; read_txt()
    # relies on the same default when loading this file — confirm both run
    # on the same platform, or add an explicit encoding to both.
    with open('zhonglei.txt', 'w') as f:
        for link in links:
            f.write(str(link) + '\n')
    # (removed the redundant f.close(); the with-block already closed it)
    return links
# find_links()
def get_items():
    """Parse the product tiles currently rendered in the module-level
    ``browser`` (index_page() must have navigated to a results page).

    Returns:
        list[dict]: one dict per product with image / price / deal /
        title / shop / location fields (text may be empty strings when a
        tile lacks the element).
    """
    products = []
    time.sleep(10)  # crude fixed wait for the result list to finish rendering
    doc = pq(browser.page_source)
    for item in doc('#mainsrp-itemlist .items .item').items():
        product = {
            'image': item.find('.pic .img').attr('data-src'),
            # fixed key: was misspelled 'pice' in the original
            'price': item.find('.price').text(),
            'deal': item.find('.deal-cnt').text(),
            'title': item.find('.title').text(),
            'shop': item.find('.shop').text(),
            'location': item.find('.location').text()
        }
        products.append(product)
    return products

def index_page(page, url, zhonglei):
    """Fetch page *page* of Taobao search results for category
    *zhonglei* and print the parsed products.

    Args:
        page: 1-based page number; pages > 1 are reached by typing into
            the pager input and submitting.
        url: category link (currently only printed — the search URL is
            built from *zhonglei* instead; see the commented get()).
        zhonglei: category name, used as the search keyword.
    """
    print(zhonglei, ':正在抓取第', page, '页')
    try:
        print(url)
        # browser.get(url)
        browser.get('https://s.taobao.com/search?q=' + quote(zhonglei))
        time.sleep(10)
        if page > 1:
            # Type the target page number into the pager box and submit.
            # Renamed from `input`, which shadowed the builtin.
            page_box = wait.until(
                EC.presence_of_element_located((
                    By.CSS_SELECTOR, "#mainsrp-pager div.form > input")))
            submit = wait.until(EC.element_to_be_clickable((
                By.CSS_SELECTOR,
                '#mainsrp-pager div.form > span.btn.J_Submit')))
            page_box.clear()
            page_box.send_keys(page)
            submit.click()
        # Confirm the pager highlights the requested page, then wait
        # until at least one product tile is present.
        wait.until(
            EC.text_to_be_present_in_element((
                By.CSS_SELECTOR, '#mainsrp-pager li.item.active > span'),
                str(page)))
        wait.until(EC.presence_of_all_elements_located((
            By.CSS_SELECTOR, '.m-itemlist .items .item')))
        print(zhonglei, get_items())
    except TimeoutException:
        # Selenium waits raise TimeoutException; the original caught the
        # unrelated builtin TimeoutError and retried as index_page(page),
        # which would have raised TypeError (two missing arguments).
        index_page(page, url, zhonglei)

def read_txt():
    """Load the category list previously saved by find_links().

    Each line of 'zhonglei.txt' holds the repr of a dict such as
    {'种类': ..., '链接': ...}.

    Returns:
        list[dict]: one dict per line of the file.
    """
    links = []
    with open('zhonglei.txt', 'r') as f:
        for line in f:
            # ast.literal_eval parses only Python literals, unlike the
            # original eval(), which would execute arbitrary code
            # embedded in the file.
            links.append(ast.literal_eval(line))
    # (removed the redundant f.close(); the with-block already closed it)
    return links

def get_tracks(space):
    """Build mouse-drag offsets that mimic a human sliding motion.

    Simulates accelerating for the first ~60% of the distance and
    decelerating after, deliberately overshooting by 2 px, then returns
    a fixed backward jiggle that settles on the exact slot — the
    combination is meant to avoid bot detection on slider captchas.

    Args:
        space: target sliding distance in pixels.

    Returns:
        dict: {'forward_tracks': [int, ...], 'back_tracks': [int, ...]}
        of per-step pixel offsets.
    """
    space += 2  # overshoot slightly; the back tracks pull the knob back
    interval = 0.2
    speed = 0
    travelled = 0
    threshold = space * 3 / 5  # accelerate until ~60% of the distance
    forward_tracks = []
    while travelled < space:
        accel = 2 if travelled < threshold else -3
        step = speed * interval + 0.5 * accel * (interval ** 2)
        speed = speed + accel * interval
        travelled += step
        forward_tracks.append(round(step))
    # Fixed backward wiggle that walks back to the precise position.
    back_tracks = [-3, -3, -2, -2, -2, -2, -2, -1, -3, -4]
    return {'forward_tracks': forward_tracks, 'back_tracks': back_tracks}
def log():
    """Log in to Taobao with a username/password, dragging the slider
    captcha along the human-like tracks produced by get_tracks().

    Uses the module-level ``browser``/``wait``. On a wait timeout the
    code assumes the login form is gone (i.e. login succeeded) and
    prints a success message.
    """
    browser.get("https://www.taobao.com/")
    time.sleep(2)
    log_buttom = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '.btn-search.tb-bg')))
    log_buttom.click()
    time.sleep(2)
    try:
        # Switch from QR-code login to username/password login.
        mima_logo = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_Quick2Static')))
        mima_logo.click()
        haoma_input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#TPL_username_1')))
        haoma_input.clear()
        # SECURITY(review): credentials are hard-coded in source; move
        # them to environment variables or a config file kept out of
        # version control.
        haoma_input.send_keys('18101268506')
        mima_input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#TPL_password_1')))
        mima_input.clear()
        mima_input.send_keys('zhou3386168')
        time.sleep(2)
        # Measure the slider track and drag the knob along the
        # generated tracks. Selenium-4 safe locator API (the old
        # find_element_by_* helpers were removed).
        slip_logo = browser.find_element(By.ID, 'nc_1_n1z')
        space = int(browser.find_element(By.CLASS_NAME, 'nc-lang-cnt').size['width']) - 30
        tracks = get_tracks(space)
        ActionChains(browser).click_and_hold(slip_logo).perform()
        for track in tracks['forward_tracks']:
            ActionChains(browser).move_by_offset(xoffset=track, yoffset=0).perform()
        time.sleep(0.5)
        for back_track in tracks['back_tracks']:
            ActionChains(browser).move_by_offset(xoffset=back_track, yoffset=0).perform()
        # Final -3/+3 jiggle mimics a hand settling on the end position.
        ActionChains(browser).move_by_offset(xoffset=-3, yoffset=0).perform()
        ActionChains(browser).move_by_offset(xoffset=3, yoffset=0).perform()
        time.sleep(3)
        ActionChains(browser).release().perform()
        time.sleep(5)


        click_logo = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.J_Submit')))
        # click_logo.click()
    except TimeoutException:
        # Selenium waits raise TimeoutException; the original caught the
        # builtin TimeoutError, which these calls never raise, so the
        # success branch was unreachable.
        print("登陆成功")
log()


# linkss=read_txt()
# page=1
# for lins in linkss:
#     zhonglei=lins.get('种类')
#     url=lins.get('链接')
#     for i in range(1,page+1):
#         index_page(i,url,zhonglei)