import time

from bs4 import BeautifulSoup
from pyquery import PyQuery as pq
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
# Module-wide Chrome WebDriver shared by every function in this script.
browser=webdriver.Chrome()
# Explicit-wait helper with a 10 s timeout.
# NOTE(review): `wait` (and the EC import) is currently unused — confirm whether it is still needed.
wait = WebDriverWait(browser,10)

# Taobao home page — the page whose category side-bar links are scraped.
url='https://www.taobao.com/'
def find_links(url):
    """Scrape category names and links from the Taobao home-page sidebar.

    Hovers over each top-level category entry (class ``J_Cat``) so that
    its fly-out ``service-panel`` is rendered into the DOM, then parses
    the resulting page source with BeautifulSoup.

    Args:
        url: Page to open (expected to be the Taobao home page).

    Returns:
        list[dict]: one dict per link found, with keys
        '种类' (category name, may be None for nested markup) and
        '链接' (href value, None if the anchor has no href),
        in document order.
    """
    links = []
    browser.get(url)
    time.sleep(1)  # let the initial page load settle

    # The side panels are rendered lazily on hover, so move the mouse
    # over every category entry before reading the page source.
    # NOTE: find_elements_by_class_name was removed in Selenium 4;
    # find_elements(By.CLASS_NAME, ...) is the supported form.
    articles = browser.find_elements(By.CLASS_NAME, 'J_Cat')
    for article in articles:
        ActionChains(browser).move_to_element(article).perform()
        time.sleep(2)  # give the fly-out panel time to render

    soup = BeautifulSoup(browser.page_source, 'lxml')
    for panel in soup.find_all(attrs={'class': 'service-panel'}):
        for anchor in panel.select('p a'):
            links.append({
                '种类': anchor.string,
                # .get avoids a KeyError on anchors without an href
                '链接': anchor.get('href'),
            })
    return links

if __name__ == "__main__":
    # Run the scrape and always release the Chrome driver, even if the
    # scrape raises — the original script leaked the browser process.
    try:
        print(find_links(url))
    finally:
        browser.quit()