import time
from selenium import webdriver
import requests
import re
from bs4 import BeautifulSoup

# Module-level Chrome driver shared by all functions below.
# Raw string (r'...') is required: '\P', '\G', '\C', '\A' are invalid escape
# sequences in a plain literal and only work by accident (DeprecationWarning
# on modern Python, SyntaxError in the future).
browser = webdriver.Chrome(r'C:\Program Files\Google\Chrome\Application\chromedriver.exe')
def res():
    """Open the EastMoney A-share board page and print every cell of the
    quote table.

    Side effects only: drives the module-level ``browser`` (Selenium Chrome)
    and writes to stdout. Returns None.
    """
    # Wait up to 3s for elements to appear on each lookup.
    browser.implicitly_wait(3)
    # driver.get() returns None — nothing useful to assign.
    browser.get('http://quote.eastmoney.com/center/gridlist.html#hs_a_board')
    # './/td' keeps the search relative to the located <tbody>; a leading
    # '//td' is an absolute XPath that searches the whole document regardless
    # of the element it is called on.
    cells = browser.find_element_by_xpath('//tbody').find_elements_by_xpath('.//td')
    print(len(cells))
    # Iterate the elements directly: the original `ele[i-1]` started at
    # index -1 (the last cell) and shifted every cell off by one.
    for cell in cells:
        print(cell.text)

def parses(response):
    """Extract stock name and key/value details from a quote page.

    NOTE(review): this function mixes two incompatible APIs. The first line
    uses BeautifulSoup's ``find_all``, but the following lines call
    ``.css(...).extract()``, which is the Scrapy selector API — a bs4 Tag
    has no ``css``/``extract`` methods, so those lines raise AttributeError
    when reached. This looks like a half-ported Scrapy spider callback;
    confirm which response type is intended before relying on it. It is
    never called from this file.
    """
    infoDict = {}  # intended accumulator for parsed key/value pairs (currently never filled)
    stockInfo = response.find_all(class_="txtl")[0]  # BeautifulSoup-style lookup
    # stockInfo = response.css('.txtl')
    name = stockInfo.css('.qphox').extract()[0]  # Scrapy-selector style — incompatible with a bs4 Tag
    keyList = stockInfo.css('dt').extract()
    valueList = stockInfo.css('dd').extract()
    print(stockInfo)
    # for i in range(len(keyList)):
    #     key = re.findall(r'>.*</dt>', keyList[i])[0][1:-5]
    #     try:
    #         val = re.findall(r'\d+\.?.*</dd>', valueList[i])[0][0:-5]
    #     except:
    #         val = '--'
    #     infoDict[key] = val
    # infoDict.update(
    #     {'股票名称': re.findall('\s.*\(', name)[0].split()[0] + re.findall('\>.*<', name)[0][1:-1]})
    # yield infoDict

if __name__ == '__main__':
    # Always shut down the ChromeDriver/browser process, even when scraping
    # fails — the original leaked the browser on any exception and on
    # normal exit.
    try:
        res()
    finally:
        browser.quit()