import re
import requests
from bs4 import BeautifulSoup
import os
from selenium import webdriver
import queue
import time
from selenium.webdriver.chrome.options import Options

'''
Table-tag extraction:
    1. find the <table> tags in the page
    2. filter them by their tag attributes (CSS class)

'''


def get(url):
    """Fetch the fully rendered page source of *url* using headless Chrome.

    Returns:
        The page HTML as a string, or '' if the driver fails to start
        or the navigation raises.
    """
    ch_options = Options()
    ch_options.add_argument('--headless')
    driver = None
    try:
        # NOTE(review): hard-coded local chromedriver path — adjust per machine.
        # Selenium 4 prefers Service(executable_path=...); keeping the call
        # style already used by this file. The options object was previously
        # built but never passed in, so --headless was silently ignored.
        driver = webdriver.Chrome(executable_path="D:\\pythonglr\\python\\driver\\chromedriver",
                                  chrome_options=ch_options)
        driver.get(url)
        return driver.page_source
    except Exception as e:
        # Original printed the meaningless literal "1 3"; show the real error.
        print('get() failed:', e)
        return ''
    finally:
        # Original had `driver.close` without parentheses — a no-op that
        # leaked the browser process. quit() tears down the whole session.
        if driver is not None:
            driver.quit()


url = "https://finance.ifeng.com/app/hq/stock/sh600621/index.shtml"
url_dict = {}
page_source = get(url)
soup = BeautifulSoup(page_source, 'html.parser')

# Locate the two tables of interest by their CSS class:
#   tabNum -> numeric quote data ("key：value" cells)
#   tabPic -> company-name header
# Use None (not '') as the "not found" sentinel so a missing table is
# detectable instead of blowing up later with ''.find_all(...).
info_table = None
name_table = None
for table in soup.find_all('table'):
    classes = table.attrs.get('class')
    if classes == ['tabNum']:
        info_table = table
    elif classes == ['tabPic']:
        name_table = table
        print(111)

stock_info = {}
if info_table is not None:
    for tr in info_table.find_all('tr'):
        for td in tr.find_all('td'):
            # Cells look like "键：值" (full-width colon). partition() splits
            # once and lets us skip malformed cells instead of raising
            # IndexError the way split('：')[1] did.
            key, sep, value = td.text.partition('：')
            if not sep:
                continue
            # Strip ASCII and full-width (U+3000) spaces from the key.
            key = key.replace(' ', '').replace('\u3000', '')
            stock_info[key] = value
print(1, stock_info)

# First <td> of the tabPic table holds the company name; fall back to ''
# when the table (or the page itself) was not retrieved.
stock_name = '' if name_table is None else name_table.find('td').text.strip()
print(2, stock_name)