#
# '''
# 陈鑫
# 爬取的板块：
#     class money_flow_daily(scrapy.Item):
#         net_amount= scrapy.Field() # 净额
#         net_percentage= scrapy.Field() # 净占比
#         main_net_inflow= scrapy.Field() # 今日主力净流入
#         large_net_inflow= scrapy.Field() # 今日超大单净流入
#         big_net_inflow= scrapy.Field() # 今日大单净流入
#         medium_net_inflow= scrapy.Field() # 今日中单净流入
#         small_net_inflow= scrapy.Field() # 今日小单净流入
# '''
#
# import re
# import json
# import requests
#
# #这个用户代理字符串通常用于模拟浏览器的行为，以便在发送HTTP请求时能够伪装成特定的浏览器或操作系统
# header={
#     'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0'
# }
#
# #抓的包
# url=('https://push2his.eastmoney.com/api/qt/stock/fflow/daykline/get?'
#      'cb=jQuery112307412672234742328_1710823071227&lmt=0&klt=101'
#      '&fields1=f1%2Cf2%2Cf3%2Cf7&fields2=f51%2Cf52%2Cf53%2Cf54%2Cf55%2Cf56%2Cf57%2Cf58%2Cf59%2Cf60%2Cf61%2Cf62%2Cf63%2Cf64%2Cf65'
#      '&ut=b2884a393a59ad64002292a3e90d46a5'
#      '&secid=1.688599'
#      '&_=1710823071228')
#
# #解析页面
# rqst=requests.get(url=url,headers=header)
# #使用正则表达式获取内容
# json_data_match = re.search(r'\(({.*})\)', rqst.text)
#
# json_data = json.loads(json_data_match.group(1)) if json_data_match else None
#
# date_name = json_data['data']['name'] if json_data else None
# print(date_name) #打印
#
# klines_value = json_data['data']['klines'] if json_data else None
#
# for value in klines_value:
#        # 对每条数据根据,分割返回列表
#        data = value.split(',')
#        print(data[0]+" 主力净流入,净额:"+data[1]+" 小单净流入,净额:"+data[2]+" 中单净流入,净额:"+data[3]+" 大单净流入,净额:"+data[4]
#              +" 超大单净流入,净额:"+data[5]+" 主力净流入,净占比:"+data[6]+" 小单净流入,净占比:"+data[7]+" 中单净流入,净占比:"+data[8]
#              +" 大单净流入,净占比:"+data[9]+" 超大单净流入,净占比:"+data[10]+" 收盘价:"+data[11]+" 涨跌幅:"+data[12])


#使用selenium获取


from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Scrape the Eastmoney market-wide money-flow table with a real browser,
# because the table is rendered client-side by JavaScript.
driver = webdriver.Chrome()
url = "https://data.eastmoney.com/zjlx/dpzjlx.html"

try:
    # Open the page.
    driver.get(url)

    # The table rows only exist after the page's JS has run, so wait
    # (up to 10 s) for at least one data row before querying; an
    # immediate find_elements() here can return an empty list.
    row_xpath = '//*[@id="table_ls"]/table/tbody/tr'
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.XPATH, row_xpath))
    )

    # Collect every data row of the money-flow table.
    rows = driver.find_elements(by=By.XPATH, value=row_xpath)

    # Each row's visible text is space-separated. Expected layout
    # (13 fields): 0 date, 1-5 net-inflow amounts, 6-10 net-inflow
    # percentages, 11 close price, 12 change percent.
    for row in rows:
        arr = row.text.split(" ")
        if len(arr) < 13:
            # Skip malformed / partially-rendered rows instead of
            # crashing with IndexError on the blind indexing below.
            continue
        print(arr[0] + " 主力净流入,净额:" + arr[1] + " 小单净流入,净额:" + arr[2] + " 中单净流入,净额:"
              + arr[3] + " 大单净流入,净额:" + arr[4] + " 超大单净流入,净额:" + arr[5]
              + " 主力净流入,净占比:" + arr[6] + " 小单净流入,净占比:" + arr[7] + " 中单净流入,净占比:" + arr[8]
              + " 大单净流入,净占比:" + arr[9] + " 超大单净流入,净占比:" + arr[10] + " 收盘价:" + arr[11]
              + " 涨跌幅:" + arr[12])
        print()
finally:
    # Always release the browser process, even if scraping failed.
    driver.quit()