from util import *
import re
import json
from bs4 import BeautifulSoup

def get_shang_etf():
  """Fetch the ETF suggestion list published by the Shanghai Stock Exchange.

  Downloads the SSE's fund-suggestion JS file and parses every
  ``_t.push({val:"...",val2:"...",val3:"..."})`` entry embedded in it.

  Returns:
      list[dict]: one dict per fund with keys "etfCode", "etfName" and
      "val3"; entries whose name field is the placeholder "-" are dropped.
  """
  # Target JS file that embeds the fund list as _t.push({...}) calls.
  url = "https://www.sse.com.cn/js/common/ssesuggestfunddata.js"
  resp = get_with_headers_requests(url, {}, False)

  # Capture all three quoted fields in a single pass instead of matching the
  # whole dict fragment and re-scanning it with three extra re.search calls.
  pattern = r'_t\.push\(\{val:"([^"]*)",val2:"([^"]*)",val3:"([^"]*)"\}'

  result = []
  for code, name, val3 in re.findall(pattern, resp):
      # "-" marks a placeholder record with no usable name; skip it.
      if name == "-":
          continue
      result.append({
          "etfCode": code,
          "etfName": name,
          "val3": val3,
      })
  return result
  

def get_shen_etf():
  """Fetch the full ETF subscription/redemption list from the Shenzhen Stock Exchange.

  Pages through the SZSE JSON report API (catalog "sgshqd") and concatenates
  the records produced by getShenETFData for every page.

  Returns:
      list[dict]: the parsed rows from all pages.
  """
  base_url = "https://investor.szse.cn/api/report/ShowReport/data?SHOWTYPE=JSON&CATALOGID=sgshqd&loading=first&PAGENO={}"

  # The first request serves two purposes: it tells us the total page count
  # AND it already contains page 1's rows — so we must not fetch page 1 again
  # inside the loop (the original code issued that request twice).
  resp = get_with_headers_requests(base_url.format(1), {}, False)
  page_data = json.loads(resp)[0]
  total_pages = page_data["metadata"]["pagecount"]
  print(f"总页数：{total_pages}")

  result = []
  result.extend(getShenETFData(page_data))
  print(f"结束获取第 1/{total_pages} 页数据... result : {len(result)}")

  # Remaining pages start at 2 because page 1 was consumed above.
  for page in range(2, total_pages + 1):
    print(f"正在获取第 {page}/{total_pages} 页数据...")
    resp = get_with_headers_requests(base_url.format(page), {}, False)
    page_data = json.loads(resp)[0]
    result.extend(getShenETFData(page_data))
    print(f"结束获取第 {page}/{total_pages} 页数据... result : {len(result)}")

  return result

# Format one page of the ETF list extracted from the SZSE report API.
def getShenETFData(data):
  """Extract ETF records from the 'jjdm' HTML cell of each row in one page.

  Args:
      data: one page object from the SZSE ShowReport API; its "data" key
            holds the row list, and each row's 'jjdm' field is an HTML
            fragment containing an <a> tag.

  Returns:
      list[dict]: one dict per valid row with keys "etfcode" (the link text,
      e.g. "ETF159003申购赎回清单(2025-10-17)") and "encode-open" (the link's
      encode-open attribute). Rows lacking an <a> tag or either field are
      skipped instead of raising.
  """
  result = []
  for item in data["data"]:
    # Each 'jjdm' cell is an HTML fragment; parse it leniently.
    soup = BeautifulSoup(item['jjdm'], 'html.parser')

    # Take the first <a> tag. Guard against malformed rows: the original
    # code crashed with AttributeError when find() returned None.
    first_a_tag = soup.find('a')
    if first_a_tag is None:
      continue

    text_content = first_a_tag.get_text(strip=True)
    encode_open_value = first_a_tag.get('encode-open')

    # Only keep complete records — this enforces what the original comment
    # promised ("只添加数据完整的记录") but never actually checked.
    if not text_content or encode_open_value is None:
      continue

    result.append({
      "etfcode": text_content,
      "encode-open": encode_open_value
    })

  return result

# Guard the script entry point so importing this module does not trigger a
# full network scrape as a side effect.
if __name__ == "__main__":
  print(get_shen_etf())