import requests
from bs4 import BeautifulSoup
from selenium.webdriver.support.ui import Select
from selenium import webdriver
import csv

def get_bond_data():
    """Fetch 2023 Treasury Bond rows from ChinaMoney's English bond-info page.

    The listing table is rendered by JavaScript after the Bond Type /
    Issue Year drop-downs are set, so a plain ``requests.get`` response
    never contains the data; a Selenium-driven browser is required.

    Returns:
        list[list[str]]: rows of [ISIN, Bond Code, Issuer, Bond Type,
        Issue Date, Latest Rating]; an empty list when nothing was found.
    """
    # Selenium 4 removed the find_element_by_* helpers; use By locators.
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.ui import WebDriverWait

    driver = webdriver.Edge()
    try:
        driver.get("https://iftp.chinamoney.com.cn/english/bdInfo/")
        wait = WebDriverWait(driver, 15)

        # The drop-downs are ordinary <select> elements on the main page,
        # not iframes — drive them with Select, not switch_to.frame().
        bond_type = Select(wait.until(
            EC.presence_of_element_located((By.ID, "Bond_Type_select"))))
        bond_type.select_by_value("100001")  # 100001 == "Treasury Bond"
        # NOTE(review): assumes the year options use the literal year as
        # their value attribute — confirm against the live page markup.
        Select(driver.find_element(By.ID, "Issue_Year_select")).select_by_value("2023")

        # Let the AJAX refresh repaint the results table before scraping.
        wait.until(EC.presence_of_element_located((By.TAG_NAME, "table")))
        # Parse the browser's DOM, not the earlier requests response:
        # only the rendered page actually contains the bond rows.
        soup = BeautifulSoup(driver.page_source, "html.parser")
    finally:
        driver.quit()  # the original leaked the browser process on every call

    # Class name per the original guess ("san-sheet-alternating"); fall
    # back to the first table in case the site uses a different class.
    table = soup.find("table", {"class": "san-sheet-alternating"}) or soup.find("table")
    if table is None:
        print("Table not found on the page.")
        return []

    rows = table.find_all("tr")
    if not rows:
        print("No rows found in the table.")
        return []

    bond_data = []
    for row in rows[1:]:  # skip the header row
        cells = row.find_all("td")
        if len(cells) != 6:  # ignore spacer / nested-header rows
            continue
        bond_data.append([cell.text.strip() for cell in cells])

    return bond_data

def save_to_csv(data, filename):
    """Write bond rows to *filename* as CSV, prefixed with a header row.

    Args:
        data: iterable of 6-item rows ([ISIN, Bond Code, Issuer,
            Bond Type, Issue Date, Latest Rating]).
        filename: destination path; overwritten if it already exists.
    """
    # Explicit utf-8 keeps non-ASCII issuer names intact; the platform
    # default codec (e.g. cp936 on Chinese-locale Windows) can raise
    # UnicodeEncodeError or mangle them.
    with open(filename, "w", newline="", encoding="utf-8") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["ISIN", "Bond Code", "Issuer", "Bond Type", "Issue Date", "Latest Rating"])
        writer.writerows(data)

if __name__ == "__main__":
    # Scrape first; only touch the filesystem when something came back.
    rows = get_bond_data()
    if not rows:
        print("No data to save.")
    else:
        save_to_csv(rows, "treasury_bonds_2023.csv")
        print("Data saved successfully.")
        
