"""
编程题1
"""
import requests
import pandas as pd


class ChinaMoneyBondScraper:
    """Scraper for bond data on the chinamoney.com.cn English site.

    Talks to two AJAX JSON endpoints used by the site's bond-info page:
    one that returns the available search conditions (bond types, years,
    ...) and one that returns the bond list for a given filter.
    """

    def __init__(self):
        # Site root and the AJAX endpoint prefix used by the English pages.
        self.base_url = 'https://www.chinamoney.com.cn'
        self.api_base = f'{self.base_url}/ags/ms/'

        # Endpoint returning the search drop-down options.
        self.condition_api = f"{self.api_base}cm-u-bond-md/BondBaseInfoSearchConditionEN"
        # Endpoint returning the paged bond list.
        self.list_api = f"{self.api_base}cm-u-bond-md/BondMarketInfoListEN"

        # Browser-like headers; Origin/Referer mimic the real bond-info page.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Origin': 'https://www.chinamoney.com.cn',
            'Referer': 'https://www.chinamoney.com.cn/english/bdInfo/'
        }

    def get_search_options(self):
        """Fetch the search-condition options.

        Returns:
            The endpoint's ``data`` payload (a dict, per the site's JSON
            envelope), or ``None`` on network or JSON-decoding failure.
        """
        try:
            response = requests.post(self.condition_api, headers=self.headers, timeout=15)
            response.raise_for_status()
            data = response.json()
        # Narrow catch: only network errors / bad HTTP status / invalid JSON
        # (json errors subclass ValueError). Programming bugs still surface.
        except (requests.RequestException, ValueError):
            return None
        return data.get('data')

    def find_treasury_bond_type(self, conditions):
        """Return the bond-type code for treasury bonds, if present.

        Args:
            conditions: The dict returned by :meth:`get_search_options`;
                must contain a ``bondType`` list of option dicts.

        Returns:
            The matching ``bondTypeCode`` string, or ``None`` when
            *conditions* is falsy, lacks ``bondType``, or has no match.
        """
        if not conditions or 'bondType' not in conditions:
            return None

        for bond_type in conditions['bondType']:
            # Match either the English or Chinese label; lower() makes the
            # English comparison case-insensitive (Chinese is unaffected).
            display_name = bond_type.get('bondDisplayType', '').lower()
            if 'treasury' in display_name or '国债' in display_name:
                return bond_type.get('bondTypeCode')

        return None

    def fetch_bond_records(self, bond_type_code, issue_year='2023',
                           page_no='1', page_size='100'):
        """Fetch one page of bond records from the list endpoint.

        Args:
            bond_type_code: Server-side bond-type filter; empty string /
                ``None`` means "all types".
            issue_year: Issue year to filter on (string, as the API expects).
            page_no: 1-based page number (string). Defaults to the first page.
            page_size: Records per page (string). Defaults to 100.

        Returns:
            The ``resultList`` list from the response, or ``[]`` on any
            network or JSON-decoding failure.
        """
        params = {
            'pageNo': page_no,
            'pageSize': page_size,
            'bondType': bond_type_code or '',
            'issueYear': issue_year,
            'isin': '',
            'bondCode': '',
            'issueEnty': '',
            'couponType': '',
            'rtngShrt': '',
            'bondSpclPrjctVrty': ''
        }

        try:
            response = requests.post(self.list_api, headers=self.headers, data=params, timeout=15)
            response.raise_for_status()
            result = response.json()
        except (requests.RequestException, ValueError):
            return []
        return result.get('data', {}).get('resultList', [])

    def process_records(self, raw_data):
        """Map raw API bond dicts to flat export-ready rows.

        Args:
            raw_data: List of bond dicts as returned by the list endpoint.

        Returns:
            A list of dicts keyed by the CSV column names; ``[]`` for
            falsy input. Missing source fields become empty strings.
        """
        if not raw_data:
            return []

        processed = []
        for bond in raw_data:
            record = {
                'ISIN': bond.get('isin', ''),
                'Bond Code': bond.get('bondCode', ''),
                'Issuer': bond.get('entyFullName', ''),
                'Bond Type': bond.get('bondType', ''),
                'Issue Date': bond.get('issueStartDate', ''),
                'Latest Rating': bond.get('debtRtng', '')
            }
            processed.append(record)

        return processed

    def filter_treasury_bonds(self, records):
        """Keep only rows whose 'Bond Type' names a treasury bond.

        Client-side fallback for when no server-side type filter was
        available. Matches the English label case-insensitively or the
        Chinese label '国债'.
        """
        return [
            bond for bond in records
            if 'treasury' in bond.get('Bond Type', '').lower()
            or '国债' in bond.get('Bond Type', '')
        ]

    def save_to_csv(self, data, filename='bond_data_2023.csv'):
        """Write processed rows to a CSV file.

        Args:
            data: List of row dicts (as from :meth:`process_records`).
            filename: Output path.

        Returns:
            ``True`` on success; ``False`` for empty input or on a
            file-system / DataFrame construction error.
        """
        if not data:
            return False

        try:
            df = pd.DataFrame(data)
            # utf-8-sig adds a BOM so Excel renders Chinese text correctly.
            df.to_csv(filename, index=False, encoding='utf-8-sig')
        except (OSError, ValueError):
            return False
        return True


def main():
    """Run the scraping pipeline: fetch, filter, and export 2023 bonds."""
    scraper = ChinaMoneyBondScraper()

    # Without the search-condition payload we cannot proceed.
    conditions = scraper.get_search_options()
    if not conditions:
        return

    # Try to resolve the server-side treasury-bond type code.
    type_code = scraper.find_treasury_bond_type(conditions)

    records = scraper.fetch_bond_records(type_code, issue_year='2023')
    if not records:
        return

    rows = scraper.process_records(records)

    # No server-side type filter was applied, so filter client-side.
    if not type_code:
        rows = scraper.filter_treasury_bonds(rows)

    if rows:
        scraper.save_to_csv(rows, 'bond_data_2023.csv')


if __name__ == "__main__":
    main()