from crawler.base import BaseCrawler
import requests
from .jsl_login import get_cookie_str  # helper that retrieves the login cookie
from storage.mysql_storage import DbHandler  # database handler used to obtain a DB session
from crawler.jsl.jsl_data_map import alive_bond_map
import pandas as pd
from typing import Optional

db_instance = DbHandler()
db_instance.create_session()

class Spider_jsl_alive(BaseCrawler):
    """Crawler for the JSL (jisilu) active convertible-bond listing.

    ``fetch`` pulls JSON from ``self.url`` using a login cookie obtained
    from the database; ``parse`` normalizes the payload into a pandas
    DataFrame with project-standard column names.
    """

    # Raw-payload columns that are UI/metadata noise and are never persisted.
    _DROP_COLUMNS = [
        't_flag', 'owned', 'hold', 'noted', 'last_time', 'qstatus', 'sqflag',
        'ref_yield_info', 'price_tips', 'option_tip', 'pct_chg',
        'adjust_orders', 'redeem_orders', 'custom_formula1001',
    ]

    def __init__(self, name, url, data_process_type, params):
        super().__init__(name, url, data_process_type, params)
        # Reuse the module-level handler so all spiders share one DB session.
        self.db_instance = db_instance

    def fetch(self) -> Optional[dict]:
        """Fetch the raw JSON payload from the JSL endpoint.

        Returns:
            The decoded JSON payload (dict), or ``None`` when no valid
            cookie is available, the request errors out, or the server
            responds with a non-200 status.
        """
        cookie_str = get_cookie_str(self.db_instance)  # login cookie from DB
        if not cookie_str:
            print("Failed to get valid cookie.")
            return None
        headers = {
            "Cookie": cookie_str,
            "Init": "1",
            "Content-Type": "application/json; charset=utf-8",
        }
        try:
            # timeout: without it a stalled server hangs the crawler forever.
            response = requests.get(
                self.url, params=self.params, headers=headers, timeout=30
            )
        except requests.RequestException as exc:
            # Follow this class's convention of returning None on failure
            # instead of letting a network error propagate.
            print(f"Failed to fetch data. Request error: {exc}")
            return None
        if response.status_code == 200:
            return response.json()
        print(f"Failed to fetch data. Status code: {response.status_code}")
        return None

    def parse(self, raw_data):
        """Parse the raw JSON payload into a cleaned DataFrame.

        Drops UI-only columns and renames the remainder via
        ``alive_bond_map``.

        Args:
            raw_data: decoded JSON payload whose ``"data"`` key holds the
                list of bond records.

        Returns:
            A pandas DataFrame with noise columns removed and columns
            renamed.
        """
        pd_data = pd.DataFrame(raw_data["data"])
        # errors="ignore": the endpoint's column set changes occasionally;
        # a missing column must not raise KeyError and abort the run.
        pd_data.drop(columns=self._DROP_COLUMNS, errors="ignore", inplace=True)
        pd_data.rename(columns=alive_bond_map, inplace=True)
        return pd_data