"""参考链接: https://github.com/AI4Finance-LLC/FinRL"""

from __future__ import annotations

import datetime
import time
from datetime import date
from datetime import timedelta
from sqlite3 import Timestamp
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union

import numpy as np
import pandas as pd
import pandas_market_calendars as tc
import pytz
import yfinance as yf
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from stockstats import StockDataFrame as Sdf
from webdriver_manager.chrome import ChromeDriverManager



### Added by aymeric75, used by the scrap_data function

class YahooFinanceProcessor:
    """提供从雅虎财经 API 获取每日股票数据的方法"""

    def __init__(self):
        # Stateless by construction: start/end/time_interval are attached to
        # the instance later, by download_data.
        pass

    """
    Parameters
    ----------
        start_date : str
            start date of the data
        end_date : str
            end date of the data
        ticker_list : list
            list of ticker symbols
    Example
    -------
    input:
    ticker_list = config_tickers.DOW_30_TICKER
    start_date = '2009-01-01'
    end_date = '2021-10-31'
    time_interval == "1D"

    output:
        date	    tic	    open	    high	    low	        close	    volume
    0	2009-01-02	AAPL	3.067143	3.251429	3.041429	2.767330	746015200.0
    1	2009-01-02	AMGN	58.590000	59.080002	57.750000	44.523766	6547900.0
    2	2009-01-02	AXP	    18.570000	19.520000	18.400000	15.477426	10955700.0
    3	2009-01-02	BA	    42.799999	45.560001	42.779999	33.941093	7010200.0
    ...
    """

    ######## Added by aymeric75 ###################

    def date_to_unix(self, date_str) -> int:
        """将 yyyy-mm-dd 格式的日期字符串转换为 Unix 时间戳。"""
        dt = datetime.datetime.strptime(date_str, "%Y-%m-%d")
        return int(dt.timestamp())

    def fetch_stock_data(self, stock_name, period1, period2) -> pd.DataFrame:
        """Scrape the Yahoo Finance history table for one ticker via Selenium.

        Parameters
        ----------
        stock_name : str
            Ticker symbol, e.g. "AAPL".
        period1, period2 : int
            Unix timestamps bounding the requested history.

        Returns
        -------
        pd.DataFrame
            Columns: date, open, high, low, close, adjcp, volume, tic, day
            (rows in chronological order, starting at period1).

        Raises
        ------
        Exception
            If no <table> element is found after the page loads.
        """
        # Base URL of the historical-data page
        url = f"https://finance.yahoo.com/quote/{stock_name}/history/?period1={period1}&period2={period2}&filter=history"

        # Selenium WebDriver setup
        options = Options()
        options.add_argument("--headless")  # headless mode for performance
        options.add_argument("--disable-gpu")  # disable GPU for compatibility
        driver = webdriver.Chrome(
            service=Service(ChromeDriverManager().install()), options=options
        )

        # Always release the browser process (the original code leaked it).
        try:
            # Navigate to the URL
            driver.get(url)
            driver.maximize_window()
            time.sleep(5)  # wait for redirects and page load

            # Dismiss the consent popup if one appears
            try:
                RejectAll = driver.find_element(
                    By.XPATH, '//button[@class="btn secondary reject-all"]'
                )
                action = ActionChains(driver)
                action.click(on_element=RejectAll)
                action.perform()
                time.sleep(5)

            except Exception as e:
                print("未找到弹窗或已处理弹窗:", e)

            page_source = driver.page_source
        finally:
            driver.quit()

        # Parse the page for the history table
        soup = BeautifulSoup(page_source, "html.parser")
        table = soup.find("table")
        if not table:
            raise Exception("处理重定向和弹窗后未找到表格。")

        # Fixed column names. (The previous version also read the <th> cells
        # and rewrote indices 4/5 before unconditionally overwriting the whole
        # list — dead code that raised IndexError on short header rows; removed.)
        headers = ["date", "open", "high", "low", "close", "adjcp", "volume"]

        # Extract the data rows
        rows = []
        for tr in table.find_all("tr")[1:]:  # skip the header row
            cells = [td.text.strip() for td in tr.find_all("td")]
            if len(cells) == len(headers):  # keep only rows with the right column count
                rows.append(cells)

        # Build the DataFrame
        df = pd.DataFrame(rows, columns=headers)

        # Convert columns to proper dtypes; cells that are not numeric
        # (e.g. "-" or dividend markers) are left unchanged.
        def safe_convert(value, dtype):
            try:
                return dtype(value.replace(",", ""))
            except ValueError:
                return value

        df["open"] = df["open"].apply(lambda x: safe_convert(x, float))
        df["high"] = df["high"].apply(lambda x: safe_convert(x, float))
        df["low"] = df["low"].apply(lambda x: safe_convert(x, float))
        df["close"] = df["close"].apply(lambda x: safe_convert(x, float))
        df["adjcp"] = df["adjcp"].apply(lambda x: safe_convert(x, float))
        df["volume"] = df["volume"].apply(lambda x: safe_convert(x, int))

        # Add the ticker column
        df["tic"] = stock_name

        # Add a 'day' column: days elapsed since period1
        start_date = datetime.datetime.fromtimestamp(period1)
        df["date"] = pd.to_datetime(df["date"])
        df["day"] = (df["date"] - start_date).dt.days
        df = df[df["day"] >= 0]  # drop rows before the start date

        # Yahoo lists newest first; reverse into chronological order
        df = df.iloc[::-1].reset_index(drop=True)

        return df

    def scrap_data(self, stock_names, start_date, end_date) -> pd.DataFrame:
        """获取并合并多个股票代码的股票数据。"""
        period1 = self.date_to_unix(start_date)
        period2 = self.date_to_unix(end_date)

        all_dataframes = []
        total_stocks = len(stock_names)

        for i, stock_name in enumerate(stock_names):
            try:
                print(
                    f"正在处理 {stock_name} ({i + 1}/{total_stocks})... 已完成 {(i + 1) / total_stocks * 100:.2f}%。"
                )
                df = self.fetch_stock_data(stock_name, period1, period2)
                all_dataframes.append(df)
            except Exception as e:
                print(f"获取 {stock_name} 数据时出错: {e}")

        combined_df = pd.concat(all_dataframes, ignore_index=True)
        combined_df = combined_df.sort_values(by=["day", "tick"]).reset_index(drop=True)

        return combined_df

    ######## End of the section added by aymeric75 ###################

    def convert_interval(self, time_interval: str) -> str:
        # 将 FinRL '标准化' 的时间周期转换为雅虎格式: 1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, 1d, 5d, 1wk, 1mo, 3mo
        yahoo_intervals = [
            "1m",
            "2m",
            "5m",
            "15m",
            "30m",
            "60m",
            "90m",
            "1h",
            "1d",
            "5d",
            "1wk",
            "1mo",
            "3mo",
        ]
        if time_interval in yahoo_intervals:
            return time_interval
        if time_interval in [
            "1Min",
            "2Min",
            "5Min",
            "15Min",
            "30Min",
            "60Min",
            "90Min",
        ]:
            time_interval = time_interval.replace("Min", "m")
        elif time_interval in ["1H", "1D", "5D", "1h", "1d", "5d"]:
            time_interval = time_interval.lower()
        elif time_interval == "1W":
            time_interval = "1wk"
        elif time_interval in ["1M", "3M"]:
            time_interval = time_interval.replace("M", "mo")
        else:
            raise ValueError("时间间隔参数错误")

        return time_interval

    def download_data(
        self,
        ticker_list: list[str],
        start_date: str,
        end_date: str,
        time_interval: str,
        proxy: str | dict = None,
    ) -> pd.DataFrame:
        """Download OHLCV bars for each ticker from Yahoo Finance via yfinance.

        Daily data is fetched in one request per ticker (15 retries);
        intraday data is fetched day by day (5 retries per day). The
        start/end/interval are also stored on ``self`` for later use by
        ``clean_data``.

        :param ticker_list: ticker symbols to download
        :param start_date: start of the range, "yyyy-mm-dd"
        :param end_date: end of the range, "yyyy-mm-dd"
        :param time_interval: FinRL-style interval; converted to Yahoo notation
        :param proxy: passed through to ``yf.download``
        :return: DataFrame with columns timestamp/open/high/low/close/volume/tic
        """
        time_interval = self.convert_interval(time_interval)

        self.start = start_date
        self.end = end_date
        self.time_interval = time_interval

        # Download the data and accumulate it in a pandas DataFrame
        start_date = pd.Timestamp(start_date)
        end_date = pd.Timestamp(end_date)
        delta = timedelta(days=1)
        data_df = pd.DataFrame()
        for tic in ticker_list:
            current_tic_start_date = start_date

            if time_interval == "1d":  # daily data can be fetched for the whole period at once
                # Retry until the download succeeds, up to max_retries attempts
                retry_count = 0
                max_retries = 15
                while retry_count < max_retries:
                    try:
                        print(f"开始下载数据: {tic}")
                        temp_df = yf.download(
                            tic,
                            ignore_tz=True,
                            start=current_tic_start_date,
                            end=end_date,
                            interval=self.time_interval,
                            proxy=proxy,
                            auto_adjust=True
                        )
                        # Flatten yfinance's column index to its first level.
                        # NOTE(review): assumes the columns are tuples/MultiIndex;
                        # plain string columns would be cut to their first
                        # character — confirm against the installed yfinance.
                        temp_df.columns = [col[0] for col in temp_df.columns]
                        print(temp_df.columns)
                        # Check whether any data actually came back
                        if not temp_df.empty:
                            temp_df["tic"] = tic
                            data_df = pd.concat([data_df, temp_df])
                            break  # success: leave the retry loop
                        else:
                            print(f"警告: {tic} 没有返回数据，第 {retry_count + 1} 次重试")
                            retry_count += 1
                            time.sleep(5.5)  # wait 5.5 s before retrying
                    except Exception as e:
                        print(f"下载 {tic} 数据时出错: {e}，第 {retry_count + 1} 次重试")
                        retry_count += 1
                        time.sleep(5.5)  # wait 5.5 s before retrying

                # Retry budget exhausted: log the failure and move on to the next ticker
                if retry_count >= max_retries:
                    print(f"错误: {tic} 数据下载失败，已达到最大重试次数 {max_retries}")

            else:  # intraday data must be downloaded one day at a time
                while (
                    current_tic_start_date <= end_date
                ):  # per-day requests work around yfinance's limit of 7 calendar days of 1-minute data per request
                    # Retry until the download succeeds, up to max_retries attempts
                    retry_count = 0
                    max_retries = 5
                    while retry_count < max_retries:
                        try:
                            temp_df = yf.download(
                                tic,
                                ignore_tz=True ,
                                start=current_tic_start_date,
                                end=current_tic_start_date + delta,
                                interval=self.time_interval,
                                proxy=proxy,
                            )
                            # Check whether any data actually came back
                            if not temp_df.empty:
                                if temp_df.columns.nlevels != 1:
                                    temp_df.columns = temp_df.columns.droplevel(1)

                                temp_df["tic"] = tic
                                data_df = pd.concat([data_df, temp_df])
                                break  # success: leave the retry loop
                            else:
                                print(f"警告: {tic} 在 {current_tic_start_date} 没有返回数据，第 {retry_count + 1} 次重试")
                                retry_count += 1
                                time.sleep(0.5)  # wait 0.5 s before retrying
                        except Exception as e:
                            print(f"下载 {tic} 在 {current_tic_start_date} 的数据时出错: {e}，第 {retry_count + 1} 次重试")
                            retry_count += 1
                            time.sleep(0.5)  # wait 0.5 s before retrying

                    # Retry budget exhausted: log the failure and move on to the next date
                    if retry_count >= max_retries:
                        print(f"错误: {tic} 在 {current_tic_start_date} 的数据下载失败，已达到最大重试次数 {max_retries}")

                    current_tic_start_date += delta
                    time.sleep(0.5)  # pause 0.5 s between requests to avoid hammering the API

        #data_df.columns = [col[0] for col in data_df.columns]
        # Drop the "Adj Close" column when present
        if "Adj Close" in data_df.columns:
            data_df = data_df.reset_index().drop(columns=["Adj Close"])
        else:
            data_df = data_df.reset_index()
        data_df = data_df[["Date", "Open", "High", "Low", "Close", "Volume", "tic"]]
        # Match the column names used by processor_alpaca.py as closely as possible
        data_df.columns = [
            "timestamp",
            "open",
            "high",
            "low",
            "close",
            "volume",
            "tic",
        ]

        return data_df

    def clean_data(self, df: pd.DataFrame) -> pd.DataFrame:
        """Align downloaded data onto a full trading calendar and fill gaps.

        For each ticker the data is re-indexed onto the complete set of
        trading timestamps (one per trading day, or 390 minutes per trading
        day for 1-minute data). Missing rows are filled with the previous
        close (volume 0); a series that starts with NaNs is seeded from the
        first valid close, or with 0.0 if every price is NaN.

        :param df: DataFrame with columns timestamp/open/high/low/close/volume/tic
        :return: cleaned DataFrame with the same columns
        :raises ValueError: for unsupported intervals, or if a NaN row has no
            valid previous close to copy from
        """
        tic_list = np.unique(df.tic.values)
        NY = "America/New_York"

        trading_days = self.get_trading_days(start=self.start, end=self.end)
        # Build the complete timestamp index
        if self.time_interval == "1d":
            times = trading_days
        elif self.time_interval == "1m":
            times = []
            for day in trading_days:
                #                NY = "America/New_York"
                current_time = pd.Timestamp(day + " 09:30:00").tz_localize(NY)
                for i in range(390):  # a trading day has 390 minutes
                    times.append(current_time)
                    current_time += pd.Timedelta(minutes=1)
        else:
            raise ValueError(
                "给定的时间间隔下，雅虎财经数据清洗功能不支持。"
            )

        # Create a new DataFrame carrying the complete timestamp sequence
        new_df = pd.DataFrame()
        for tic in tic_list:
            tmp_df = pd.DataFrame(
                columns=["open", "high", "low", "close", "volume"], index=times
            )
            tic_df = df[
                df.tic == tic
            ]  # rows of the downloaded data belonging to this ticker
            for i in range(tic_df.shape[0]):  # fill the empty frame from the raw data
                tmp_timestamp = tic_df.iloc[i]["timestamp"]
                if tmp_timestamp.tzinfo is None:
                    tmp_timestamp = tmp_timestamp.tz_localize(NY)
                else:
                    tmp_timestamp = tmp_timestamp.tz_convert(NY)
                # NOTE(review): the .loc key is the DATE string only; for
                # 1-minute data this does not address a single minute of the
                # tz-aware index — confirm intended behavior for "1m".
                tmp_df.loc[tmp_timestamp.strftime("%Y-%m-%d")] = tic_df.iloc[i][
                    ["open", "high", "low", "close", "volume"]
                ]
            # print("(9) tmp_df\n", tmp_df.to_string()) # dump the frame to check for missing rows

            # If the first close is NaN, seed row 0 from the first valid close
            # (volume set to 0)
            if str(tmp_df.iloc[0]["close"]) == "nan":
                print("起始日期数据为 NaN，使用第一个有效数据填充。")
                for i in range(tmp_df.shape[0]):
                    if str(tmp_df.iloc[i]["close"]) != "nan":
                        first_valid_close = tmp_df.iloc[i]["close"]
                        tmp_df.iloc[0] = [
                            first_valid_close,
                            first_valid_close,
                            first_valid_close,
                            first_valid_close,
                            0.0,
                        ]
                        break

            # If the first close is still NaN (then every price is NaN)
            if str(tmp_df.iloc[0]["close"]) == "nan":
                print(
                    "股票代码: ",
                    tic,
                    " 数据缺失。所有价格均为 NaN，使用 0 填充。",
                )
                tmp_df.iloc[0] = [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]

            # Fill remaining NaN rows with the previous close (volume 0)
            for i in range(tmp_df.shape[0]):
                if str(tmp_df.iloc[i]["close"]) == "nan":
                    previous_close = tmp_df.iloc[i - 1]["close"]
                    if str(previous_close) == "nan":
                        raise ValueError
                    tmp_df.iloc[i] = [
                        previous_close,
                        previous_close,
                        previous_close,
                        previous_close,
                        0.0,
                    ]
                    # print(tmp_df.iloc[i], " filled NaN row with previous close, volume 0. tic: ", tic)

            # Append this ticker's frame to the combined result
            tmp_df = tmp_df.astype(float)
            tmp_df["tic"] = tic
            new_df = pd.concat([new_df, tmp_df])

        #            print("ticker " + tic + " cleaning finished.")

        # Reset the index and rename the columns
        new_df = new_df.reset_index()
        new_df = new_df.rename(columns={"index": "timestamp"})

        #        print("all data cleaning finished!")

        return new_df

    def add_technical_indicator(
        self, data: pd.DataFrame, tech_indicator_list: list[str]
    ):
        """Attach technical-indicator columns computed by stockstats.

        :param data: (df) pandas DataFrame with tic/timestamp columns
        :param tech_indicator_list: indicator names understood by stockstats
        :return: (df) pandas DataFrame with one extra column per indicator
        """
        df = data.copy().sort_values(by=["tic", "timestamp"])
        stock = Sdf.retype(df.copy())
        tickers = stock.tic.unique()

        for indicator in tech_indicator_list:
            indicator_df = pd.DataFrame()
            for ticker in tickers:
                try:
                    frame = pd.DataFrame(stock[stock.tic == ticker][indicator])
                    frame["tic"] = ticker
                    frame["timestamp"] = df[df.tic == ticker]["timestamp"].to_list()
                    indicator_df = pd.concat([indicator_df, frame], ignore_index=True)
                except Exception as e:
                    print(e)
            df = df.merge(
                indicator_df[["tic", "timestamp", indicator]],
                on=["tic", "timestamp"],
                how="left",
            )
        return df.sort_values(by=["timestamp", "tic"])

    def add_vix(self, data: pd.DataFrame) -> pd.DataFrame:
        """
        从雅虎财经添加 VIX 数据
        :param data: (df) pandas 数据框
        :return: (df) pandas 数据框
        """
        vix_df = self.download_data(["VIXY"], self.start, self.end, self.time_interval)
        cleaned_vix = self.clean_data(vix_df)
        print("清洗后的 VIX 数据\n", cleaned_vix)
        vix = cleaned_vix[["timestamp", "close"]]
        print('清洗后的 VIX 数据[["timestamp", "close"]]\n', vix)
        vix = vix.rename(columns={"close": "VIXY"})
        print('重命名列后的 VIX 数据 ({"close": "VIXY"})\n', vix)

        df = data.copy()
        print("原始数据\n", df)
        df = df.merge(vix, on="timestamp")
        df = df.sort_values(["timestamp", "tic"]).reset_index(drop=True)
        return df

    def calculate_turbulence(
        self, data: pd.DataFrame, time_period: int = 252
    ) -> pd.DataFrame:
        """Compute a turbulence index per timestamp: a Mahalanobis-style
        distance of the current returns from their rolling history.

        :param data: DataFrame with timestamp/tic/close columns
        :param time_period: rolling lookback length (252 ≈ one trading year)
        :return: DataFrame with columns timestamp, turbulence (0 for the first
            `time_period` timestamps and for the first two positive readings)
        """
        # other market assets could be added here
        df = data.copy()
        df_price_pivot = df.pivot(index="timestamp", columns="tic", values="close")
        # turbulence is computed on returns, not raw prices
        df_price_pivot = df_price_pivot.pct_change()

        unique_date = df.timestamp.unique()
        # start only after a full lookback window of timestamps is available
        start = time_period
        turbulence_index = [0] * start
        # turbulence_index = [0]
        count = 0
        for i in range(start, len(unique_date)):
            current_price = df_price_pivot[df_price_pivot.index == unique_date[i]]
            # covariance over a one-window rolling history
            hist_price = df_price_pivot[
                (df_price_pivot.index < unique_date[i])
                & (df_price_pivot.index >= unique_date[i - time_period])
            ]
            # drop tickers that have more missing values than the "oldest" ticker
            filtered_hist_price = hist_price.iloc[
                hist_price.isna().sum().min() :
            ].dropna(axis=1)

            cov_temp = filtered_hist_price.cov()
            current_temp = current_price[[x for x in filtered_hist_price]] - np.mean(
                filtered_hist_price, axis=0
            )
            # (x - mu) * pinv(Sigma) * (x - mu)^T; pinv tolerates singular covariances
            temp = current_temp.values.dot(np.linalg.pinv(cov_temp)).dot(
                current_temp.values.T
            )
            if temp > 0:
                count += 1
                if count > 2:
                    turbulence_temp = temp[0][0]
                else:
                    # suppress large spurious values right after the calculation starts
                    turbulence_temp = 0
            else:
                turbulence_temp = 0
            turbulence_index.append(turbulence_temp)

        turbulence_index = pd.DataFrame(
            {"timestamp": df_price_pivot.index, "turbulence": turbulence_index}
        )
        return turbulence_index

    def add_turbulence(
        self, data: pd.DataFrame, time_period: int = 252
    ) -> pd.DataFrame:
        """
        从预先计算好的数据框中添加波动率指数
        :param data: (df) pandas 数据框
        :return: (df) pandas 数据框
        """
        df = data.copy()
        turbulence_index = self.calculate_turbulence(df, time_period=time_period)
        df = df.merge(turbulence_index, on="timestamp")
        df = df.sort_values(["timestamp", "tic"]).reset_index(drop=True)
        return df

    def df_to_array(
        self, df: pd.DataFrame, tech_indicator_list: list[str], if_vix: bool
    ) -> list[np.ndarray]:
        df = df.copy()
        unique_ticker = df.tic.unique()
        if_first_time = True
        for tic in unique_ticker:
            if if_first_time:
                price_array = df[df.tic == tic][["close"]].values
                tech_array = df[df.tic == tic][tech_indicator_list].values
                if if_vix:
                    turbulence_array = df[df.tic == tic]["VIXY"].values
                else:
                    turbulence_array = df[df.tic == tic]["turbulence"].values
                if_first_time = False
            else:
                price_array = np.hstack(
                    [price_array, df[df.tic == tic][["close"]].values]
                )
                tech_array = np.hstack(
                    [tech_array, df[df.tic == tic][tech_indicator_list].values]
                )
        #        print("成功转换为数组")
        return price_array, tech_array, turbulence_array

    def get_trading_days(self, start: str, end: str) -> list[str]:
        """Return the NYSE trading days in [start, end] as "yyyy-mm-dd" strings."""
        nyse = tc.get_calendar("NYSE")
        days = nyse.date_range_htf("1D", pd.Timestamp(start), pd.Timestamp(end))
        return [str(day)[:10] for day in days]

    # ****** NOTE: Yahoo Finance data may be real-time or delayed by 15 minutes or more, depending on the exchange ******
    def fetch_latest_data(
        self,
        ticker_list: list[str],
        time_interval: str,
        tech_indicator_list: list[str],
        limit: int = 100,
    ) -> pd.DataFrame:
        """Fetch the most recent bars, gap-fill them, and return the latest state.

        Downloads roughly ``limit`` minutes of recent data per ticker, fills
        missing minutes the same way as ``clean_data``, adds technical
        indicators, and returns the latest price row, indicator row, and a
        VIXY-based turbulence value.

        :param ticker_list: tickers to fetch
        :param time_interval: FinRL-style interval, converted to Yahoo notation
        :param tech_indicator_list: indicators to compute
        :param limit: number of most recent minutes to request
        :return: (latest_price, latest_tech, latest_turb) arrays
        """
        time_interval = self.convert_interval(time_interval)

        end_datetime = datetime.datetime.now()
        start_datetime = end_datetime - datetime.timedelta(
            minutes=limit + 1
        )  # fetch at most `limit` rows of the latest data

        data_df = pd.DataFrame()
        for tic in ticker_list:
            barset = yf.download(
                tic, start_datetime, end_datetime, interval=time_interval
            )  # emulate a row limit via the start/end datetimes
            barset["tic"] = tic
            data_df = pd.concat([data_df, barset])

        data_df = data_df.reset_index().drop(
            columns=["Adj Close"]
        )  # Alpaca data has no 'Adj Close' column

        data_df.columns = [  # convert to Alpaca's (lowercase) column names
            "timestamp",
            "open",
            "high",
            "low",
            "close",
            "volume",
            "tic",
        ]

        # Build the complete minute-by-minute timestamp range
        start_time = data_df.timestamp.min()
        end_time = data_df.timestamp.max()
        times = []
        current_time = start_time
        end = end_time + pd.Timedelta(minutes=1)
        while current_time != end:
            times.append(current_time)
            current_time += pd.Timedelta(minutes=1)

        df = data_df.copy()
        new_df = pd.DataFrame()
        for tic in ticker_list:
            tmp_df = pd.DataFrame(
                columns=["open", "high", "low", "close", "volume"], index=times
            )
            tic_df = df[df.tic == tic]
            for i in range(tic_df.shape[0]):
                tmp_df.loc[tic_df.iloc[i]["timestamp"]] = tic_df.iloc[i][
                    ["open", "high", "low", "close", "volume"]
                ]

                # NOTE(review): this first-row backfill sits INSIDE the
                # row-copy loop (it re-runs per row and reuses loop variable
                # `i`); in clean_data the equivalent block runs once AFTER
                # the loop — confirm whether this indentation is intentional.
                if str(tmp_df.iloc[0]["close"]) == "nan":
                    for i in range(tmp_df.shape[0]):
                        if str(tmp_df.iloc[i]["close"]) != "nan":
                            first_valid_close = tmp_df.iloc[i]["close"]
                            tmp_df.iloc[0] = [
                                first_valid_close,
                                first_valid_close,
                                first_valid_close,
                                first_valid_close,
                                0.0,
                            ]
                            break
                if str(tmp_df.iloc[0]["close"]) == "nan":
                    print(
                        "股票代码: ",
                        tic,
                        " 数据缺失。所有价格均为 NaN，使用 0 填充。",
                    )
                    tmp_df.iloc[0] = [
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                        0.0,
                    ]

            # Fill remaining NaN rows with the previous close (volume 0)
            for i in range(tmp_df.shape[0]):
                if str(tmp_df.iloc[i]["close"]) == "nan":
                    previous_close = tmp_df.iloc[i - 1]["close"]
                    if str(previous_close) == "nan":
                        previous_close = 0.0
                    tmp_df.iloc[i] = [
                        previous_close,
                        previous_close,
                        previous_close,
                        previous_close,
                        0.0,
                    ]
            tmp_df = tmp_df.astype(float)
            tmp_df["tic"] = tic
            new_df = pd.concat([new_df, tmp_df])

        new_df = new_df.reset_index()
        new_df = new_df.rename(columns={"index": "timestamp"})

        df = self.add_technical_indicator(new_df, tech_indicator_list)
        df["VIXY"] = 0

        price_array, tech_array, turbulence_array = self.df_to_array(
            df, tech_indicator_list, if_vix=True
        )
        latest_price = price_array[-1]
        latest_tech = tech_array[-1]
        start_datetime = end_datetime - datetime.timedelta(minutes=1)
        # NOTE(review): `limit` is not a documented yf.download keyword —
        # verify this call against the installed yfinance version.
        turb_df = yf.download("VIXY", start_datetime, limit=1)
        latest_turb = turb_df["Close"].values
        return latest_price, latest_tech, latest_turb
