# -*- coding: utf-8 -*-
"""
Created on Wed Apr  6 00:52:59 2022
code review by phil 09/20/2023

@author: phil
"""

import pandas as pd
import numpy as np
import math
import akshare as ak
import matplotlib.pyplot as plt
import seaborn as sns
from dolphindb_data_load import DdbData
# NOTE(review): module-level DB handle created at import time; `ddb` is never
# referenced in this file (compare_vol_methods uses DdbData.s directly) —
# confirm whether it can be removed.
ddb = DdbData()
sns.set(color_codes=True)  # apply seaborn default styling to all matplotlib plots


class DailyVol:
    """
    计算指定时间周期内的历史波动率
    """

    def __init__(self):
        self.hv_percent = None  # 已实现波动率的分位数集
        self.vol_mix = None  # 多合约混合波动率
        self.daily_df = None  # 获取指定日期的数据
        self.hv_term_data: pd.DataFrame = pd.DataFrame()  # 历史波动率期限结构
        self.method = {'hv_c': self.hv_c, 'hv_abs': self.hv_abs, 'ewma': self.ewma_hv_compute,
                       'garman_klass': self.garman_klass_volatility}

    def data_download(self, sym0: str = 'TA0', s_date: str = '20230101', e_date: str = '20230920', main=True,
                      symbol: str = 'TA2401') -> pd.DataFrame:
        """
        获取新浪连续合约数据以及特定月份合约数据，获取日期，开盘价、收盘价、最高价、最低价。
        当self.main=Ture时，获取的是主力合约的历史数据，默认为Ture，此时需要提供开始日期和结束日期以及合约代码如”TA0“
        当self.main=False是，获取特定合约的历史数据，只有一个参数，如”MA2401“,需要注意的是在新浪获取数据的时候，郑商所的合约代码需要增加年份
        :return:Dataframe
        """
        if main:
            self.daily_df = ak.futures_main_sina(symbol=sym0, start_date=s_date,
                                                 end_date=e_date)[['日期', '开盘价', '收盘价', '最高价', '最低价']]
        else:
            self.daily_df = ak.futures_zh_daily_sina(symbol=symbol)[['date', 'open', 'close', 'high', 'low']]
        self.daily_df.columns = ['date', 'open', 'close', 'high', 'low']
        self.daily_df['date'] = pd.to_datetime(self.daily_df['date'])
        return self.daily_df

    @staticmethod
    def hv_c(data: pd.DataFrame, term=20) -> pd.DataFrame:
        """
        最基本的close to close 计算历史波动率
        :param term: 波动率计算周期，默认20天
        :param data: 经处理的标准化收盘价数据
        :return:
        """
        data['R'] = np.log(data['close'] / data['close'].shift(1))
        data['std_20'] = data.R.rolling(term).std()  # 20日标准差
        data['std_250'] = data['std_20'] * math.sqrt(250)  # 年化
        hv_c = data[['date', 'std_250']]
        hv_c.columns = ['date', 'hv_c']
        return hv_c

    @staticmethod
    def hv_abs(data: pd.DataFrame, term=20) -> pd.DataFrame:
        """
        采用E|R|/sqrt(2/math.pi)来计算历史波动率
        :param term: 波动率计算周期，默认20天
        :param data: 经处理的标准化收盘价数据
        :return:
        """
        data['R'] = abs(np.log(data['close'] / data['close'].shift(1)))
        data['hv_abs'] = math.sqrt(250) * (data['R'].rolling(term).mean()) / math.sqrt(2 / math.pi)
        hv_abs = data[['date', 'hv_abs']]
        hv_abs.columns = ['date', 'hv_abs']
        return hv_abs

    @staticmethod
    def ewma_hv_compute(data: pd.DataFrame, lambda_factor=0.94) -> pd.DataFrame:
        """
        使用 EWMA 方法计算历史波动率。
        :param data: 包含 'close' 列的 DataFrame，代表日度收盘价
        :param lambda_factor: 衰减因子，通常在 [0, 1] 范围内
        :return: 包含计算完成的历史波动率的 DataFrame
        """
        # 计算日对数收益率
        data['R'] = np.log(data['close'] / data['close'].shift(1))
        # 用前期的对数收益率的样本方差作为初始方差
        initial_variance = data['R'][1:].var()
        # 初始化EWMA波动率数组
        ewma_var = np.zeros_like(data['R'])
        ewma_var[0] = initial_variance
        # 计算EWMA波动率
        for t in range(1, len(data)):
            ewma_var[t] = (lambda_factor * ewma_var[t - 1] +
                           (1 - lambda_factor) * data['R'][t] ** 2)
        # 将方差转换为波动率
        data['hv_ewma'] = np.sqrt(ewma_var * 252)
        # print(data)
        return data[['date', 'hv_ewma']]

    @staticmethod
    def garman_klass_volatility(data: pd.DataFrame) -> pd.DataFrame:
        """
        Calculate the Garman-Klass volatility estimator for daily data.
        主要用来反应日内波幅
        :param data: A DataFrame containing 'High', 'Low', 'Close', and 'Open' columns.
        :return: The annualized volatility estimate.
        """
        log_hl = (np.log(data['high'] / data['low'])) ** 2
        log_co = (np.log(data['close'] / data['open'])) ** 2
        data['factor'] = 0.5 * log_hl - (2 * np.log(2) - 1) * log_co
        data['vol_gk'] = np.sqrt(252 * data['factor'])
        return data[['date', 'vol_gk']]

    @staticmethod
    def rv(data: pd.DataFrame) -> pd.DataFrame:
        """
        需要获取dolphin数据库中的分钟频行情数据来做计算,获取日度实现波动率，并进行年化
        方法：minute_data_load()
        :param data: ddb行情数据，分钟级
        :return: 两列数据，其中包含日期列和实现波动率列
        """
        # 将 'trigger' 列转换为 datetime 类型，并设置为索引
        data['trigger'] = pd.to_datetime(data['trigger'])
        data.set_index('trigger', inplace=True)
        # 计算分钟对数收益率
        data['log_return'] = np.log(data['etfprice'] / data['etfprice'].shift(1))
        # 计算每分钟收益率的平方
        data['log_return_squared'] = data['log_return'] ** 2
        # 按日期分组并计算每日的实现方差
        daily_var = data['log_return_squared'].groupby(data.index.date).sum()
        # 计算每日的实现波动率
        daily_vol = np.sqrt(daily_var)

        # 年化每日波动率
        annualized_daily_vol = daily_vol * np.sqrt(252)

        # 创建结果数据框
        result = pd.DataFrame({
            'date': daily_var.index,
            'rv': annualized_daily_vol
        })
        return result[['date', 'rv']]

    @staticmethod
    def time_exit_method_for_hv(data: pd.DataFrame, threshold: float = 0.01) -> pd.DataFrame:
        """
        Compute historical volatility using time exit method, suitable for high-frequency data.
        This method captures the first instance daily where the price movement exceeds the threshold.
        # todo 目前逻辑的处理还存在问题，需要进一步探究模型的实现以及数据的处理
        :param data: High-frequency historical market data
        :param threshold: Threshold for price movement as a percentage
        :return: DataFrame with annualized historical volatility
        """
        data['Price_Change'] = data['etfprice'].pct_change().abs()

        # DataFrame to hold the daily time to threshold
        threshold_times = pd.DataFrame(index=pd.date_range(start=data.index.min(), end=data.index.max(), freq='D'))

        # Loop over each day
        for day in threshold_times.index:
            # Filter data for the current day
            day_data = data[data.index.date == day.date()]

            # If there is no data for the day, skip to the next day
            if day_data.empty:
                continue

            # Find the first instance where the price change exceeds the threshold
            threshold_crossing = day_data[day_data['Price_Change'] >= threshold]

            # If there is a threshold crossing, calculate the time to threshold
            if not threshold_crossing.empty:
                first_crossing_time = threshold_crossing.index[0]
                time_to_threshold = (first_crossing_time - day_data.index[0]).total_seconds() / 60
                threshold_times.loc[day, 'Time_To_Threshold'] = time_to_threshold
            else:
                # If there is no crossing, set the time to some large number to indicate no crossing
                threshold_times.loc[day, 'Time_To_Threshold'] = np.nan

        # Calculate the average time to threshold across all days
        avg_time_to_threshold = threshold_times['Time_To_Threshold'].mean()

        # Invert the average time to get the daily frequency of threshold crossings
        if not np.isnan(avg_time_to_threshold):
            daily_volatility_estimate = 1 / avg_time_to_threshold
        else:
            daily_volatility_estimate = 0  # If the average is NaN, set volatility estimate to 0

        # Annualize the daily volatility estimate
        annualized_volatility = daily_volatility_estimate * np.sqrt(252 * 390)
        # Assuming 390 minutes of trading per day
        print(annualized_volatility)
        return annualized_volatility

    def hv(self, data: pd.DataFrame, method: str = 'hv_abs') -> pd.DataFrame:
        """
        选择合适的方法计算历史波动率，分别有close-to-close方法，abs(hv),ewma方法, garman_klass方法
        :param data: 收盘价数据
        :param method: 选择方法hv_c、hv_abs、ewma
        :return:
        """
        if data is None:
            data = self.daily_df
        if method in self.method:
            return self.method[method](data)
        else:
            raise ValueError(f"Invalid method: {method}. Available methods are {list(self.method.keys())}.")

    def vol_complete(self, data: pd.DataFrame = None) -> pd.DataFrame:
        """
        计算不同周期的历史波动率数据
        :param data: 数据结构化的close数据，专门用来计算历史波动率
        :return: 不同周期的历史波动率矩阵
        """
        if data is None:
            data = self.daily_df
        terms = [10, 20, 40, 60, 80, 100, 120, 140, 160]
        dfs = [self.hv_abs(data, term)['hv_abs'] for term in terms]
        self.vol_mix = pd.concat(dfs, axis=1)
        self.vol_mix.columns = map(str, terms)
        self.vol_mix = self.vol_mix.round(4)
        return self.vol_mix

    def describe(self, data: pd.DataFrame = None) -> pd.DataFrame:
        """
        计算所有数据的描述统计量，如数量，最大最小值，不同百分比分位数。
        :param data: 计算完成的不同周期的历史波动率矩阵：20、40、60...160天
        :return:返回计算出的不同分位数的矩阵
        """
        if data is None:
            data = self.vol_mix
        self.hv_percent = data.describe()
        # print(self.hv_percent) # 展示统计信息
        self.hv_percent = self.hv_percent.drop(['mean', 'count', 'std'], axis=0)  # 去掉作图不要的列
        self.hv_percent = self.hv_percent.T
        self.hv_percent.columns = ['0.01', '0.25', '0.50', '0.75', '1']
        self.hv_percent.columns = self.hv_percent.columns.astype(float)
        self.hv_percent = self.hv_percent.set_index(self.hv_percent.index.astype(int))
        # print(self.hv_percent.index)  # 展示清洗过后的各分位数的hv
        return self.hv_percent

    # Function to calculate CDF and find values corresponding to specific quantiles for all columns
    @staticmethod
    def calculate_cdf_and_quantiles(data: pd.DataFrame,
                                    quantiles: list = [0.01, 0.25, 0.5, 0.75, 1]) -> pd.DataFrame:
        """
        使用CDF来精细化计算每个iv的累积概率分布，更精确地展示每个周期的历史波动率的高低点，是描述统计中分位点的替换方法，用累积概率分位数替代
        :param data: 计算完成的不同周期的历史波动率矩阵：20、40、60...160天
        :param quantiles: 累积概率分位数列表
        :return:返回计算出的不同累积概率分位数矩阵
        """
        quantile_vols = pd.DataFrame(index=quantiles)

        for col in data.columns:
            sorted_data = np.sort(data[col].dropna())
            yvals = np.arange(len(sorted_data)) / float(len(sorted_data) - 1)

            # Find the values corresponding to specific quantiles
            quantile_values = np.interp(quantiles, yvals, sorted_data)
            quantile_vols[col] = quantile_values

        quantile_vols = quantile_vols.T
        quantile_vols = quantile_vols.set_index(quantile_vols.index.astype(int))
        # print(quantile_vols.index)
        return quantile_vols

    def hv_term(self, sym_list: list = None) -> pd.DataFrame:
        """
        计算历史波动率期限结构
        :param sym_list: List of symbols for which to calculate historical volatility
                        sym_list = ['MA2311', 'MA2312', 'MA401']
        :return: DataFrame containing historical volatility for the given symbols
        """
        # Default symbol list if none provided
        if sym_list is None:
            sym_list = ['MA2311', 'MA2312', 'MA2401']

        data = pd.DataFrame()

        # Loop through each symbol to download data and calculate historical volatility
        for sym in sym_list:
            try:
                raw_data = self.data_download(main=False, symbol=sym)
                hv_data = self.hv_abs(raw_data)
                if 'date' in hv_data.columns:
                    hv_data.set_index('date', inplace=True)
                hv_data = hv_data.rename(columns={'hv_abs': sym})
                data = pd.concat([data, hv_data], axis=1)

            except Exception as e:
                print(f"Error processing symbol {sym}: {e}")
                continue

        self.hv_term_data = data.iloc[-1:].T
        self.hv_term_data = self.hv_term_data.reset_index()
        self.hv_term_data.columns = ['underlying', 'term_hv']

        def split_underlying(row):
            # 处理合约数据月份，拆分月份和合约名
            alphabetic_part = ''.join(filter(str.isalpha, row['underlying']))
            numeric_part = ''.join(filter(str.isdigit, row['underlying']))
            # Add the omitted digit "20" to the numeric part
            full_year_month = '20' + numeric_part
            return pd.Series([alphabetic_part, full_year_month], index=['symbol', 'month'])

        self.hv_term_data[['sym', 'month']] = self.hv_term_data.apply(split_underlying, axis=1)
        return self.hv_term_data

    @staticmethod
    def plot_transposed_specialized_quantile_vols(data: pd.DataFrame):
        """
        Plots the specified quantile vols for each term structure (column name) with updated specialized styles.
        Note: This function assumes the DataFrame is transposed, i.e., each row represents a specific term
        (e.g., 20, 40, 60,...) and the columns represent different quantile levels.
        :param data: Transposed DataFrame containing quantile vols.
        """
        plt.figure(figsize=(12, 6))

        # Plot lines for 0.01 and 1 quantiles with solid and thicker lines
        plt.plot(data.index, data[0.01], label='Quantile 0.01', linestyle='-', linewidth=2, color='grey')
        plt.plot(data.index, data[1], label='Quantile 1', linestyle='-', linewidth=2, color='orange')

        # Fill area between 0.25 and 0.01, 1 and 0.75 with a lighter shade of grey
        plt.fill_between(data.index, data[0.01], data[0.25], color='grey', alpha=0.2,
                         label='Area between 0.01 and 0.25')
        plt.fill_between(data.index, data[0.75], data[1], color='orange', alpha=0.2,
                         label='Area between 0.75 and 1')

        plt.xlabel("Term Structure")
        plt.ylabel("Volatility Value")
        plt.title("Updated Specialized Volatility Values at Different Quantiles for Each Term Structure")
        plt.legend()
        plt.grid(True)
        plt.show()

    def compare_vol_methods(self, data: pd.DataFrame):
        """
        计算并比较不同历史波动率计算方法的结果。
        :param data: DataFrame, 包含历史价格数据
        """
        # 计算不同波动率
        hv_c_data = self.hv_c(data)
        hv_abs_data = self.hv_abs(data)
        ewma_data = self.ewma_hv_compute(data)
        garman_klass_data = self.garman_klass_volatility(data)
        rv_data = self.rv(DdbData.minute_data_load(DdbData.s, 'MA401'))

        # 绘制波动率曲线
        plt.figure(figsize=(15, 8))
        plt.plot(hv_c_data['date'], hv_c_data['hv_c'], label='Close-to-Close')
        plt.plot(hv_abs_data['date'], hv_abs_data['hv_abs'], label='Absolute Return')
        plt.plot(ewma_data['date'], ewma_data['hv_ewma'], label='EWMA')
        plt.plot(garman_klass_data['date'], garman_klass_data['vol_gk'], label='Garman-Klass')
        plt.plot(rv_data['date'], rv_data['rv'], label='Realized Volatility')

        plt.title('Comparison of Historical Volatility Methods')
        plt.xlabel('Date')
        plt.ylabel('Volatility')
        plt.legend()
        plt.show()
        
    def compare_vol_methods_for_ewma(self, data: pd.DataFrame):
        """
        计算并比较不同历史波动率计算方法的结果。
        :param data: DataFrame, 包含历史价格数据
        """
        # 计算不同波动率
        hv_abs_data = self.hv_abs(data)
        hv_c_data = self.hv_c(data)
        e1 = self.ewma_hv_compute(data, lambda_factor=0.92)
        e2 = self.ewma_hv_compute(data, lambda_factor=0.93)
        e3 = self.ewma_hv_compute(data, lambda_factor=0.94)
        e4 = self.ewma_hv_compute(data, lambda_factor=0.95)
        
        # 绘制波动率曲线
        plt.figure(figsize=(15, 8))
        plt.plot(hv_abs_data['date'], hv_abs_data['hv_abs'], label='Absolute Return')
        plt.plot(hv_c_data['date'], hv_c_data['hv_c'], label='Close-to-Close')
        plt.plot(e1['date'], e1['hv_ewma'], label='0.92')
        plt.plot(e2['date'], e2['hv_ewma'], label='0.93')
        plt.plot(e3['date'], e3['hv_ewma'], label='0.94')
        plt.plot(e4['date'], e4['hv_ewma'], label='0.95')

        plt.title('Comparison of Historical Volatility Methods')
        plt.xlabel('Date')
        plt.ylabel('Volatility')
        plt.legend()
        plt.show()


if __name__ == '__main__':
    # Script entry point: build the volatility helper. The lines below are
    # usage examples (uncomment one to run); each chains
    # download -> compute -> summarize/plot and requires network access
    # (akshare) and, for the rv/compare examples, a DolphinDB connection.
    VOL = DailyVol()
    # VOL.plot_transposed_specialized_quantile_vols(VOL.describe(VOL.vol_complete(VOL.data_download())))
    # VOL.describe(VOL.vol_complete(VOL.data_download()))
    # VOL.calculate_cdf_and_quantiles(VOL.vol_complete(VOL.data_download()))
    # VOL.garman_klass_volatility(VOL.data_download())
    # VOL.rv(DdbData.minute_data_load(DdbData.s, 'RM401'))
    # VOL.compare_vol_methods(VOL.data_download(main=False, symbol='MA2401'))
    # VOL.compare_vol_methods_for_ewma(VOL.data_download(main=False, symbol='MA2401'))
