import os

import pandas as pd

from app_config import get_engine, get_pro

""" 930955
update stock_basic
update daily
update dividend
update daily_basic

手动筛选
1 年报无亏损
2 公告没有违法违规
"""


class O930955:
    """Parameter container for one <CSI Dividend Low Volatility 100> run.

    All attributes default to None; the caller assigns them before
    passing the instance to calc().
    """

    # Start of the 3-year window for the "paid dividends every year" screen.
    three_year_ago = None
    # List of (start, end) timestamp pairs, one per dividend year.
    intervals = None

    # Bounds of the 1-year window used for turnover and volatility.
    one_year_ago = None
    zero_year_ago = None

    # Output folder prefix for the generated Excel files.
    str_filePre = None
    # Trade date used to fetch the <CSI All Share Index> sample space.
    zzqz_date = None
    # Trade date used to fetch the <CSI Dividend Low Volatility 100> constituents.
    str_930955_date = None


def calculate_volatility(df):
    """Return the standard deviation of daily close-to-close returns.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain 'trade_date' and 'close' columns.

    Returns
    -------
    float
        Sample standard deviation (pandas default ddof=1) of the daily
        percentage change of 'close'; the leading NaN produced by
        pct_change() is dropped first.
    """
    # Sort chronologically so pct_change compares consecutive trading days.
    # sort_values already returns a new frame, so no defensive copy is needed.
    ordered = df.sort_values(by='trade_date')
    returns = ordered['close'].pct_change().dropna()
    return returns.std()


# Grouped by ts_code: test whether the group's 'year' column contains
# at least one of the specified year combinations in full.
def check_combinations(group, combinations_):
    """Return True if the group's years fully cover any combination.

    Parameters
    ----------
    group : pd.DataFrame
        Rows for one ts_code; only the 'year' column is read.
    combinations_ : iterable of set
        Each element is a set of year strings; the group matches when at
        least one whole combination is present.

    Returns
    -------
    bool
    """
    years = set(group['year'])
    # any() short-circuits on the first match, exactly like the original
    # explicit for-loop with early return.
    return any(combination.issubset(years) for combination in combinations_)


def check_intervals(dates, intervals):
    """Return True when every (start, end) interval contains at least one date.

    With an empty *intervals* sequence the answer is vacuously True,
    matching all() over an empty iterable.
    """
    for start, end in intervals:
        covered = False
        for date in dates:
            if start <= date <= end:
                covered = True
                break
        if not covered:
            return False
    return True


# Pipeline:
#   query all income rows for the sample space
#   group by end_date and keep the row with the latest announcement date
#   rebuild the frame and derive single-quarter profit
#   sum quarterly profit announced inside the window
#   drop companies whose windowed net profit is not positive
def filter_income(df_csi_all_share, str_one_year_ago: str, str_zero_year_ago: str, folder_path: str):
    """Return the sample-space rows whose trailing-window net income is positive.

    Parameters
    ----------
    df_csi_all_share : pd.DataFrame
        Index-constituent frame with a 'con_code' column.
    str_one_year_ago, str_zero_year_ago : str
        'YYYYMMDD' bounds of the announcement-date window for the profit sum.
    folder_path : str
        Output folder; rejected companies are exported there as
        0_csi_all_share_INCOME.xlsx for manual review.

    Returns
    -------
    pd.DataFrame
        Subset of df_csi_all_share that passed the profit screen.
    """
    list_tsCode_dividend = df_csi_all_share['con_code'].tolist()

    # NOTE(review): SQL built by f-string interpolation; the codes come from
    # the index API rather than user input, but a parameterized query would
    # be safer. An empty code list would also yield invalid SQL ("IN ()").
    sql_daily = f"""
         SELECT ts_code,f_ann_date,end_date,n_income_attr_p,ann_date FROM `income_vip`
         WHERE ts_code IN ({','.join(f"'{code}'" for code in list_tsCode_dividend)})
         """
    df_income = pd.read_sql_query(sql_daily, get_engine())
    group_by = df_income.groupby("ts_code")
    income_ts_codes: list[str] = []   # codes that passed the profit screen
    income_ts_codes_: list[str] = []  # codes that failed it
    for name, group in group_by:
        # Group by end_date; within each group sort f_ann_date descending and
        # take the first row, i.e. keep only the latest (re)statement of each
        # reporting period.
        df_st_income = group.sort_values(by=['end_date', 'f_ann_date'], ascending=[True, False]).groupby('end_date').first().reset_index()
        # Cumulative-to-quarterly: quarter profit = this report minus the
        # previous cumulative report...
        df_st_income['q_income'] = df_st_income['n_income_attr_p'] - df_st_income['n_income_attr_p'].shift(1)
        # ...except Q1 (end_date *0331), which is already a single quarter.
        df_st_income.loc[df_st_income['end_date'].str.endswith('0331'), 'q_income'] = df_st_income['n_income_attr_p']

        # Keep quarters whose announcement date falls strictly inside the window.
        filtered_df = df_st_income[(df_st_income['ann_date'] > str_one_year_ago) & (df_st_income['ann_date'] < str_zero_year_ago)]
        # if len(filtered_df) != 4:
        #     print(filtered_df)
        #     raise RuntimeError
        sum_q_income = filtered_df['q_income'].sum()
        # print(filtered_df)
        # print(sum_q_income)
        if sum_q_income > 0:
            income_ts_codes.append(str(name))
        else:
            income_ts_codes_.append(str(name))
    print("income_ts_codes:" + str(income_ts_codes))
    print("income_ts_codes_:" + str(income_ts_codes_))

    df_csi_all_share_income = df_csi_all_share[df_csi_all_share['con_code'].isin(income_ts_codes)]
    df_csi_all_share_income_ = df_csi_all_share[df_csi_all_share['con_code'].isin(income_ts_codes_)]

    # Export the rejected companies for manual inspection.
    df_csi_all_share_income_.to_excel(folder_path + "/0_csi_all_share_INCOME.xlsx")
    print("df_csi_all_share_size:" + str(len(df_csi_all_share)))
    print("df_csi_all_share_income_size:" + str(len(df_csi_all_share_income)))
    return df_csi_all_share_income


def calc(o_930955: O930955 | None = None):
    """Recompute the <CSI Dividend Low Volatility 100> (930955) selection.

    Pipeline, mirroring the index methodology:
      1. Fetch the CSI All Share (000985) constituents as the sample space
         and drop companies with non-positive trailing net income.
      2. Rank by one-year average daily turnover; keep the top 80%.
      3. Keep securities that paid a cash dividend in each of the three
         yearly intervals.
      4. Rank by dividend yield; keep the top 300.
      5. Attach one-year volatility and export; the final top-100 pick is
         then done manually (see module header).
    Every stage is exported as an Excel file under o_930955.str_filePre,
    and the official 930955 constituents are merged in for comparison.

    NOTE(review): the parameter defaults to None but is dereferenced
    unconditionally on the first line, so calc() with no argument raises
    AttributeError -- a fully populated O930955 is required.
    """
    three_year_ago = o_930955.three_year_ago
    one_year_ago = o_930955.one_year_ago
    zero_year_ago = o_930955.zero_year_ago
    folder_path = o_930955.str_filePre
    intervals = o_930955.intervals
    csi_all_share_date = o_930955.zzqz_date
    str_930955Date = o_930955.str_930955_date

    # Pretty-print the run parameters; str_1 lists the dividend intervals.
    str_1 = ""
    for start, end in intervals:
        str_1 = str_1 + "\n                    " + start.strftime('%Y%m%d') + "-" + end.strftime('%Y%m%d')
    print(f"""
    folder_path: {"    " + folder_path}
    three_year_ago: {" " + three_year_ago}
    one_year_ago: {"   " + one_year_ago}
    zero_year_ago: {"  " + zero_year_ago}
    intervals: {str_1}
    <CSI 93095>for merge   : {str_930955Date}
    <CSI 00085>sample space: {csi_all_share_date}
    """)

    create_folder(folder_path)

    engine = get_engine()
    """ 
        1、sample space: same with <CSI All Share Index 930955>
    """
    df_csi_all_share = get_pro().index_weight(index_code='000985.CSI', start_date=csi_all_share_date,
                                              end_date=csi_all_share_date)
    df_csi_all_share_income = filter_income(df_csi_all_share, one_year_ago, zero_year_ago, folder_path)
    list_tsCode_dividend = df_csi_all_share_income['con_code'].tolist()
    # NOTE(review): in-place rename on a filtered slice of the original frame;
    # pandas may emit SettingWithCopyWarning here.
    df_csi_all_share_income.rename(columns={'con_code': 'ts_code'}, inplace=True)
    df_csi_all_share_income.to_excel(folder_path + "/1_csi_all_share.xlsx")

    """（1）The securities in the sample space are ranked from highest to lowest according to the average daily 
    turnover(成交额) of the past year, and the bottom 20% of the securities are excluded;"""
    sql_daily = f"""
       SELECT ts_code, trade_date, pct_chg, amount,close FROM `daily`
       WHERE ts_code IN ({','.join(f"'{code}'" for code in list_tsCode_dividend)})
       AND trade_date >= '{one_year_ago}'
       AND trade_date <= '{zero_year_ago}'
       """

    df_daily = pd.read_sql_query(sql_daily, engine)
    # Rescale turnover; presumably converts thousands of CNY -- TODO confirm units.
    df_daily['amount'] = df_daily['amount'] / 1000
    df_amountMean = df_daily.groupby('ts_code')['amount'].mean().reset_index()
    df_amountMean.columns = ['ts_code', '日均成交额']
    df_csiAllShareAvgAmount = pd.merge(df_csi_all_share_income, df_amountMean, on='ts_code', how='left')

    df_csiAllShareAvgAmount = df_csiAllShareAvgAmount.sort_values(by='日均成交额', ascending=False).reset_index(
        drop=True)
    # Record each security's turnover rank as "rank/cutoff" for the export.
    df_csiAllShareAvgAmount['i_日成交额'] = df_csiAllShareAvgAmount.index
    num_rows = len(df_csiAllShareAvgAmount)
    df_csiAllShareAvgAmount['i_日成交额'] = df_csiAllShareAvgAmount['i_日成交额'].astype(str) + '/' + str(int(num_rows * 0.8))
    df_csiAllShareAvgAmountTop80P = df_csiAllShareAvgAmount.head(int(num_rows * 0.8))
    df_csiAllShareAvgAmountTop80P.to_excel(folder_path + "/2_csiAllShareAvgAmountTop80P.xlsx")

    """（2）Select securities that have paid cash dividends for the past three consecutive(连续) years and have a cash 
    dividend yield(收益) greater than 0 each year;"""
    # Query columns: cash_div_tax = per-share dividend (pre-tax),
    # pay_date = payment date, base_share = base share count
    # (presumably in 万 shares -- TODO confirm units).
    sql_dividend = f"""
       SELECT distinct(id) as id ,ts_code, pay_date, cash_div_tax, base_share FROM dividend WHERE div_proc = '实施' AND cash_div_tax > 0 
       AND pay_date >= '{three_year_ago}' AND pay_date <= '{zero_year_ago}'
       """
    df_dividend_dataset = pd.read_sql_query(sql_dividend, engine)
    df_dividend_dataset['year'] = df_dividend_dataset['pay_date'].str[:4]
    # Total cash paid = per-share dividend * base share count.
    df_dividend_dataset['cash_amount'] = df_dividend_dataset['cash_div_tax'] * df_dividend_dataset['base_share']
    df_dividend_dataset['symbol'] = df_dividend_dataset['ts_code'].str.split('.').str[0]
    df_dividend_dataset.to_excel(folder_path + "/3_dividend_pass3year.xlsx")

    df_dividend_dataset['pay_date'] = pd.to_datetime(df_dividend_dataset['pay_date'], format='%Y%m%d')
    # Keep codes that paid in every one of the three yearly intervals.
    df_dividend_for3year = df_dividend_dataset.groupby('ts_code').filter(
        lambda group: check_intervals(group['pay_date'], intervals))

    df_dividend_for3year = df_dividend_for3year[['ts_code']].drop_duplicates()
    df_divide_for3year_tsCode = df_dividend_for3year.copy()
    df_divide_for3year_tsCode['symbol'] = df_divide_for3year_tsCode['ts_code'].str.split('.').str[0]
    df_divide_for3year_tsCode.to_excel(folder_path + "/4_dividend_forThreeYear.xlsx")

    # Total cash dividends per code over the whole three-year window.
    df_dividend_tsCode2cashAmount = df_dividend_dataset.groupby('ts_code')['cash_amount'].sum().reset_index()
    df_dividend_tsCode2cashAmount.columns = ['ts_code', 'cash_amount_total']

    # Intersect the three-year dividend payers with the top-80% turnover set.
    df_dividend_for3year_final = pd.merge(df_dividend_for3year, df_csiAllShareAvgAmountTop80P, on='ts_code', how='inner')
    df_dividend_for3year_final = pd.merge(df_dividend_for3year_final, df_dividend_tsCode2cashAmount, on='ts_code', how='left')
    df_dividend_for3year_final.to_excel(folder_path + "/5_dividend_forThreeYear_avgAmountTop80Percent.xlsx")

    """the top 300 securities are selected according to the dividend yield(股息率), which is ranked in descending order"""
    list_tsCode_dividend = df_dividend_for3year_final['ts_code'].tolist()
    query = f"""
       SELECT ts_code,total_mv FROM `daily_basic`
       WHERE ts_code IN ({','.join(f"'{code}'" for code in list_tsCode_dividend)})
       AND trade_date = '{zero_year_ago}'
       """
    df_daily_basic = pd.read_sql_query(query, engine)
    df_dividend_for3year_final = pd.merge(df_dividend_for3year_final, df_daily_basic, on='ts_code', how='left')

    # Dividend yield ~= average yearly cash dividend / total market value.
    # NOTE(review): assumes cash_amount_total and total_mv are in compatible
    # units -- verify against the data source.
    df_dividend_for3year_final['股息率'] = df_dividend_for3year_final['cash_amount_total'] / 3 / df_dividend_for3year_final['total_mv']
    df_dividend_for3year_final.sort_values('股息率', ascending=False, inplace=True)
    df_dividend_for3year_final.reset_index(inplace=True, drop=True)
    df_dividend_for3year_final['i_股息率'] = df_dividend_for3year_final.index
    df_dividend_for3year_final.to_excel(folder_path + "/6_dividend_forThreeYear_avgAmountTop80Percent股息率.xlsx")
    df_dividend_for3year_final_head300 = df_dividend_for3year_final.head(300).copy()

    """Select the top 100 securities based on the ascending volatility(波动) ranking over the past year"""
    dict_volatility = {}
    # Compute per-code volatility over the one-year daily window.
    for stock_code, group_df in df_daily.groupby('ts_code'):
        # dict_volatility[stock_code] = calculate_volatility(group_df)
        # Uses the DB-supplied pct_chg directly; the sort does not affect std().
        dict_volatility[stock_code] = group_df.sort_values(by='trade_date')['pct_chg'].std()
    # Convert the dict into a DataFrame.
    df_volatility = pd.DataFrame(list(dict_volatility.items()), columns=['ts_code', '波动率'])

    df_dividend_for3year_final_head300 = pd.merge(df_dividend_for3year_final_head300, df_volatility, on='ts_code',
                                                  how='left')

    df_dividend_for3year_final_head300.sort_values('股息率', ascending=False, inplace=True)

    # Ratio used for manual weighting inspection: yield / volatility.
    df_dividend_for3year_final_head300['weight_1'] = df_dividend_for3year_final_head300['股息率'] / df_dividend_for3year_final_head300['波动率']
    # Final export order: lowest volatility first; the top-100 cut is manual.
    df_dividend_for3year_final_head300.sort_values(by='波动率', ascending=True, inplace=True)
    df_dividend_for3year_final_head300_index = df_dividend_for3year_final_head300.reset_index()
    df_dividend_for3year_final_head300_index.to_excel(folder_path + "/7 CSI Dividend Low Volatility 100.xlsx")
    """end the <CSI Dividend Low Volatility 100> calc"""
    """end the <CSI Dividend Low Volatility 100> calc"""
    """end the <CSI Dividend Low Volatility 100> calc"""
    """end the <CSI Dividend Low Volatility 100> calc"""
    # Fetch the official 930955 constituents for the comparison merge below.
    df_930955 = get_pro().index_weight(index_code='930955.CSI', start_date=str_930955Date, end_date=str_930955Date)
    df_930955.rename(columns={'con_code': 'ts_code'}, inplace=True)

    df_dividend_for3year_final_head300_index = (
        pd.merge(df_dividend_for3year_final_head300_index, df_930955[['ts_code', 'weight']], on='ts_code', how='left'))

    df_930955 = pd.merge(df_930955, df_csiAllShareAvgAmount[['ts_code', 'i_日成交额']], on='ts_code', how='left')
    df_930955 = pd.merge(df_930955, df_dividend_for3year_final[['ts_code', 'i_股息率']], on='ts_code', how='left')

    df_930955.to_excel(folder_path + '/8_' + str_930955Date + '-930955.xlsx')
    df_930955['symbol'] = df_930955['ts_code'].str.split('.').str[0]

    # Collect all columns of the computed frame (dataframe1).
    columns_dataframe1 = df_dividend_for3year_final_head300_index.columns

    # Create any missing columns in the official frame, filled with '**'.
    for column in columns_dataframe1:
        if column not in df_930955.columns:
            df_930955[column] = '**'

    # Align the official frame's column order with the computed frame.
    df_930955 = df_930955[columns_dataframe1]

    # Append the official constituents below the computed selection.
    result = pd.concat([df_dividend_for3year_final_head300_index, df_930955], ignore_index=True)

    result.to_excel(folder_path + '/9_DividendLowVolatility100Merge.xlsx')
    result.to_excel(folder_path + '_.xlsx')


def create_folder(folder_path):
    """Create *folder_path* (including parents) if it does not already exist.

    exist_ok=True closes the TOCTOU race where the directory appears
    between the existence check and makedirs(); the check is kept only
    so the creation message is printed just when the path was absent.
    """
    if not os.path.exists(folder_path):
        os.makedirs(folder_path, exist_ok=True)
        print(f"文件夹 '{folder_path}' 创建成功")


if __name__ == '__main__':
    # One O930955 instance per (review cycle, year). Suffixes mark the review
    # anchor: _1one = windows ending late January, _3thr = late July,
    # _4fou = late October. The _two instances are created but never
    # configured; their calc() calls below are commented out.
    _2025_1one = O930955()
    _2024_1one = O930955()
    _2023_1one = O930955()
    _2022_1one = O930955()
    # _2021_one = Student()

    _2024_3thr = O930955()
    _2023_3thr = O930955()
    _2022_3thr = O930955()
    _2021_3thr = O930955()

    _2024_4fou = O930955()
    _2023_4fou = O930955()
    _2022_4fou = O930955()
    _2021_4fou = O930955()

    _2024_two = O930955()
    _2023_two = O930955()
    _2022_two = O930955()
    _2021_two = O930955()

    # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
    # Cohort 1 (_1one): 3-year window starts Feb 1, three years back.
    _2025_1one.three_year_ago = '20220201'
    _2024_1one.three_year_ago = '20210201'
    _2023_1one.three_year_ago = '20200201'
    _2022_1one.three_year_ago = '20190201'

    _2025_1one.one_year_ago = '20240201'
    _2024_1one.one_year_ago = '20230201'
    _2023_1one.one_year_ago = '20220201'
    _2022_1one.one_year_ago = '20210201'

    # End dates are the last trading day before the Spring Festival break.
    _2025_1one.zero_year_ago = '20250127'
    _2024_1one.zero_year_ago = '20240131'
    _2023_1one.zero_year_ago = '20230131'
    _2022_1one.zero_year_ago = '20220128'

    _2025_1one.intervals = [
        (pd.to_datetime('20240201'), pd.to_datetime('20250131')),
        (pd.to_datetime('20230201'), pd.to_datetime('20240131')),
        (pd.to_datetime('20220201'), pd.to_datetime('20230131'))
    ]
    _2024_1one.intervals = [
        (pd.to_datetime('20230201'), pd.to_datetime('20240131')),
        (pd.to_datetime('20220201'), pd.to_datetime('20230131')),
        (pd.to_datetime('20210201'), pd.to_datetime('20220131'))
    ]
    _2023_1one.intervals = [
        (pd.to_datetime('20220201'), pd.to_datetime('20230131')),
        (pd.to_datetime('20210201'), pd.to_datetime('20220131')),
        (pd.to_datetime('20200201'), pd.to_datetime('20210131'))
    ]
    _2022_1one.intervals = [
        (pd.to_datetime('20210201'), pd.to_datetime('20220131')),
        (pd.to_datetime('20200201'), pd.to_datetime('20210131')),
        (pd.to_datetime('20190201'), pd.to_datetime('20200131'))
    ]

    # Sample-space snapshot dates (last trading day of the year).
    _2025_1one.zzqz_date = '20241231'
    _2024_1one.zzqz_date = '20231229'
    _2023_1one.zzqz_date = '20221230'
    _2022_1one.zzqz_date = '20211231'

    # Official 930955 snapshot dates used for the comparison merge.
    _2025_1one.str_930955_date = '20241231'
    _2024_1one.str_930955_date = '20240329'
    _2023_1one.str_930955_date = '20230331'
    _2022_1one.str_930955_date = '20220331'
    # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

    # Cohort 4 (_4fou): windows anchored at Nov 1 / Oct 31.
    _2024_4fou.three_year_ago = '20211101'
    _2023_4fou.three_year_ago = '20201101'
    _2022_4fou.three_year_ago = '20191101'
    _2021_4fou.three_year_ago = '20181101'

    _2024_4fou.one_year_ago = '20231101'
    _2023_4fou.one_year_ago = '20221101'
    _2022_4fou.one_year_ago = '20211101'
    _2021_4fou.one_year_ago = '20201101'

    _2024_4fou.zero_year_ago = '20241031'
    _2023_4fou.zero_year_ago = '20231031'
    _2022_4fou.zero_year_ago = '20221031'
    _2021_4fou.zero_year_ago = '20211031'

    _2024_4fou.intervals = [
        (pd.to_datetime('20231101'), pd.to_datetime('20241031')),
        (pd.to_datetime('20221101'), pd.to_datetime('20231031')),
        (pd.to_datetime('20211101'), pd.to_datetime('20221031'))
    ]
    _2023_4fou.intervals = [
        (pd.to_datetime('20221101'), pd.to_datetime('20231031')),
        (pd.to_datetime('20211101'), pd.to_datetime('20221031')),
        (pd.to_datetime('20201101'), pd.to_datetime('20211031'))
    ]
    _2022_4fou.intervals = [
        (pd.to_datetime('20211101'), pd.to_datetime('20221031')),
        (pd.to_datetime('20201101'), pd.to_datetime('20211031')),
        (pd.to_datetime('20191101'), pd.to_datetime('20201031'))
    ]
    _2021_4fou.intervals = [
        (pd.to_datetime('20201101'), pd.to_datetime('20211031')),
        (pd.to_datetime('20191101'), pd.to_datetime('20201031')),
        (pd.to_datetime('20181101'), pd.to_datetime('20191031'))
    ]

    _2024_4fou.zzqz_date = '20241031'
    _2023_4fou.zzqz_date = '20231031'
    _2022_4fou.zzqz_date = '20221031'
    _2021_4fou.zzqz_date = '20211029'

    _2024_4fou.str_930955_date = '20241231'
    _2023_4fou.str_930955_date = '20231229'
    _2022_4fou.str_930955_date = '20221230'
    _2021_4fou.str_930955_date = '20211231'

    # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
    # Cohort 3 (_3thr): windows anchored at Aug 1 / Jul 31.
    _2024_3thr.three_year_ago = '20210801'
    _2023_3thr.three_year_ago = '20200801'
    _2022_3thr.three_year_ago = '20190801'
    _2021_3thr.three_year_ago = '20180801'

    _2024_3thr.one_year_ago = '20230801'
    _2023_3thr.one_year_ago = '20220801'
    _2022_3thr.one_year_ago = '20210801'
    _2021_3thr.one_year_ago = '20200801'

    _2024_3thr.zero_year_ago = '20240731'
    _2023_3thr.zero_year_ago = '20230731'
    _2022_3thr.zero_year_ago = '20220729'
    _2021_3thr.zero_year_ago = '20210730'

    # Interval order varies between instances; check_intervals() requires
    # every interval to be covered, so ordering does not matter.
    _2024_3thr.intervals = [
        (pd.to_datetime('20230801'), pd.to_datetime('20240731')),
        (pd.to_datetime('20210801'), pd.to_datetime('20220731')),
        (pd.to_datetime('20220801'), pd.to_datetime('20230731'))
    ]
    _2023_3thr.intervals = [
        (pd.to_datetime('20200801'), pd.to_datetime('20210731')),
        (pd.to_datetime('20210801'), pd.to_datetime('20220731')),
        (pd.to_datetime('20220801'), pd.to_datetime('20230731'))
    ]
    _2022_3thr.intervals = [
        (pd.to_datetime('20190801'), pd.to_datetime('20200731')),
        (pd.to_datetime('20200801'), pd.to_datetime('20210731')),
        (pd.to_datetime('20210801'), pd.to_datetime('20220731'))
    ]
    _2021_3thr.intervals = [
        (pd.to_datetime('20190801'), pd.to_datetime('20200731')),
        (pd.to_datetime('20200801'), pd.to_datetime('20210731')),
        (pd.to_datetime('20180801'), pd.to_datetime('20190731'))
    ]

    _2024_3thr.zzqz_date = '20240731'
    _2023_3thr.zzqz_date = '20230731'
    _2022_3thr.zzqz_date = '20220729'
    _2021_3thr.zzqz_date = '20210730'

    _2024_3thr.str_930955_date = '20241031'
    _2023_3thr.str_930955_date = '20231031'
    _2022_3thr.str_930955_date = '20221031'
    _2021_3thr.str_930955_date = '20211029'

    # Output folders, one per run.
    _2025_1one.str_filePre = 'zfile/a1_930955_1one_2025_v1'
    _2024_1one.str_filePre = 'zfile/a1_930955_1one_2024_v1'
    _2023_1one.str_filePre = 'zfile/a1_930955_1one_2023_v1'
    _2022_1one.str_filePre = 'zfile/a1_930955_1one_2022_v1'
    _2024_3thr.str_filePre = 'zfile/a1_930955_3thr_2024_v1'
    _2023_3thr.str_filePre = 'zfile/a1_930955_3thr_2023_v1'
    _2022_3thr.str_filePre = 'zfile/a1_930955_3thr_2022_v1'
    _2021_3thr.str_filePre = 'zfile/a1_930955_3thr_2021_v1'
    _2024_4fou.str_filePre = 'zfile/a1_930955_4fou_2024_v1'
    _2023_4fou.str_filePre = 'zfile/a1_930955_4fou_2023_v1'
    _2022_4fou.str_filePre = 'zfile/a1_930955_4fou_2022_v1'
    _2021_4fou.str_filePre = 'zfile/a1_930955_4fou_2021_v1'


    # Execute every configured run.
    calc(_2024_1one)
    calc(_2025_1one)
    calc(_2023_1one)
    calc(_2022_1one)
    # calc(_2021_1one)

    calc(_2024_3thr)
    calc(_2023_3thr)
    calc(_2022_3thr)
    calc(_2021_3thr)

    calc(_2024_4fou)
    calc(_2023_4fou)
    calc(_2022_4fou)
    calc(_2021_4fou)

    # calc(_2024_two)
    # calc(_2023_two)
    # calc(_2022_two)
    # calc(_2021_two)
    pass
