import tracemalloc
import numpy as np
import pandas as pd
import time
from collections import defaultdict
from utils_func.get_logger import get_logger
from utils_func.env import ROOT_PATH

# Module-wide logger; writes to <ROOT_PATH>/log/monitor.log.
logger = get_logger("monitor", ROOT_PATH.joinpath('log', 'monitor.log'))
def check_memory():
    """Log current/peak traced memory and the top 5 allocating source lines.

    Requires tracemalloc tracing to already be started by the caller;
    tracing is deliberately left running afterwards.
    """
    snapshot = tracemalloc.take_snapshot()

    current, peak = tracemalloc.get_traced_memory()
    logger.info(f"Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB")

    logger.info("[ Top 5 ]")
    for entry in snapshot.statistics('lineno')[:5]:
        logger.info(entry)
def show_time(notice:str):
    """Log a timestamped banner line carrying *notice*."""
    logger.info(f'-----------{time.ctime()} {notice}-----------')

def check_data(_df):
    """Drop abnormal rows: '保障开始时间' missing or earlier than 2008/01.

    Mutates *_df* in place (rows removed with ``inplace=True``) and also
    returns it, together with a DataFrame holding the removed rows.

    :param _df: DataFrame with a '保障开始时间' column
        (assumed datetime-comparable against '2008/01' — TODO confirm).
    :return: tuple ``(_df cleaned, abnormal_df of dropped rows)``
    """
    # One combined mask replaces the former duplicated isnull()/cutoff
    # checks and two separate drops; boolean indexing copies the abnormal
    # rows before they are dropped from _df.
    bad_mask = _df['保障开始时间'].isnull() | (_df['保障开始时间'] < '2008/01')
    abnormal_df = _df[bad_mask]
    _df.drop(_df[bad_mask].index, inplace=True)
    return _df, abnormal_df

def check_tofill(_df):
    """Return the first-occurring row of each '匹配用' group.

    Adds a 'rank' helper column to *_df* as a side effect, keeps only the
    rows ranked first within their group, and resets the index (the old
    index is preserved in an 'index' column).
    """
    _df['rank'] = _df.groupby(['匹配用'])['匹配用'].rank(method='first')
    first_rows = _df.loc[_df['rank'] == 1]
    return first_rows.reset_index()


def concat_df_chunks(dfs, chunksize, filename):
    """
    按照指定块大小对多个DataFrame对象进行拼接
    (concatenate several DataFrames row-wise through a disk-backed memmap).

    :param dfs: 多个DataFrame对象组成的list — all must share the same
        number of (numeric, float32-convertible) columns.
    :param chunksize: 每个块的大小 (rows copied per block).
    :param filename: 存储的文件名 (path of the backing memmap file).
    :return: float32 DataFrame with all rows of *dfs*, original row labels
        restored.
    """
    n_cols = dfs[0].shape[1]
    n_rows = sum(df.shape[0] for df in dfs)

    # Disk-backed buffer so the concatenation never holds two full copies
    # of the data in RAM at once.
    mmap_file = np.memmap(filename, dtype='float32', mode='w+',
                          shape=(n_rows, n_cols))

    # Copy each frame block-by-block to bound peak memory usage.
    # (The previous version called np.arange on a list — a TypeError —
    # and mixed up row and column offsets.)
    indexes = []
    row_start = 0
    for df in dfs:
        for offset in range(0, df.shape[0], chunksize):
            block = df.iloc[offset:offset + chunksize]
            mmap_file[row_start:row_start + block.shape[0], :] = block.values
            row_start += block.shape[0]
        indexes.append(df.index)

    # Build the result on top of the memmap buffer.
    result = pd.DataFrame(mmap_file, dtype='float32')

    # Restore the original row labels. Index objects are joined with
    # Index.append — pd.concat rejects bare Index objects.
    full_index = indexes[0]
    if len(indexes) > 1:
        full_index = full_index.append(indexes[1:])
    result.index = full_index

    return result


def get_month_range(start_day, end_day):
    """Return every month from start_day's month through end_day's month
    (inclusive) as 'YYYY-MM-01' strings.

    :param start_day: date/datetime marking the first month.
    :param end_day: date/datetime marking the last month.
    :return: list of 'YYYY-MM-01' strings; empty when end_day's month
        precedes start_day's month.
    """
    total = (end_day.year - start_day.year) * 12 + end_day.month - start_day.month
    # mon counts absolute months from January of start_day.year, so the
    # year rolls over via mon // 12 and the month is mon % 12 + 1.
    # Zero-padding is done directly with :02d instead of the previous
    # fragile post-hoc string-length fix-up.
    return [
        f'{start_day.year + mon // 12}-{mon % 12 + 1:02d}-01'
        for mon in range(start_day.month - 1, start_day.month + total)
    ]

def add_new_cols(df_, colname1, colname2):
    """Sum per-row key/value list pairs into a single-column DataFrame.

    Each row's row[colname1] (keys) is zipped against row[colname2]
    (values) into a dict first — duplicate keys within one row therefore
    collapse to the last value — and the per-row dicts are then summed
    key-wise across all rows.

    :return: one-column DataFrame (column label 0) indexed by key,
        sorted by index.
    """
    per_row = [
        dict(zip(row[colname1], row[colname2]))
        for _, row in df_.iterrows()
    ]

    totals = defaultdict(int)
    for mapping in per_row:
        for key, value in mapping.items():
            totals[key] += value

    return pd.DataFrame([totals]).T.sort_index()

# def rearrange_df(df_,colname1,colname2):
#     # Pre-process the data outside of the DataFrame
#     allocation_data = []
#     for _, row in df_.iterrows():
#         data_dict = dict(zip(row[colname1], row[colname2]))
#         row_df = row[:-2].to_frame().T
#         df_single = pd.DataFrame([data_dict]).T.reset_index()
#         df_single.rename(columns={0:'分摊','index':'日期'},inplace=True)
#         df_i = pd.concat([row_df.reset_index(),df_single],axis=0,ignore_index=True).ffill().iloc[1:,:]#.drop('index',axis=1).dropna(axis=1,how='all')

#         allocation_data.append(df_i.to_numpy())
#     allocation_all = pd.DataFrame(np.concatenate(allocation_data, axis=0))
#     allocation_all.columns = ['index']+list(df_.columns)[:-2]+['日期','分摊']

#     return allocation_all
def rearrange_df(df_,colname1,colname2):
    """Explode each row's paired list columns into long-format rows.

    For every source row, zips row[colname1] with row[colname2] and emits
    one output row per pair under the new '日期'/'分摊' columns, with the
    remaining (all-but-last-two) source columns forward-filled down.

    :param df_: input DataFrame; must carry a '保期月份' column — its sum
        plus the row count is used to pre-size the result (assumes this
        matches the total number of emitted pairs — TODO confirm).
    :param colname1: column holding the per-row list used as '日期' keys.
    :param colname2: column holding the per-row list used as '分摊' values.
    :return: DataFrame with columns ['index', <df_ cols except last two>,
        '日期', '分摊']; columns that end up all-NaN are dropped.
    """
    # Pre-allocate the whole result once and fill slices via .iloc —
    # avoids the quadratic cost of repeated pd.concat inside the loop.
    N = int(df_['保期月份'].sum()+df_.shape[0])
    # check_memory()

    allocation_all = pd.DataFrame(index=pd.RangeIndex(0, N), 
                                  columns=['index']+list(df_.columns)[:-2]+['日期','分摊']
                                  )#dtype={'日期':'object','保障开始时间':'int','保障结束时间':''}
    idx = 0
    for _, row in df_.iterrows():
        # Duplicate keys within one row collapse to the last value,
        # since dict() keeps only the final occurrence.
        data_dict = dict(zip(row[colname1], row[colname2]))
        row_df = row[:-2].to_frame().T
        df_single = pd.DataFrame([data_dict]).T.reset_index()
        df_single.rename(columns={0:'分摊','index':'日期'},inplace=True)
        # Stack the source row on top, ffill its values down into the
        # exploded rows, then drop the header row itself (iloc[1:]).
        df_i = pd.concat([row_df.reset_index(),df_single],axis=0,ignore_index=True).ffill().iloc[1:,:]#.drop('index',axis=1).dropna(axis=1,how='all')
        allocation_all.iloc[idx:idx+df_i.shape[0],:] = df_i
        idx += df_i.shape[0]
    allocation_all.dropna(axis=1,how='all',inplace=True)
    return allocation_all



def process_dataframe(df_, colname1, colname2):
    # Flatten the list columns and create the new '日期' and '分摊' columns
    dates = np.concatenate(df_[colname1].values)
    allocations = np.concatenate(df_[colname2].values)
    
    # Repeat the other columns to match the length of the new columns
    repeated_data = np.repeat(df_.iloc[:, :-2].values, [len(lst) for lst in df_[colname1]], axis=0)
    
    # Create the index for the DataFrame
    index_col = np.concatenate([[i] * len(lst) for i, lst in enumerate(df_[colname1])]).reshape(-1, 1)
    
    # Combine all columns into a single NumPy array
    combined_data = np.hstack((index_col, repeated_data, dates.reshape(-1, 1), allocations.reshape(-1, 1)))
    
    # Define the columns for the resulting DataFrame
    columns = ['index'] + list(df_.columns)[:-2] + ['日期', '分摊']
    
    # Create the DataFrame from the combined NumPy array
    allocation_all_df = pd.DataFrame(combined_data, columns=columns)
    
    return allocation_all_df