#!/usr/bin/env python
# coding: utf-8

# # 加载工具包
# ```python
# import sys
# sys.path.append("/home/loong/jupyter")
# import common_utils
# from common_utils import *
# ```
# # 重新加载
# ```python
# import imp
# imp.reload(common_utils)
# from common_utils import *
# ```
# # ipynb转化为py文件
# ```shell
# jupyter nbconvert --to script common_utils.ipynb
# ```

# # 导包

# In[1]:


import os 
import time
import json
import gc
import math
import numpy as np
import pandas as pd
import warnings
import re
from matplotlib import pyplot as plt
from datetime import datetime,timedelta
from sklearn import metrics
import lightgbm as lgb
from lightgbm import log_evaluation, early_stopping

from tqdm import tqdm
tqdm.pandas()

import pymysql
from sqlalchemy import create_engine


# # 日期工具

# ## 日期转化为周

# In[2]:


def format_date2week(date_obj):
    """Return the ISO week containing *date_obj* as 'YYYY.MM.DD~MM.DD' (Monday~Sunday)."""
    monday_date = date_obj - timedelta(days=date_obj.weekday())
    sunday_date = monday_date + timedelta(days=6)
    # the year shown is the Monday's year, even if the week crosses New Year
    return f"{monday_date.year}.{monday_date.strftime('%m.%d')}~{sunday_date.strftime('%m.%d')}"


# # json操作

# ## 提取json中的字符串

# In[3]:


def search_json(json_str, query):
    """Parse *json_str* as JSON and return the value found at dotted path *query*."""
    return query_dict(json.loads(json_str), query)


def query_dict(data, query):
    """Walk *data* along the dotted path *query* (e.g. '$.a.b'); None if any key is missing."""
    node = data
    for part in query.strip("$.").split("."):
        if not (isinstance(node, dict) and part in node):
            return None
        node = node[part]
    return node


def query_json(json_str, querys):
    """
    Extract value(s) from a JSON string.

    :param json_str: JSON text; NaN/None inputs yield None
    :param querys: a single dotted path (str) or a list of paths
    :return: the value for a str query, a tuple of values for a list of
        queries, or None (empty/invalid query spec, or unparsable JSON —
        a message is printed in the latter case)
    """
    if pd.isna(json_str):
        return None
    try:
        obj = json.loads(json_str)
    except json.JSONDecodeError:
        print(f"发生异常json: \n{json_str}")
        return None
    if isinstance(querys, str) and len(querys) > 0:
        return query_dict(obj, querys)
    if isinstance(querys, list) and len(querys) > 0:
        return tuple(query_dict(obj, q) for q in querys)


# In[4]:


# 模板代码
# columns = ['modelScoreParams_scoreV1','modelScoreParams_scoreV2','modelScoreParams_scoreV3','modelScoreParams_scoreV4','modelScoreParams_scoreV5','modelScoreParams_scoreV6',
#            'modelScoreParams_scoreV7','modelScoreParams_scoreV8','modelScoreParams_scoreV9','modelScoreParams_scoreV10','modelScoreParams_scoreV11','modelScoreParams_scoreV12',
#            'modelScoreParams_scoreV13']
# df[columns] = df.apply(lambda x : query_json(x['req_data'],columns),axis=1,result_type='expand')
# df[columns] = df[columns].astype(float)


# # pandas相关

# ## pandas显示设置

# In[5]:


def set_float_show(f="{:,.3f}"):
    """Set pandas' float display format (default: thousands separator, 3 decimals)."""
    pd.set_option("display.float_format", f)

def set_warnings(need=True):
    """Toggle Python warnings: restore default behaviour when *need*, silence them otherwise."""
    warnings.filterwarnings("default" if need else "ignore")


# silence warnings as soon as this module is imported
set_warnings(False)

def set_pd_show(max_rows=500, max_columns=200, max_colwidth=70):
    """Raise pandas display limits so wide/long frames render fully in notebooks."""
    for option, value in (
        ("display.max_rows", max_rows),
        ("display.max_columns", max_columns),
        ("display.max_colwidth", max_colwidth),
    ):
        pd.set_option(option, value)


# ## 根据dataframe中的url字段进行数据下载

# In[6]:


import requests 
from tqdm import tqdm
from urllib.parse import urlparse
def is_valid_url(url):
    """Return True when *url* parses with both a scheme and a network location."""
    try:
        parts = urlparse(url)
    except ValueError:
        return False
    return bool(parts.scheme) and bool(parts.netloc)
def download_data(df, url_column, id_column, suffixes='_data'):
    """
    Download the content behind each URL in df[url_column] into a new column.

    :param df: dataframe holding the URLs (mutated in place and returned)
    :param url_column: column containing the URLs to fetch
    :param id_column: unique key column, used to identify failing rows in messages
    :param suffixes: suffix appended to url_column to name the result column
    :return: the same dataframe with decoded response text in url_column + suffixes
    :raises Exception: when either column is missing from df
    """
    if url_column not in df.columns or id_column not in df.columns:
        raise Exception(f"Please check whether the data contains these two columns:{url_column},{id_column}")
    # bug fix: the result column must honour *suffixes*; it used to be
    # initialised as url_column + '_data' while the loop wrote url_column + suffixes
    new_column_name = url_column + suffixes
    df[new_column_name] = None
    with tqdm(total=len(df), desc="Downloading") as pbar:
        with requests.Session() as session:  # one session: connection reuse across rows
            for i, row in df.iterrows():
                url = row[url_column]
                id_column_value = row[id_column]
                if isinstance(url, str) and is_valid_url(url):  # only fetch well-formed string URLs
                    response = session.get(url, stream=True)
                    if response.status_code == 200:
                        block_size = 1024  # 1 KiB chunks
                        data = b''.join(chunk for chunk in response.iter_content(block_size))
                        # assumes the payload is UTF-8 decodable text — TODO confirm for binary sources
                        df.loc[i, new_column_name] = data.decode()
                    else:
                        print(
                            f"Failed to download data for {id_column}:{id_column_value} from {url}: {response.status_code}")
                else:
                    print(f"url format is error ,{id_column}:{id_column_value},url:{url}")
                pbar.update(1)
    return df


# ## 拉平json格式的字段

# In[7]:


def parse_json_data(df, json_column, id_column, retain_column=None):
    """
    Flatten a column of JSON-array strings into rows of a new dataframe.

    :param df: dataframe to flatten
    :param json_column: column holding the JSON strings (each an array of objects)
    :param id_column: unique key copied onto every exploded row (also used to
        identify rows whose JSON fails to parse)
    :param retain_column: extra column(s) to carry over; a str or a list of str
    :return: a new dataframe with one row per JSON array item
    """
    flattened = []
    for _, row in df.iterrows():
        raw = row[json_column]
        key = row[id_column]
        if not isinstance(raw, str):
            continue  # non-string cells (NaN etc.) are silently skipped
        try:
            items = json.loads(raw)
        except json.JSONDecodeError:
            print(f"Failed to parse JSON data for {id_column}:{key}")
            continue
        for item in items:
            item[id_column] = key
            if isinstance(retain_column, str) and len(retain_column) > 0:
                item[retain_column] = row[retain_column]
            elif isinstance(retain_column, list) and len(retain_column) > 0:
                for col in retain_column:
                    item[col] = row[col]
            flattened.append(item)
    return pd.DataFrame(flattened)


# ## 数据校验

# In[8]:


def is_same_df(small_df, big_df):
    """
    Compare two row-aligned dataframes column by column and print, for every
    column that differs, the number of mismatching values as 'col:count'.

    NaNs are filled with -1 on both sides first, so missing values compare
    equal to each other (and to literal -1 — keep that in mind for data that
    legitimately contains -1).
    """
    n_rows = small_df.shape[0]
    for col in small_df.columns:
        matches = (small_df[col].fillna(-1) == big_df[col].fillna(-1)).sum()
        mismatches = n_rows - matches
        if mismatches != 0:
            print(f"{col}:{mismatches}")


def compare_df_data(small_df, big_df, id_columns):
    """
    Align *big_df* to *small_df* on the key column *id_columns* and compare contents.

    Rows of big_df whose key appears in small_df are selected, both frames are
    sorted by the key and re-indexed, then is_same_df prints any differences.
    Inputs are copied, so neither argument is mutated.
    """
    small_sorted = small_df.copy().sort_values(id_columns).reset_index(drop=True)
    key_mask = big_df[id_columns].isin(small_df[id_columns])
    big_aligned = big_df.copy()[key_mask].sort_values(id_columns).reset_index(drop=True)
    is_same_df(small_sorted, big_aligned)


# ## dataframe数据切割

# In[9]:


def split_df(df, sheet_size=3000):
    """Cut *df* into a list of consecutive chunks of at most *sheet_size* rows."""
    import gc
    chunks = [df.iloc[start:start + sheet_size] for start in range(0, len(df), sheet_size)]
    # drop the local reference and collect, mirroring the original's attempt
    # to shed the big frame early (chunks still reference its data)
    del df
    gc.collect()
    return chunks


# # 连接数据库

# In[10]:


def postgre_engine(host, port, user, passwd, db):
    """Build a SQLAlchemy engine for a PostgreSQL database; prints the error and returns None on failure."""
    try:
        engine = create_engine(f"postgresql://{user}:{passwd}@{host}:{port}/{db}")
        print("Successfully connected to the postgre:{}/{}".format(host, port))
        return engine
    except Exception as e:
        print(f"An error occurred: {e}")
        return None


def mysql_connection(host, port, user, password):
    """Open a raw pymysql connection; prints the error and returns None on failure."""
    try:
        conn = pymysql.connect(host=host, port=port, user=user, password=password)
        print("Successfully connected to the mysql:{}/{}".format(host, port))
        return conn
    except Exception as e:
        print(f"An error occurred: {e}")
        return None
    
def mysql_engine(host, port, user, passwd, db=None):
    """
    Build a SQLAlchemy engine for MySQL (pymysql driver); prints the error and
    returns None on failure.

    NOTE(review): leaving db=None renders literally as '/None' in the URL —
    callers appear to always pass a real database name; confirm before relying
    on the default.
    """
    try:
        return create_engine(f'mysql+pymysql://{user}:{passwd}@{host}:{port}/{db}')
    except Exception as e:
        print(f"An error occurred: {e}")
        return None

# mysql_rule = mysql_engine('47.253.56.86',4000,'rule','Z3Vl7enFj2eqkeJf','rule')
#mysql_rule = mysql_engine('172.20.1.129', 3306, 'rule', 'Z3Vl7enFj2eqkeJf', 'rule')


# # 路径操作

# ## 校验或创建文件夹

# In[11]:


def mkdir_if_not_exists(dir_path):
    """
    Create *dir_path* if it does not already exist.

    Uses os.makedirs(exist_ok=True), which also creates missing parent
    directories and avoids the check-then-create race of the previous
    os.path.exists + os.mkdir implementation.
    """
    import os
    os.makedirs(dir_path, exist_ok=True)


# ## 扫描文件夹中的数据

# In[12]:


def data_of_dir(dir_path: str, query_flags="", start_date=None):
    """
    List data files (.parquet/.pqt/.pickle/.pkl/.csv/.xlsx) under *dir_path*.

    :param dir_path: directory to scan (non-recursive)
    :param query_flags: substring(s) a file name must contain; str, list of str,
        or None (None/"" matches everything)
    :param start_date: optional 'YYYY-MM-DD' string; only files whose name
        carries a date >= start_date are kept — files with no date in the
        name are skipped when this filter is active
    :return: sorted list of matching paths; for a list of flags, the per-flag
        results concatenated in order (duplicates possible, as before)
    """
    data_suffixes = ('.parquet', '.pqt', '.pickle', '.pkl', '.csv', '.xlsx')
    date_pattern = re.compile(r"\d{4}-\d{2}-\d{2}")

    def _fetch_filenames(contains_str):
        contains_str = contains_str or ""
        file_paths = []
        for file_name in os.listdir(dir_path):
            if contains_str not in file_name:
                continue
            if not any(suffix in file_name for suffix in data_suffixes):
                continue
            if start_date is not None:
                match = date_pattern.search(file_name)
                # bug fix: match.group() used to raise AttributeError for file
                # names without a date; such files are now skipped instead
                if match is None or match.group() < start_date:
                    continue
            file_paths.append(os.path.join(dir_path, file_name))
        file_paths.sort()
        return file_paths

    if isinstance(query_flags, str) or query_flags is None:
        return _fetch_filenames(query_flags)
    elif isinstance(query_flags, list):
        file_names = []
        for flag in query_flags:
            file_names.extend(_fetch_filenames(flag))
        return file_names


# ## 批量加载指定路径的文件

# In[13]:


def batch_load_data(file_paths):
    """
    Load a list of files into one concatenated dataframe.

    Supported extensions: .parquet/.pqt, .pickle/.pkl, .xlsx, .csv. Files that
    fail to load are reported and skipped. Unsupported extensions are now also
    reported and skipped — previously they silently re-appended the previous
    file's dataframe (or hit NameError for the first file).

    :param file_paths: iterable of paths to load
    :return: concatenated dataframe with a fresh RangeIndex
    """
    df_list = []
    for file_path in file_paths:
        try:
            if file_path.endswith((".parquet", ".pqt")):
                df = pd.read_parquet(file_path)
            elif file_path.endswith((".pickle", ".pkl")):
                df = pd.read_pickle(file_path)
            elif file_path.endswith(".xlsx"):
                df = pd.read_excel(file_path)
            elif file_path.endswith(".csv"):
                df = pd.read_csv(file_path)
            else:
                print(f"unsupported file type, skipped:{file_path}")
                continue
            df_list.append(df)
        except Exception as e:
            print(f'发生异常，报错信息如下: \n{e}')
            print(f"异常的文件为:{file_path}")
    df_new = pd.concat(df_list).reset_index(drop=True)
    print(f"load {len(file_paths)} file,data shape {df_new.shape}")
    return df_new


# # 分析工具

# ## 高效率填充缺失值

# In[14]:


def quick_fillna(df, cols, value):
    """
    Fill missing values in-place through a .loc mask.

    fillna's inplace flag does not reliably avoid a copy, so on very large
    frames assigning through the null mask is the faster option.
    :param df: dataframe to mutate
    :param cols: columns to fill
    :param value: replacement for missing entries
    """
    for col in cols:
        null_mask = df[col].isnull()
        if null_mask.any():
            df.loc[null_mask, col] = value


# ## 批量计算枚举值

# In[15]:


def clac_unique(df, cols):
    """Count distinct values (NaN counts as a value) per column; returns ['var', 'unique']."""
    counts = [[col, len(df[col].unique())] for col in cols]
    return pd.DataFrame(counts, columns=["var", "unique"])


# ## 单变量分析

# ### ks计算

# In[16]:


def calc_ks(y_label, y_pred):
    """
    KS statistic over 200 equal-width score thresholds.

    For each threshold the cumulative good/bad rates of samples scoring
    strictly below it are compared; the maximum absolute gap is returned.
    Assumes both classes are present (otherwise division by zero).
    """
    scores = list(y_pred)
    labels = list(y_label)
    total_bad = sum(labels)
    total_good = len(labels) - total_bad
    pairs = sorted(zip(scores, labels), key=lambda p: p[0])
    lowest = min(scores)
    step = (max(scores) - lowest) / 200

    ks_values = []
    for i in range(1, 201):
        cut = lowest + i * step
        below = [label for score, label in pairs if score < cut]
        bad_cnt = sum(below)
        good_cnt = len(below) - bad_cnt
        ks_values.append(abs(good_cnt / total_good - bad_cnt / total_bad))
    return max(ks_values)


# ### psi计算

# In[17]:


def fea_psi_calc(actual, predict, bins=10):
    """
    Compute the PSI (Population Stability Index) of a continuous or discrete variable.

    Parameters:
    actual: 1-d array or Series — the variable in the baseline/training set
    predict: 1-d array or Series — the variable in the comparison/test set
    bins: number of equal-width buckets over the baseline range

    Returns:
    dict of the form {'psi': psi_value}

    Notes (review): every bucket count is incremented by 1 before computing
    percentages — smoothing that avoids log(0)/division by zero but slightly
    inflates each share; inputs are assumed non-empty and numeric.
    """
    psi_dict = {}
    actual = np.sort(actual)
    actual_distinct = np.sort(list(set(actual)))
    predict = np.sort(predict)
    # predict_distinct = np.sort(list(set(predict)))
    actual_len = len(actual)
    actual_distinct_len = len(actual_distinct)
    predict_len = len(predict)
    # predict_distinct_len = len(predict_distinct)
    psi_cut = []
    actual_bins = []
    predict_bins = []
    actual_min = actual.min()
    actual_max = actual.max()
    cuts = []
    binlen = (actual_max - actual_min) / bins
    if actual_distinct_len < bins:
        # Fewer distinct values than buckets: the distinct values themselves become the cut points.
        for i in actual_distinct:
            cuts.append(i)
        for i in range(2, (actual_distinct_len + 1)):
            # NOTE(review): this compares i against *bins*, not actual_distinct_len,
            # so the open-ended top bucket is only used when they coincide — confirm intended.
            if i == bins:
                lowercut = cuts[i - 2]
                uppercut = float("Inf")
            else:
                lowercut = cuts[i - 2]
                uppercut = cuts[i - 1]
            actual_cnt = ((actual >= lowercut) & (actual < uppercut)).sum() + 1  # +1 smoothing
            predict_cnt = ((predict >= lowercut) & (predict < uppercut)).sum() + 1
            actual_pct = (actual_cnt + 0.0) / actual_len
            predict_pct = (predict_cnt + 0.0) / predict_len
            psi_cut.append(
                (actual_pct - predict_pct) * math.log(actual_pct / predict_pct)
            )
            actual_bins.append(actual_pct)
            predict_bins.append(predict_pct)
    else:
        # Regular case: bins-1 interior equal-width cut points over the baseline
        # range, with open-ended first and last buckets.
        for i in range(1, bins):
            cuts.append(actual_min + i * binlen)
        for i in range(1, (bins + 1)):
            if i == 1:
                lowercut = float("-Inf")
                uppercut = cuts[i - 1]
            elif i == bins:
                lowercut = cuts[i - 2]
                uppercut = float("Inf")
            else:
                lowercut = cuts[i - 2]
                uppercut = cuts[i - 1]
            actual_cnt = ((actual >= lowercut) & (actual < uppercut)).sum() + 1  # +1 smoothing
            predict_cnt = ((predict >= lowercut) & (predict < uppercut)).sum() + 1
            actual_pct = (actual_cnt + 0.0) / actual_len
            predict_pct = (predict_cnt + 0.0) / predict_len
            psi_cut.append(
                (actual_pct - predict_pct) * math.log(actual_pct / predict_pct)
            )
            actual_bins.append(actual_pct)
            predict_bins.append(predict_pct)
    psi = sum(psi_cut)
    psi_dict["psi"] = psi
    return psi_dict


# In[18]:


def batch_calc_psi(df1, df2, cols):
    """
    PSI of every column in *cols* between df1 and df2.

    Columns containing nulls in either frame are skipped with a printed
    message. Returns a ['var', 'psi'] dataframe sorted by psi descending.
    """
    records = []
    for col in tqdm(cols):
        if df1[col].isnull().sum() == 0 and df2[col].isnull().sum() == 0:
            records.append([col, fea_psi_calc(df1[col], df2[col])["psi"]])
        else:
            print(f"field {col} has Null value")
    return pd.DataFrame(records, columns=["var", "psi"]).sort_values("psi", ascending=False)


# ## 综合分析

# In[19]:


def group_calc(df, group=None, sum_col=None, count_col=None, unique_col=None, rate_tupes=None):
    """
    Business-analysis helper: compute several grouped aggregates side by side.

    :param df: dataframe to aggregate
    :param group: column(s) to group by (required in practice)
    :param sum_col: columns to sum per group
    :param count_col: columns to count per group
    :param unique_col: columns to nunique per group
    :param rate_tupes: ratio columns, each a 3-tuple (numerator, denominator, new_name);
        other tuple lengths are reported and ignored
    :return: dataframe of the concatenated aggregates, one column per metric
    """
    # idiom fix: mutable [] defaults replaced with None sentinels
    group = [] if group is None else group
    sum_col = [] if sum_col is None else sum_col
    count_col = [] if count_col is None else count_col
    unique_col = [] if unique_col is None else unique_col
    rate_tupes = [] if rate_tupes is None else rate_tupes

    grouped = df.groupby(group)

    parts = []
    if isinstance(sum_col, list) and len(sum_col) > 0:
        parts.append(grouped[sum_col].sum())
    if isinstance(count_col, list) and len(count_col) > 0:
        parts.append(grouped[count_col].count())
    if isinstance(unique_col, list) and len(unique_col) > 0:
        parts.append(grouped[unique_col].nunique())

    assert len(parts) > 0, "参数异常，无法计算出相应的结果，请检查入参信息"

    result = pd.concat(parts, axis=1)

    if isinstance(rate_tupes, list) and len(rate_tupes) > 0:
        for tup in rate_tupes:
            if len(tup) == 3:
                result[tup[2]] = result[tup[0]] / result[tup[1]]
            else:
                print(f"param rate_tupes has error format tupe:{tup}")

    return result


# In[20]:


def univerate( df, var_name, target, lamb=0.001, retWoeDict=False):
    """
    Univariate analysis: per-bin stats plus IV, KS and LIFT of one variable
    against a binary target.

    Best applied to encoded/binned numeric variables (use cond_insert upstream
    to adjust the binning if needed).

    :param df: dataframe holding the variable and the target
    :param var_name: column to analyse; its values are treated as bins
    :param target: binary target column — assumed to contain both 0 and 1,
        otherwise the rename below produces missing columns
    :param lamb: smoothing constant inside the WOE log ratio (avoids log(0))
    :param retWoeDict: when True also return {bin_value: woe} for encoding
    :return: stats dataframe, plus the WOE dict when retWoeDict is True
    """
    # bins x target contingency table; 1 -> positive, 0 -> negative
    dti = pd.crosstab(df[var_name], df[target])
    dti.rename(
        {1: "positive",0: "negative"},
        axis=1,
        inplace=True,
    )
    dti["positive"] = dti["positive"].astype(int)
    dti["negative"] = dti["negative"].astype(int)
    p_t = dti["positive"].sum()
    n_t = dti["negative"].sum()
    t_t = p_t + n_t
    r_t = p_t / t_t  # overall positive rate, the LIFT baseline
    dti["total"] = dti["positive"] + dti["negative"]
    dti["total_rate"] = dti["total"] / t_t
    dti["positive_rate"] = (
            dti["positive"] / dti["total"]
    )
    dti["negative_cum"] = dti["negative"].cumsum()
    dti["positive_cum"] = dti["positive"].cumsum()
    # WOE with additive smoothing so empty bins do not explode the log
    dti["woe"] = np.log(
        ((dti["negative"] / n_t) + lamb) / ((dti["positive"] / p_t) + lamb)
    )
    dti["LIFT"] = dti["positive_rate"] / r_t
    dti["KS"] = np.abs((dti["positive_cum"] / p_t) - (dti["negative_cum"] / n_t))
    dti["IV"] = (dti["negative"] / n_t - dti["positive"] / p_t) * dti["woe"]
    IV = dti["IV"].sum()
    KS = dti["KS"].max()
    # per-bin IV/KS columns are overwritten with the aggregate values
    dti["IV"] = IV
    dti["KS"] = KS
    dti = dti.reset_index()
    dti.columns.name = None
    # NOTE(review): there is no "Total" column (it is lowercase "total"), so this
    # rename only maps var_name -> "bin"; confirm whether a "num" column was intended
    dti.rename({"Total": "num", var_name: "bin"}, axis=1, inplace=True)
    dti.insert(0, "target", [target] * dti.shape[0])
    dti.insert(0, "var", [var_name] * dti.shape[0])
    if retWoeDict:
        if isinstance(dti["bin"].dtype, pd.CategoricalDtype): # pd.CategoricalDtype check works on recent pandas
            # interval-typed bins: key the WOE dict by each interval's right edge
            dti["v"] = dti["bin"].map(lambda x: x.right)
        else:
            dti["v"] = dti["bin"]
        woeDict = pd.Series(dti["woe"].values, index=dti["v"].values).to_dict() # type: ignore
        dti.drop(columns=["negative_cum", "positive_cum", "v"], inplace=True)
        return dti, woeDict
    dti.drop(columns=["negative_cum", "positive_cum"], inplace=True)
    return dti


# ## 绘图工具

# In[21]:


def plot_roc_ks(y_pred, y_label, suptitle="标题"):
    """
    Plot the ROC curve (with AUC) and the KS curve for a set of predictions.

    :param y_pred: predicted scores
    :param y_label: binary labels (1 = positive/bad); both classes must be present
    :param suptitle: figure title
    :return: result of plt.show(fig)
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
    fig.suptitle(suptitle, fontsize=17)
    # bug fix: roc_curve returns (fpr, tpr, thresholds); the locals were
    # previously bound in swapped order (the plot itself was unaffected)
    fpr, tpr, threshold = metrics.roc_curve(y_label, y_pred)
    AUC = metrics.roc_auc_score(y_label, y_pred)
    ax1.plot(fpr, tpr, color="blue", label="AUC=%.3f" % AUC)
    ax1.plot([0, 1], [0, 1], "r--")
    ax1.set_ylim(0 - 0.02, 1 + 0.02)
    ax1.set_xlim(0 - 0.02, 1 + 0.02)
    ax1.set_title("ROC")
    ax1.legend(loc="best")

    pred_list = list(y_pred)
    label_list = list(y_label)
    total_bad = sum(label_list)
    total_good = len(label_list) - total_bad
    items = sorted(zip(pred_list, label_list), key=lambda x: x[0])
    pred_bin = []
    good_rate = []
    bad_rate = []
    ks_list = []

    # perf fix: running prefix counts replace the old O(n^2) re-scan of
    # items[0:i] per iteration; rates still exclude the current item
    bad_cum = 0
    for idx, (score, label) in enumerate(items):
        pred_bin.append(score)
        goodrate = (idx - bad_cum) / total_good
        badrate = bad_cum / total_bad
        good_rate.append(goodrate)
        bad_rate.append(badrate)
        ks_list.append(abs(goodrate - badrate))
        bad_cum += label
    ax2.plot(pred_bin, good_rate, color="green", label="good_rate")
    ax2.plot(pred_bin, bad_rate, color="red", label="bad_rate")
    ax2.plot(pred_bin, ks_list, color="blue", label="good-bad")
    ax2.set_title("KS:{:.3f}".format(max(ks_list)))
    ax2.legend(loc="best")
    return plt.show(fig)


# # 常用业务字段转换

# In[22]:


def def_pd1_aclc(row):
    """pd1 default flag restricted to agreed rows: 1/0 when agr_pd1 == 1, else None."""
    if row['agr_pd1'] != 1:
        return None
    return 1 if row['def_pd1'] == 1 else 0
def def_cpd_aclc(row):
    """cpd default flag restricted to agreed rows: 1/0 when agr_cpd == 1, else None."""
    if row['agr_cpd'] != 1:
        return None
    return 1 if row['def_cpd'] == 1 else 0


# 
