{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "a7307320",
   "metadata": {},
   "source": [
    "# 加载工具包\n",
    "```python\n",
    "import sys\n",
    "sys.path.append(\"/home/loong/jupyter\")\n",
    "import common_utils\n",
    "from common_utils import *\n",
    "```\n",
    "# 重新加载\n",
    "```python\n",
    "import importlib\n",
    "importlib.reload(common_utils)\n",
    "import common_utils\n",
    "from common_utils import *\n",
    "```\n",
    "# ipynb转化为py文件\n",
    "```shell\n",
    "jupyter nbconvert --to script common_utils.ipynb\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2cd8ba22",
   "metadata": {},
   "source": [
    "# 导包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "70f6c859-9ddc-4dad-a782-4c941fb9b68d",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:05.780239Z",
     "iopub.status.busy": "2024-03-22T03:20:05.780130Z",
     "iopub.status.idle": "2024-03-22T03:20:06.090485Z",
     "shell.execute_reply": "2024-03-22T03:20:06.089990Z",
     "shell.execute_reply.started": "2024-03-22T03:20:05.780229Z"
    },
    "tags": [],
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.588498Z",
     "start_time": "2024-03-30T07:02:36.590405Z"
    }
   },
   "outputs": [],
   "source": [
    "import os \n",
    "import time\n",
    "import json\n",
    "import gc\n",
    "import math\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import warnings\n",
    "import re\n",
    "from matplotlib import pyplot as plt\n",
    "from datetime import datetime,timedelta\n",
    "from sklearn import metrics\n",
    "import lightgbm as lgb\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "\n",
    "from tqdm import tqdm\n",
    "tqdm.pandas()\n",
    "\n",
    "import pymysql\n",
    "from sqlalchemy import create_engine"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "95d2707b-6007-4f33-a7e8-fac4b84396e2",
   "metadata": {},
   "source": [
    "# 日期工具"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ebfe7419-5ffd-438f-bbbf-e22e41443c47",
   "metadata": {},
   "source": [
    "## 日期转化为周"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "48c93442-255f-470c-ba23-e1b331d67860",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.091380Z",
     "iopub.status.busy": "2024-03-22T03:20:06.090997Z",
     "iopub.status.idle": "2024-03-22T03:20:06.094540Z",
     "shell.execute_reply": "2024-03-22T03:20:06.094136Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.091368Z"
    },
    "tags": [],
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.593344Z",
     "start_time": "2024-03-30T07:02:37.590078Z"
    }
   },
   "outputs": [],
   "source": [
    "def format_date2week(date_obj):\n",
    "    weekday = date_obj.weekday()\n",
    "    start_of_week = date_obj - timedelta(days=weekday)\n",
    "    end_of_week = start_of_week + timedelta(days=6)\n",
    "    monday = start_of_week.strftime('%m.%d')\n",
    "    year = start_of_week.year\n",
    "    sunday = end_of_week.strftime('%m.%d')\n",
    "    return f'{year}.{monday}~{sunday}'"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c1ec4901-b25f-4833-b898-9cdd13c5b521",
   "metadata": {},
   "source": [
    "# json操作"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3c4877cf-2b2e-4724-a1b9-c5bae77e3884",
   "metadata": {},
   "source": [
    "## 提取json中的字符串"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "d5cd60a4-7fad-4af2-9c08-ed0591ab1146",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.095107Z",
     "iopub.status.busy": "2024-03-22T03:20:06.094989Z",
     "iopub.status.idle": "2024-03-22T03:20:06.107161Z",
     "shell.execute_reply": "2024-03-22T03:20:06.106740Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.095098Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.605793Z",
     "start_time": "2024-03-30T07:02:37.595448Z"
    }
   },
   "outputs": [],
   "source": [
    "def search_json(json_str, query):\n",
    "    obj = json.loads(json_str)\n",
    "    return query_dict(obj, query)\n",
    "\n",
    "def query_dict(data, query):\n",
    "    keys = query.strip(\"$.\").split(\".\")\n",
    "    result = data\n",
    "    for key in keys:\n",
    "        if isinstance(result, dict) and key in result:\n",
    "            result = result[key]\n",
    "        else:\n",
    "            return None\n",
    "    return result\n",
    "\n",
    "def query_json(json_str, querys):\n",
    "    if pd.isna(json_str):\n",
    "        return None\n",
    "    try:\n",
    "        obj = json.loads(json_str)\n",
    "        if isinstance(querys, str) and len(querys) > 0:\n",
    "            return query_dict(obj, querys)\n",
    "        if isinstance(querys, list) and len(querys) > 0:\n",
    "            result_arr = []\n",
    "            for query in querys:\n",
    "                result_arr.append(query_dict(obj, query))\n",
    "            return tuple(result_arr)\n",
    "    except json.JSONDecodeError:\n",
    "        print(f\"发生异常json: \\n{json_str}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8a9c35ae",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.610903Z",
     "start_time": "2024-03-30T07:02:37.607868Z"
    }
   },
   "outputs": [],
   "source": [
    "# 模板代码\n",
    "# columns = ['modelScoreParams_scoreV1','modelScoreParams_scoreV2','modelScoreParams_scoreV3','modelScoreParams_scoreV4','modelScoreParams_scoreV5','modelScoreParams_scoreV6',\n",
    "#            'modelScoreParams_scoreV7','modelScoreParams_scoreV8','modelScoreParams_scoreV9','modelScoreParams_scoreV10','modelScoreParams_scoreV11','modelScoreParams_scoreV12',\n",
    "#            'modelScoreParams_scoreV13']\n",
    "# df[columns] = df.apply(lambda x : query_json(x['req_data'],columns),axis=1,result_type='expand')\n",
    "# df[columns] = df[columns].astype(float)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ab536cd8-cd60-4d3a-921e-210c247c84e4",
   "metadata": {},
   "source": [
    "# pandas相关"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9c2f392c-8a9f-45d3-b1a3-d80c8be99787",
   "metadata": {},
   "source": [
    "## pandas显示设置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "e2f49e55-700a-441d-adcc-3fd5b1ce140c",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.107655Z",
     "iopub.status.busy": "2024-03-22T03:20:06.107541Z",
     "iopub.status.idle": "2024-03-22T03:20:06.111927Z",
     "shell.execute_reply": "2024-03-22T03:20:06.111477Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.107644Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.616296Z",
     "start_time": "2024-03-30T07:02:37.612819Z"
    }
   },
   "outputs": [],
   "source": [
    "def set_float_show(f=\"{:,.3f}\"):\n",
    "    pd.set_option('display.float_format', f)\n",
    "\n",
    "def set_warnings(need=True):\n",
    "    if need:\n",
    "        warnings.filterwarnings('default')\n",
    "    else:\n",
    "        warnings.filterwarnings('ignore')\n",
    "set_warnings(False)\n",
    "\n",
    "def set_pd_show(max_rows=500, max_columns=200, max_colwidth=70):\n",
    "    pd.set_option(\"display.max_rows\", max_rows)\n",
    "    pd.set_option(\"display.max_columns\", max_columns)\n",
    "    pd.set_option(\"display.max_colwidth\", max_colwidth)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7dc5a81f-1e8c-41cb-b818-ba22b0a58bc5",
   "metadata": {},
   "source": [
    "## 根据datafame中的url字段进行数据下载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "0e48b2a2-848e-4236-a3bd-f0053d1f45fe",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.112832Z",
     "iopub.status.busy": "2024-03-22T03:20:06.112711Z",
     "iopub.status.idle": "2024-03-22T03:20:06.170682Z",
     "shell.execute_reply": "2024-03-22T03:20:06.169946Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.112822Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.655372Z",
     "start_time": "2024-03-30T07:02:37.617966Z"
    }
   },
   "outputs": [],
   "source": [
    "import requests \n",
    "from tqdm import tqdm\n",
    "from urllib.parse import urlparse\n",
    "def is_valid_url(url):\n",
    "    try:\n",
    "        result = urlparse(url)\n",
    "        return all([result.scheme, result.netloc])\n",
    "    except ValueError:\n",
    "        return False\n",
    "def download_data(df, url_column, id_column,suffixes='_data'):\n",
    "    if url_column in df.columns and id_column in df.columns:\n",
    "        df[url_column + '_data'] = None\n",
    "        with tqdm(total=len(df), desc=\"Downloading\") as pbar:  #, bar_format=\"{l_bar}{bar}| {n_fmt}/{total_fmt}\"\n",
    "            with requests.Session() as session:\n",
    "                for i, row in df.iterrows():\n",
    "                    url = row[url_column]\n",
    "                    id_column_value = row[id_column]\n",
    "                    new_column_name = url_column + suffixes\n",
    "                    if isinstance(url, str) and is_valid_url(url):  # 检查URL是否是字符串并且有效\n",
    "                        response = session.get(url, stream=True)\n",
    "                        if response.status_code == 200:\n",
    "                            # total_size_in_bytes = int(response.headers.get('content-length', 0))\n",
    "                            block_size = 1024  # 1 Kibibyte\n",
    "                            # progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True) # 如果下大文件则开启\n",
    "                            data = b''.join(data_chunk for data_chunk in response.iter_content(block_size))\n",
    "                            df.loc[i, new_column_name] = data.decode()\n",
    "                            # progress_bar.close()\n",
    "                        else:\n",
    "                            print(\n",
    "                                f\"Failed to download data for {id_column}:{id_column_value} from {url}: {response.status_code}\")\n",
    "                    else:\n",
    "                        print(f\"url format is erro ,{id_column}:{id_column_value},url:{url}\")\n",
    "                    pbar.update(1)\n",
    "    else:\n",
    "        raise Exception(f\"Please check whether the data contains these two columns:{url_column},{id_column}\")\n",
    "    return df"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "db343280-4d21-4e78-8609-534817bee5e6",
   "metadata": {},
   "source": [
    "## 拉平json格式的字段"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "7116a86d-6956-40d6-8257-6a37f6c7cfe3",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.171700Z",
     "iopub.status.busy": "2024-03-22T03:20:06.171364Z",
     "iopub.status.idle": "2024-03-22T03:20:06.175662Z",
     "shell.execute_reply": "2024-03-22T03:20:06.175179Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.171687Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.660296Z",
     "start_time": "2024-03-30T07:02:37.656688Z"
    }
   },
   "outputs": [],
   "source": [
    "def parse_json_data(df, json_column, id_column, retain_column=None):\n",
    "    \"\"\"\n",
    "    将df中的json字符串的摊平，并根据想要保留的字段，保留相关的值\n",
    "    :param df:  待摊平处理的dataframe\n",
    "    :param json_column: json字符串所在的那一列\n",
    "    :param id_column: 唯一关联主键，后续用于定位转化失败的数据，或外部唯一关联的id\n",
    "    :param retain_column: 待保留的列，可以是字符串，也可以是数组保存多列\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    new_rows = []\n",
    "    for i, row in df.iterrows():\n",
    "        json_data_str = row[json_column]\n",
    "        id_column_value = row[id_column]\n",
    "        if isinstance(json_data_str, str):\n",
    "            try:\n",
    "                json_data = json.loads(json_data_str)\n",
    "                for item in json_data:\n",
    "                    new_row = item\n",
    "                    new_row[id_column] = id_column_value\n",
    "                    if isinstance(retain_column, str) and len(retain_column) > 0:\n",
    "                        new_row[retain_column] = row[retain_column]\n",
    "                    elif isinstance(retain_column, list) and len(retain_column) > 0:\n",
    "                        for column in retain_column:\n",
    "                            new_row[column] = row[column]\n",
    "                    new_rows.append(new_row)\n",
    "            except json.JSONDecodeError:\n",
    "                print(f\"Failed to parse JSON data for {id_column}:{id_column_value}\")\n",
    "    return pd.DataFrame(new_rows)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fadbaad1-0e0e-4c84-a4f5-24573ae43ee8",
   "metadata": {},
   "source": [
    "## 数据校验"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e4718ed3-385d-4133-b3ea-f4fc95c06568",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.178148Z",
     "iopub.status.busy": "2024-03-22T03:20:06.177821Z",
     "iopub.status.idle": "2024-03-22T03:20:06.181790Z",
     "shell.execute_reply": "2024-03-22T03:20:06.181262Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.178134Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.665819Z",
     "start_time": "2024-03-30T07:02:37.661628Z"
    }
   },
   "outputs": [],
   "source": [
    "# 比较的必须是没有缺失值\n",
    "def is_same_df(small_df, big_df):\n",
    "    \"\"\"\n",
    "    比较两个行数相同的数据的内容\n",
    "    :param small_df: \n",
    "    :param big_df: \n",
    "    :return: \n",
    "    \"\"\"\n",
    "    for col in small_df.columns:\n",
    "        s1 = small_df[col].fillna(-1)\n",
    "        s2 = big_df[col].fillna(-1)\n",
    "        l = (s1 == s2).sum()\n",
    "        diff_value_num = small_df.shape[0] - l\n",
    "        if diff_value_num != 0:\n",
    "            print(f\"{col}:{diff_value_num}\")\n",
    "\n",
    "\n",
    "def compare_df_data(small_df, big_df, id_columns):\n",
    "    \"\"\"\n",
    "    通过id_column 关联比较两个不同的df的数据内容是否完全一致\n",
    "    :param small_df: \n",
    "    :param big_df: \n",
    "    :param id_columns: \n",
    "    :return: \n",
    "    \"\"\"\n",
    "    big_df = big_df.copy()\n",
    "    small_df = small_df.copy()\n",
    "    big_df_new = big_df[big_df[id_columns].isin(small_df[id_columns])] \\\n",
    "        .sort_values(id_columns).reset_index(drop=True)\n",
    "    small_df = small_df.sort_values(id_columns).reset_index(drop=True)\n",
    "    is_same_df(small_df, big_df_new)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "efa76ad5-c1e4-4201-b884-0d670377d7fb",
   "metadata": {},
   "source": [
    "## dataframe数据切割"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "771164a2-eec0-4fed-b343-a742ae1245aa",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.182417Z",
     "iopub.status.busy": "2024-03-22T03:20:06.182299Z",
     "iopub.status.idle": "2024-03-22T03:20:06.185375Z",
     "shell.execute_reply": "2024-03-22T03:20:06.184945Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.182407Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.670565Z",
     "start_time": "2024-03-30T07:02:37.667017Z"
    }
   },
   "outputs": [],
   "source": [
    "def split_df(df, sheet_size=3000):\n",
    "    import gc\n",
    "    splited_arr = []\n",
    "    df_len = len(df)\n",
    "    start_range = range(0, df_len, sheet_size)\n",
    "    end_range = range(sheet_size, df_len + sheet_size, sheet_size)\n",
    "    for start_index, end_index in zip(start_range, end_range):\n",
    "        splited_arr.append(df.iloc[start_index:end_index])\n",
    "    del df\n",
    "    gc.collect()\n",
    "    return splited_arr"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "59cff684-e796-4eb9-a87c-7add34efc1dd",
   "metadata": {},
   "source": [
    "# 连接数据库"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "26ca990a-259a-4bb4-994c-7e8e4233e018",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.185954Z",
     "iopub.status.busy": "2024-03-22T03:20:06.185848Z",
     "iopub.status.idle": "2024-03-22T03:20:06.189705Z",
     "shell.execute_reply": "2024-03-22T03:20:06.189199Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.185945Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.676342Z",
     "start_time": "2024-03-30T07:02:37.672515Z"
    }
   },
   "outputs": [],
   "source": [
    "def postgre_engine(host, port, user, passwd, db):\n",
    "    try:\n",
    "        engine = create_engine(f\"postgresql://{user}:{passwd}@{host}:{port}/{db}\")\n",
    "        # engine = create_engine(\"postgresql://BASIC$longxiaolei:ZGU3NGVkOTU0@ali-hologres.palmcash.com:88/prod\")\n",
    "        print(\"Successfully connected to the postgre:{}/{}\".format(host, port))\n",
    "        return engine\n",
    "    except Exception as e:\n",
    "        print(f\"An error occurred: {e}\")\n",
    "\n",
    "\n",
    "def mysql_connection(host, port, user, password):\n",
    "    try:\n",
    "        connection = pymysql.connect(host=host, port=port, user=user, password=password)\n",
    "        print(\"Successfully connected to the mysql:{}/{}\".format(host, port))\n",
    "        return connection\n",
    "    except Exception as e:\n",
    "        print(f\"An error occurred: {e}\")\n",
    "        return None\n",
    "    \n",
    "def mysql_engine(host, port, user, passwd, db=None):\n",
    "    try:\n",
    "        engine = create_engine(f'mysql+pymysql://{user}:{passwd}@{host}:{port}/{db}')\n",
    "        return engine\n",
    "    except Exception as e:\n",
    "        print(f\"An error occurred: {e}\")\n",
    "\n",
    "# mysql_rule = mysql_engine('47.253.56.86',4000,'rule','Z3Vl7enFj2eqkeJf','rule')\n",
    "#mysql_rule = mysql_engine('172.20.1.129', 3306, 'rule', 'Z3Vl7enFj2eqkeJf', 'rule')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2e33d6fe-d86e-446a-9754-00c79a9bed5e",
   "metadata": {},
   "source": [
    "# 路径操作"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6939dbb5-779e-457d-80ff-80422335ac2e",
   "metadata": {},
   "source": [
    "## 校验或创建文件夹"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "d279129b-cadb-4486-9406-38f341736394",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.190657Z",
     "iopub.status.busy": "2024-03-22T03:20:06.190289Z",
     "iopub.status.idle": "2024-03-22T03:20:06.194707Z",
     "shell.execute_reply": "2024-03-22T03:20:06.193917Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.190645Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.680591Z",
     "start_time": "2024-03-30T07:02:37.677621Z"
    }
   },
   "outputs": [],
   "source": [
    "def mkdir_if_not_exists(dir_path):\n",
    "    import os\n",
    "    if os.path.exists(dir_path):\n",
    "        return\n",
    "    os.mkdir(dir_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "195db194-2c0d-4291-86a6-3b8131e20896",
   "metadata": {},
   "source": [
    "## 扫描文件夹中的数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "1f0c27a1-f18b-4474-ab70-6dd9db5dfdd6",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.195819Z",
     "iopub.status.busy": "2024-03-22T03:20:06.195335Z",
     "iopub.status.idle": "2024-03-22T03:20:06.199697Z",
     "shell.execute_reply": "2024-03-22T03:20:06.199278Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.195807Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.686936Z",
     "start_time": "2024-03-30T07:02:37.681821Z"
    }
   },
   "outputs": [],
   "source": [
    "def data_of_dir(dir_path:str, query_flags=\"\", start_date=None): \n",
    "    def _fetch_filenams(dir_path:str, contains_str,start_date=None):\n",
    "        file_paths = []\n",
    "        contains_str = contains_str or \"\"   # \"\" if contains_str is None else contains_str\n",
    "        pattern = r\"\\d{4}-\\d{2}-\\d{2}\" \n",
    "        for file_name in os.listdir(dir_path):\n",
    "            if  (contains_str in file_name) & ( any( file_type in file_name for file_type in ['.parquet','.pqt','.pickle','.pkl','.csv','.xlsx'] ) ):\n",
    "                if start_date is None:\n",
    "                    file_paths.append(os.path.join(dir_path, file_name))\n",
    "                else:\n",
    "                    match = re.search(pattern, file_name)\n",
    "                    date = match.group() # type: ignore\n",
    "                    if date>= start_date:\n",
    "                        file_paths.append(os.path.join(dir_path, file_name))\n",
    "        file_paths.sort()\n",
    "        return file_paths\n",
    "    \n",
    "    if isinstance(query_flags,str) or query_flags is None:\n",
    "        return _fetch_filenams(dir_path,query_flags,start_date)\n",
    "    elif isinstance(query_flags,list):\n",
    "        file_names= None\n",
    "        for qf in query_flags: # type: ignore\n",
    "            if file_names is None:\n",
    "                file_names=_fetch_filenams(dir_path,qf,start_date)\n",
    "            else:\n",
    "                file_names = file_names + _fetch_filenams(dir_path,qf,start_date)\n",
    "        return file_names"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4387c8c2-6fb2-4bd2-a7f9-53c229a1b753",
   "metadata": {},
   "source": [
    "## 批量加载指定路径的文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "ebeb5060-0909-46b7-bc4c-e3870a1c3dc4",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.200495Z",
     "iopub.status.busy": "2024-03-22T03:20:06.200201Z",
     "iopub.status.idle": "2024-03-22T03:20:06.203433Z",
     "shell.execute_reply": "2024-03-22T03:20:06.202970Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.200484Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.692380Z",
     "start_time": "2024-03-30T07:02:37.688577Z"
    }
   },
   "outputs": [],
   "source": [
    "def batch_load_data(file_paths):\n",
    "    df_list = []\n",
    "    for file_path in file_paths:\n",
    "        try:\n",
    "            if file_path.endswith(\".parquet\") or file_path.endswith(\".pqt\"):\n",
    "                df = pd.read_parquet(file_path)\n",
    "            elif file_path.endswith(\".pickle\") or file_path.endswith(\".pkl\"):\n",
    "                df = pd.read_pickle(file_path)\n",
    "            elif file_path.endswith(\".xlsx\"):\n",
    "                df = pd.read_excel(file_path)\n",
    "            elif file_path.endswith(\".csv\"):\n",
    "                df = pd.read_csv(file_path)\n",
    "            df_list.append(df)\n",
    "        except Exception as e:\n",
    "            print(f'发生异常，报错信息如下: \\n{e}')\n",
    "            print(f\"异常的文件为:{file_path}\")\n",
    "    df_new = pd.concat(df_list).reset_index(drop=True)\n",
    "    print(f\"load {len(file_paths)} file,data shape {df_new.shape}\")\n",
    "    return df_new"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dda9bf58-5514-4d63-b02a-6a95b47ee1fb",
   "metadata": {},
   "source": [
    "# 分析工具"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d76c81e5-2f4d-4906-87ac-6b5fd7686b03",
   "metadata": {},
   "source": [
    "## 高效率填充缺失值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "eadfbf42-7f49-4ed9-b5e5-4bca68c54a6c",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.204046Z",
     "iopub.status.busy": "2024-03-22T03:20:06.203897Z",
     "iopub.status.idle": "2024-03-22T03:20:06.206798Z",
     "shell.execute_reply": "2024-03-22T03:20:06.206325Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.204038Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.697432Z",
     "start_time": "2024-03-30T07:02:37.693640Z"
    }
   },
   "outputs": [],
   "source": [
    "def quick_fillna(df, cols, value):\n",
    "    \"\"\"\n",
    "    鉴于fillna函数中inplace无效，为了避免df的复制操作，在需要高速替换缺失值的场景建议使用该函数\n",
    "    :param df: 需要替换的数据\n",
    "    :param cols: 需要操作的列\n",
    "    :param value: 对应的填充值\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    for col in cols:\n",
    "        if df[col].isnull().sum() > 0:\n",
    "            df.loc[df[col].isnull(), col] = value"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "51f2a8b3-1356-4257-8f94-768d55302689",
   "metadata": {},
   "source": [
    "## 批量计算枚举值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "7a659d56-ec10-4331-874f-17594cbcc50b",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.207557Z",
     "iopub.status.busy": "2024-03-22T03:20:06.207429Z",
     "iopub.status.idle": "2024-03-22T03:20:06.210366Z",
     "shell.execute_reply": "2024-03-22T03:20:06.209946Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.207547Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.701500Z",
     "start_time": "2024-03-30T07:02:37.698457Z"
    }
   },
   "outputs": [],
   "source": [
    "def clac_unique(df, cols):\n",
    "    unique_arr = []\n",
    "    for col in cols:\n",
    "        uni = len(df[col].unique())\n",
    "        unique_arr.append([col, uni])\n",
    "    return pd.DataFrame(unique_arr, columns=[\"var\", \"unique\"])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "34eff902-1c2f-4005-93a0-ff8bdb061369",
   "metadata": {},
   "source": [
    "## 单变量分析"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "83862a2b-6a42-4eba-9769-c411d5d8c55e",
   "metadata": {},
   "source": [
    "### ks计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "c3ee5646-abdc-4144-bd8f-6bb5863f970c",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.211041Z",
     "iopub.status.busy": "2024-03-22T03:20:06.210823Z",
     "iopub.status.idle": "2024-03-22T03:20:06.214672Z",
     "shell.execute_reply": "2024-03-22T03:20:06.214068Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.211029Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.707175Z",
     "start_time": "2024-03-30T07:02:37.702642Z"
    }
   },
   "outputs": [],
   "source": [
    "def calc_ks(y_label, y_pred):\n",
    "    pred_list = list(y_pred)\n",
    "    label_list = list(y_label)\n",
    "    total_bad = sum(label_list)\n",
    "    total_good = len(label_list) - total_bad\n",
    "    items = sorted(zip(pred_list, label_list), key=lambda x: x[0])\n",
    "    step = (max(pred_list) - min(pred_list)) / 200\n",
    "\n",
    "    pred_bin = []\n",
    "    good_rate = []\n",
    "    bad_rate = []\n",
    "    ks_list = []\n",
    "    for i in range(1, 201):\n",
    "        idx = min(pred_list) + i * step\n",
    "        pred_bin.append(idx)\n",
    "        label_bin = [x[1] for x in items if x[0] < idx]\n",
    "        bad_num = sum(label_bin)\n",
    "        good_num = len(label_bin) - bad_num\n",
    "        goodrate = good_num / total_good\n",
    "        badrate = bad_num / total_bad\n",
    "        ks = abs(goodrate - badrate)\n",
    "        good_rate.append(goodrate)\n",
    "        bad_rate.append(badrate)\n",
    "        ks_list.append(ks)\n",
    "    return max(ks_list)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fa8edac5-f112-4b10-ad1c-c220f5880ada",
   "metadata": {},
   "source": [
    "### psi计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "8396d7dd-acc5-41b6-8128-1c7853f11003",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.215561Z",
     "iopub.status.busy": "2024-03-22T03:20:06.215291Z",
     "iopub.status.idle": "2024-03-22T03:20:06.221397Z",
     "shell.execute_reply": "2024-03-22T03:20:06.220737Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.215548Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.714795Z",
     "start_time": "2024-03-30T07:02:37.708571Z"
    }
   },
   "outputs": [],
   "source": [
    "def fea_psi_calc(actual, predict, bins=10):\n",
    "    \"\"\"\n",
    "    功能: 计算连续变量和离散变量的PSI值\n",
    "    输入值:\n",
    "    actual: 一维数组或series，代表训练集中的变量\n",
    "    predict: 一维数组或series，代表测试集中的变量\n",
    "    bins: 违约率段划分个数\n",
    "    输出值:\n",
    "    字典，键值关系为{'psi': PSI值，'psi_fig': 实际和预期占比分布曲线}\n",
    "    \"\"\"\n",
    "    psi_dict = {}\n",
    "    actual = np.sort(actual)\n",
    "    actual_distinct = np.sort(list(set(actual)))\n",
    "    predict = np.sort(predict)\n",
    "    # predict_distinct = np.sort(list(set(predict)))\n",
    "    actual_len = len(actual)\n",
    "    actual_distinct_len = len(actual_distinct)\n",
    "    predict_len = len(predict)\n",
    "    # predict_distinct_len = len(predict_distinct)\n",
    "    psi_cut = []\n",
    "    actual_bins = []\n",
    "    predict_bins = []\n",
    "    actual_min = actual.min()\n",
    "    actual_max = actual.max()\n",
    "    cuts = []\n",
    "    binlen = (actual_max - actual_min) / bins\n",
    "    if actual_distinct_len < bins:\n",
    "        for i in actual_distinct:\n",
    "            cuts.append(i)\n",
    "        for i in range(2, (actual_distinct_len + 1)):\n",
    "            if i == bins:\n",
    "                lowercut = cuts[i - 2]\n",
    "                uppercut = float(\"Inf\")\n",
    "            else:\n",
    "                lowercut = cuts[i - 2]\n",
    "                uppercut = cuts[i - 1]\n",
    "            actual_cnt = ((actual >= lowercut) & (actual < uppercut)).sum() + 1\n",
    "            predict_cnt = ((predict >= lowercut) & (predict < uppercut)).sum() + 1\n",
    "            actual_pct = (actual_cnt + 0.0) / actual_len\n",
    "            predict_pct = (predict_cnt + 0.0) / predict_len\n",
    "            psi_cut.append(\n",
    "                (actual_pct - predict_pct) * math.log(actual_pct / predict_pct)\n",
    "            )\n",
    "            actual_bins.append(actual_pct)\n",
    "            predict_bins.append(predict_pct)\n",
    "    else:\n",
    "        for i in range(1, bins):\n",
    "            cuts.append(actual_min + i * binlen)\n",
    "        for i in range(1, (bins + 1)):\n",
    "            if i == 1:\n",
    "                lowercut = float(\"-Inf\")\n",
    "                uppercut = cuts[i - 1]\n",
    "            elif i == bins:\n",
    "                lowercut = cuts[i - 2]\n",
    "                uppercut = float(\"Inf\")\n",
    "            else:\n",
    "                lowercut = cuts[i - 2]\n",
    "                uppercut = cuts[i - 1]\n",
    "            actual_cnt = ((actual >= lowercut) & (actual < uppercut)).sum() + 1\n",
    "            predict_cnt = ((predict >= lowercut) & (predict < uppercut)).sum() + 1\n",
    "            actual_pct = (actual_cnt + 0.0) / actual_len\n",
    "            predict_pct = (predict_cnt + 0.0) / predict_len\n",
    "            psi_cut.append(\n",
    "                (actual_pct - predict_pct) * math.log(actual_pct / predict_pct)\n",
    "            )\n",
    "            actual_bins.append(actual_pct)\n",
    "            predict_bins.append(predict_pct)\n",
    "    psi = sum(psi_cut)\n",
    "    psi_dict[\"psi\"] = psi\n",
    "    return psi_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "7691fce0-7c0c-45ce-b8dd-5e00ec64221f",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.222320Z",
     "iopub.status.busy": "2024-03-22T03:20:06.222026Z",
     "iopub.status.idle": "2024-03-22T03:20:06.225392Z",
     "shell.execute_reply": "2024-03-22T03:20:06.224886Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.222308Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.720132Z",
     "start_time": "2024-03-30T07:02:37.716083Z"
    }
   },
   "outputs": [],
   "source": [
    "def batch_calc_psi(df1, df2, cols):\n",
    "    \"\"\"Compute the PSI between df1 and df2 for every column in cols.\n",
    "\n",
    "    Columns that contain nulls in either frame are reported and skipped.\n",
    "    Returns a DataFrame with columns [\"var\", \"psi\"], sorted by psi\n",
    "    in descending order.\n",
    "    \"\"\"\n",
    "    records = []\n",
    "    for field in tqdm(cols):\n",
    "        # Skip (and report) any column with nulls in either frame.\n",
    "        if (df1[field].isnull().sum() != 0) or (df2[field].isnull().sum() != 0):\n",
    "            print(f\"field {field} has Null value\")\n",
    "            continue\n",
    "        records.append([field, fea_psi_calc(df1[field], df2[field])[\"psi\"]])\n",
    "    psi_df = pd.DataFrame(records, columns=[\"var\", \"psi\"])\n",
    "    return psi_df.sort_values(\"psi\", ascending=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "935116c8-2b18-4b34-9448-e8acbf762c41",
   "metadata": {},
   "source": [
    "## 综合分析"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "013eefa0-3cda-4c07-85fc-33c4fe4bf248",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.226380Z",
     "iopub.status.busy": "2024-03-22T03:20:06.226016Z",
     "iopub.status.idle": "2024-03-22T03:20:06.230629Z",
     "shell.execute_reply": "2024-03-22T03:20:06.230090Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.226368Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.725659Z",
     "start_time": "2024-03-30T07:02:37.721354Z"
    }
   },
   "outputs": [],
   "source": [
    "def group_calc(df, group=[], sum_col=[], count_col=[], unique_col=[], rate_tupes=[]):\n",
    "    \"\"\"Business-analysis helper: compute several grouped target metrics at once.\n",
    "\n",
    "    df         : input DataFrame\n",
    "    group      : columns to group by\n",
    "    sum_col    : columns aggregated with a grouped sum\n",
    "    count_col  : columns aggregated with a grouped count\n",
    "    unique_col : columns aggregated with a grouped nunique\n",
    "    rate_tupes : ratio columns, each a 3-tuple (numerator, denominator, new_name)\n",
    "    \"\"\"\n",
    "    gb = df.groupby(group)\n",
    "\n",
    "    # Collect each requested aggregation, in a fixed order: sum, count, nunique.\n",
    "    parts = []\n",
    "    if isinstance(sum_col, list) and len(sum_col) > 0:\n",
    "        parts.append(gb[sum_col].sum())\n",
    "    if isinstance(count_col, list) and len(count_col) > 0:\n",
    "        parts.append(gb[count_col].count())\n",
    "    if isinstance(unique_col, list) and len(unique_col) > 0:\n",
    "        parts.append(gb[unique_col].nunique())\n",
    "\n",
    "    assert len(parts) > 0, \"参数异常，无法计算出相应的结果，请检查入参信息\"\n",
    "\n",
    "    merged = pd.concat(parts, axis=1)\n",
    "\n",
    "    # Derived ratio columns; anything that is not a 3-tuple is reported.\n",
    "    if isinstance(rate_tupes, list) and len(rate_tupes) > 0:\n",
    "        for tup in rate_tupes:\n",
    "            if len(tup) == 3:\n",
    "                merged[tup[2]] = merged[tup[0]] / merged[tup[1]]\n",
    "            else:\n",
    "                print(f\"param rate_tupes has error format tupe:{tup}\")\n",
    "\n",
    "    return merged"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "456fbaa1-f2cb-4b7d-92a3-1ea40504cccb",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.231276Z",
     "iopub.status.busy": "2024-03-22T03:20:06.231154Z",
     "iopub.status.idle": "2024-03-22T03:20:06.237678Z",
     "shell.execute_reply": "2024-03-22T03:20:06.236937Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.231266Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.733680Z",
     "start_time": "2024-03-30T07:02:37.727118Z"
    }
   },
   "outputs": [],
   "source": [
    "def univerate( df, var_name, target, lamb=0.001, retWoeDict=False):\n",
    "    \"\"\"Single-variable analysis: per-bin WOE plus variable-level IV, KS, LIFT.\n",
    "\n",
    "    df         : DataFrame holding the variable and the binary target\n",
    "    var_name   : column to analyse; intended for encoded/binned numeric variables\n",
    "    target     : binary target column (1 = positive, 0 = negative)\n",
    "    lamb       : additive smoothing inside the WOE log-ratio (avoids log of 0)\n",
    "    retWoeDict : when True, also return a {bin_value: woe} mapping\n",
    "\n",
    "    Returns the per-bin stats DataFrame, or (DataFrame, woeDict) when\n",
    "    retWoeDict is True.\n",
    "    \"\"\"\n",
    "    # Contingency table: rows = variable bins, columns = target classes (0/1).\n",
    "    dti = pd.crosstab(df[var_name], df[target])\n",
    "    dti.rename(\n",
    "        {1: \"positive\",0: \"negative\"},\n",
    "        axis=1,\n",
    "        inplace=True,\n",
    "    )\n",
    "    dti[\"positive\"] = dti[\"positive\"].astype(int)\n",
    "    dti[\"negative\"] = dti[\"negative\"].astype(int)\n",
    "    # Overall totals and the base positive rate used by LIFT below.\n",
    "    p_t = dti[\"positive\"].sum()\n",
    "    n_t = dti[\"negative\"].sum()\n",
    "    t_t = p_t + n_t\n",
    "    r_t = p_t / t_t\n",
    "    dti[\"total\"] = dti[\"positive\"] + dti[\"negative\"]\n",
    "    dti[\"total_rate\"] = dti[\"total\"] / t_t\n",
    "    dti[\"positive_rate\"] = (\n",
    "            dti[\"positive\"] / dti[\"total\"]\n",
    "    )  # i.e. positive / (positive + negative) within the bin\n",
    "    dti[\"negative_cum\"] = dti[\"negative\"].cumsum()\n",
    "    dti[\"positive_cum\"] = dti[\"positive\"].cumsum()\n",
    "    # WOE with lamb smoothing on both class shares.\n",
    "    dti[\"woe\"] = np.log(\n",
    "        ((dti[\"negative\"] / n_t) + lamb) / ((dti[\"positive\"] / p_t) + lamb)\n",
    "    )\n",
    "    dti[\"LIFT\"] = dti[\"positive_rate\"] / r_t\n",
    "    # Per-bin KS; the variable-level KS is the max over bins.\n",
    "    dti[\"KS\"] = np.abs((dti[\"positive_cum\"] / p_t) - (dti[\"negative_cum\"] / n_t))\n",
    "    dti[\"IV\"] = (dti[\"negative\"] / n_t - dti[\"positive\"] / p_t) * dti[\"woe\"]\n",
    "    IV = dti[\"IV\"].sum()\n",
    "    KS = dti[\"KS\"].max()\n",
    "    # Broadcast the variable-level IV/KS onto every row.\n",
    "    dti[\"IV\"] = IV\n",
    "    dti[\"KS\"] = KS\n",
    "    dti = dti.reset_index()\n",
    "    dti.columns.name = None\n",
    "    # NOTE(review): no \"Total\" column exists at this point (only lowercase\n",
    "    # \"total\"), so that part of the rename looks like a no-op -- confirm intent.\n",
    "    dti.rename({\"Total\": \"num\", var_name: \"bin\"}, axis=1, inplace=True)\n",
    "    dti.insert(0, \"target\", [target] * dti.shape[0])\n",
    "    dti.insert(0, \"var\", [var_name] * dti.shape[0])\n",
    "    if retWoeDict:\n",
    "        # Interval (categorical) bins are keyed by their right edge.\n",
    "        if isinstance(dti[\"bin\"].dtype, pd.CategoricalDtype): # on some pandas versions this type lives at pd.core.dtypes.dtypes.CategoricalDtype\n",
    "            dti[\"v\"] = dti[\"bin\"].map(lambda x: x.right)\n",
    "        else:\n",
    "            dti[\"v\"] = dti[\"bin\"]\n",
    "        woeDict = pd.Series(dti[\"woe\"].values, index=dti[\"v\"].values).to_dict() # type: ignore\n",
    "        dti.drop(columns=[\"negative_cum\", \"positive_cum\", \"v\"], inplace=True)\n",
    "        return dti, woeDict\n",
    "    dti.drop(columns=[\"negative_cum\", \"positive_cum\"], inplace=True)\n",
    "    return dti"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "144e8699-57d2-4e0d-8ac0-46b98ef0574e",
   "metadata": {},
   "source": [
    "## 绘图工具"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "b5eb9422-0ef4-4d52-a522-27bdc790069f",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-03-22T03:20:06.238457Z",
     "iopub.status.busy": "2024-03-22T03:20:06.238287Z",
     "iopub.status.idle": "2024-03-22T03:20:06.243356Z",
     "shell.execute_reply": "2024-03-22T03:20:06.242816Z",
     "shell.execute_reply.started": "2024-03-22T03:20:06.238447Z"
    },
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.740225Z",
     "start_time": "2024-03-30T07:02:37.734786Z"
    }
   },
   "outputs": [],
   "source": [
    "def plot_roc_ks(y_pred, y_label, suptitle=\"标题\"):\n",
    "    \"\"\"Plot the ROC curve (left) and KS curve (right) for binary scores.\n",
    "\n",
    "    y_pred   : predicted scores / probabilities\n",
    "    y_label  : true binary labels (1 = bad/positive, 0 = good/negative)\n",
    "    suptitle : overall figure title\n",
    "    \"\"\"\n",
    "    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))\n",
    "    fig.suptitle(suptitle, fontsize=17)\n",
    "    # metrics.roc_curve returns (fpr, tpr, thresholds) in that order; the\n",
    "    # previous code bound them to swapped names and only plotted correctly\n",
    "    # by accident of also swapping the plot arguments.\n",
    "    fpr, tpr, threshold = metrics.roc_curve(y_label, y_pred)\n",
    "    AUC = metrics.roc_auc_score(y_label, y_pred)\n",
    "    ax1.plot(fpr, tpr, color=\"blue\", label=\"AUC=%.3f\" % AUC)\n",
    "    ax1.plot([0, 1], [0, 1], \"r--\")\n",
    "    ax1.set_ylim(0 - 0.02, 1 + 0.02)\n",
    "    ax1.set_xlim(0 - 0.02, 1 + 0.02)\n",
    "    ax1.set_title(\"ROC\")\n",
    "    ax1.legend(loc=\"best\")\n",
    "\n",
    "    # KS curve: scan samples in score order keeping running class counts\n",
    "    # for the prefix strictly before each sample. The old version re-summed\n",
    "    # items[0:i] on every step, which was O(n^2).\n",
    "    items = sorted(zip(list(y_pred), list(y_label)), key=lambda x: x[0])\n",
    "    total_bad = sum(label for _, label in items)\n",
    "    total_good = len(items) - total_bad\n",
    "    pred_bin = []\n",
    "    good_rate = []\n",
    "    bad_rate = []\n",
    "    ks_list = []\n",
    "    bad_seen = 0\n",
    "    good_seen = 0\n",
    "    for score, label in items:\n",
    "        pred_bin.append(score)\n",
    "        goodrate = good_seen / total_good\n",
    "        badrate = bad_seen / total_bad\n",
    "        good_rate.append(goodrate)\n",
    "        bad_rate.append(badrate)\n",
    "        ks_list.append(abs(goodrate - badrate))\n",
    "        # Update counts AFTER recording, matching the original items[0:i] prefix.\n",
    "        bad_seen += label\n",
    "        good_seen += 1 - label\n",
    "    ax2.plot(pred_bin, good_rate, color=\"green\", label=\"good_rate\")\n",
    "    ax2.plot(pred_bin, bad_rate, color=\"red\", label=\"bad_rate\")\n",
    "    ax2.plot(pred_bin, ks_list, color=\"blue\", label=\"good-bad\")\n",
    "    ax2.set_title(\"KS:{:.3f}\".format(max(ks_list)))\n",
    "    ax2.legend(loc=\"best\")\n",
    "    # pyplot.show accepts no positional figure argument on modern matplotlib.\n",
    "    return plt.show()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dbfdbac7-7f69-4f02-8721-60184c57d766",
   "metadata": {},
   "source": [
    "# 常用业务字段转换"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "fa911216-d498-4eaf-a000-cb0c5a3f6962",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-03-30T07:02:37.744987Z",
     "start_time": "2024-03-30T07:02:37.741633Z"
    }
   },
   "outputs": [],
   "source": [
    "def _agr_def_calc(row, agr_col, def_col):\n",
    "    \"\"\"Shared helper for the agr/def flag pairs below.\n",
    "\n",
    "    Returns 1 when both flags equal 1, 0 when the agree flag is 1 but the\n",
    "    default flag is not, and None for rows outside the agreed population\n",
    "    (so they drop out of downstream rate calculations).\n",
    "    \"\"\"\n",
    "    if row[agr_col] == 1:\n",
    "        return 1 if row[def_col] == 1 else 0\n",
    "    return None\n",
    "\n",
    "\n",
    "def def_pd1_aclc(row):\n",
    "    \"\"\"pd1 default flag, restricted to rows where agr_pd1 == 1.\"\"\"\n",
    "    return _agr_def_calc(row, 'agr_pd1', 'def_pd1')\n",
    "\n",
    "\n",
    "def def_cpd_aclc(row):\n",
    "    \"\"\"cpd default flag, restricted to rows where agr_cpd == 1.\"\"\"\n",
    "    return _agr_def_calc(row, 'agr_cpd', 'def_cpd')"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [],
   "metadata": {
    "collapsed": false
   },
   "id": "d6cc0b6dc2fb7f32"
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "py311",
   "language": "python",
   "display_name": "py311"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
