{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "ea5383ff",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1a0562d1",
   "metadata": {},
   "source": [
    "滑窗"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6bc3dc3d",
   "metadata": {},
   "source": [
    "最近K天滑窗"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c04076b1",
   "metadata": {},
   "source": [
    "最近K次滑窗"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ce3715c4",
   "metadata": {},
   "source": [
    "每月月初滑窗"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0b880b1d",
   "metadata": {},
   "source": [
     "对特定访问路径合并滑窗"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3d38fa67",
   "metadata": {},
   "source": [
    "TF-IDF"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "540e0441",
   "metadata": {},
   "source": [
    "文本词向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "98535108",
   "metadata": {},
   "outputs": [],
   "source": [
    "from gensim.models import Word2Vec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "4e7dbbbb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "行为向量示例:\n",
      "'登录'向量: [-0.00704154  0.00303742  0.06392278  0.1158705  -0.11873835 -0.08944307\n",
      "  0.08161257  0.11464314]\n",
      "'转账'向量: [ 0.02947551 -0.05651722  0.10493112 -0.1225777   0.08399375  0.03635468\n",
      " -0.0614      0.05528858]\n",
      "\n",
      "与'购买'最相似的行为:\n",
      "通知查看: 0.6897\n",
      "提交材料: 0.6872\n",
      "登录: 0.3064\n",
      "\n",
      "行为关系推理: '转账'相对于'交易记录'类似于'购买'相对于什么?\n",
      "结果: 通知查看 (相似度: 0.6213)\n",
      "\n",
      "序列 ['登录', '理财产品', '购买'] 的平均向量:\n",
      "[ 0.06386767 -0.04381852  0.00774424  0.05493123 -0.03755122 -0.07104804\n",
      " -0.00384232  0.01473889]\n"
     ]
    }
   ],
   "source": [
    "from gensim.models import Word2Vec\n",
    "import numpy as np\n",
    "\n",
    "# 测试数据：用户行为序列\n",
    "sentences = [\n",
    "    ['登录', '账户概览', '转账', '交易记录'],\n",
    "    ['登录', '理财产品', '产品详情', '购买'],\n",
    "    ['登录', '贷款申请', '提交材料', '审批结果'],\n",
    "    ['登录', '信用卡', '账单查询', '还款'],\n",
    "    ['登录', '消息中心', '通知查看', '设置'],\n",
    "    ['登录', '账户概览', '理财产品', '产品详情'],\n",
    "    ['登录', '贷款申请', '产品详情', '购买']\n",
    "]\n",
    "\n",
    "# 训练Word2Vec模型\n",
    "model = Word2Vec(\n",
    "    sentences=sentences,\n",
    "    vector_size=8,       # 使用小维度便于演示\n",
    "    window=3,            # 考虑前后3个行为\n",
    "    min_count=1,         # 所有行为都参与训练\n",
    "    sg=1,                # 使用Skip-gram算法\n",
    "    epochs=100           # 增加迭代次数确保收敛\n",
    ")\n",
    "\n",
    "# 1. 查看行为向量\n",
    "print(\"行为向量示例:\")\n",
    "print(f\"'登录'向量: {model.wv['登录']}\")\n",
    "print(f\"'转账'向量: {model.wv['转账']}\")\n",
    "\n",
    "# 2. 查找最相似行为\n",
    "print(\"\\n与'购买'最相似的行为:\")\n",
    "for behavior, similarity in model.wv.most_similar('购买', topn=3):\n",
    "    print(f\"{behavior}: {similarity:.4f}\")\n",
    "\n",
    "# 3. 行为关系推理\n",
    "print(\"\\n行为关系推理: '转账'相对于'交易记录'类似于'购买'相对于什么?\")\n",
    "result = model.wv.most_similar(positive=['购买', '交易记录'], negative=['转账'], topn=1)\n",
    "print(f\"结果: {result[0][0]} (相似度: {result[0][1]:.4f})\")\n",
    "\n",
    "# 4. 序列向量生成\n",
    "def get_sequence_vector(seq):\n",
    "    vectors = [model.wv[word] for word in seq if word in model.wv]\n",
    "    if vectors:\n",
    "        return np.mean(vectors, axis=0)\n",
    "    return np.zeros(model.vector_size)\n",
    "\n",
    "# 测试序列向量\n",
    "test_sequence = ['登录', '理财产品', '购买']\n",
    "seq_vector = get_sequence_vector(test_sequence)\n",
    "print(f\"\\n序列 {test_sequence} 的平均向量:\\n{seq_vector}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ff1afc53",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b322e4fb",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b25143db",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Guangdong branch feature engineering\n",
     "def get_id_category_features(df_fea, df_to_groupby, fea1, fea2, stat):\n",
     "    \"\"\"Compute per-customer `stat` of `fea2` within each `fea1` category,\n",
     "    pivot the result into wide columns (one per category) and left-join\n",
     "    them onto `df_fea` by CUST_NO.\n",
     "\n",
     "    Returns (merged df_fea, list of all newly generated column names).\n",
     "    NOTE(review): all-NaN columns are dropped before the merge, yet\n",
     "    `new_fea_cols` still lists them -- confirm downstream callers\n",
     "    tolerate names that are absent from the returned frame.\n",
     "    \"\"\"\n",
     "    # 'kurt' is not accepted as a string aggregation here, so kurtosis\n",
     "    # falls back to an explicit lambda\n",
     "    tmp = df_to_groupby.groupby(['CUST_NO', fea1])[fea2].agg(\n",
     "        stat if stat != \"kurt\" else lambda x: x.kurt()\n",
     "    ).to_frame(\n",
     "        '_'.join(['CUST_NO', fea1, fea2, stat])\n",
     "    ).reset_index()\n",
     "    \n",
     "    # long -> wide: one column per fea1 category, rows indexed by CUST_NO\n",
     "    df_tmp = pd.pivot(data=tmp, index='CUST_NO', columns=fea1, values='_'.join(['CUST_NO', fea1, fea2, stat]))  # index: CUST_NO\n",
     "    new_fea_cols = ['_'.join(['CUST_NO', fea1, fea2, stat, str(col)]) for col in df_tmp.columns]\n",
     "    df_tmp.columns = new_fea_cols\n",
     "    df_tmp.reset_index(inplace=True)\n",
     "        \n",
     "    # for counts, a missing (CUST_NO, category) pair means zero occurrences\n",
     "    if stat == 'count':\n",
     "        df_tmp = df_tmp.fillna(0)\n",
     "        \n",
     "    # drop columns that are entirely NaN\n",
     "    valid_cols = []\n",
     "    for col in df_tmp.columns:\n",
     "        if not df_tmp[col].isna().all():\n",
     "            valid_cols.append(col)\n",
     "            \n",
     "    df_fea = df_fea.merge(df_tmp[valid_cols], on='CUST_NO', how='left')\n",
     "    return df_fea, new_fea_cols \n",
    "    \n",
    "def get_all_id_category_features(df_fea, df_to_groupby, fea1, fea2, stats):\n",
    "    all_new_fea_cols = []\n",
    "    for stat in tqdm(stats):\n",
    "        df_fea, new_fea_cols = get_id_category_features(df_fea, df_to_groupby, fea1, fea2, stat)\n",
    "        all_new_fea_cols += new_fea_cols\n",
    "    return df_fea, all_new_fea_cols\n",
    "\n",
     "# 6 Mobile-banking page-view detail table (GTGSH_MB_PAGEVIEW_DTL)\n",
     "# 6.1.1 Grouped statistics features for clicked modules/pages\n",
     "def gen_mb_features_by_day(df):\n",
     "    \"\"\"Build per-customer recency (RFM-R) features from page-view logs.\n",
     "\n",
     "    For each of the last 3 months (0, 1, 2 months ago) this computes, per\n",
     "    CUST_NO, the days-ago of the day with the largest daily click count\n",
     "    and of the day with the largest number of distinct pages viewed.\n",
     "    Expects columns CUST_NO, mb_pageview_dtl_OPERATION_DATE (YYYYMMDD)\n",
     "    and mb_pageview_dtl_PAGE_TITLE; returns one row per CUST_NO.\n",
     "    \"\"\"\n",
     "    def get_days_to_now(df):\n",
     "        # Parse the raw YYYYMMDD date and derive elapsed-time columns\n",
     "        # relative to the latest date present in the data.\n",
     "        df[\"mb_pageview_dtl_OPERATION_DATE\"] = pd.to_datetime(df[\"mb_pageview_dtl_OPERATION_DATE\"], format=\"%Y%m%d\")\n",
     "        # NOTE(review): despite its name, df_months_to_now holds a DAY count\n",
     "        df_months_to_now = (df[\"mb_pageview_dtl_OPERATION_DATE\"].max() - df[\"mb_pageview_dtl_OPERATION_DATE\"]).dt.days\n",
     "        df[\"date_months_to_now\"] = df_months_to_now // 31  # months ago (approximated as days // 31)\n",
     "        df[\"date_weeks_to_now\"] = df_months_to_now // 7  # weeks ago\n",
     "        df[\"date_days_to_now\"] = df_months_to_now  # days ago\n",
     "        return df\n",
     "\n",
     "    def get_max_cnt_days_to_now(df, month):\n",
     "        # Days-ago of the day with this month's max daily click count /\n",
     "        # max distinct pages viewed (max value, then inner-merge back to\n",
     "        # recover which day(s) achieved it).\n",
     "        tmp_df_nunique = df.groupby(['CUST_NO']).agg({\"mb_pageview_dtl_PAGE_TITLE_nunique\": \"max\"}).reset_index()  \n",
     "        tmp_df_nunique = tmp_df_nunique.merge(df[['CUST_NO', 'date_days_to_now', 'mb_pageview_dtl_PAGE_TITLE_nunique']], on=[\"CUST_NO\", 'mb_pageview_dtl_PAGE_TITLE_nunique'], how=\"inner\")\n",
     "        tmp_df_nunique = tmp_df_nunique.groupby(['CUST_NO'])[\"date_days_to_now\"].min().to_frame(\"max_nunique_days_to_now_{}\".format(str(month))).reset_index()  # tie-break: keep the most recent day\n",
     "\n",
     "        tmp_df_cnt = df.groupby(['CUST_NO']).agg({\"mb_pageview_dtl_PAGE_TITLE_count\": \"max\"}).reset_index()  \n",
     "        tmp_df_cnt = tmp_df_cnt.merge(df[['CUST_NO', 'date_days_to_now', 'mb_pageview_dtl_PAGE_TITLE_count']], on=[\"CUST_NO\", 'mb_pageview_dtl_PAGE_TITLE_count'], how=\"inner\")\n",
     "        tmp_df_cnt = tmp_df_cnt.groupby(['CUST_NO'])[\"date_days_to_now\"].min().to_frame(\"max_count_days_to_now_{}\".format(str(month))).reset_index()  # tie-break: keep the most recent day\n",
     "            \n",
     "        return tmp_df_nunique, tmp_df_cnt\n",
     "\n",
     "    \"\"\" 1 数据预处理 \"\"\"\n",
     "    # (1) preprocessing: convert dates to days before the latest date\n",
     "    df = df.copy()\n",
     "    df = get_days_to_now(df)\n",
     "    feature = df[[\"CUST_NO\"]].drop_duplicates(['CUST_NO']).copy().reset_index(drop=True)\n",
     "    \n",
     "    # (1.2) aggregate: distinct pages and clicks per customer per day\n",
     "    df_by_day = df.groupby([\"CUST_NO\", \"date_days_to_now\", \"date_weeks_to_now\", \"date_months_to_now\"])[\"mb_pageview_dtl_PAGE_TITLE\"].agg(['nunique', 'count'])\n",
     "    df_by_day.columns = ['mb_pageview_dtl_PAGE_TITLE_nunique', 'mb_pageview_dtl_PAGE_TITLE_count']\n",
     "    df_by_day = df_by_day.reset_index()\n",
     "\n",
     "    \"\"\"\" 2 RFM-R: 每月日点击笔数/日点击页面数最大天距今天数 \"\"\"\n",
     "    # (2) RFM-R: for each of the last 3 months, recency of the busiest day\n",
     "    tmp_feature = df_by_day[[\"CUST_NO\"]].drop_duplicates([\"CUST_NO\"]).copy().reset_index(drop=True)\n",
     "    for month in tqdm([0, 1, 2]):\n",
     "        data_month = df_by_day[df_by_day[\"date_months_to_now\"]==month]\n",
     "        tmp_df_nunique, tmp_df_cnt = get_max_cnt_days_to_now(data_month, month)\n",
     "        tmp_feature = tmp_feature.merge(tmp_df_nunique, how=\"left\", on=\"CUST_NO\")\n",
     "        tmp_feature = tmp_feature.merge(tmp_df_cnt, how=\"left\", on=\"CUST_NO\")\n",
     "    feature = feature.merge(tmp_feature, how=\"left\", on=\"CUST_NO\")\n",
     "\n",
     "    return feature\n",
    "\n",
     "# Build the day-level features for the train and test-A sets.\n",
     "# NOTE(review): mb_pageview_dtl_train / mb_pageview_dtl_test_a are not\n",
     "# defined anywhere in this notebook -- presumably loaded elsewhere; this\n",
     "# cell will fail under Restart & Run All until they are. Confirm.\n",
     "mb_feature_train = gen_mb_features_by_day(mb_pageview_dtl_train)\n",
     "mb_feature_test = gen_mb_features_by_day(mb_pageview_dtl_test_a)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
