{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "2f7c6768",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import os\n",
    "import warnings\n",
    "from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold\n",
    "from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from sklearn.feature_selection import SelectFromModel\n",
    "from imblearn.over_sampling import SMOTE\n",
    "from imblearn.pipeline import Pipeline as ImbPipeline\n",
    "import lightgbm as lgb\n",
    "import xgboost as xgb\n",
    "import catboost as cb\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "\n",
    "# Silence library warnings to keep notebook output readable\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "0e14ceab",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Working directory for data and cached artifacts\n",
    "# NOTE(review): hardcoded absolute local path — consider making this configurable\n",
    "cwd = 'D:/Temporary/jqxx'\n",
    "os.makedirs(f'{cwd}/tmp', exist_ok=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "d0b1cb36",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Enhanced data preprocessing\n",
    "def enhanced_data_process(samples_file, logs_file, user_info_file=None, need_label=True, out_file=None):\n",
    "    \"\"\"Build a per-(user, merchant) feature table from samples and behavior logs.\n",
    "\n",
    "    samples_file: CSV of (user_id, merchant_id) pairs, plus 'label' when need_label.\n",
    "    logs_file: CSV of behavior logs with user_id, merchant_id, action_type, time_stamp.\n",
    "    user_info_file: optional CSV of user demographics (age_range, gender).\n",
    "    need_label: copy the 'label' column from the samples file onto the result.\n",
    "    out_file: optional cache path; if it already exists it is loaded and returned.\n",
    "    Returns a DataFrame with one row per sample row.\n",
    "    \"\"\"\n",
    "    # Short-circuit: reuse the cached output if present\n",
    "    if out_file and os.path.exists(out_file):\n",
    "        return pd.read_csv(out_file)\n",
    "    \n",
    "    # Read the sample pairs\n",
    "    samples = pd.read_csv(samples_file)\n",
    "    \n",
    "    # Read the behavior logs in chunks to bound peak memory\n",
    "    print(f\"正在读取日志文件: {logs_file}\")\n",
    "    chunks = []\n",
    "    for chunk in pd.read_csv(logs_file, chunksize=100000):\n",
    "        chunks.append(chunk)\n",
    "    user_logs = pd.concat(chunks, axis=0)\n",
    "    print(f\"日志数据加载完成，形状: {user_logs.shape}\")\n",
    "    \n",
    "    # Read user demographics when the file is available\n",
    "    if user_info_file and os.path.exists(user_info_file):\n",
    "        user_info = pd.read_csv(user_info_file)\n",
    "        # Fill missing demographics with sentinel values\n",
    "        user_info['age_range'] = user_info['age_range'].fillna(-1)\n",
    "        user_info['gender'] = user_info['gender'].fillna(2)  # 2 = unknown\n",
    "        print(f\"用户基本信息加载完成，形状: {user_info.shape}\")\n",
    "    else:\n",
    "        user_info = None\n",
    "    \n",
    "    # Per (user, merchant) action counts; action_type codes are counted as\n",
    "    # 0 -> clicks, 1 -> buys, 2 -> collects, 3 -> carts (as named below)\n",
    "    print(\"开始计算行为特征...\")\n",
    "    behavior_features = user_logs.groupby(['user_id', 'merchant_id']).agg(\n",
    "        total_actions=('action_type', 'count'),\n",
    "        clicks=('action_type', lambda x: (x == 0).sum()),\n",
    "        buys=('action_type', lambda x: (x == 1).sum()),\n",
    "        collects=('action_type', lambda x: (x == 2).sum()),\n",
    "        carts=('action_type', lambda x: (x == 3).sum())\n",
    "    ).reset_index()\n",
    "    \n",
    "    # Conversion-ratio features (the 1e-5 guards against division by zero)\n",
    "    behavior_features['click_ratio'] = behavior_features['clicks'] / (behavior_features['total_actions'] + 1e-5)\n",
    "    behavior_features['buy_ratio'] = behavior_features['buys'] / (behavior_features['total_actions'] + 1e-5)\n",
    "    behavior_features['collect_ratio'] = behavior_features['collects'] / (behavior_features['total_actions'] + 1e-5)\n",
    "    behavior_features['cart_ratio'] = behavior_features['carts'] / (behavior_features['total_actions'] + 1e-5)\n",
    "    \n",
    "    # User-level aggregates\n",
    "    print(\"开始计算用户特征...\")\n",
    "    user_features = user_logs.groupby('user_id').agg(\n",
    "        unique_merchants=('merchant_id', 'nunique'),\n",
    "        total_actions=('action_type', 'count'),\n",
    "        avg_action=('action_type', 'mean'),\n",
    "        last_action=('time_stamp', 'max'),\n",
    "        first_action=('time_stamp', 'min'),\n",
    "        action_std=('action_type', 'std')\n",
    "    ).reset_index()\n",
    "    # NOTE(review): treats time_stamp as unix seconds (see to_datetime below) —\n",
    "    # confirm this matches the actual log format\n",
    "    user_features['user_active_days'] = (user_features['last_action'] - user_features['first_action']) / (24*3600) + 1\n",
    "    user_features['daily_actions'] = user_features['total_actions'] / user_features['user_active_days']\n",
    "    \n",
    "    # Merchant-level aggregates\n",
    "    print(\"开始计算商家特征...\")\n",
    "    merchant_features = user_logs.groupby('merchant_id').agg(\n",
    "        unique_users=('user_id', 'nunique'),\n",
    "        total_actions=('action_type', 'count'),\n",
    "        avg_action=('action_type', 'mean'),\n",
    "        action_std=('action_type', 'std')\n",
    "    ).reset_index()\n",
    "    \n",
    "    # Time-of-day / day-of-week columns derived from the timestamp\n",
    "    print(\"开始计算时间特征...\")\n",
    "    user_logs['timestamp'] = pd.to_datetime(user_logs['time_stamp'], unit='s')\n",
    "    user_logs['hour'] = user_logs['timestamp'].dt.hour\n",
    "    user_logs['day_of_week'] = user_logs['timestamp'].dt.dayofweek\n",
    "    \n",
    "    # Per (user, merchant) visiting habits\n",
    "    time_features = user_logs.groupby(['user_id', 'merchant_id']).agg(\n",
    "        favorite_hour=('hour', lambda x: x.mode()[0] if not x.empty else 12),\n",
    "        favorite_day=('day_of_week', lambda x: x.mode()[0] if not x.empty else 3),\n",
    "        visit_frequency=('time_stamp', 'count'),\n",
    "        visit_days=('time_stamp', lambda x: x.nunique())\n",
    "    ).reset_index()\n",
    "    time_features['visit_regularity'] = time_features['visit_frequency'] / (time_features['visit_days'] + 1e-5)\n",
    "    \n",
    "    # Left-merge every feature family onto the sample rows; suffixes keep the\n",
    "    # colliding column names (e.g. total_actions) apart per aggregation level\n",
    "    print(\"开始合并特征...\")\n",
    "    features = samples.merge(behavior_features, on=['user_id', 'merchant_id'], how='left')\n",
    "    features = features.merge(user_features, on='user_id', how='left', suffixes=('', '_user'))\n",
    "    features = features.merge(merchant_features, on='merchant_id', how='left', suffixes=('', '_merchant'))\n",
    "    features = features.merge(time_features, on=['user_id', 'merchant_id'], how='left')\n",
    "    \n",
    "    # Attach user demographics if loaded\n",
    "    if user_info is not None:\n",
    "        features = features.merge(user_info, on='user_id', how='left')\n",
    "    \n",
    "    # Interaction intensity: pair-level volume scaled by the user's daily rate\n",
    "    features['interaction_intensity'] = features['total_actions'] * features['daily_actions']\n",
    "    \n",
    "    # Samples absent from the logs end up NaN after the left merges; zero-fill\n",
    "    features.fillna(0, inplace=True)\n",
    "    \n",
    "    # Attach the label column\n",
    "    if need_label:\n",
    "        # NOTE(review): positional copy — assumes the left merges above neither\n",
    "        # reordered nor duplicated sample rows; verify key uniqueness upstream\n",
    "        features['label'] = samples['label']\n",
    "    \n",
    "    # Persist for reuse on the next run\n",
    "    if out_file:\n",
    "        features.to_csv(out_file, index=False)\n",
    "        print(f\"已保存处理后的数据到: {out_file}\")\n",
    "    \n",
    "    return features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "89282d59",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 2. Enhanced feature engineering\n",
    "# (The original cell defined this function twice — the second copy was glued\n",
    "# onto the first's `return data` line and silently shadowed it. Kept one copy.)\n",
    "def enhanced_feature_engineering(data):\n",
    "    \"\"\"Derive composite features on top of the preprocessed table.\n",
    "\n",
    "    Mutates and returns `data`. Every derived column is guarded by a\n",
    "    column-existence check so the function works on both train and test frames.\n",
    "    \"\"\"\n",
    "    # User value score: log-scaled activity volume, breadth and tenure\n",
    "    if all(col in data.columns for col in ['total_actions', 'unique_merchants', 'user_active_days']):\n",
    "        data['user_value_score'] = (\n",
    "            np.log1p(data['total_actions']) * 0.5 +\n",
    "            np.log1p(data['unique_merchants']) * 0.3 +\n",
    "            np.log1p(data['user_active_days']) * 0.2\n",
    "        )\n",
    "    \n",
    "    # Merchant popularity score\n",
    "    if all(col in data.columns for col in ['total_actions_merchant', 'unique_users']):\n",
    "        data['merchant_popularity'] = (\n",
    "            np.log1p(data['total_actions_merchant']) * 0.7 +\n",
    "            np.log1p(data['unique_users']) * 0.3\n",
    "        )\n",
    "    \n",
    "    # Time-related indicator features\n",
    "    if 'favorite_hour' in data.columns:\n",
    "        data['is_peak_hour'] = data['favorite_hour'].between(10, 22).astype(int)\n",
    "        data['is_weekend'] = data['favorite_day'].isin([5, 6]).astype(int)\n",
    "    \n",
    "    # Behavior combinations: pre-purchase intent and its conversion into buys\n",
    "    if all(col in data.columns for col in ['clicks', 'carts', 'collects', 'buys']):\n",
    "        data['pre_purchase_actions'] = data['carts'] + data['collects']\n",
    "        data['purchase_conversion'] = data['buys'] / (data['pre_purchase_actions'] + 1e-5)\n",
    "    \n",
    "    # User-merchant compatibility: closer mean action types -> higher score\n",
    "    if all(col in data.columns for col in ['avg_action', 'avg_action_merchant']):\n",
    "        data['action_compatibility'] = 1 / (1 + np.abs(data['avg_action'] - data['avg_action_merchant']))\n",
    "    \n",
    "    # Encode the categorical gender column as integer codes\n",
    "    if 'gender' in data.columns:\n",
    "        data['gender'] = data['gender'].astype('category').cat.codes\n",
    "    \n",
    "    return data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "bd0751b3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 3. Feature selection\n",
    "def feature_selection(data, target='label', num_features=30, exclude=()):\n",
    "    \"\"\"Select up to `num_features` numeric columns via LightGBM importances.\n",
    "\n",
    "    data: training frame containing `target` plus candidate feature columns.\n",
    "    target: label column name (never offered as a feature).\n",
    "    num_features: upper bound on the number of features kept.\n",
    "    exclude: extra columns to drop from the candidates — e.g. identifier\n",
    "        columns such as 'user_id' / 'merchant_id', which otherwise get picked\n",
    "        as if they were predictive features. Default () preserves the old\n",
    "        behavior.\n",
    "    Returns the selected column names as a list.\n",
    "    \"\"\"\n",
    "    # Keep numeric columns only; drop the target and any excluded identifiers\n",
    "    X = data.select_dtypes(include=np.number).drop(columns=[target, *exclude], errors='ignore')\n",
    "    y = data[target]\n",
    "    \n",
    "    # Fit LightGBM once to obtain feature importances\n",
    "    model = lgb.LGBMClassifier(n_estimators=100, random_state=42, verbose=-1)\n",
    "    model.fit(X, y)\n",
    "    \n",
    "    # Wrap the already-fitted model in a selector capped at num_features\n",
    "    selector = SelectFromModel(model, prefit=True, max_features=num_features)\n",
    "    \n",
    "    # Map the boolean support mask back to column names\n",
    "    selected_features = X.columns[selector.get_support()]\n",
    "    \n",
    "    return list(selected_features)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "6e060cad",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 4. Advanced model training\n",
    "def advanced_model_training(train_data, features, target='label'):\n",
    "    \"\"\"Tune several models (SMOTE + grid search) and blend them by CV score.\n",
    "\n",
    "    train_data: frame holding both `features` and `target`.\n",
    "    features: list of feature column names.\n",
    "    target: name of the binary label column.\n",
    "    Returns (ensemble, val_auc); val_auc is measured on a hold-out split the\n",
    "    grid searches never trained on.\n",
    "    \"\"\"\n",
    "    X = train_data[features]\n",
    "    y = train_data[target]\n",
    "    \n",
    "    # Hold out a validation set BEFORE tuning: the original code ran the grid\n",
    "    # searches on all of X and then \"validated\" on a subset of that same X,\n",
    "    # so the reported AUC was inflated by train/test leakage\n",
    "    X_train, X_val, y_train, y_val = train_test_split(\n",
    "        X, y, test_size=0.2, stratify=y, random_state=42\n",
    "    )\n",
    "    \n",
    "    # Oversampler for class imbalance (applied inside each CV fold via pipeline)\n",
    "    smote = SMOTE(sampling_strategy=0.5, random_state=42)\n",
    "    \n",
    "    # Candidate models\n",
    "    models = {\n",
    "        'lgb': lgb.LGBMClassifier(random_state=42, n_jobs=-1, verbose=-1),\n",
    "        'xgb': xgb.XGBClassifier(random_state=42, n_jobs=-1, verbosity=0),\n",
    "        'rf': RandomForestClassifier(random_state=42, n_jobs=-1)\n",
    "    }\n",
    "    \n",
    "    # Hyper-parameter grids; keys address the 'model' step of the pipeline\n",
    "    param_grids = {\n",
    "        'lgb': {\n",
    "            'model__n_estimators': [100, 200],\n",
    "            'model__learning_rate': [0.05, 0.1],\n",
    "            'model__num_leaves': [31, 63],\n",
    "            'model__max_depth': [5, 7]\n",
    "        },\n",
    "        'xgb': {\n",
    "            'model__n_estimators': [100, 200],\n",
    "            'model__learning_rate': [0.05, 0.1],\n",
    "            'model__max_depth': [5, 7],\n",
    "            'model__subsample': [0.8, 1.0]\n",
    "        },\n",
    "        'rf': {\n",
    "            'model__n_estimators': [100, 200],\n",
    "            'model__max_depth': [10, 20],\n",
    "            'model__min_samples_split': [2, 5]\n",
    "        }\n",
    "    }\n",
    "    \n",
    "    best_models = {}\n",
    "    cv_scores = {}\n",
    "    \n",
    "    # Tune each candidate on the training split only\n",
    "    for name in models.keys():\n",
    "        print(f\"\\n正在训练并优化 {name.upper()} 模型...\")\n",
    "        \n",
    "        # SMOTE must live inside the pipeline so it only sees training folds\n",
    "        pipeline = ImbPipeline([\n",
    "            ('smote', smote),\n",
    "            ('model', models[name])\n",
    "        ])\n",
    "        \n",
    "        grid_search = GridSearchCV(\n",
    "            estimator=pipeline,\n",
    "            param_grid=param_grids[name],\n",
    "            cv=StratifiedKFold(n_splits=3, shuffle=True, random_state=42),\n",
    "            scoring='roc_auc',\n",
    "            n_jobs=-1,\n",
    "            verbose=1\n",
    "        )\n",
    "        \n",
    "        grid_search.fit(X_train, y_train)\n",
    "        \n",
    "        best_models[name] = grid_search.best_estimator_\n",
    "        cv_scores[name] = grid_search.best_score_\n",
    "        \n",
    "        print(f\"{name.upper()} 最佳参数: {grid_search.best_params_}\")\n",
    "        print(f\"{name.upper()} 最佳AUC: {cv_scores[name]:.4f}\")\n",
    "    \n",
    "    # Weighted soft-voting ensemble over the already-fitted pipelines\n",
    "    class WeightedEnsemble:\n",
    "        def __init__(self, models, weights):\n",
    "            self.models = models\n",
    "            self.weights = weights\n",
    "            \n",
    "        def fit(self, X, y):\n",
    "            # Base models are already fitted; nothing to do\n",
    "            return self\n",
    "            \n",
    "        def predict_proba(self, X):\n",
    "            probas = [model.predict_proba(X)[:, 1] for model in self.models]\n",
    "            weighted_proba = sum(w * p for w, p in zip(self.weights, probas))\n",
    "            return np.vstack([1 - weighted_proba, weighted_proba]).T\n",
    "    \n",
    "    # Weight each model proportionally to its cross-validated AUC\n",
    "    total_score = sum(cv_scores.values())\n",
    "    weights = [cv_scores[name] / total_score for name in best_models.keys()]\n",
    "    \n",
    "    ensemble = WeightedEnsemble(\n",
    "        models=[best_models[name] for name in best_models.keys()],\n",
    "        weights=weights\n",
    "    )\n",
    "    ensemble.fit(X_train, y_train)  # no-op; kept for estimator-like interface\n",
    "    \n",
    "    # Evaluate the blend on the untouched hold-out split\n",
    "    val_proba = ensemble.predict_proba(X_val)[:, 1]\n",
    "    val_auc = roc_auc_score(y_val, val_proba)\n",
    "    print(f\"\\n集成模型在验证集上的AUC: {val_auc:.4f}\")\n",
    "    \n",
    "    return ensemble, val_auc\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "d7066212",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 5. Prediction\n",
    "def make_predictions(model, data, features, out_file=None):\n",
    "    \"\"\"Score `data` with a fitted model and return id/prob/label predictions.\n",
    "\n",
    "    model: any object exposing predict_proba.\n",
    "    data: frame with 'user_id', 'merchant_id' and (ideally) all `features`.\n",
    "    features: feature column names the model was trained on.\n",
    "    out_file: optional CSV path to persist the predictions.\n",
    "    \"\"\"\n",
    "    predictions = data[['user_id', 'merchant_id']].copy()\n",
    "    \n",
    "    # Align to the training feature set on a copy: any column missing from\n",
    "    # `data` is filled with 0, WITHOUT mutating the caller's DataFrame (the\n",
    "    # original wrote zero columns directly into `data` as a side effect)\n",
    "    X = data.reindex(columns=features, fill_value=0)\n",
    "    \n",
    "    # Positive-class probability and a 0.5-thresholded hard label\n",
    "    predictions['prob'] = model.predict_proba(X)[:, 1]\n",
    "    predictions['label'] = (predictions['prob'] > 0.5).astype(int)\n",
    "    \n",
    "    # Persist results when a path is given\n",
    "    if out_file:\n",
    "        predictions.to_csv(out_file, index=False)\n",
    "        print(f\"预测结果已保存到: {out_file}\")\n",
    "    \n",
    "    return predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "1d3352ff",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 6. Main driver\n",
    "def main():\n",
    "    \"\"\"End-to-end pipeline: preprocess, engineer, select, train, predict.\"\"\"\n",
    "    # Input / output locations\n",
    "    raw_dir = f'{cwd}/data/'\n",
    "    cache_dir = f'{cwd}/tmp/'\n",
    "    \n",
    "    train_csv = os.path.join(raw_dir, 'train.csv')\n",
    "    test_csv = os.path.join(raw_dir, 'test_without_label.csv')\n",
    "    logs_csv = os.path.join(raw_dir, 'user_log_format2.csv')  # reduced log file\n",
    "    info_csv = os.path.join(raw_dir, 'user_info_format1.csv')\n",
    "    \n",
    "    cached_train = os.path.join(cache_dir, 'enhanced_train.csv')\n",
    "    cached_test = os.path.join(cache_dir, 'enhanced_test.csv')\n",
    "    result_file = os.path.join(cache_dir, 'final_predictions.csv')\n",
    "    \n",
    "    # Stage 1: preprocessing (results are cached to CSV after the first run)\n",
    "    print(\"=\"*50)\n",
    "    print(\"开始增强数据预处理...\")\n",
    "    train_data = enhanced_data_process(train_csv, logs_csv, info_csv, need_label=True, out_file=cached_train)\n",
    "    test_data = enhanced_data_process(test_csv, logs_csv, info_csv, need_label=False, out_file=cached_test)\n",
    "    \n",
    "    # Stage 2: feature engineering on both frames\n",
    "    print(\"\\n进行增强特征工程...\")\n",
    "    train_data = enhanced_feature_engineering(train_data)\n",
    "    test_data = enhanced_feature_engineering(test_data)\n",
    "    \n",
    "    # Stage 3: feature selection on the training frame\n",
    "    print(\"\\n进行特征选择...\")\n",
    "    selected_features = feature_selection(train_data, num_features=25)\n",
    "    print(f\"选择的{len(selected_features)}个特征: {selected_features}\")\n",
    "    \n",
    "    # Stage 4: model tuning and ensembling\n",
    "    print(\"\\n\" + \"=\"*50)\n",
    "    print(\"训练高级模型并进行集成...\")\n",
    "    model, auc_score = advanced_model_training(train_data, selected_features)\n",
    "    print(f\"\\n最终集成模型AUC: {auc_score:.4f}\")\n",
    "    \n",
    "    # Stage 5: score the test set and save the submission\n",
    "    print(\"\\n\" + \"=\"*50)\n",
    "    print(\"在测试集上进行预测...\")\n",
    "    result = make_predictions(model, test_data, selected_features, out_file=result_file)\n",
    "    \n",
    "    # Show a sample of the predictions\n",
    "    print(\"\\n预测结果样例:\")\n",
    "    print(result.head(10))\n",
    "    \n",
    "    print(f\"\\n预测完成! 结果已保存到 {result_file}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "914f2f80",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==================================================\n",
      "开始增强数据预处理...\n",
      "\n",
      "进行增强特征工程...\n",
      "\n",
      "进行特征选择...\n",
      "选择的17个特征: ['user_id', 'merchant_id', 'first_action_time', 'click_ratio', 'collect_ratio', 'avg_action', 'first_action', 'action_std', 'unique_users', 'total_actions_merchant', 'avg_action_merchant', 'action_std_merchant', 'visit_regularity', 'age_range', 'user_value_score', 'merchant_popularity', 'action_compatibility']\n",
      "\n",
      "==================================================\n",
      "训练高级模型并进行集成...\n",
      "\n",
      "正在训练并优化 LGB 模型...\n",
      "Fitting 3 folds for each of 16 candidates, totalling 48 fits\n",
      "LGB 最佳参数: {'model__learning_rate': 0.1, 'model__max_depth': 7, 'model__n_estimators': 200, 'model__num_leaves': 31}\n",
      "LGB 最佳AUC: 0.6374\n",
      "\n",
      "正在训练并优化 XGB 模型...\n",
      "Fitting 3 folds for each of 16 candidates, totalling 48 fits\n",
      "XGB 最佳参数: {'model__learning_rate': 0.1, 'model__max_depth': 7, 'model__n_estimators': 200, 'model__subsample': 0.8}\n",
      "XGB 最佳AUC: 0.6364\n",
      "\n",
      "正在训练并优化 RF 模型...\n",
      "Fitting 3 folds for each of 8 candidates, totalling 24 fits\n",
      "RF 最佳参数: {'model__max_depth': 20, 'model__min_samples_split': 5, 'model__n_estimators': 200}\n",
      "RF 最佳AUC: 0.6115\n",
      "\n",
      "集成模型在验证集上的AUC: 0.8958\n",
      "\n",
      "最终集成模型AUC: 0.8958\n",
      "\n",
      "==================================================\n",
      "在测试集上进行预测...\n",
      "预测结果已保存到: D:/Temporary/jqxx/tmp/final_predictions.csv\n",
      "\n",
      "预测结果样例:\n",
      "   user_id  merchant_id      prob  label\n",
      "0    34176         3906  0.139111      0\n",
      "1    34176          121  0.072985      0\n",
      "2    34176         4356  0.141021      0\n",
      "3    34176         2217  0.055636      0\n",
      "4   230784         4818  0.083673      0\n",
      "5   362112         2618  0.039598      0\n",
      "6    34944         2051  0.079821      0\n",
      "7   231552         3828  0.464373      0\n",
      "8   231552         2124  0.037043      0\n",
      "9   232320         1168  0.071031      0\n",
      "\n",
      "预测完成! 结果已保存到 D:/Temporary/jqxx/tmp/final_predictions.csv\n"
     ]
    }
   ],
   "source": [
    "# Run the main program\n",
    "if __name__ == '__main__':\n",
    "    main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "79129e2b",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
