{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "hZS0BmefTdQY"
      },
      "source": [
        "# 测试数据和方法准备\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "w4SNN1Pnv6ca"
      },
      "outputs": [],
      "source": [
        "# 代码改了重新reload\n",
        "import importlib\n",
        "import myscript\n",
        "import myscript.cbt\n",
        "import myscript.fbt\n",
        "import myscript.x\n",
        "importlib.reload(myscript)\n",
        "importlib.reload(myscript.cbt)\n",
        "importlib.reload(myscript.fbt)\n",
        "importlib.reload(myscript.x)\n",
        "from myscript.cbt import MyCBT1"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xqW2BLlX2SXv"
      },
      "outputs": [],
      "source": [
        "from myscript.test_global import outer_f\n",
        "x = outer_f('a')\n",
        "transforms = [x]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "xfkgxBtGzvJF"
      },
      "outputs": [],
      "source": [
        "import pickle\n",
        "with open('transforms.pkl', 'wb') as file:\n",
        "  pickle.dump(transforms, file)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 3,
      "metadata": {
        "id": "-_WyARcpNR_b"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "   A  B  C  D      E\n",
            "0  1  x  p  m   True\n",
            "1  2  y  q  n  False\n",
            "2  3  z  r  o   True\n"
          ]
        }
      ],
      "source": [
        "import pandas as pd\n",
        "\n",
        "data = {\n",
        "    'A': [1, 2, 3],\n",
        "    'B': ['x', 'y', 'z'],\n",
        "    'C': ['p', 'q', 'r'],\n",
        "    'D': ['m', 'n', 'o'],\n",
        "    'E': [True, False, True]\n",
        "}\n",
        "\n",
        "df = pd.DataFrame(data)\n",
        "print(df)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": true,
        "id": "aCAOhVGfNtyL"
      },
      "outputs": [],
      "source": [
        "print(df.shape)\n",
        "print(df.columns[0])\n",
        "df.dtypes"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "NR66bQIYpi6K"
      },
      "source": [
        "## 字典编码"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": true,
        "id": "lVRR6a4RR1Bt"
      },
      "outputs": [],
      "source": [
        "vocab = {}\n",
        "for index, row in df.iterrows():\n",
        "  for column_name, value in row.items():\n",
        "    print(column_name + \"_\" + str(value))\n",
        "    vocab.setdefault(column_name + \"_\" + str(value), len(vocab))\n",
        "\n",
        "print(vocab)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": true,
        "id": "tiCtdWEUUkUy"
      },
      "outputs": [],
      "source": [
        "new_df = pd.DataFrame(columns=range(len(vocab)))\n",
        "print(new_df)\n",
        "print(df.shape)\n",
        "for index, row in df.iterrows():\n",
        "  new_df.loc[index] = 0.0\n",
        "  for column_name, value in row.items():\n",
        "    # print(column_name + \"_\" + str(value))\n",
        "    index_in_vocab = vocab[column_name + \"_\" + str(value)]\n",
        "    new_df.loc[index, index_in_vocab] = 1.0\n",
        "    # print(f\"set {index}, {index_in_vocab} = 1\")\n",
        "    # print(new_df)\n",
        "\n",
        "print(new_df)\n",
        "# print(df)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "qhLxxfSTvefP"
      },
      "source": [
        "# 基于函数dump"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "i9AgslljuUYv"
      },
      "source": [
        "## 只保存了对两个方法的引用, 没保存到任何状态\n",
        "\n",
        "add_to_arr引用了明显泄露的一个变量dynamic_v"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 343
        },
        "id": "5XT2BHI0dVSe",
        "outputId": "b2685432-2bc8-41c1-af4a-3dc42c490b13"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "<function myscript.x.myadd(b)>"
            ]
          },
          "execution_count": 1,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "from myscript.x import myadd\n",
        "myadd"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 2,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 183
        },
        "id": "XOqrYvAVdqK0",
        "outputId": "172235f0-7445-4893-b92a-20d890932140"
      },
      "outputs": [
        {
          "data": {
            "text/plain": [
              "8"
            ]
          },
          "execution_count": 2,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "myadd(5)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "fZNTxEI2fC9b"
      },
      "outputs": [],
      "source": [
        "import myscript\n",
        "from myscript.x import add_to_arr\n",
        "add_to_arr(4)\n",
        "print(myscript.x.dynamic_v)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Au-2ULuvqrkB"
      },
      "outputs": [],
      "source": [
        "add_to_arr"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "VLDx1ijIqsfu"
      },
      "outputs": [],
      "source": [
        "transforms = [add_to_arr, myadd]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "HmNpD3szeOsP"
      },
      "outputs": [],
      "source": [
        "import pickle\n",
        "with open('method.pkl', 'wb') as file:\n",
        "  pickle.dump(transforms, file)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "IUM8nu6rzQg-"
      },
      "source": [
        "## 为什么会泄露呢\n",
        "\n",
        "- 外部程序不知道你的方法内引用了哪些泄露变量\n",
        "- 泄露的地方是通过静态方式引用的, **引用的根是某个代码文件(也就是module)定义的全局变量**\n",
        "- 也就是该方法是有副作用的方法\n",
        "  - 所以一个想法是将泄露的dynamic_v作为参数传给方法, 这样这个方法变成了一个无副作用的函数\n",
        "  - 那传入的参数, 也要通过其他方式dump好, 还得想想怎么传递\n",
        "\n",
        "> 引用静态泄露变量无所谓\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "bKqNLMj-QI3i"
      },
      "source": [
        "### 方法内部状态会丢失么, 会\n",
        "这种情况应该比较少 因为方法内部变量, 他们自己也没法保存留作predict用. 除非方法内部把变量写到文件里, 那就是下述依赖文件的情况了"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "bVFZLBG74PPr"
      },
      "source": [
        "- 无闭包情况, 内部变量不会被保存, 相当于每次重新fit, 肯定不行"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": true,
        "id": "hcrNXTpAQNXC"
      },
      "outputs": [],
      "source": [
        "from myscript.fbt import transform\n",
        "transform(df)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "rECS4Gr93niG"
      },
      "outputs": [],
      "source": [
        "transforms = [transform]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "b5J5OsjQ7t2R"
      },
      "outputs": [],
      "source": [
        "from myscript.fbt import transform0\n",
        "transform0(df)\n",
        "transforms = [transform0]"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "houLYGq_4dNP"
      },
      "source": [
        "- 闭包, 不能dump local\n",
        "<!-- TODO 如果不用pickle, 用joblib之类的能否正确dump闭包呢 -->"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": true,
        "id": "j5NjSehkp9_1"
      },
      "outputs": [],
      "source": [
        "# TODO 如果不用pickle, 用joblib之类的能否正确dump闭包呢\n",
        "from myscript.fbt import transformer_1\n",
        "real_trans = transformer_1(df)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ZWe8nV4j4hxo"
      },
      "source": [
        "- 闭包, 全都global了还闭个屁"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": true,
        "id": "Im0nc0sF1Eio"
      },
      "outputs": [],
      "source": [
        "from myscript.fbt import transformer_2\n",
        "real_trans = transformer_2(df)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": true,
        "id": "MrLWmr3Fy6lu"
      },
      "outputs": [],
      "source": [
        "real_trans(df)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "N58uJmAIzi2J"
      },
      "outputs": [],
      "source": [
        "transforms = [real_trans]"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "0JNgDztS42Ol"
      },
      "source": [
        "- 把内部变量变成参数传进来肯定可以, 那就变成下述情况了"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Q8spNYdV9JRV"
      },
      "source": [
        "## 如何把握住泄露的变量呢?\n",
        "> transforms这个对象是dump的核心, 不需要模型上线的话, 不用定义\n",
        "\n",
        "- **无副作用函数, 无需要保存的内部变量**, 只有1个df作为参数. 可以直接序列化\n",
        "> df参数: array-like的数据, 如dataframe, array, nparray\n",
        "\n",
        "- **无副作用函数, 无需要保存的内部变量**, 除了df参数, 也有其他参数, 其他参数需要和函数一起放到transforms数组里\n",
        "\n",
        "- **有副作用函数 - 依赖global**, 不行\n",
        "> 有副作用函数, 即使让用户把副作用变量放到transforms里dump, 但是load的时候还得框架来赋值, 不行\n",
        "- **有需要保存的内部变量**, 当成参数或者当成(返回值? 不太好吧)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "mjVWytd71aXY"
      },
      "source": [
        "### 无副作用函数+额外参数"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "sXaMmtd8zkvn"
      },
      "outputs": [],
      "source": [
        "from myscript.fbt import add_to_arr_extra_param\n",
        "myarr = ['add_to_arr_extra_param']\n",
        "add_to_arr_extra_param(myarr, 44)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "OisD-tAb6AsO"
      },
      "outputs": [],
      "source": [
        "# 这里用元组了, 实际封装个类可能更好\n",
        "# 可以正常dump/load\n",
        "transforms = [(add_to_arr_extra_param, myarr)]\n",
        "transforms\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "o84hMfd0-Pue"
      },
      "source": [
        "### 依赖global就dump整个module?\n",
        "pickle不让dump module, 其他的没试 TODO\n",
        "\n",
        "想也不行, module是从代码加载出来的, 不能从序列化反推吧?\n",
        "\n",
        "非要用global变量, 只能是有个额外的setter将反序列化的东西set到module上"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-YS5DAig6b73"
      },
      "source": [
        "## 参数不是df的函数又如何\n",
        "期望每个trans都是输入df, 输出df的, 一个参数最好了\n",
        "不是的话需要包装一下\n",
        "\n",
        "### 对单行, 指定列s进行转换的\n",
        "类似map, udf, 如月冠sex模型开始的特征处理\n",
        "\n",
        "提供一个工具方法或者工具类, 将这种udf包装一下, 包装成cbt或fbt"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "uXHRT3RJKktI"
      },
      "outputs": [],
      "source": [
        "# 使用function包装成fbt\n",
        "def map_to_transform(map_f):\n",
        "  def wrapper_f(df):\n",
        "    # TODO: 按需选择按行/按列/逐元素应用map_f, 这里先用逐元素的占位实现\n",
        "    return df.applymap(map_f)\n",
        "  return wrapper_f\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "emAfiN4Pl7F-"
      },
      "source": [
        "## 函数想既fit又transform\n",
        "默认我们推荐函数只负责transform, 将fit迁移, 将fit结果作为参数传入\n",
        "\n",
        "非要fit_transform, 方案:\n",
        "\n",
        "可行:\n",
        "- cbt\n",
        "- fit结果作为参数空引用传入\n",
        "  \n",
        "  - 函数内根据fit结果引用参数是否为空, 或者下述train/predict标记, 来判断要不要fit\n",
        "  - 执行之后, 通过额外参数方案, fit结果就会被序列化\n",
        "- fit结果在函数内部自行写到额外文件路径等地方了\n",
        "  - 函数内部判断要执行fit还是要load fit, 相当于自行load/*save了. 参看下述重复初始化问题\n",
        "\n",
        "不行:\n",
        "- fit结果作为函数的额外返回值? 不太好吧, 倒是能序列化保存上, 但是预测时如何传给函数呢\n",
        "\n",
        "下述seq模型就是同时fit_transform, 而且训练/预测阶段都是fit_transform一起调用的"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "sLeuo-f3mOqk"
      },
      "source": [
        "## 多个返回值\n",
        "- 第一个肯定是转换后的df\n",
        "- 如果额外返回值是预测时也需要的东西, 那不允许\n",
        "- 如果额外返回值是下一trans需要的东西, 那么作为下一trans参数参与序列化不用管.\n",
        "  - 如果是下一trans的东西, 且不能固化, 要预测时重新算的\n",
        "    \n",
        "    这次可以用global了 (那么额外参数除了支持具体的对象, 是否支持传个reference呢 TODO)\n",
        "    \n",
        "    两个trans合为一\n",
        "\n",
        "- 如果额外返回值是无关的, 那就无关吧"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "8c0JBo0nvnbQ"
      },
      "source": [
        "# 基于class/object的dump"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "vrclnbWaufXl"
      },
      "outputs": [],
      "source": [
        "from myscript.cbt import MyCBT1\n",
        "myarr = []\n",
        "a = MyCBT1(myarr)\n",
        "a.add_to_arr(33)\n",
        "a.add_to_arr1(44)\n",
        "print(myarr)\n",
        "print(a.arr) # 传入的变量\n",
        "print(a.arr1)  # 和内建的变量, 只要赋值给self, 都会正确dump"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "lqW90cDbvvxV"
      },
      "outputs": [],
      "source": [
        "a.add_to_arr(44)\n",
        "print(myarr)\n",
        "print(a.arr)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "W1eaZc8sud1i"
      },
      "outputs": [],
      "source": [
        "transforms = [a]"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "FkuMqfOuyFCP"
      },
      "source": [
        "## 测试cbt也可以引用外部变量, 泄露\n",
        "b.add_to_arr2(44) 引用的变量是泄露的\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "7HvJWshUuT8m"
      },
      "outputs": [],
      "source": [
        "b = MyCBT1(myarr)\n",
        "b.add_to_arr(33)\n",
        "b.add_to_arr2(44)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "y7nzhaXTxa0k"
      },
      "outputs": [],
      "source": [
        "from myscript.x import dynamic_v\n",
        "print(dynamic_v)  # 外部变量有问题\n",
        "print(myarr)  # 参数变量没问题"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "ALkbD1eUxs6h"
      },
      "outputs": [],
      "source": [
        "transforms = [b]\n",
        "import pickle\n",
        "with open('object.pkl', 'wb') as file:\n",
        "  pickle.dump(transforms, file)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "pKXR6x4sigEK"
      },
      "source": [
        "## load/save/fit/transform方法\n",
        "\n",
        "对于cbt的对象, 完整的协议是 load/save/init/fit/transform\n",
        "\n",
        "- 框架判断对象有load/save就用, 没有就pickle的load/save\n",
        "- fit最好直接return self, 如果return其他对象, 那么框架应该关心的是fit返回的对象\n",
        "- transform里不要做一次性的初始化工作, 防止反复调用时重复执行了\n",
        "\n",
        "可能也要考虑fit_transform一起的情况\n",
        "\n",
        "方法调用\n",
        "- 训练时: 构造对象(空load), (init), fit, transform, save\n",
        "- 预测时: load, (init), transform"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "c8USBQUk91vG"
      },
      "source": [
        "## cbt如果不想继承框架的类呢\n",
        "\n",
        "- 只是方法名不一样还好.\n",
        "\n",
        "```\n",
        "myObj = MyCustomCls\n",
        "myObj1 = myObj.fit1(xxx)\n",
        "\n",
        "myObj1.transform1(xxx)\n",
        "\n",
        "```\n",
        "再包装一下咯\n",
        "`transforms = [someWrapper(myObj1, transform1)]`\n",
        "- 如果不遵循load/save/fit, 到时也能包装, 但是可能会出现fbt遇到的那些问题\n",
        "\n",
        "<br/>\n",
        "\n",
        "### fbt相当于简化版本的cbt, 不继承框架的对象, 也是简化版本的cbt\n",
        "为他们都提供一种包装, 自行指定load/save/transform方法来构造一个cbt"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "TA42d0j2gOUA"
      },
      "source": [
        "# 如何执行transforms"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "CXon_VlvMCm0"
      },
      "source": [
        "## array-like问题\n",
        "df参数: array-like的数据, 如dataframe, array, nparray\n",
        "\n",
        "transform内部代码是用户写的, 他们可以依赖或者不依赖具体的df类型, 那么就得假定可能依赖的?\n",
        "\n",
        "\n",
        "- 先假定离线predict和在线serving都用pandas的df做输入\n",
        "\n",
        "\n",
        "- 如果train阶段代码是基于spark写的, 那么这个代码确实很难复用到在线serving上把, 能复用到离线predict就不错了\n",
        "\n",
        "  > 如果愿意改成基于pandas/sklearn的那更好了(mappartitions方式)\n",
        "  \n",
        "  只考虑train和离线predict的话, 输入就统一成spark的df, 即要求每个trans都是输入df输出df的, 不是的话包装一下\n",
        "\n",
        "\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "CEYY5dtwgVKo"
      },
      "source": [
        "## 构造df传给transforms\n",
        "- 在线serving, 把输入的一条记录, 构建成1行n列的df, 传入\n",
        "- 离线predict, 单机, 整个数据读成df, 传入\n",
        "- 离线predict, spark, mappartitions, 然后一小批(100行) 组成一个df, 传入\n",
        "- spark df的情况以后再说"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "77LhKLinoO6I"
      },
      "source": [
        "## 方法调用\n",
        "根据各种情况中,transforms数组里可能出现的各种类型, 找到应该调用的方法"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "CUkn-lGq5gbj"
      },
      "source": [
        "## 训练时的transforms数组\n",
        "### 用于dump的transforms数组的创建时机\n",
        "\n",
        "要在fit之后, 额外参数具体化之后, cbt构造/init/fit之后\n",
        "\n",
        "### **驱动训练数据的变换**\n",
        "\n",
        "如果在训练时, 用户也想用transforms数组来驱动训练数据的变换\n",
        "\n",
        "- 额外参数需要具体化\n",
        "- init和fit可以交由框架来调用\n",
        "\n",
        "\n",
        "### 所以有关transforms数组可能有些辅助方法\n",
        "对应 save load init fit transform可能都要有\n",
        "\n",
        "那为啥不弄个CompositeCBT, 专门用于组合的, 替换这个数组\n",
        "\n",
        "\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "N2DvPMXYEqQT"
      },
      "source": [
        "# 各种特殊场景"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "DNwIj5rIEDbJ"
      },
      "source": [
        "## 不想fbt/cbt 直接一堆代码堆在main文件里\n",
        "这可不行哦\n",
        "框架都不知道哪到哪是transform, 怎么调用"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "oKQXxXjhc1Ih"
      },
      "source": [
        "## 框架负责save/load, 用户代码如何save/load\n",
        "这种transforms直接框架负责save和load, 用户自己的代码没有save/load的时机了?\n",
        "- fbt,是的, 如果需要, 利用fbt封装cbt的那种工具.\n",
        "- cbt,可以自行实现load/save\n",
        "\n",
        "框架save的东西, 框架load了, 应该直接就能用(transform一个df)了, 还缺少啥东西要用户自己load么?\n",
        "  \n",
        "- 比如一个类各种变量的初始化? 不用对象已经帮你反序列化出来了, 不用初始化.\n",
        "- **重复初始化1**. 离线场景中transforms反序列化后, 会按小批df, 调用多次transforms, 是否有重复初始化情况?\n",
        "  - 额外参数不会, 框架会反序列化一次, 多次传入而已\n",
        "  - 如果用户根据参数里的路径, 读取一个文件, 将文件构造成方法内变量, 这就尴尬了.\n",
        "    \n",
        "    参看其他章节 \"函数想既fit又transform\",\n",
        "\n",
        "    解决办法, fbt也遵循cbt约定, 将这种操作(多半是fit或init)移动到方法外, 然后通过额外参数形式传入(或者全局变量).\n",
        "    \n",
        "    万一必须fit_transform呢 别万一了, 拆到函数外去, 或者直接cbt, (非要这样也能执行, 就是重复初始化)\n",
        "\n",
        "\n",
        "    \n",
        "- **重复初始化2**. 对于cbt的对象, 他可能有完整的load/save/fit/transform方法, 按照约定的话不会重复初始化\n",
        "  - **如有必要, 可加入新的接口方法, init** 感觉有必要了\n",
        "  - 像sklearn的FeatureHasher这种, 提供了fit_transform方法的, 一定有个单独的transform方法的\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "> 核心方法包括: save/load/fit/transform"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OcPcE5GBAwyg"
      },
      "source": [
        "## 依赖当前目录的非代码文件\n",
        "如配置文件, 甚至动态文件\n",
        "- 对于参数文件, 通过上述\"额外参数\"功能序列化, 可以保存/加载/使用;\n",
        "所以可以把配置信息的对象, 当做参数传递\n",
        "- 如果非要读文件, 参看上述重复初始化的讨论\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "1kxObVnKddPE"
      },
      "source": [
        "## train/predict标记\n",
        "\n",
        "transforms, 框架可以save/load/transform, 除了df以外, 要一个标记参数, 告诉是train过程还是predict过程\n",
        "\n",
        "- 环境变量\n",
        "- global模块变量\n",
        "- 当成参数不方便"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "zmN64hdgKYzm"
      },
      "source": [
        "## transform在step中的问题\n",
        "\n",
        "直接看transform还好, 如果放到step中, 就要考虑用户完整的transform过程,\n",
        "是都放在特征变换的step还是一部分放在训练的step\n",
        "- 为啥不都在变换的step?\n",
        "\n",
        "  - 都变完了的话, 出来的可能是sparse.csr_matrix/DMatrix等等框架特定的类型的数据, 不一定能orc等进行序列化保存\n",
        "  \n",
        "  - 直接出向量的话, 也不利于统计分析了\n",
        "\n",
        "- 拆开的问题\n",
        "  \n",
        "   - 用户体验稍显割裂, 完整变换代码分为两部分\n",
        "   - 线上线下统一化割裂, 在离线预测和线上serving中, 是需要完整变换代码的\n",
        "     \n",
        "     (那只能要求用户在train的step也递交一个transforms了)\n",
        "\n",
        "拆不拆开都可以, 用户要保证整个transforms执行完之后的输出, 是可以直接作为model.predict的参数的就行\n",
        "\n",
        "暂定transform step不执行不保存\n",
        "transforms包含所有变换, 直达向量\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "VFm6JqmoV7Fw"
      },
      "source": [
        "# 结论们\n",
        "- 框架主要目标是预测阶段的transform托管, 怎么进行变换不管\n",
        "- transforms和这套抽象, 如果不想要进行预测的托管, 可以不用定义\n",
        "- 代码得全部保留, 框架不知道transforms引用了多少东西\n",
        "- 不管fit方法, 自己去fit, 给我框架transforms就行\n",
        "- 输入先按照pandas的df封装, 搞不定的再说一些其他输入类型封装\n",
        "- fbt相当于简化版本的cbt, 不继承框架的对象, 也是简化版本的cbt\n",
        "-\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Zil14M4aG0px"
      },
      "source": [
        "## 其他tips\n",
        "- partial函数, 偏函数功能? 可以将一个多参数函数的一些参数固化?\n",
        "- pickle是否只能保存对象? 方法/局部变量等能保存上么 ? 方法可以, 局部变量不可以, module不可以"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Q1u9gOyNxz6E"
      },
      "source": [
        "# 五个模型中的transform如何改造"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "cpH56FSB3nu7"
      },
      "source": [
        "## map/udf的函数+`[x for y]语法`\n",
        "`data1['phone_factory_days'] = [get_phone_day(str(xx)) for xx in data1.dv_factorytime.fillna('').astype(str)]`\n",
        "\n",
        "纯静态的\n",
        "\n",
        "### fbt\n",
        "\n",
        "- 一个输入df输出df的函数\n",
        "\n",
        "- 将上述写法抽象为\n",
        "`df['out_col'] = [udf(xx)) for xx in df['input_col']]`\n",
        "实现一个辅助wrapper, 用户只需要传udf+输入字段名+输出字段名, 替用户操作df\n",
        "\n",
        "### cbt\n",
        "实现一个cbt类, 实现一个transform方法. 上述2也是cbt"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OL8hbsFj-FY2"
      },
      "source": [
        "## df的一些基本操作\n",
        "也得写个函数:\n",
        "```\n",
        "# 原始\n",
        "X = df.drop(target, axis=1)\n",
        "# 改造后\n",
        "def x(df, target):\n",
        " return df.drop(target, axis=1)\n",
        "\n",
        "transforms = [f(x, target)]\n",
        "```\n",
        "或者放到其他transform的函数内去, 省的小函数太多"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OyKyFsFnDLTM"
      },
      "source": [
        "## sex模型中的子模型又该如何应对呢\n",
        "### fbt\n",
        "- 有静态的额外参数或global参数\n",
        "- 引用额外文件, 最好将这个初始工作提前做好, 防止重复初始问题, 将子模型clf作为参数\n",
        "\n",
        "fbt内部, 对df做一些数值处理, 转向量, 调用clf, 增加新列到df, 返回\n",
        "\n",
        "训练时和预测时没有区别.\n",
        "\n",
        "## cbt\n",
        "实现一个cbt类\n",
        "- 静态参数可以写死, 可以作为类参数传进来\n",
        "- 额外文件, 可以读取好作为参数传进来\n",
        "- 额外文件, 可以在类init方法进行初始化\n",
        "- 实现一个transform方法\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "K1Wd1Hky9ZdB"
      },
      "source": [
        "## sklearn的transform\n",
        "有这些类: Pipeline/ColumnTransformer/FeatureHasher等, 他们自有一些方法transform/fit/fit_transform\n",
        "\n",
        "直接将fit之后的上述对象放到数组\n",
        "\n",
        "框架通过鸭子类型或者通过判断他的class是sklearn, 进行调用"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "JOJhuKLIARC8"
      },
      "source": [
        "## seq模型的csv转TF特征\n",
        "手动读取csv文件生成的字符串数组, 按行处理, 根据字典编码, 特征衍生, 转为TF的对象, 写出(写出特殊类型, 如何支持, 是pipeline要考虑的. 托管预测不需要写出)\n",
        "\n",
        "手动读csv改为接受df作为输入.\n",
        "\n",
        "### cbt\n",
        "- 字典直接写死为类成员变量\n",
        "  - 初始内容写死或传参\n",
        "  - fit过程会修改他, save/load会自动处理它\n",
        "- 其他参数还是正常类参数\n",
        "\n",
        "原代码中`for row in log_iter:`之后就是具体逻辑了, 可以看出是fit_transform一起调用的, 而且train和predict都是同时fit_transform的.\n",
        "\n",
        "对于cbt来讲, 对象我给你序列化好了, 是可以支持的:\n",
        "\n",
        "- 实现一个fit_transform方法即可\n",
        "  \n",
        "  一边读数据一边编字典\n",
        "\n",
        "  如果需要的话, 可以判断当前是训练还是在预测\n",
        "\n",
        "- 输出df么\n",
        "  - 如果df和pipeline支持TF类型对象, 可以也输出df\n",
        "  - 如果不支持(大概率不行, 而且用了n_fold写多路了), 直接自己写了, 不return df了\n",
        "\n",
        "> 但是不return的话, 在线预测的时候不方便哦"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "3do5VSLCD-1u"
      },
      "source": [
        "## mr进行迭代的\n",
        "训练阶段用户就是爱用mr, 那就用吧, 还好是基于python的mr.\n",
        "一样要把transforms数组定义了.\n",
        "\n",
        "map阶段, 一批一批的构造pandas df"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "g6UKW2v92Xty"
      },
      "source": [
        "## TODO sparkml的transform"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "mgtNzuYfZlSD"
      },
      "source": [
        "# 为Glaucus提供sdk\n",
        "## 模型部分\n",
        "```\n",
        "# 方案一\n",
        "model = oceanus_sdk.load_model(model_id, path, xx)\n",
        "#该方法内找到代码和模型dump文件(mlflow格式的)\n",
        "y = model.predict(df1)\n",
        "\n",
        "# 方案二\n",
        "info = oceanus_sdk.model_info(model_id, path, xx)\n",
        "file = oceanus_sdk.model_file(model_id, path, xx)\n",
        "\n",
        "if info.type == xgboost:\n",
        "  m = xgboost.load(file)\n",
        "  y = m.predict(df1)\n",
        "elif:\n",
        "  m = sklearn.load(file)\n",
        "  y = m.predict_proba(df1)\n",
        "elif:\n",
        "else\n",
        "\n",
        "# 方案二.1\n",
        "m = triton.load_model(file)\n",
        "y = m.predict\n",
        "\n",
        "```\n",
        "\n",
        "\n",
        "\n",
        "## trans部分\n",
        "```\n",
        "t = oceanus_sdk.load_transforms(model_id, path, xx) #该方法内部找到代码和pickledump文件, 进行load, 然后init\n",
        "df1 = t.transform(df)\n",
        "\n",
        "y = model.predict(df1)\n",
        "```"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [
        "NR66bQIYpi6K",
        "i9AgslljuUYv"
      ],
      "provenance": [],
      "toc_visible": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.7.13"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
