{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "6321595e",
   "metadata": {},
   "source": [
    "# Cox生存分析\n",
    "\n",
    "* `mydir`：自己的数据\n",
    "* `ostime_column`: 数据对应的生存时间，不一定非得是OST，也可以是DST、FST等。\n",
    "* `os`：生存状态，不一定非得是OS，也可以是DS、FS等。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cb0fc498",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from lifelines import CoxPHFitter\n",
    "import pandas as pd\n",
    "from onekey_algo.custom.components.comp1 import normalize_df\n",
    "from sklearn.model_selection import train_test_split\n",
    "from onekey_algo import get_param_in_cwd\n",
    "from onekey_algo.custom.components.comp1 import fillna\n",
    "from lifelines.utils import concordance_index\n",
    "\n",
    "# Survival-analysis configuration, read from the working-directory config.\n",
    "event_col = get_param_in_cwd('event_col')  # event/censoring status column, e.g. OS\n",
    "group_info = 'group'  # column assigning each sample to a cohort (train/val/test)\n",
    "task_type = 'Combined_'  # prefix for all output files of this combined model\n",
    "duration_col= get_param_in_cwd('duration_col')  # survival time column, e.g. OST\n",
    "data = pd.read_csv('features/Clinical_features_mulsel.csv')\n",
    "# data = normalize_df(data, not_norm='ID')\n",
    "label_data = pd.read_csv(get_param_in_cwd('label_file'))\n",
    "# Normalize IDs so they all end with .nii/.nii.gz and match the feature table's IDs.\n",
    "label_data['ID'] = label_data['ID'].map(lambda x: f\"{x}.nii.gz\" if not (f\"{x}\".endswith('.nii.gz') or  f\"{x}\".endswith('.nii')) else x)\n",
    "\n",
    "# Attach event, duration and cohort columns to the clinical features (inner join on ID).\n",
    "data = pd.merge(data, label_data[['ID', event_col, duration_col, 'group']], on='ID', how='inner')\n",
    "data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c5ce1d5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_sig(prefix):\n",
    "    \"\"\"Concatenate the saved cox predictions of signature `prefix` across all configured subsets.\"\"\"\n",
    "    results = [pd.read_csv(f'results/{prefix}_cox_predictions_{subset}.csv')for subset in get_param_in_cwd('subsets')]\n",
    "    results = pd.concat(results, axis=0)\n",
    "    return results\n",
    "\n",
    "# Merge each signature's risk score into the feature table, keyed by sample ID.\n",
    "mn = {'Radiomics': f'Radiomics'}\n",
    "for sig in mn:\n",
    "    signature = get_sig(mn[sig])\n",
    "    signature.columns = ['ID', sig, '-']  # prediction files are (ID, score, extra) triples\n",
    "    data = pd.merge(data, signature, on='ID', how='inner')\n",
    "data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b7f67fd",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_dl():\n",
    "    \"\"\"Load per-sample deep-learning risk scores (column `DL`) for the train and val splits.\"\"\"\n",
    "    sel_model = get_param_in_cwd('dl_sel_model')\n",
    "    model_root = os.path.join(get_param_in_cwd('model_root'), sel_model) \n",
    "    if not os.path.exists(model_root):\n",
    "        raise ValueError(f'配置有问题: dl_sel_model, 选择的模型{sel_model}不存在！')\n",
    "    sel_epoch = get_param_in_cwd('dl_sel_epoch')\n",
    "    if sel_epoch is None:\n",
    "        # No epoch selected: fall back to the best-epoch (BST) results exported during training.\n",
    "        results = [pd.read_csv(os.path.join(model_root, 'viz', f'BST_{subset}_RESULTS.txt'), sep='\\t', header=None,\n",
    "                               names=['ID', 'DL', 'event_', 'duration_']) for subset in ['TRAIN', 'VAL']]\n",
    "    else:\n",
    "        results = [pd.read_csv(os.path.join(model_root, subset, f'Epoch-{sel_epoch}.txt'), sep='\\t', header=None,\n",
    "                               names=['ID', 'DL', 'event_', 'duration_']) for subset in ['train', 'valid']]\n",
    "    results = pd.concat(results, axis=0)\n",
    "    # Map prediction file names back to image IDs; assumes names look like `<id>.nii.png` -- TODO confirm\n",
    "    results['ID'] = results['ID'].map(lambda x: os.path.basename(x).replace('.png', '.gz'))\n",
    "    return results\n",
    "\n",
    "data = pd.merge(data, get_dl()[['ID', 'DL']], on='ID', how='inner')\n",
    "# Persist DL scores per subset in the same (ID, extra, score) layout as other signatures.\n",
    "for subset in get_param_in_cwd('subsets'):\n",
    "    sdata = data[data[group_info] == subset]\n",
    "    sdata[['ID', '-', 'DL']].to_csv(f'results/DL_cox_predictions_{subset}.csv', index=False)\n",
    "data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a436fe52",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_dl25d():\n",
    "    \"\"\"Load per-sample 2.5D deep-learning risk scores (column `DL25D`) for the train and val splits.\"\"\"\n",
    "    sel_model = get_param_in_cwd('dl25d_sel_model')\n",
    "    model_root = os.path.join(get_param_in_cwd('model25d_root'), sel_model) \n",
    "    if not os.path.exists(model_root):\n",
    "        # Fixed: the message used to point at `dl_sel_model`, but this cell reads `dl25d_sel_model`.\n",
    "        raise ValueError(f'配置有问题: dl25d_sel_model, 选择的模型{sel_model}不存在！')\n",
    "    sel_epoch = get_param_in_cwd('dl25d_sel_epoch')\n",
    "    if sel_epoch is None:\n",
    "        # No epoch selected: fall back to the best-epoch (BST) results exported during training.\n",
    "        results = [pd.read_csv(os.path.join(model_root, 'viz', f'BST_{subset}_RESULTS.txt'), sep='\\t', header=None,\n",
    "                               names=['ID', 'DL25D', 'event_', 'duration_']) for subset in ['TRAIN', 'VAL']]\n",
    "    else:\n",
    "        results = [pd.read_csv(os.path.join(model_root, subset, f'Epoch-{sel_epoch}.txt'), sep='\\t', header=None,\n",
    "                               names=['ID', 'DL25D', 'event_', 'duration_']) for subset in ['train', 'valid']]\n",
    "    results = pd.concat(results, axis=0)\n",
    "    # Map prediction file names back to image IDs; assumes names look like `<id>.nii.npy` -- TODO confirm\n",
    "    results['ID'] = results['ID'].map(lambda x: os.path.basename(x).replace('.npy', '.gz'))\n",
    "    return results\n",
    "\n",
    "# Renamed from `get_dl` so it no longer shadows the 2D loader defined in the previous cell.\n",
    "data = pd.merge(data, get_dl25d()[['ID', 'DL25D']], on='ID', how='inner')\n",
    "for subset in get_param_in_cwd('subsets'):\n",
    "    sdata = data[data[group_info] == subset]\n",
    "    sdata[['ID', '-', 'DL25D']].to_csv(f'results/DL25D_cox_predictions_{subset}.csv', index=False)\n",
    "data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e773045e",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import onekey_algo.custom.components as okcomp\n",
    "from collections import OrderedDict\n",
    "\n",
    "group_info = 'group'\n",
    "# The Cox model is fitted on the training cohort only.\n",
    "train_data = data[(data[group_info] == 'train')]\n",
    "\n",
    "# Keep every configured subset (train included) for later evaluation and plotting.\n",
    "subsets = get_param_in_cwd('subsets')\n",
    "val_datasets = OrderedDict()\n",
    "for subset in subsets:\n",
    "    val_data = data[data[group_info] == subset]\n",
    "    val_datasets[subset] = val_data\n",
    "    val_data.to_csv(f'features/{task_type}{subset}_cox.csv', index=False)\n",
    "\n",
    "print('，'.join([f\"{subset}样本数：{d_.shape}\" for subset, d_ in val_datasets.items()]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0453d1f0",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "from lifelines import CoxPHFitter\n",
    "from lifelines.statistics import logrank_test\n",
    "from lifelines import KaplanMeierFitter\n",
    "from lifelines.plotting import add_at_risk_counts\n",
    "from lifelines.utils import concordance_index\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "thres = 1e-4  # p-values below this are reported as '<0.0001'\n",
    "loc = {'train': 0.5, 'val':0.5, 'test': 0.5}  # y-position of the p-value label per cohort\n",
    "for subset, test_data in val_datasets.items():\n",
    "    # Discriminative power of the DL score alone on this cohort.\n",
    "    c_index = concordance_index(test_data[get_param_in_cwd('duration_col')], test_data['DL'], \n",
    "                                test_data[get_param_in_cwd('event_col')])\n",
    "    cox_data = test_data.copy()\n",
    "    # Dichotomize at the cohort median DL score into high/low risk groups.\n",
    "    mean = float(cox_data[['DL']].describe().loc['50%'])\n",
    "    cox_data['HR'] = test_data['DL'] > mean\n",
    "    dem = (cox_data[\"HR\"] == True)\n",
    "    results = logrank_test(cox_data[duration_col][dem], cox_data[duration_col][~dem], \n",
    "                           event_observed_A=cox_data[event_col][dem], event_observed_B=cox_data[event_col][~dem])\n",
    "    p_value = f\"={results.p_value:.3f}\" if results.p_value > thres else f'<{thres}'\n",
    "    plt.title(f\"Cohort {subset} C-index:{c_index:.3f}\")\n",
    "    plt.ylabel('Probability')\n",
    "    fitted = []  # KM fitters that were actually fitted (a risk group can be empty)\n",
    "    if sum(dem):\n",
    "        kmf_high = KaplanMeierFitter()\n",
    "        kmf_high.fit(cox_data[duration_col][dem], event_observed=cox_data[event_col][dem], label=\"High Risk\")\n",
    "        kmf_high.plot_survival_function(color='r')\n",
    "        fitted.append(kmf_high)\n",
    "    if sum(~dem):\n",
    "        kmf_low = KaplanMeierFitter()\n",
    "        kmf_low.fit(cox_data[duration_col][~dem], event_observed=cox_data[event_col][~dem], label=\"Low Risk\")\n",
    "        kmf_low.plot_survival_function(color='g')\n",
    "        fitted.append(kmf_low)\n",
    "    plt.text(0.5, loc[subset] if subset in loc else 0.2, f\"P{p_value}\")\n",
    "    plt.xlabel('Time(months)')\n",
    "    # Fixed: pass only fitted fitters -- avoids a NameError when one risk group is empty.\n",
    "    add_at_risk_counts(*fitted, rows_to_show=['At risk'])\n",
    "    plt.savefig(f'img/DL_KM_{subset}.svg', bbox_inches='tight')\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "868a0d31",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "from lifelines import CoxPHFitter\n",
    "from lifelines.statistics import logrank_test\n",
    "from lifelines import KaplanMeierFitter\n",
    "from lifelines.plotting import add_at_risk_counts\n",
    "from lifelines.utils import concordance_index\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "thres = 1e-4  # p-values below this are reported as '<0.0001'\n",
    "loc = {'train': 0.5, 'val':0.5, 'test': 0.5}  # y-position of the p-value label per cohort\n",
    "for subset, test_data in val_datasets.items():\n",
    "    # Discriminative power of the 2.5D DL score alone on this cohort.\n",
    "    c_index = concordance_index(test_data[get_param_in_cwd('duration_col')], test_data['DL25D'], \n",
    "                                test_data[get_param_in_cwd('event_col')])\n",
    "    cox_data = test_data.copy()\n",
    "    # Dichotomize at the cohort median DL25D score into high/low risk groups.\n",
    "    mean = float(cox_data[['DL25D']].describe().loc['50%'])\n",
    "    cox_data['HR'] = test_data['DL25D'] > mean\n",
    "    dem = (cox_data[\"HR\"] == True)\n",
    "    results = logrank_test(cox_data[duration_col][dem], cox_data[duration_col][~dem], \n",
    "                           event_observed_A=cox_data[event_col][dem], event_observed_B=cox_data[event_col][~dem])\n",
    "    p_value = f\"={results.p_value:.3f}\" if results.p_value > thres else f'<{thres}'\n",
    "    plt.title(f\"Cohort {subset} C-index:{c_index:.3f}\")\n",
    "    plt.ylabel('Probability')\n",
    "    fitted = []  # KM fitters that were actually fitted (a risk group can be empty)\n",
    "    if sum(dem):\n",
    "        kmf_high = KaplanMeierFitter()\n",
    "        kmf_high.fit(cox_data[duration_col][dem], event_observed=cox_data[event_col][dem], label=\"High Risk\")\n",
    "        kmf_high.plot_survival_function(color='r')\n",
    "        fitted.append(kmf_high)\n",
    "    if sum(~dem):\n",
    "        kmf_low = KaplanMeierFitter()\n",
    "        kmf_low.fit(cox_data[duration_col][~dem], event_observed=cox_data[event_col][~dem], label=\"Low Risk\")\n",
    "        kmf_low.plot_survival_function(color='g')\n",
    "        fitted.append(kmf_low)\n",
    "    plt.text(0.5, loc[subset] if subset in loc else 0.2, f\"P{p_value}\")\n",
    "    plt.xlabel('Time(months)')\n",
    "    # Fixed: pass only fitted fitters -- avoids a NameError when one risk group is empty.\n",
    "    add_at_risk_counts(*fitted, rows_to_show=['At risk'])\n",
    "    plt.savefig(f'img/DL25D_KM_{subset}.svg', bbox_inches='tight')\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9cce536d",
   "metadata": {},
   "source": [
    "## Cox概览\n",
    "\n",
    "所有Cox回归的必要数据，主要关注的数据有3个\n",
    "1. `Concordance`: c-index\n",
    "2. `exp(coef)`: 每个特征对应的HR，同时也有其对应的95%置信区间。\n",
    "3. `p`: 表示特征是否显著。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b0201324",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "from lifelines import CoxPHFitter\n",
    "\n",
    "# Fit the multivariable Cox model on the training cohort; the penalizer adds shrinkage,\n",
    "# which stabilizes coefficients when the signatures are correlated.\n",
    "cph = CoxPHFitter(penalizer=0.3)\n",
    "# Drop non-feature columns: ID, the cohort label, and any '-' placeholder columns.\n",
    "cph.fit(train_data[[c for c in train_data.columns if c not in ['ID', 'group'] and not c.startswith('-')]], \n",
    "        duration_col=duration_col, event_col=event_col)\n",
    "cph.print_summary()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ac98781f",
   "metadata": {},
   "source": [
    "#### 输出每个特征的HR"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "addcccac",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training-set concordance index (C-index) of the fitted Cox model.\n",
    "cph.concordance_index_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "349aa141",
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Forest plot of each feature's hazard ratio (exp(coef)); height scales with feature count.\n",
    "plt.figure(figsize=(10, train_data.shape[1]-4))\n",
    "cph.plot(hazard_ratios=True)\n",
    "plt.savefig(f'img/{task_type}_feature_pvalue.svg')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b641bb02",
   "metadata": {},
   "outputs": [],
   "source": [
    "from onekey_algo.custom.components import nomogram\n",
    "from onekey_algo import get_param_in_cwd\n",
    "\n",
    "# NOTE(review): cox_data (the rounded copy) is computed but never used below --\n",
    "# presumably the nomogram was meant to receive it instead of train_data; confirm.\n",
    "cox_data = train_data.round(decimals=2)\n",
    "# time_settings maps display names to the survival horizons shown on the nomogram.\n",
    "sur_name = get_param_in_cwd('time_settings')\n",
    "nomogram.nomogram(train_data, duration=duration_col, result=event_col, \n",
    "                  columns=[c for c in train_data.columns if c not in ['ID', 'group', duration_col, event_col] and not c.startswith('-')],\n",
    "                  survs=list(sur_name.values()), surv_names=list(sur_name.keys()), with_r=False,\n",
    "                  width=7000, height=5000, save_name=f'img/nomogram.png',\n",
    "                  x_range='0.01,0.25,0.5,0.75,0.95')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6bf8910e",
   "metadata": {},
   "source": [
    "# KM 曲线\n",
    "\n",
    "根据HR进行分组，计算KM曲线以及log-rank test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "669023fa",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "from lifelines import CoxPHFitter\n",
    "from lifelines.statistics import logrank_test\n",
    "from lifelines import KaplanMeierFitter\n",
    "from lifelines.plotting import add_at_risk_counts\n",
    "\n",
    "thres = 1e-4  # p-values below this are reported as '<0.0001'\n",
    "loc = {'train': 0.5, 'val':0.5, 'test': 0.5}  # y-position of the p-value label per cohort\n",
    "for subset, test_data in val_datasets.items():\n",
    "    # C-index of the fitted combined Cox model on this cohort.\n",
    "    c_index = cph.score(test_data[[c for c in test_data.columns if c != 'ID']], scoring_method=\"concordance_index\")\n",
    "    # Risk-stratify by the predicted partial hazard (column 0), split at the cohort median.\n",
    "    y_pred = cph.predict_partial_hazard(test_data[[c for c in test_data.columns if c != 'ID']])\n",
    "    cox_data = pd.concat([test_data, y_pred], axis=1)\n",
    "    mean = cox_data.describe()[0]['50%']\n",
    "    cox_data['HR'] = cox_data[0] > mean\n",
    "\n",
    "    dem = (cox_data[\"HR\"] == True)\n",
    "    results = logrank_test(cox_data[duration_col][dem], cox_data[duration_col][~dem], \n",
    "                           event_observed_A=cox_data[event_col][dem], event_observed_B=cox_data[event_col][~dem])\n",
    "    p_value = f\"={results.p_value:.3f}\" if results.p_value > thres else f'<{thres}'\n",
    "    plt.title(f\"Cohort {subset} C-index:{c_index:.3f}\")\n",
    "    plt.ylabel('Probability')\n",
    "    fitted = []  # KM fitters that were actually fitted (a risk group can be empty)\n",
    "    if sum(dem):\n",
    "        kmf_high = KaplanMeierFitter()\n",
    "        kmf_high.fit(cox_data[duration_col][dem], event_observed=cox_data[event_col][dem], label=\"High Risk\")\n",
    "        kmf_high.plot_survival_function(color='r')\n",
    "        fitted.append(kmf_high)\n",
    "    if sum(~dem):\n",
    "        kmf_low = KaplanMeierFitter()\n",
    "        kmf_low.fit(cox_data[duration_col][~dem], event_observed=cox_data[event_col][~dem], label=\"Low Risk\")\n",
    "        kmf_low.plot_survival_function(color='g')\n",
    "        fitted.append(kmf_low)\n",
    "    plt.text(0.5, loc[subset] if subset in loc else 0.2, f\"P{p_value}\")\n",
    "    plt.xlabel('Time(months)')\n",
    "    # Fixed: pass only fitted fitters -- avoids a NameError when one risk group is empty.\n",
    "    add_at_risk_counts(*fitted, rows_to_show=['At risk'])\n",
    "    plt.savefig(f'img/{task_type}KM_{subset}.svg', bbox_inches='tight')\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45e697b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import numpy as np\n",
    "\n",
    "def get_prediction(model: CoxPHFitter, data, ID=None, **kwargs):\n",
    "    \"\"\"Return per-sample partial hazard and expected survival time, optionally prefixed with IDs.\"\"\"\n",
    "    hr = model.predict_partial_hazard(data)\n",
    "    expectation = model.predict_expectation(data)\n",
    "    # Fixed: the concat/rename used to run unconditionally and was then overwritten\n",
    "    # by the if/else below; the duplicated dead statements were removed.\n",
    "    if ID is not None:\n",
    "        predictions = pd.concat([ID, hr, expectation], axis=1)\n",
    "        predictions.columns = ['ID', 'HR', 'expectation']\n",
    "    else:\n",
    "        predictions = pd.concat([hr, expectation], axis=1)\n",
    "        predictions.columns = ['HR', 'expectation']\n",
    "    return predictions\n",
    "\n",
    "os.makedirs('results', exist_ok=True)\n",
    "info = []\n",
    "for subset, test_data in val_datasets.items():\n",
    "    if subset in get_param_in_cwd('subsets'):\n",
    "        results = get_prediction(cph, test_data, ID=test_data['ID'])\n",
    "        results.to_csv(f'results/{task_type}cox_predictions_{subset}.csv', index=False)\n",
    "        results['group'] = subset\n",
    "        info.append(results)\n",
    "        # Also export a tab-separated file for X-tile cutoff analysis.\n",
    "        pd.merge(results, label_data[['ID', event_col, duration_col]], on='ID', how='inner').to_csv(f'features/{task_type}4xtile_{subset}.txt', \n",
    "                                                                                                    index=False, sep='\\t')\n",
    "info = pd.concat(info, axis=0)\n",
    "info"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "82433b3b",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
