{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 1. 导入补偿飞行数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import math\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from process_lib import pd_lowpass, pd_highpass, pd_bandpass, construct_features, calc_model, calc_compensation, calc_angle"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Setup Global variables:\n",
    "float_format, filename basename, input_dir, output_dir, intermediate_dir, input_file\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Output precision for every CSV written by this notebook.\n",
    "float_format = '%6.6e'\n",
    "\n",
    "# setup input filename\n",
    "filename = '4colsY2new.txt'\n",
    "# filename = '4cols.txt'\n",
    "# filename = '4cols2.txt'\n",
    "# filename = '4colsY2.txt'\n",
    "# filename = '4colUAV.txt'\n",
    "\n",
    "# Locate the input directory relative to the working directory and derive\n",
    "# the output directories from it.\n",
    "basename = os.path.splitext(filename)[0]\n",
    "if os.path.exists('./input/'):\n",
    "  input_dir = './input/'\n",
    "  output_dir = './output/'\n",
    "elif os.path.exists('../input/'):\n",
    "  input_dir = '../input/'\n",
    "  output_dir = '../output/'\n",
    "else:\n",
    "  # Fall back to the defaults so later cells still have valid variables.\n",
    "  input_dir = './input/'\n",
    "  output_dir = './output/'\n",
    "  print('input_dir not found!')\n",
    "\n",
    "intermediate_dir = output_dir + 'intermediate/'\n",
    "# BUG FIX: the original only ran os.mkdir when output_dir was missing, so an\n",
    "# existing output_dir without intermediate/ made later to_csv calls fail.\n",
    "# makedirs(..., exist_ok=True) creates the whole chain idempotently.\n",
    "os.makedirs(intermediate_dir, exist_ok=True)\n",
    "\n",
    "input_file = os.path.join(input_dir, filename)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 计算航向角\n",
    "calc_angle(filename)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Setup filter options:\n",
    "_USING_LOWPASS, _USING_HIGHPASS, _USING_BANDPASS, _USING_SOS\n",
    "type, order\n",
    "\n",
    "Setup filter cutoff frequency:\n",
    "wn1, wn2, wn3, delta_t"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# setup filter options\n",
    "_USING_LOWPASS  = True\n",
    "_USING_HIGHPASS = True\n",
    "_USING_BANDPASS = False\n",
    "_USING_SOS      = False\n",
    "print('_USING_LOWPASS:',  _USING_LOWPASS)\n",
    "print('_USING_HIGHPASS:', _USING_HIGHPASS)\n",
    "print('_USING_BANDPASS:', _USING_BANDPASS)\n",
    "print('_USING_SOS:', _USING_SOS)\n",
    "\n",
    "# Filter family.  NOTE: this shadows the builtin `type`; kept as-is because\n",
    "# every later cell references the global name `type`.\n",
    "type = 'butter' #default\n",
    "# type = 'bessel'\n",
    "# type = 'ellip'\n",
    "# type = 'cheby1'\n",
    "# type = 'cheby2'\n",
    "order = 2\n",
    "\n",
    "# Pass-band ripple (rp, dB) and stop-band attenuation (rs, dB) used by\n",
    "# cheby1/cheby2/ellip/bessel designs.  (The original defined these twice;\n",
    "# the duplicate block was removed.)\n",
    "rp = 0.1\n",
    "rs = 80\n",
    "\n",
    "# setup filter cutoff frequency (normalized to Nyquist)\n",
    "wn1 = 0.1          # low-pass cutoff\n",
    "wn2 = [0.04, 0.1]  # band-pass [low, high]\n",
    "wn3 = 0.04         # high-pass cutoff\n",
    "delta_t = 0.1      # sample interval — presumably seconds; TODO confirm units"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Derived file paths for this run.  (The unused `input_base` was removed:\n",
    "# it prepended input_dir to a path that already contained it.)\n",
    "lowpass_file = intermediate_dir + basename + '_lowpass.csv'\n",
    "features_file = intermediate_dir + basename + '_features.csv'\n",
    "features_bandpass_file = intermediate_dir + basename + '_features_bandpass.csv'\n",
    "\n",
    "print('read input file: ', input_file)\n",
    "# sep=r'\\s+' replaces delim_whitespace=True, which is deprecated since\n",
    "# pandas 2.2 (same whitespace-splitting behavior).\n",
    "df = pd.read_csv(input_file, sep=r'\\s+')\n",
    "\n",
    "# Optional low-pass pre-filter on the first 4 columns.\n",
    "if _USING_LOWPASS:\n",
    "    df.iloc[:,:4] = pd_lowpass(df.iloc[:,:4], wn1, order=order, type=type, rp=rp, rs=rs)\n",
    "    print('save lowpass file:', lowpass_file)\n",
    "    df.to_csv(lowpass_file, float_format=float_format, sep='\\t', index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 2. 构造（16+1）项训练数据并做带通滤波（高通滤波）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# construct 16 feature columns + 1 label column\n",
    "df = construct_features(df, delta_t=delta_t, base_name=basename, output_dir=intermediate_dir)\n",
    "print('Save features_file: ', features_file)\n",
    "df.to_csv(features_file, float_format=float_format, sep='\\t', index=False)\n",
    "\n",
    "# Band-pass (or high-pass) filter the (16 features + 1 label) columns.\n",
    "print('对(16特征+1标签)进行带通滤波(高通滤波)')\n",
    "# Simplified: the original condition `not _USING_HIGHPASS or\n",
    "# _USING_HIGHPASS==False` was redundant.\n",
    "if _USING_HIGHPASS:\n",
    "  df = pd_highpass(df, wn3, order=order, type=type, rp=rp, rs=rs)\n",
    "  print('Perform highpass.')\n",
    "else:\n",
    "  df = pd_bandpass(df, wn2, order=order, type=type, rp=rp, rs=rs)\n",
    "  print('Perform bandpass.')\n",
    "\n",
    "print('Save filtered features: ', features_bandpass_file)\n",
    "df.to_csv(features_bandpass_file, float_format=float_format, sep='\\t', index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 3. 计算补偿数据的改善比"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# calc coefficients: model_type = LR, RidgeCV, Ridge, PLSR, SM\n",
    "#model_types = ['OLS', 'Ridge', 'SM', 'PLSR', 'BayesianRidge']\n",
    "model_types = ['BayesianRidge']\n",
    "ir = None  # becomes (std(label), std(residual), improvement ratio)\n",
    "for model_type in model_types:\n",
    "    # NOTE(review): `coef` and `ir` leak out of the loop; the later cells\n",
    "    # consume the values from the LAST model in model_types.\n",
    "    _model, coef = calc_model(df, model_type)\n",
    "\n",
    "    X_train = df.iloc[:, :-1]   # all columns except the last (features)\n",
    "    Y_train = df.iloc[:, -1]    # last column (label)\n",
    "    # X_train = df.iloc[:, :16]   # first 16 columns\n",
    "    # Y_train = df.iloc[:, 16]    # 17th column\n",
    "\n",
    "    # Improvement ratio = std(label) / std(residual); larger is better.\n",
    "    Y_Pred = _model.predict(X_train)\n",
    "    STD_Y = np.std(Y_train)\n",
    "    STD_Err = np.std(Y_train - Y_Pred) # type: ignore\n",
    "    ir = STD_Y, STD_Err, STD_Y / STD_Err # type: ignore\n",
    "    print('model_coef:%s\\n' % model_type, coef)\n",
    "    print('改善比:  %2.6f, %2.6f, %2.6f' % ir)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 4. 计算butterworth参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from scipy import signal\n",
    "\n",
    "def design_filter(wn, btype, output='ba'):\n",
    "    \"\"\"Design one filter of the globally selected family.\n",
    "\n",
    "    Reads the globals `type`, `order`, `rp`, `rs`.  Any unknown family\n",
    "    (including 'ellip') falls through to butterworth, matching the\n",
    "    original cell's behavior.\n",
    "    \"\"\"\n",
    "    if type == 'cheby1':\n",
    "        return signal.cheby1(order, rp, wn, btype=btype, output=output) # type: ignore\n",
    "    elif type == 'cheby2':\n",
    "        return signal.cheby2(order, rs, wn, btype=btype, output=output) # type: ignore\n",
    "    elif type == 'bessel':\n",
    "        return signal.bessel(order, wn, btype=btype, output=output) # type: ignore\n",
    "    else:\n",
    "        return signal.butter(order, wn, btype=btype, output=output) # type: ignore\n",
    "\n",
    "w1 = wn1 # 0.1\n",
    "w2 = wn2 # [0.04, 0.1]\n",
    "w3 = wn3 # 0.04\n",
    "\n",
    "# Transfer-function (b, a) coefficients.\n",
    "b1, a1 = design_filter(w1, 'low')\n",
    "print('b1:', b1)\n",
    "print('a1:', a1)\n",
    "\n",
    "b2, a2 = design_filter(w2, 'band')\n",
    "print('b2:', b2)\n",
    "print('a2:',a2)\n",
    "\n",
    "# BUG FIX: the original designed the high-pass from `w1` (the low-pass\n",
    "# cutoff) instead of `w3`.\n",
    "b3, a3 = design_filter(w3, 'high')\n",
    "\n",
    "# Second-order-sections form of the same three filters.\n",
    "sos1 = design_filter(w1, 'low', output='sos')\n",
    "print('sos1:\\n',sos1)\n",
    "sos2 = design_filter(w2, 'band', output='sos')\n",
    "print('sos2:\\n',sos2)\n",
    "sos3 = design_filter(w3, 'high', output='sos')\n",
    "print('sos3:\\n',sos3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 5. 输出补偿系数、改善比到json文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "# Bundle everything the downstream compensation code needs into one dict.\n",
    "# `sos3` was computed but never exported by the original cell; it is added\n",
    "# here for consistency with b3/a3 (a backward-compatible new key).\n",
    "out_dict = {'name':model_types[-1], \n",
    "            'ir':ir, 'coef':coef.tolist(), \n",
    "            'wn1':wn1, 'wn2':wn2, 'wn3':wn3,\n",
    "            'b1':b1.tolist(), 'a1':a1.tolist(),\n",
    "            'b2':b2.tolist(), 'a2':a2.tolist(),\n",
    "            'b3':b3.tolist(), 'a3':a3.tolist(),\n",
    "            'sos1':sos1.tolist(), 'sos2':sos2.tolist(), 'sos3':sos3.tolist(), # type: ignore\n",
    "            '_USING_SOS':_USING_SOS, 'LowPass':_USING_LOWPASS, 'HighPass':_USING_HIGHPASS,\n",
    "            'order':order, 'type':type, 'rp':rp, 'rs':rs, 'dt':delta_t}\n",
    "\n",
    "# Dead intermediate assignments removed: the original set json_file twice in\n",
    "# each branch and only the last value was ever used.\n",
    "json_file = output_dir + basename + '_compensation' + '%s' % order + '.json'\n",
    "if not _USING_LOWPASS:\n",
    "  json_file = output_dir + basename + '_compensation' + '%s' % order + '_nolp.json'\n",
    "\n",
    "json_object = json.dumps(out_dict, indent=4)\n",
    "print(json_object)\n",
    "with open(json_file, 'w') as ofile:\n",
    "  json.dump(out_dict, ofile)\n",
    "print(\"Write to json file: \", json_file)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 6. 运行补偿"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Apply the fitted compensation model to an operational flight file.\n",
    "filename = '7colsY2_op9999.txt'\n",
    "# filename = '7colsY2_op2110.txt'\n",
    "# filename = '7colUAV_op1010.txt'\n",
    "basename = os.path.splitext(filename)[0]\n",
    "input_file = input_dir + filename\n",
    "print('Read operation data from: ', input_file)\n",
    "# sep=r'\\s+' replaces delim_whitespace=True, deprecated since pandas 2.2.\n",
    "df = pd.read_csv(input_file, sep=r'\\s+')\n",
    "\n",
    "# Same optional low-pass as applied to the training data.\n",
    "if _USING_LOWPASS:\n",
    "  print(\"Do lowpass for the first 4 columns of input.\")\n",
    "  df.iloc[:,:4] = pd_lowpass(df.iloc[:,:4], wn1, order=order, type=type, rp=rp, rs=rs)\n",
    "else:\n",
    "  print(\"Don't Do lowpass for the first 4 columns of input.\")\n",
    "\n",
    "# Keep a copy of the (optionally low-passed) 4th column.\n",
    "df['未补磁低通1'] = df.iloc[:,3]\n",
    "\n",
    "print('构造16+1训练数据')\n",
    "df_features = construct_features(df, delta_t = delta_t, base_name=basename, output_dir=intermediate_dir)\n",
    "\n",
    "df_features_file = intermediate_dir + basename + '_features.csv'\n",
    "print('Write features to: ', df_features_file)\n",
    "df_features.to_csv(df_features_file)\n",
    "\n",
    "# Band/high-pass the features; the original's redundant condition\n",
    "# `not _USING_HIGHPASS or _USING_HIGHPASS==False` was simplified.\n",
    "if _USING_HIGHPASS:\n",
    "  df_features = pd_highpass(df_features, wn3, order=order, type=type, rp=rp, rs=rs)\n",
    "else:\n",
    "  df_features = pd_bandpass(df_features, wn2, order=order, type=type, rp=rp, rs=rs)\n",
    "\n",
    "df_features_bandpass_file = intermediate_dir + basename + '_features_bandpass.csv'\n",
    "print('Write filtered features to: ', df_features_bandpass_file)\n",
    "df_features.to_csv(df_features_bandpass_file)\n",
    "\n",
    "# Prepare output columns (to be appended to the original file).\n",
    "df['未补磁高通1'] = df_features.iloc[:,-1]\n",
    "compensation = calc_compensation(df_features, coef)\n",
    "df['补偿值1'] = compensation\n",
    "df['已补磁高通1'] = df['未补磁高通1'] - compensation\n",
    "df['已补磁总场1'] = df['未补磁总场'] - compensation\n",
    "\n",
    "compensated_file = output_dir + basename + '_final'  + '%s' % order + '.txt'\n",
    "if not _USING_LOWPASS:\n",
    "  compensated_file = output_dir + basename + '_final_nolp.txt'\n",
    "print('Write to final compensated file: ', compensated_file)\n",
    "df.to_csv(compensated_file, float_format=float_format, sep='\\t', index=False, encoding='gbk')\n",
    "\n",
    "# Also save just the last 5 (appended) columns for quick inspection.\n",
    "df5 = df.iloc[:,-5:]\n",
    "df5_file = intermediate_dir + basename + '_output'  + '%s' % order + '.txt'\n",
    "print('Write to final5 output file: ', df5_file)\n",
    "df5.to_csv(df5_file, float_format=float_format, sep='\\t', index=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
