{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "35f2c8ea",
   "metadata": {},
   "source": [
    "# 迁移学习"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8aeb4ee1",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import joblib\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.model_selection import train_test_split\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import matplotlib.pyplot as plt\n",
    "import math\n",
    "import xgboost as xgb\n",
    "from sklearn.multioutput import MultiOutputRegressor\n",
    "from sklearn.preprocessing import StandardScaler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "534f6df3",
   "metadata": {},
   "outputs": [],
   "source": [
    "data = pd.read_excel('D:/data/399071348888_2/123.xlsx')\n",
    "\n",
    "# --- Encode text status columns as numeric codes ---\n",
    "# NOTE(review): extract_field() and Span() are not defined anywhere in this\n",
    "# notebook -- they come from kernel state left by another notebook, so this\n",
    "# cell fails on a fresh Restart & Run All. Confirm where they live.\n",
    "data['活动状态'] = data['设备状态'].apply(lambda x: extract_field(x, 6))\n",
    "data['活动状态'] = data['活动状态'].map({'静止状态':0,'运动状态':1})\n",
    "\n",
    "data['定位方式'] = data['设备状态'].apply(lambda x: extract_field(x, 1))\n",
    "data['定位方式'] = data['定位方式'].map({'GPS有效定位':0,'GPS无效定位':1,'LBS':1})\n",
    "\n",
    "data = data[['定位时间','经度','纬度','活动状态','定位方式','方向','速度']]\n",
    "\n",
    "# Lag features: previous fix's state, heading, speed and position\n",
    "data['前活动状态'] = data['活动状态'].shift(1)\n",
    "data['前定位方式'] = data['定位方式'].shift(1)\n",
    "data['前方向'] = data['方向'].shift(1)\n",
    "data['前速度'] = data['速度'].shift(1)\n",
    "data['前经度'] = data['经度'].shift(1)\n",
    "data['前纬度'] = data['纬度'].shift(1)\n",
    "\n",
    "# Distance between consecutive points (filled in by Span)\n",
    "data['距离差'] = np.nan  # np.NaN was removed in NumPy 2.0; use np.nan\n",
    "data = Span(data)\n",
    "\n",
    "# Time features\n",
    "data['定位时间'] = pd.to_datetime(data['定位时间'])  # ensure datetime dtype\n",
    "data['时间差'] = data['定位时间'].diff().dt.total_seconds()  # seconds between fixes\n",
    "data['时间差'] = data['时间差'].fillna(0).astype(int)  # whole seconds as int\n",
    "data['year'] = data['定位时间'].dt.year\n",
    "data['month'] = data['定位时间'].dt.month\n",
    "data['day'] = data['定位时间'].dt.day\n",
    "data['hour'] = data['定位时间'].dt.hour\n",
    "data['minute'] = data['定位时间'].dt.minute\n",
    "data['second'] = data['定位时间'].dt.second\n",
    "\n",
    "# Acceleration from V = V0 + a*t  =>  a = (V - V0) / t; a zero time delta\n",
    "# yields +/-inf here, which is zeroed out below\n",
    "data['加速度'] = (data['速度'] - data['前速度']) / data['时间差']\n",
    "\n",
    "data = data.dropna()  # drops the first row, whose lag columns are NaN\n",
    "\n",
    "# Drop the raw timestamp column now that it is fully decomposed\n",
    "data = data.drop(columns=['定位时间'])\n",
    "data.info()\n",
    "\n",
    "# Replace the inf values produced by zero time deltas with 0\n",
    "data.replace([np.inf, -np.inf], 0, inplace=True)\n",
    "data.info()\n",
    "\n",
    "# Feature and label columns\n",
    "features = ['year', 'month', 'day', 'hour', 'minute', 'second', '活动状态', '定位方式' ,'方向','速度','前经度', '前纬度','前活动状态','前定位方式','前方向','前速度','距离差','时间差','加速度']\n",
    "labels = ['经度', '纬度']\n",
    "\n",
    "X = data[features].values\n",
    "y = data[labels].values\n",
    "\n",
    "# Scale features and labels to [0, 1]\n",
    "scaler_X = MinMaxScaler()\n",
    "scaler_y = MinMaxScaler()\n",
    "\n",
    "X_scaled = scaler_X.fit_transform(X)\n",
    "y_scaled = scaler_y.fit_transform(y)\n",
    "\n",
    "# Build sliding windows: each sample is `time_steps` rows of features and\n",
    "# the label of the row that immediately follows the window\n",
    "def create_dataset(X, y, time_steps=1):\n",
    "    Xs, ys = [], []\n",
    "    for i in range(len(X) - time_steps):\n",
    "        Xs.append(X[i:i + time_steps])\n",
    "        ys.append(y[i + time_steps])\n",
    "    return np.array(Xs), np.array(ys)\n",
    "time_steps = 1\n",
    "X_seq, y_seq = create_dataset(X_scaled, y_scaled, time_steps)\n",
    "\n",
    "# Train/test split (random; NOTE(review): for time-series data a\n",
    "# chronological split may be more appropriate -- kept as-is on purpose)\n",
    "X_train, X_test, y_train, y_test = train_test_split(X_seq, y_seq, test_size=0.2, random_state=42)\n",
    "X_test.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7a0c9c2a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild the architecture and load the stage-0 pre-trained weights.\n",
    "# NOTE(review): LSTMModel, input_size, hidden_size, num_layers, output_size,\n",
    "# dropout and criterion are not defined in this notebook -- they rely on\n",
    "# kernel state from the original training notebook. Confirm before a fresh run.\n",
    "model = LSTMModel(input_size, hidden_size, num_layers, output_size, dropout)\n",
    "\n",
    "# Load the pre-trained model parameters\n",
    "model.load_state_dict(torch.load('D:/data/0280/lstm_model_3.pth'))\n",
    "\n",
    "# Switch to training mode for fine-tuning\n",
    "model.train()\n",
    "\n",
    "# Optionally freeze the recurrent layers and fine-tune only the head:\n",
    "# for param in model.lstm.parameters():\n",
    "#     param.requires_grad = False\n",
    "\n",
    "# Fresh optimizer for fine-tuning\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "# Build the training tensors once instead of re-allocating them every epoch\n",
    "X_train_t = torch.tensor(X_train, dtype=torch.float32)\n",
    "y_train_t = torch.tensor(y_train, dtype=torch.float32)\n",
    "\n",
    "# Full-batch fine-tuning on the target data\n",
    "num_finetune_epochs = 1000\n",
    "for epoch in range(num_finetune_epochs):\n",
    "    outputs = model(X_train_t)\n",
    "    loss = criterion(outputs, y_train_t)\n",
    "    optimizer.zero_grad()\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "    print(f'Finetune Epoch {epoch+1}/{num_finetune_epochs}, Loss: {loss.item():.7f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ca9f9018",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate the fine-tuned model on the held-out split with gradients disabled\n",
    "model.eval()\n",
    "with torch.no_grad():\n",
    "    val_outputs = model(torch.tensor(X_test, dtype=torch.float32))\n",
    "    val_loss = criterion(val_outputs, torch.tensor(y_test, dtype=torch.float32))\n",
    "    print(f'Validation Loss: {val_loss.item()}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b029d498",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the stage-1 fine-tuned weights (loaded again by the second pass below)\n",
    "torch.save(model.state_dict(), 'D:/data/0280/lstm_model_4.pth')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "58d3d7f3",
   "metadata": {},
   "source": [
    "# 第二次"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "19fe7211",
   "metadata": {},
   "outputs": [],
   "source": [
    "data = pd.read_excel('D:/data/399070622788_3/123.xlsx')\n",
    "\n",
    "def train(data):\n",
    "    # Preprocess one tracker's data, fine-tune the LSTM saved by the previous\n",
    "    # stage on it, save the result, and return the fine-tuned model.\n",
    "    # NOTE(review): extract_field, Span, LSTMModel, input_size, hidden_size,\n",
    "    # num_layers, output_size and criterion come from kernel state, not this\n",
    "    # notebook -- confirm before a fresh Restart & Run All.\n",
    "\n",
    "    # --- Encode text status columns as numeric codes ---\n",
    "    data['活动状态'] = data['设备状态'].apply(lambda x: extract_field(x, 6))\n",
    "    data['活动状态'] = data['活动状态'].map({'静止状态':0,'运动状态':1})\n",
    "\n",
    "    data['定位方式'] = data['设备状态'].apply(lambda x: extract_field(x, 1))\n",
    "    data['定位方式'] = data['定位方式'].map({'GPS有效定位':0,'GPS无效定位':1,'LBS':1})\n",
    "\n",
    "    data = data[['定位时间','经度','纬度','活动状态','定位方式','方向','速度']]\n",
    "\n",
    "    # Lag features: previous fix's state, heading, speed and position\n",
    "    data['前活动状态'] = data['活动状态'].shift(1)\n",
    "    data['前定位方式'] = data['定位方式'].shift(1)\n",
    "    data['前方向'] = data['方向'].shift(1)\n",
    "    data['前速度'] = data['速度'].shift(1)\n",
    "    data['前经度'] = data['经度'].shift(1)\n",
    "    data['前纬度'] = data['纬度'].shift(1)\n",
    "\n",
    "    # Distance between consecutive points (filled in by Span)\n",
    "    data['距离差'] = np.nan  # np.NaN was removed in NumPy 2.0; use np.nan\n",
    "    data = Span(data)\n",
    "\n",
    "    # Time features\n",
    "    data['定位时间'] = pd.to_datetime(data['定位时间'])\n",
    "    data['时间差'] = data['定位时间'].diff().dt.total_seconds()\n",
    "    data['时间差'] = data['时间差'].fillna(0).astype(int)\n",
    "    data['year'] = data['定位时间'].dt.year\n",
    "    data['month'] = data['定位时间'].dt.month\n",
    "    data['day'] = data['定位时间'].dt.day\n",
    "    data['hour'] = data['定位时间'].dt.hour\n",
    "    data['minute'] = data['定位时间'].dt.minute\n",
    "    data['second'] = data['定位时间'].dt.second\n",
    "\n",
    "    # Acceleration: a = (V - V0) / t; zero time deltas give inf, zeroed below\n",
    "    data['加速度'] = (data['速度'] - data['前速度']) / data['时间差']\n",
    "\n",
    "    data = data.dropna()  # first row has NaN lag columns\n",
    "\n",
    "    data = data.drop(columns=['定位时间'])\n",
    "    data.info()\n",
    "\n",
    "    # Replace the inf values produced by zero time deltas with 0\n",
    "    data.replace([np.inf, -np.inf], 0, inplace=True)\n",
    "    data.info()\n",
    "\n",
    "    features = ['year', 'month', 'day', 'hour', 'minute', 'second', '活动状态', '定位方式' ,'方向','速度','前经度', '前纬度','前活动状态','前定位方式','前方向','前速度','距离差','时间差','加速度']\n",
    "    labels = ['经度', '纬度']\n",
    "\n",
    "    X = data[features].values\n",
    "    y = data[labels].values\n",
    "\n",
    "    # Scale features and labels to [0, 1]\n",
    "    scaler_X = MinMaxScaler()\n",
    "    scaler_y = MinMaxScaler()\n",
    "\n",
    "    X_scaled = scaler_X.fit_transform(X)\n",
    "    y_scaled = scaler_y.fit_transform(y)\n",
    "\n",
    "    # Sliding windows for the LSTM\n",
    "    def create_dataset(X, y, time_steps=1):\n",
    "        Xs, ys = [], []\n",
    "        for i in range(len(X) - time_steps):\n",
    "            Xs.append(X[i:i + time_steps])\n",
    "            ys.append(y[i + time_steps])\n",
    "        return np.array(Xs), np.array(ys)\n",
    "    time_steps = 1\n",
    "    X_seq, y_seq = create_dataset(X_scaled, y_scaled, time_steps)\n",
    "\n",
    "    X_train, X_test, y_train, y_test = train_test_split(X_seq, y_seq, test_size=0.2, random_state=42)\n",
    "\n",
    "    # Rebuild the architecture and load the stage-1 weights\n",
    "    dropout = 0.5\n",
    "    model = LSTMModel(input_size, hidden_size, num_layers, output_size, dropout)\n",
    "    model.load_state_dict(torch.load('D:/data/0280/lstm_model_4.pth'))\n",
    "    model.train()\n",
    "\n",
    "    # Fresh optimizer for fine-tuning\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "    # Build the training tensors once instead of re-allocating them every epoch\n",
    "    X_train_t = torch.tensor(X_train, dtype=torch.float32)\n",
    "    y_train_t = torch.tensor(y_train, dtype=torch.float32)\n",
    "\n",
    "    num_finetune_epochs = 1000\n",
    "    for epoch in range(num_finetune_epochs):\n",
    "        outputs = model(X_train_t)\n",
    "        loss = criterion(outputs, y_train_t)\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        print(f'Finetune Epoch {epoch+1}/{num_finetune_epochs}, Loss: {loss.item():.7f}')\n",
    "\n",
    "    # Validate on the held-out split\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        val_outputs = model(torch.tensor(X_test, dtype=torch.float32))\n",
    "        val_loss = criterion(val_outputs, torch.tensor(y_test, dtype=torch.float32))\n",
    "        print(f'Validation Loss: {val_loss.item()}')\n",
    "\n",
    "    # Save the stage-2 fine-tuned weights\n",
    "    torch.save(model.state_dict(), 'D:/data/0280/lstm_model_5.pth')\n",
    "    return model\n",
    "\n",
    "# BUG FIX: train() was defined but never called, so this second fine-tuning\n",
    "# pass never actually ran. Call it and keep the returned model.\n",
    "model = train(data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ce66b965",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this re-saves the module-level `model` to the same path that\n",
    "# train() already writes; if train() was never called, this overwrites the\n",
    "# stage-2 file with the stage-1 model -- verify which save is intended.\n",
    "torch.save(model.state_dict(), 'D:/data/0280/lstm_model_5.pth')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "437691f0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
