import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import classification_report
import joblib  # 用于保存模型
import xgboost as xgb
import lightgbm as lgb
import catboost as cb
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader

# 1. Load the data.
# FIX(review): the data file is a .csv, but the original code opened it with
# pd.ExcelFile / pd.read_excel, which raises for CSV input (and the comment
# mentioned 'Sheet2' while the code read 'Sheet1'). Read it as CSV instead.
file_path = r'D:\develop\PythonCode\python基础\附_项目实战\十_反无人机大赛\data\train.csv'
data = pd.read_csv(file_path)

# 2. Inspect the first few rows.
print("数据前几行：\n", data.head())

# 3. Check for missing values.
print("缺失值统计：\n", data.isnull().sum())

# 4. Data cleaning: drop any row containing missing values.
cleaned_data = data.dropna()

# 5. Separate features and label.
# Feature columns: azimuth, slant range, relative height, radial velocity,
# record time, and radar cross-section; '标签' is the class label.
FEATURE_COLUMNS = ['目标方位角(°)', '目标斜距(m)', '相对高度(m)', '径向速率(m/s)', '记录时间(s)', 'RCS']
X = cleaned_data[FEATURE_COLUMNS]
y = cleaned_data['标签']

# 6. Split into train and test sets (70% train, 30% test).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# 7. Standardize features — fit the scaler on the training split only to
# avoid leaking test-set statistics into training.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)


# 10. Model training and evaluation.
def train_and_evaluate_model(model, model_name, X_tr=None, y_tr=None, X_te=None, y_te=None):
    """Fit *model*, print and save its classification report, and persist it.

    Backward compatible: when the optional data arguments are omitted, the
    module-level split (X_train_scaled / y_train / X_test_scaled / y_test)
    is used, matching the original behavior.

    Args:
        model: any fitted-API estimator exposing fit() / predict().
        model_name: label used in console output and output file names.
        X_tr, y_tr: training features / labels (default: module globals).
        X_te, y_te: test features / labels (default: module globals).
    """
    X_tr = X_train_scaled if X_tr is None else X_tr
    y_tr = y_train if y_tr is None else y_tr
    X_te = X_test_scaled if X_te is None else X_te
    y_te = y_test if y_te is None else y_te

    model.fit(X_tr, y_tr)
    y_pred = model.predict(X_te)
    report = classification_report(y_te, y_pred)
    print(f"{model_name} 分类报告：\n", report)
    joblib.dump(model, f"{model_name}_model.pkl")  # persist the fitted model
    # FIX: explicit UTF-8 — without it the platform default encoding (e.g.
    # GBK on Chinese-locale Windows) is used, which can raise or corrupt text.
    with open(f"{model_name}_classification_report.txt", "w", encoding="utf-8") as f:
        f.write(report)


# Instantiate every classical classifier up front; fixed random_state keeps
# the stochastic models reproducible.
rf_model = RandomForestClassifier(random_state=42)
svm_model = SVC(random_state=42)
lr_model = LogisticRegression(random_state=42)
knn_model = KNeighborsClassifier()
dt_model = DecisionTreeClassifier(random_state=42)
gb_model = GradientBoostingClassifier(random_state=42)
et_model = ExtraTreesClassifier(random_state=42)
xgb_model = xgb.XGBClassifier(random_state=42)
nb_model = GaussianNB()
lgb_model = lgb.LGBMClassifier(random_state=42)
cb_model = cb.CatBoostClassifier(random_state=42, verbose=0)  # verbose=0 silences per-iteration logs

# Train and evaluate each model in turn via the shared helper; the order
# matches the original a) .. k) sequence.
_classifier_runs = [
    ("随机森林", rf_model),        # a) random forest
    ("支持向量机", svm_model),     # b) support vector machine
    ("逻辑回归", lr_model),        # c) logistic regression
    ("K近邻", knn_model),          # d) k-nearest neighbors
    ("决策树", dt_model),          # e) decision tree
    ("梯度提升树", gb_model),      # f) gradient boosting
    ("极限随机树", et_model),      # g) extra trees
    ("XGBoost", xgb_model),        # h) XGBoost
    ("朴素贝叶斯", nb_model),      # i) Gaussian naive Bayes
    ("LightGBM", lgb_model),       # j) LightGBM
    ("CatBoost", cb_model),        # k) CatBoost
]
for _label, _clf in _classifier_runs:
    train_and_evaluate_model(_clf, _label)


# l) PyTorch neural-network model
class NeuralNetwork(nn.Module):
    """Feed-forward MLP classifier: input -> 64 -> 32 -> num_classes logits.

    The forward pass applies ReLU after each hidden layer and returns raw
    logits (no softmax), as expected by nn.CrossEntropyLoss.
    """

    def __init__(self, input_size, num_classes):
        super(NeuralNetwork, self).__init__()
        # Attribute names are kept identical to the original so that saved
        # state_dict files remain loadable.
        self.layer1 = nn.Linear(input_size, 64)
        self.layer2 = nn.Linear(64, 32)
        self.output = nn.Linear(32, num_classes)

    def forward(self, x):
        hidden = self.layer1(x).relu()
        hidden = self.layer2(hidden).relu()
        return self.output(hidden)


def train_and_evaluate_nn():
    """Train the NeuralNetwork on the module-level split, report, and save it.

    Uses the module-level X_train_scaled / X_test_scaled / y_train / y_test.
    Assumes class labels are non-negative integers, as required by
    nn.CrossEntropyLoss; labels need not be contiguous.
    """
    # Convert numpy arrays / pandas Series to tensors.
    X_train_tensor = torch.tensor(X_train_scaled, dtype=torch.float32)
    X_test_tensor = torch.tensor(X_test_scaled, dtype=torch.float32)
    y_train_tensor = torch.tensor(y_train.values, dtype=torch.long)
    y_test_tensor = torch.tensor(y_test.values, dtype=torch.long)

    # Mini-batch loader over the training split.
    train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
    train_loader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)

    # Model, loss, and optimizer.
    input_size = X_train_scaled.shape[1]
    # FIX: len(y_train.unique()) breaks when labels are non-contiguous (e.g.
    # {0, 2}) or a class appears only in the test split — CrossEntropyLoss
    # indexes logits by label id, so size the output by the max label + 1.
    # Identical to the original for contiguous 0..K-1 labels.
    num_classes = int(torch.cat([y_train_tensor, y_test_tensor]).max().item()) + 1
    model = NeuralNetwork(input_size=input_size, num_classes=num_classes)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Training loop.
    num_epochs = 20
    model.train()  # FIX: explicit training mode (matters if dropout/BN is ever added)
    for epoch in range(num_epochs):
        for X_batch, y_batch in train_loader:
            # Forward pass.
            outputs = model(X_batch)
            loss = criterion(outputs, y_batch)

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Loss of the last batch of the epoch, as in the original.
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

    # Evaluation on the held-out test split.
    model.eval()  # FIX: switch to inference mode before evaluating
    with torch.no_grad():
        outputs = model(X_test_tensor)
        _, predicted = torch.max(outputs, 1)
        report = classification_report(y_test_tensor.numpy(), predicted.numpy())
        print(f"PyTorch 神经网络 分类报告：\n{report}")
        # Save the model weights.
        torch.save(model.state_dict(), "pytorch_nn_model.pth")
        # Also save the report to disk, for consistency with the sklearn models.
        with open("PyTorch神经网络_classification_report.txt", "w", encoding="utf-8") as f:
            f.write(report)


train_and_evaluate_nn()
