import os
import re
from typing import List, Dict
from ast import literal_eval
from collections import namedtuple

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
import platform
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils import data

# Ignore font warnings — but only after the fonts have been configured below,
# so that other important warnings are not accidentally suppressed.
# warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib.font_manager")

# Ensure minus signs render correctly even with substituted fonts.
plt.rcParams["axes.unicode_minus"] = False

# Cross-platform font fallback lists, ordered by priority:
# prefer common system sans-serif fonts, then generic fallbacks.
font_families = {
    "Windows": ["Arial", "SimHei", "sans-serif"],  # common Windows fonts
    "Darwin": ["Helvetica", "Arial Unicode MS", "sans-serif"],  # common macOS fonts
    "Linux": ["DejaVu Sans", "Arial", "sans-serif"]  # common Linux fonts
}

# Pick the font list for the current operating system.
system = platform.system()
if system in font_families:
    plt.rcParams["font.family"] = font_families[system]
else:
    # Unknown platform: fall back to a generic sans-serif font.
    plt.rcParams["font.family"] = ["sans-serif"]

# Fonts are configured now, so font-manager warnings can be ignored safely.
warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib.font_manager")

# ---------------------
# Data-handling module (utils.py)
# ---------------------

# Folders where generated/collected flight data is stored.
TRAIN_DATA_FOLDER = './data/train'
TEST_DATA_FOLDER = './data/test'
# Metadata fields encoded (underscore-separated) in each CSV filename.
filename_fields = ['vehicle', 'trajectory', 'method', 'wind_condition']


def save_data(data_list: List[dict], folder_path: str, fields=('t', 'acc', 'vel', 'gyro', 'pos', 'fa')):
    """Save each data dict in `data_list` to a CSV file inside `folder_path`.

    Three-dimensional fields (acc, vel, gyro, pos, fa) are flattened into
    per-axis columns such as `acc_x`, `acc_y`, `acc_z`; scalar series become
    a single column.  The output filename joins the metadata values listed in
    `filename_fields` with underscores.
    """
    if not os.path.isdir(folder_path):
        os.makedirs(folder_path)
        print(f'Created data folder {folder_path}')

    for data in data_list:
        df = pd.DataFrame()

        missing_fields = []
        for field in fields:
            try:
                if field in ['acc', 'vel', 'gyro', 'pos', 'fa']:  # 3-D vector data
                    for i, axis in enumerate(['x', 'y', 'z']):
                        df[f'{field}_{axis}'] = data[field][:, i].tolist()
                else:
                    df[field] = data[field].tolist()
            except KeyError:
                missing_fields.append(field)

        if missing_fields:
            print(f'Missing fields: {", ".join(missing_fields)}')

        # Build the filename from the metadata carried in the data dict.
        # (The path previously contained a hard-coded placeholder, so every
        # dataset was written to the same "(unknown).csv" file.)
        filename = '_'.join(data[field] for field in filename_fields)
        df.to_csv(f"{folder_path}/{filename}.csv", index=False)


def load_data(folder_path: str, expnames=None, data_type="train") -> List[dict]:
    """Load CSV files from `folder_path` and convert them to data dicts.

    `expnames` selects files: None loads every CSV, a string is treated as a
    regular expression matched against filenames, and a list is treated as
    exact experiment names (without the .csv extension).

    Per-axis columns (e.g. `acc_x`) are reassembled into (N, 3) arrays, the
    filename parts are mapped onto `filename_fields`, and every dict is
    tagged with `data_type`.
    """
    data_list = []

    if not os.path.isdir(folder_path):
        print(f"Directory {folder_path} does not exist")
        return data_list

    if expnames is None:
        filenames = os.listdir(folder_path)
    elif isinstance(expnames, str):  # Treat as a regular expression
        filenames = [f for f in os.listdir(folder_path) if re.search(expnames, f) is not None]
    elif isinstance(expnames, list):
        filenames = [expname + '.csv' for expname in expnames]
    else:
        raise NotImplementedError()

    for filename in filenames:
        # Ignore non-CSV files
        if not filename.endswith('.csv'):
            continue

        try:
            # Load the CSV.  (The path previously contained a hard-coded
            # placeholder, so no file could ever be read back.)
            df = pd.read_csv(f"{folder_path}/{filename}")

            # Convert columns into a dict of arrays
            data = {}
            for field in df.columns:
                if field == 't':  # timestamp column
                    data[field] = df[field].values
                elif '_' in field:  # Process multi-dimensional data
                    parts = field.split('_')
                    if len(parts) < 2:
                        continue

                    base_field, axis = parts[0], parts[1]

                    # Reassemble 3-D vectors (acc_x, vel_y, etc.)
                    if base_field in ['acc', 'vel', 'gyro', 'pos', 'fa'] and axis in ['x', 'y', 'z']:
                        if base_field not in data:
                            data[base_field] = np.zeros((len(df), 3))
                        axis_idx = ['x', 'y', 'z'].index(axis)
                        data[base_field][:, axis_idx] = df[field].values
                    # Known vector field with an unexpected axis suffix
                    elif base_field in ['acc', 'vel', 'gyro', 'pos', 'fa']:
                        print(f"Warning: Dimension identifier {axis} for field {field} is not x,y,z, will be ignored")
                    # Other underscored columns are kept under their base name
                    else:
                        data[base_field] = df[field].values
                else:
                    # Plain scalar columns other than 't' were previously
                    # dropped silently; keep them under their own name.
                    data[field] = df[field].values

            # Attach metadata parsed from the filename.
            # NOTE(review): condition names containing underscores (e.g.
            # "no_wind") split into multiple parts here, shifting the mapping
            # onto filename_fields — confirm the intended naming scheme.
            namesplit = filename.split('.')[0].split('_')
            for i, field in enumerate(filename_fields):
                if i < len(namesplit):
                    data[field] = namesplit[i]

            # Tag with the dataset split (train or test)
            data['data_type'] = data_type

            data_list.append(data)

        except Exception as e:
            print(f"Error loading file {filename}: {str(e)}")
            continue

    return data_list

# Generate simulated sample data
def generate_sample_data(num_samples=1000, wind_condition='no_wind', data_type="train"):
    """Generate simulated UAV sensor data and save it as a CSV file.

    `data_type` selects both the noise level (test data is noisier) and the
    output folder (data/train or data/test).  Returns the generated filename
    (without the folder).
    """
    t = np.linspace(0, 10, num_samples)  # Time series

    # Wind-condition name -> integer severity level (unknown names map to 0)
    wind_level_map = {
        'no_wind': 0,
        'light_wind': 1,
        'moderate_wind': 2,
        'strong_wind': 3,
        'gale_wind': 4
    }
    wind_level = wind_level_map.get(wind_condition, 0)

    # Simulated sensor data; the wind level later shifts the force targets.
    if data_type == "test":
        # Test data uses larger noise to mimic a slightly different distribution
        acc = np.random.normal(0, 1.2, (num_samples, 3))
        vel = np.cumsum(acc, axis=0) * (t[1] - t[0])
        gyro = np.random.normal(0, 0.15, (num_samples, 3))
        pos = np.cumsum(vel, axis=0) * (t[1] - t[0])
    else:
        # Training data
        acc = np.random.normal(0, 1, (num_samples, 3))  # acceleration
        vel = np.cumsum(acc, axis=0) * (t[1] - t[0])  # velocity
        gyro = np.random.normal(0, 0.1, (num_samples, 3))  # gyroscope
        pos = np.cumsum(vel, axis=0) * (t[1] - t[0])  # position

    # Simulated aerodynamic force (the regression target), with wind offsets
    fa = np.zeros((num_samples, 3))

    # Per-axis wind influence coefficients (adjustable to a physical model)
    wind_effect = np.array([
        0.1 * wind_level,  # x-axis wind effect
        0.2 * wind_level,  # y-axis wind effect (typically largest)
        0.05 * wind_level  # z-axis wind effect (typically smallest)
    ])

    fa[:, 0] = 0.5 * acc[:, 0] + 0.2 * vel[:, 0] + wind_effect[0] + np.random.normal(0, 0.1, num_samples)
    fa[:, 1] = 0.5 * acc[:, 1] + 0.2 * vel[:, 1] + wind_effect[1] + np.random.normal(0, 0.1, num_samples)
    fa[:, 2] = 0.5 * acc[:, 2] + 0.2 * vel[:, 2] + wind_effect[2] + np.random.normal(0, 0.1, num_samples)

    # Create DataFrame with per-axis columns
    data = {
        't': t,
        'acc_x': acc[:, 0], 'acc_y': acc[:, 1], 'acc_z': acc[:, 2],
        'vel_x': vel[:, 0], 'vel_y': vel[:, 1], 'vel_z': vel[:, 2],
        'gyro_x': gyro[:, 0], 'gyro_y': gyro[:, 1], 'gyro_z': gyro[:, 2],
        'pos_x': pos[:, 0], 'pos_y': pos[:, 1], 'pos_z': pos[:, 2],
        'fa_x': fa[:, 0], 'fa_y': fa[:, 1], 'fa_z': fa[:, 2]
    }

    df = pd.DataFrame(data)

    # Choose the destination folder by dataset split
    folder = "data/train" if data_type == "train" else "data/test"
    os.makedirs(folder, exist_ok=True)

    # Encode wind condition and split in the filename
    filename = f'drone_{wind_condition}_{data_type}.csv'

    # Write to <folder>/<filename>.  (The path previously contained a
    # hard-coded placeholder, so the CSV never landed where load_data looks.)
    df.to_csv(f'{folder}/{filename}', index=False)

    print(f"Simulated {data_type} data generated: {filename}")
    return filename


# One formatted sub-dataset: inputs X, targets Y, condition label C, metadata.
SubDataset = namedtuple('SubDataset', 'X Y C meta')
# Filled by format_data(): maps each feature name to its column count in X.
feature_len = {}


def format_data(raw_data: List[Dict[str, np.ndarray]],
                features: 'list[str]' = ('acc', 'vel', 'gyro', 'pos'),
                output: str = 'fa',
                hover_pwm_ratio=1.):
    """Format raw data dicts into SubDataset tuples usable by the model.

    Horizontally stacks the requested `features` into X, takes `output` as Y
    (zeros when the field is missing), and uses the sub-dataset index as the
    condition label C.  `hover_pwm_ratio` is currently unused and kept for
    interface compatibility.
    """
    data_list = []
    for i, data in enumerate(raw_data):
        # Collect the requested input features
        X = []
        for feature in features:
            if feature in data:
                X.append(data[feature])
                feature_len[feature] = data[feature].shape[1]  # Typically 3 (x,y,z)
            else:
                print(f"Warning: Feature {feature} not found in data!")

        if not X:
            print(f"Error: Data {i} has no valid features!")
            continue

        X = np.hstack(X)

        # Output labels; fall back to a zero 3-D force when the field is missing
        Y = data[output] if output in data else np.zeros((len(data['t']), 3))

        # Condition label: simply the index of this sub-dataset
        C = i

        # Save the formatted sub-dataset together with its metadata
        data_list.append(SubDataset(X, Y, C, {'method': data.get('method', 'unknown'),
                                              'wind_condition': data.get('wind_condition', 'unknown'),
                                              't': data['t'],
                                              'data_type': data.get('data_type', 'unknown')}))

    return data_list


def plot_subdataset(data, features, title_prefix=''):
    """Visualize one SubDataset: each feature in its own subplot, plus the
    output force in the last subplot.

    `data` is a SubDataset namedtuple; `features` is the ordered list of
    feature names used to build data.X, so its columns can be sliced back out.
    """
    fig, axs = plt.subplots(1, len(features) + 1, figsize=(15, 4))

    # idx walks through the columns of data.X as features are consumed.
    idx = 0
    for i, feature in enumerate(features):
        ax = axs[i]
        if feature in ['acc', 'vel', 'gyro', 'pos']:
            for j in range(3):  # Three dimensions
                ax.plot(data.meta['t'], data.X[:, idx + j], label=f"{feature}_{'xyz'[j]}")
            idx += 3
        else:
            # Scalar feature: a single column
            ax.plot(data.meta['t'], data.X[:, idx])
            idx += 1

        ax.legend()
        ax.set_xlabel('Time [s]')
        ax.set_title(feature)

    # Plot output (force) components in the last panel
    ax = axs[-1]
    for i in range(data.Y.shape[1]):
        ax.plot(data.meta['t'], data.Y[:, i], label=f'fa_{"xyz"[i]}')
    ax.legend()
    ax.set_xlabel('Time [s]')
    ax.set_title('Aerodynamic Force')

    data_type_str = " (Test Data)" if data.meta['data_type'] == "test" else " (Train Data)"
    fig.suptitle(f"{title_prefix} {data.meta['wind_condition']}{data_type_str}: c={data.C}")
    fig.tight_layout()
    plt.show()


# ---------------------
# 模型定义模块 (mlmodel.py)
# ---------------------

import collections

import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils import data

torch.set_default_dtype(torch.float64)
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import random_split

# Named container bundling the feature network (phi), the adversarial
# discriminator (h), and the options dict they were built with.
# (namedtuple() never raises on redefinition, so the previous try/except
# around this statement was dead code and has been removed.)
Model = collections.namedtuple('Model', ['phi', 'h', 'options'])


class Phi_Net(nn.Module):
    """Feature network phi: maps a dim_x input to a dim_a feature vector
    whose last component is fixed to 1 (a constant bias feature)."""

    def __init__(self, options):
        super(Phi_Net, self).__init__()
        # Layer attribute names (fc1..fc4) are part of the checkpoint
        # state-dict format; do not rename them.
        self.fc1 = nn.Linear(options['dim_x'], 50)
        self.fc2 = nn.Linear(50, 60)
        self.fc3 = nn.Linear(60, 50)
        # Only dim_a - 1 outputs are learned; a constant 1 is appended.
        self.fc4 = nn.Linear(50, options['dim_a'] - 1)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        hidden = F.relu(self.fc3(hidden))
        learned = self.fc4(hidden)
        if learned.dim() == 1:
            # Single sample: append the scalar bias feature.
            return torch.cat([learned, torch.ones(1)])
        # Batch: append a column of ones.
        return torch.cat([learned, torch.ones([learned.shape[0], 1])], dim=-1)


# Adversarial discriminator: predicts the condition label from phi features.
class H_Net_CrossEntropy(nn.Module):
    def __init__(self, options):
        super().__init__()
        # The positional Sequential indices (net.0, net.1, ...) appear in
        # checkpoint state-dict keys, so the layer order must stay unchanged.
        layers = [
            nn.Linear(options['dim_a'], 128),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Dropout(0.5),  # regularization against overfitting
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, options['num_c']),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)


# Save a model checkpoint
def save_model(phi_net, h_net, model_name, options, folder='./models'):
    """Save the phi/h networks and their options to `<folder>/<model_name>.pth`.

    `h_net` may be None (e.g. when only the feature network exists); its
    entry is then stored as None so load_model() can detect the absence.
    `folder` defaults to './models' for backward compatibility.
    """
    os.makedirs(folder, exist_ok=True)
    save_dict = {
        'phi_net_state_dict': phi_net.state_dict(),
        'h_net_state_dict': h_net.state_dict() if h_net is not None else None,
        'options': options,
        'model_version': 1.0  # version tag for future compatibility
    }
    torch.save(save_dict, f'{folder}/{model_name}.pth')
    print(f"模型已保存到: {model_name}.pth，包含{len(save_dict)}个键")


# Load a model checkpoint
def load_model(model_name, model_folder='./models/'):
    """Load a saved model, supporting several historical checkpoint formats.

    Returns a Model namedtuple (phi, h, options) with both networks in eval
    mode, or None when the file is missing or cannot be parsed.
    """
    try:
        model_path = f"{model_folder}{model_name}.pth"
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件不存在: {model_path}")

        model = torch.load(model_path, map_location=torch.device('cpu'))

        # Dispatch on the checkpoint format.
        if 'phi_net_state_dict' in model and 'options' in model:
            # New format: options stored alongside the state dicts.
            print(f"加载新格式模型: {model_name}")
            options = model['options']
            phi_net = Phi_Net(options=options)
            phi_net.load_state_dict(model['phi_net_state_dict'])
            phi_net.eval()

            # The h network is optional in this format.
            h_net = None
            if 'h_net_state_dict' in model and model['h_net_state_dict'] is not None:
                h_net = H_Net_CrossEntropy(options=options)
                h_net.load_state_dict(model['h_net_state_dict'])
                h_net.eval()

            return Model(phi=phi_net, h=h_net, options=options)

        elif 'phi_state_dict' in model and 'h_state_dict' in model:
            # Old format: differently named state-dict keys; options optional.
            print(f"加载旧格式模型: {model_name}")
            # Recover options from the checkpoint when present, otherwise
            # fall back to a hard-coded default configuration.
            if 'options' in model:
                options = model['options']
            else:
                print("警告: 模型中未找到options，使用当前配置")
                options = {
                    'dim_x': 12,  # input dimension
                    'dim_y': 3,  # output dimension
                    'num_c': 10,  # assumed number of wind conditions
                    'dim_a': 3,
                    'loss_type': 'crossentropy-loss'
                }

            phi_net = Phi_Net(options=options)
            phi_net.load_state_dict(model['phi_state_dict'])
            phi_net.eval()

            h_net = H_Net_CrossEntropy(options=options)
            h_net.load_state_dict(model['h_state_dict'])
            h_net.eval()

            return Model(phi=phi_net, h=h_net, options=options)

        else:
            # Unknown format: heuristically look for '*state_dict*' keys.
            print(f"模型文件格式不常见，键: {list(model.keys())}")

            state_dict_keys = [k for k in model.keys() if 'state_dict' in k]
            if len(state_dict_keys) >= 1:
                print("尝试从非常规格式加载模型...")

                # Prefer a key containing 'phi'; otherwise take the first one.
                phi_state_dict = None
                for k in state_dict_keys:
                    if 'phi' in k.lower():
                        phi_state_dict = model[k]
                        break

                if phi_state_dict is None and len(state_dict_keys) > 0:
                    phi_state_dict = model[state_dict_keys[0]]

                if phi_state_dict:
                    print("找到phi网络的state_dict")
                    # No options available in this format; use defaults.
                    options = {
                        'dim_x': 12,
                        'dim_y': 3,
                        'num_c': 10,
                        'dim_a': 3,
                        'loss_type': 'crossentropy-loss'
                    }

                    phi_net = Phi_Net(options=options)
                    phi_net.load_state_dict(phi_state_dict)
                    phi_net.eval()

                    # Look for an h-network state dict the same way.
                    h_state_dict = None
                    for k in state_dict_keys:
                        if 'h' in k.lower():
                            h_state_dict = model[k]
                            break

                    h_net = None
                    if h_state_dict:
                        print("找到h网络的state_dict")
                        h_net = H_Net_CrossEntropy(options=options)
                        h_net.load_state_dict(h_state_dict)
                        h_net.eval()

                    return Model(phi=phi_net, h=h_net, options=options)

            raise KeyError(f"模型文件格式不支持，缺少必要的键: {model.keys()}")

    except Exception as e:
        print(f"加载模型失败: {e}")
        return None


# Inspect a model checkpoint's contents
def inspect_model_file(model_name, model_folder='./models/'):
    """Print a summary of a saved checkpoint's keys and return the raw dict.

    Returns None when the file is missing or cannot be loaded.
    """
    # Imported locally: this function previously relied on the module-level
    # `import sys` that only appears much further down the file, making it
    # fragile if called (or reused) before that line executes.
    import sys

    try:
        model_path = f"{model_folder}{model_name}.pth"
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件不存在: {model_path}")

        model = torch.load(model_path, map_location=torch.device('cpu'))
        print(f"模型文件 {model_name}.pth 内容:")
        for key, value in model.items():
            if key in ['phi_net_state_dict', 'h_net_state_dict', 'phi_state_dict', 'h_state_dict']:
                print(f"  {key}: 状态字典，包含{len(value)}个参数")
            elif key == 'options':
                print(f"  {key}: 字典，包含{len(value)}个键: {list(value.keys())}")
            else:
                print(f"  {key}: {type(value)}, 大小: {sys.getsizeof(value)} 字节")
        return model
    except Exception as e:
        print(f"检查模型文件失败: {e}")
        return None


class MyDataset(Dataset):
    """Minimal Dataset wrapping paired inputs/outputs plus a condition id."""

    def __init__(self, inputs, outputs, c):
        self.inputs = inputs
        self.outputs = outputs
        self.c = c  # condition label shared by every sample in this set

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, idx):
        # Every sample carries the sub-dataset's condition label alongside
        # its input/output pair.
        return {'input': self.inputs[idx], 'output': self.outputs[idx], 'c': self.c}


# Softmax over the class dimension, used to turn h-net logits into probabilities.
_softmax = nn.Softmax(dim=1)


# Model validation function
def validation(phi_net, h_net, adapt_input: np.ndarray, adapt_label: np.ndarray, val_input: np.ndarray, options, lam=0):
    """
    Compute model outputs for given adaptation and validation data.

    The linear coefficient matrix `a` is fitted by (ridge-regularized) least
    squares on the adaptation set, then applied to phi features of both sets.

    Returns (adapt_prediction, val_prediction, a, h_output) as numpy arrays;
    h_output is None when no adversarial network is given.
    """
    with torch.no_grad():
        # Least squares on the adaptation set: a = (Phi^T Phi + lam*I)^-1 Phi^T Y
        X = torch.from_numpy(adapt_input)  # K x dim_x
        Y = torch.from_numpy(adapt_label)  # K x dim_y
        Phi = phi_net(X)  # K x dim_a
        Phi_T = Phi.transpose(0, 1)  # dim_a x K
        A = torch.inverse(torch.mm(Phi_T, Phi) + lam * torch.eye(options['dim_a']))  # dim_a x dim_a
        a = torch.mm(torch.mm(A, Phi_T), Y)  # dim_a x dim_y

        # Predictions for validation and adaptation sets; reuse the feature
        # matrices already computed instead of running phi_net again on the
        # same inputs (the original evaluated phi_net three extra times).
        phi_val = phi_net(torch.from_numpy(val_input))  # B x dim_a
        val_prediction = torch.mm(phi_val, a)  # B x dim_y
        adapt_prediction = torch.mm(Phi, a)  # K x dim_y

        # Adversarial network prediction on the validation features
        if h_net is None:
            h_output = None
        else:
            h_output = h_net(phi_val)  # B x num_of_c
            if options['loss_type'] == 'crossentropy-loss':
                h_output = _softmax(h_output)
            h_output = h_output.numpy()

    return adapt_prediction.numpy(), val_prediction.numpy(), a.numpy(), h_output


def vis_validation(*, t, x, y, phi_net, h_net, idx_adapt_start, idx_adapt_end, idx_val_start, idx_val_end, c, options,
                   lam=0):
    """
    Visualize adaptation and validation performance.

    Fits `a` on the adaptation slice of (x, y), predicts on the validation
    slice, and plots true vs. predicted force components per axis, plus the
    h-net condition probabilities in a fourth panel when available.
    Note: `c` is accepted for keyword compatibility but not used in the body.
    """
    adapt_input = x[idx_adapt_start:idx_adapt_end, :]
    val_input = x[idx_val_start:idx_val_end, :]
    adapt_label = y[idx_adapt_start:idx_adapt_end, :]
    y_adapt, y_val, a, h_output = validation(phi_net, h_net, adapt_input, adapt_label, val_input, options, lam=lam)
    print(f'a = {a}')
    print(f"|a| = {np.linalg.norm(a, 'fro')}")

    # Plot over the union of the adaptation and validation windows.
    idx_min = min(idx_adapt_start, idx_val_start)
    idx_max = max(idx_adapt_end, idx_val_end)

    plt.figure(figsize=(15, 3))

    # One subplot per force axis (x, y, z)
    for i in range(3):
        plt.subplot(1, 4, i + 1)
        plt.plot(t[idx_min:idx_max], y[idx_min:idx_max, i], 'k', alpha=0.3, label='True Value')
        plt.plot(t[idx_val_start:idx_val_end], y_val[:, i], label='Validation')
        plt.plot(t[idx_adapt_start:idx_adapt_end], y_adapt[:, i], label='Adaptation')
        plt.legend()
        plt.title(r'$F_{s,' + 'xyz'[i] + '}$')

    # Fourth panel: condition probabilities predicted by the h network
    if h_output is not None:
        plt.subplot(1, 4, 4)
        if options['loss_type'] == 'crossentropy-loss':
            plt.plot(h_output)
            plt.title('Condition Prediction (After Softmax)')
    plt.tight_layout()
    plt.show()


def error_statistics(data_input: np.ndarray, data_output: np.ndarray, phi_net, h_net, options):
    """Calculate baseline and model MSE statistics on the given data.

    Returns a tuple of plain floats:
      error_1 -- MSE of predicting zero (the signal power),
      error_2 -- MSE of predicting the per-axis mean,
      error_3 -- MSE of the phi-net least-squares prediction (adapted on the
                 same data it is evaluated on).
    """
    criterion = nn.MSELoss()

    with torch.no_grad():
        target = torch.from_numpy(data_output)
        # Zero-prediction error
        error_1 = criterion(target, torch.zeros_like(target)).item()
        # Mean-prediction error.  Now returned with .item() — previously this
        # one was a 0-d tensor while the other two were floats, and it mixed
        # a torch tensor with a raw numpy broadcast.
        mean_pred = torch.from_numpy(np.tile(np.mean(data_output, axis=0), (len(data_output), 1)))
        error_2 = criterion(target, mean_pred).item()
        # Model-prediction error
        _, prediction, _, _ = validation(phi_net, h_net, data_input, data_output, data_input, options=options)
        error_3 = criterion(target, torch.from_numpy(prediction)).item()

        return error_1, error_2, error_3


# ---------------------
# Training and Validation Module
# ---------------------

import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

torch.set_default_dtype(torch.float64)
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import random_split

# Set number of worker threads
import sys

# DataLoader worker processes: disabled on Windows, two elsewhere.
if sys.platform == 'win32':
    NUM_WORKERS = 0  # Multiprocessing not supported on Windows
else:
    NUM_WORKERS = 2
print(f'Running on platform: {sys.platform}, setting {NUM_WORKERS} worker threads')

# Make sure the data folders exist before generating/loading data.
os.makedirs(TRAIN_DATA_FOLDER, exist_ok=True)
os.makedirs(TEST_DATA_FOLDER, exist_ok=True)

# Generate training-set data (one CSV per wind condition)
print("Generating training data...")
train_wind_conditions = ['no_wind', 'light_wind', 'moderate_wind', 'strong_wind', 'gale_wind']
for cond in train_wind_conditions:
    generate_sample_data(wind_condition=cond, data_type="train")

# Generate test-set data for the same wind conditions
print("Generating test data...")
test_wind_conditions = ['no_wind', 'light_wind', 'moderate_wind', 'strong_wind', 'gale_wind']
for cond in test_wind_conditions:
    generate_sample_data(wind_condition=cond, data_type="test")

# Configuration parameters
dim_a = 3  # dimension of the learned feature vector (incl. the constant bias)
features = ['acc', 'vel', 'gyro', 'pos']  # Sensor data to use
label = 'fa'  # Aerodynamic force to predict

# Dataset settings
dataset = 'drone_data'
train_dataset_folder = TRAIN_DATA_FOLDER
test_dataset_folder = TEST_DATA_FOLDER

# Model name encodes the dataset, the feature dimension, and the feature list.
model_name = f"{dataset}_dim-a-{dim_a}_{'-'.join(features)}"

# Load the training set
print("Loading training data...")
raw_train_data = load_data(train_dataset_folder, data_type="train")
train_data = format_data(raw_train_data, features=features, output=label)

# Load the test set
print("Loading test data...")
raw_test_data = load_data(test_dataset_folder, data_type="test")
test_data = format_data(raw_test_data, features=features, output=label)

# Visualize the training data
print("Visualizing training data...")
for data_item in train_data:
    plot_subdataset(data_item, features, title_prefix="(Training Data)")

# Visualize the test data
print("Visualizing test data...")
for data_item in test_data:
    plot_subdataset(data_item, features, title_prefix="(Test Data)")

# Derive model dimensions from the loaded data
options = {}
if train_data:  # Ensure data is not empty
    options['dim_x'] = train_data[0].X.shape[1]  # Input dimension
    options['dim_y'] = train_data[0].Y.shape[1]  # Output dimension
    options['num_c'] = len(train_data)  # Number of conditions (wind levels)
    print(f"Input feature dimension: {options['dim_x']}")
    print(f"Output dimension: {options['dim_y']}")
    print(f"Number of wind conditions: {options['num_c']}")
else:
    print("Error: No valid training data loaded!")
    sys.exit(1)

# Hyperparameters
options['features'] = features
options['dim_a'] = dim_a
options['loss_type'] = 'crossentropy-loss'

options['shuffle'] = True  # Whether to shuffle trajectory points
options['K_shot'] = 32  # Number of samples for least squares
options['phi_shot'] = 16  # Reduce batch size to fit memory
options['alpha'] = 0.3  # Adversarial regularization strength
options['learning_rate'] = 1e-4
options['frequency_h'] = 2  # Update h network after this many phi network updates
options['SN'] = 2.0  # Maximum spectral norm for phi network
options['gamma'] = 5.0  # Maximum L2 norm for a
options['num_epochs'] = 100  # Reduce number of epochs for faster testing

# Prepare data loaders for training: for each condition, split its data into
# a training part (2/3, used to update phi) and an adaptation part (1/3,
# used to fit the linear coefficients a).
train_loaders = []
adapt_loaders = []
for i in range(options['num_c']):
    full_set = MyDataset(train_data[i].X, train_data[i].Y, train_data[i].C)

    l = len(train_data[i].X)
    if options['shuffle']:
        # Random 2/3 vs 1/3 split
        train_set, adapt_set = random_split(full_set, [int(2 / 3 * l), l - int(2 / 3 * l)])
    else:
        # Chronological split: first 2/3 for training, rest for adaptation
        train_set = MyDataset(train_data[i].X[:int(2 / 3 * l)], train_data[i].Y[:int(2 / 3 * l)], train_data[i].C)
        adapt_set = MyDataset(train_data[i].X[int(2 / 3 * l):], train_data[i].Y[int(2 / 3 * l):], train_data[i].C)

    train_loader = DataLoader(train_set, batch_size=options['phi_shot'], shuffle=options['shuffle'],
                              num_workers=NUM_WORKERS)
    adapt_loader = DataLoader(adapt_set, batch_size=options['K_shot'], shuffle=options['shuffle'],
                              num_workers=NUM_WORKERS)

    train_loaders.append(train_loader)  # For training phi
    adapt_loaders.append(adapt_loader)  # For computing a

# Initialize models
phi_net = Phi_Net(options)
h_net = H_Net_CrossEntropy(options)

# Define loss functions and optimizers
criterion = nn.MSELoss()  # force-prediction loss
criterion_h = nn.CrossEntropyLoss()  # condition-classification (adversarial) loss
optimizer_h = optim.Adam(h_net.parameters(), lr=options['learning_rate'])
optimizer_phi = optim.Adam(phi_net.parameters(), lr=options['learning_rate'])

model_save_freq = 5  # Model save frequency (epochs)

# Store training statistics
loss_f = []  # Force prediction loss
loss_c = []  # Adversarial loss

# Test losses for each test sub-dataset
loss_test_nominal = []  # Loss without learning
loss_test_mean = []  # Loss with mean prediction
loss_test_phi = []  # Loss with model prediction
for i in range(len(test_data)):
    loss_test_nominal.append([])
    loss_test_mean.append([])
    loss_test_phi.append([])

# Training loop
print("Starting training...")
for epoch in range(options['num_epochs']):
    # Randomly shuffle the training order of sub-datasets
    arr = np.arange(options['num_c'])
    np.random.shuffle(arr)

    # Cumulative loss for all sub-datasets
    running_loss_f = 0.0
    running_loss_c = 0.0

    for i in arr:
        # Draw one adaptation batch (for fitting a) and one training batch
        with torch.no_grad():
            adapt_loader = adapt_loaders[i]
            kshot_data = next(iter(adapt_loader))
            train_loader = train_loaders[i]
            data_batch = next(iter(train_loader))

        optimizer_phi.zero_grad()

        # Compute a from K-shot data (ordinary least squares on phi features)
        X = kshot_data['input']  # K x dim_x
        Y = kshot_data['output']  # K x dim_y
        Phi = phi_net(X)  # K x dim_a
        Phi_T = Phi.transpose(0, 1)  # dim_a x K
        A = torch.inverse(torch.mm(Phi_T, Phi))  # dim_a x dim_a
        a = torch.mm(torch.mm(A, Phi_T), Y)  # dim_a x dim_y

        # Limit norm of a (project back onto the gamma ball)
        if torch.norm(a, 'fro') > options['gamma']:
            a = a / torch.norm(a, 'fro') * options['gamma']

        # Batch training for phi network
        inputs = data_batch['input']  # B x dim_x
        labels = data_batch['output']  # B x dim_y
        c_labels = data_batch['c'].type(torch.long)

        # Forward pass + backward pass + optimization:
        # minimize force error while making phi uninformative to h
        # (hence the subtracted adversarial term below).
        outputs = torch.mm(phi_net(inputs), a)
        loss_f_batch = criterion(outputs, labels)
        temp = phi_net(inputs)

        loss_c_batch = criterion_h(h_net(temp), c_labels)

        loss_phi = loss_f_batch - options['alpha'] * loss_c_batch
        loss_phi.backward()
        optimizer_phi.step()

        # Train discriminator network (on average once per frequency_h steps)
        if np.random.rand() <= 1.0 / options['frequency_h']:
            optimizer_h.zero_grad()
            temp = phi_net(inputs)

            loss_c_batch = criterion_h(h_net(temp), c_labels)

            loss_h = loss_c_batch
            loss_h.backward()
            optimizer_h.step()

        # Spectral normalization: rescale any weight matrix whose spectral
        # norm exceeds options['SN']
        if options['SN'] > 0:
            for param in phi_net.parameters():
                M = param.detach().numpy()
                if M.ndim > 1:
                    s = np.linalg.norm(M, 2)
                    if s > options['SN']:
                        param.data = param / s * options['SN']

        running_loss_f += loss_f_batch.item()
        running_loss_c += loss_c_batch.item()

    # Save per-epoch statistics (averaged over conditions)
    loss_f.append(running_loss_f / options['num_c'])
    loss_c.append(running_loss_c / options['num_c'])
    if epoch % 2 == 0:
        print(
            f'[{epoch + 1}] loss_f: {running_loss_f / options["num_c"]:.4f} loss_c: {running_loss_c / options["num_c"]:.4f}')

    # Evaluate on test data
    with torch.no_grad():
        for j in range(len(test_data)):
            loss_nominal, loss_mean, loss_phi_val = error_statistics(test_data[j].X, test_data[j].Y, phi_net, h_net,
                                                                     options=options)
            loss_test_nominal[j].append(loss_nominal)
            loss_test_mean[j].append(loss_mean)
            loss_test_phi[j].append(loss_phi_val)

    # Save a checkpoint periodically
    if epoch % model_save_freq == 0:
        save_model(phi_net=phi_net, h_net=h_net, model_name=model_name + '-epoch-' + str(epoch), options=options)

        # Verify the checkpoint was actually written to disk
        model_path = f'./models/{model_name}-epoch-{epoch}.pth'
        if os.path.exists(model_path):
            print(f"模型已成功保存到: {model_path}")
        else:
            print(f"警告: 模型未能保存到路径: {model_path}")

# Plot training loss curves
plt.figure(figsize=(10, 6))
plt.subplot(2, 1, 1)
plt.plot(loss_f)
plt.xlabel('Epoch')
plt.ylabel('Force Prediction Loss')
plt.title('Training Force Prediction Loss')
plt.subplot(2, 1, 2)
plt.plot(loss_c)
plt.title('Training Wind Condition Prediction Loss')
plt.xlabel('Epoch')
plt.ylabel('Condition Loss')
plt.tight_layout()
plt.show()

# Plot test loss curves (per test sub-dataset)
print("Plotting test loss curves...")
for j in range(len(test_data)):
    plt.figure()
    plt.plot(loss_test_mean[j], label='Mean Prediction')
    plt.plot(np.array(loss_test_phi[j]), label='Model Prediction')
    plt.legend()
    plt.title(f'Test Dataset {j} - {test_data[j].meta["wind_condition"]}')
    plt.xlabel('Epoch')
    plt.ylabel('MSE Loss')
    plt.show()


# Find the newest checkpoint
def find_latest_model(prefix, folder='./models/'):
    """Return the name (without extension) of the newest
    `<prefix>...-epoch-N.pth` checkpoint in `folder`, or None when no
    matching file exists.
    """
    if not os.path.exists(folder):
        return None

    model_files = [f for f in os.listdir(folder) if f.startswith(prefix) and f.endswith('.pth')]
    if not model_files:
        return None

    # Extract the epoch number from each filename ("...epoch-<N>.pth").
    # The previous bare `except: continue` was dead code — neither re.search
    # nor int() on a \d+ match can raise — and would have masked real errors.
    epochs = []
    for f in model_files:
        match = re.search(r'epoch-(\d+)', f)
        if match:
            epochs.append(int(match.group(1)))

    if not epochs:
        return None

    return f"{prefix}-epoch-{max(epochs)}"


# Select final model
stopping_epoch = min(options['num_epochs'] - 1, 8)  # Select best model based on validation results
options['num_epochs'] = stopping_epoch

# ===== New: export the model to ONNX format =====
print("\n导出模型为ONNX格式...")
try:
    import onnx
    import onnxruntime as ort
except ImportError:
    print("错误：缺少 onnx 或 onnxruntime 库，请安装后再试。")
    print("安装命令：conda install -c conda-forge onnx onnxruntime")
    # Fix: `sys` is never imported at the top of this file, so the original
    # `sys.exit(1)` raised NameError here instead of aborting cleanly.
    # SystemExit is exactly what sys.exit() raises internally.
    raise SystemExit(1)

# Load the final model: prefer the exact stopping-epoch checkpoint, and fall
# back to the newest checkpoint on disk when it is absent.
model_to_load = f'{model_name}-epoch-{stopping_epoch}'
model_path = f'./models/{model_to_load}.pth'

final_model = None

if not os.path.exists(model_path):
    print(f"找不到模型: {model_to_load}，尝试查找最新可用模型...")
    latest_model = find_latest_model(model_name)
    if latest_model is None:
        print("错误: 没有找到可用的模型文件!")
    else:
        print(f"找到最新模型: {latest_model}")
        # Sanity-check the checkpoint file before loading it.
        inspect_model_file(latest_model)
        final_model = load_model(model_name=latest_model)
else:
    print(f"加载模型: {model_to_load}")
    # Sanity-check the checkpoint file before loading it.
    inspect_model_file(model_to_load)
    final_model = load_model(model_name=model_to_load)

# Make sure a model was successfully loaded before attempting the export.
if final_model is not None:
    # Export the phi network to ONNX.
    # NOTE(review): assumes phi's forward takes one (batch, dim_x) float
    # tensor — confirm against Phi_Net's definition.
    dummy_input = torch.randn(1, options['dim_x'], requires_grad=True)
    onnx_path = f'./models/{model_name}.onnx'
    torch.onnx.export(
        final_model.phi,
        dummy_input,
        onnx_path,
        export_params=True,  # embed the trained weights in the ONNX file
        opset_version=11,
        do_constant_folding=True,  # pre-compute constant subgraphs
        input_names=['sensor_input'],
        output_names=['feature_representation'],
        # Allow a variable batch size at inference time.
        dynamic_axes={'sensor_input': {0: 'batch_size'}, 'feature_representation': {0: 'batch_size'}}
    )

    print(f"模型已成功导出为ONNX格式: {onnx_path}")

    # Validate the exported ONNX model with the official checker.
    try:
        onnx_model = onnx.load(onnx_path)
        onnx.checker.check_model(onnx_model)
        print("ONNX模型验证通过!")
    except Exception as e:
        print(f"ONNX模型验证失败: {e}")
else:
    print("无法导出模型为ONNX格式，因为没有可用的模型")
    # No checkpoint was found: fall back to a randomly initialized model so
    # the demo below can still run.
    print("使用随机初始化的模型进行演示")
    # Keyword names here must match the Model namedtuple fields (phi/h/options).
    final_model = Model(
        phi=Phi_Net(options),
        h=H_Net_CrossEntropy(options),
        options=options
    )

# ===== New: model prediction demo =====
print("\n模型预测演示:")
if test_data:
    # Use the first test dataset for the demonstration.
    demo_data = test_data[0]
    demo_input = demo_data.X
    demo_labels = demo_data.Y
    demo_t = demo_data.meta['t']
    demo_condition = demo_data.meta['wind_condition']

    print(f"使用测试数据集: {demo_condition}")

    # Run prediction without tracking gradients.
    with torch.no_grad():
        # Convert the input to a PyTorch tensor.
        # NOTE(review): from_numpy preserves the array's dtype — assumes it
        # matches the dtype of phi's parameters; confirm against training code.
        input_tensor = torch.from_numpy(demo_input)

        # Feature representation from the phi network.
        phi_output = final_model.phi(input_tensor)

        # Least-squares fit of the adaptation coefficients `a` over the whole
        # dataset via the normal equations: a = (Phi^T Phi)^-1 Phi^T Y.
        # NOTE(review): assumes Phi^T Phi is well-conditioned/invertible.
        Y_tensor = torch.from_numpy(demo_labels)
        Phi_T = phi_output.transpose(0, 1)  # dim_a x N
        A = torch.inverse(torch.mm(Phi_T, phi_output))  # dim_a x dim_a
        a = torch.mm(torch.mm(A, Phi_T), Y_tensor)  # dim_a x dim_y

        # Predicted forces: Phi @ a.
        predictions = torch.mm(phi_output, a).numpy()

    # Mean squared prediction error over all samples and axes.
    mse = np.mean((predictions - demo_labels) ** 2)
    print(f"预测均方误差: {mse:.6f}")

    # Visualize predictions against ground truth, one subplot per force axis.
    plt.figure(figsize=(15, 10))
    for i in range(3):  # three force components (X, Y, Z)
        plt.subplot(3, 1, i + 1)
        plt.plot(demo_t, demo_labels[:, i], 'b-', label='真实值')
        plt.plot(demo_t, predictions[:, i], 'r--', label='预测值')
        plt.legend()
        plt.title(f'气动力分量 {["X", "Y", "Z"][i]} (风况: {demo_condition})')
        plt.xlabel('时间 (秒)')
        plt.ylabel('力 (N)')

    plt.tight_layout()
    plt.show()

print("\n训练和评估完成!")


class DroneModelWrapper:
    """Inference wrapper around a trained phi/h drone wind-condition model."""

    def __init__(self, model_path=None):
        """
        Initialize the drone wind-condition model.

        :param model_path: optional checkpoint path; if None, the newest
                           matching checkpoint under ./models is loaded
        :raises FileNotFoundError: when no matching checkpoint can be found
        """
        # These options must mirror the ones used at training time.
        self.options = {
            'dim_x': 12,
            'dim_y': 3,
            'num_c': 5,
            'features': ['acc', 'vel', 'gyro', 'pos'],
            'dim_a': 3,
            'loss_type': 'crossentropy-loss'
        }

        if model_path is None:
            model_path = self._find_latest_checkpoint()

        # load_model expects the bare model name (no directory, no extension);
        # os.path handling replaces the fragile split('/') / split('.') chain.
        model_name = os.path.splitext(os.path.basename(model_path))[0]
        self.model = load_model(model_name)
        print(f"成功加载模型: {model_path}")

    @staticmethod
    def _find_latest_checkpoint(folder='./models'):
        """Return the path of the newest matching checkpoint in *folder*."""
        prefix = 'drone_data_dim-a-3_acc-vel-gyro-pos-epoch-'
        # Guard against a missing models directory: os.listdir would raise a
        # less helpful FileNotFoundError of its own.
        if not os.path.isdir(folder):
            raise FileNotFoundError(f'Model folder not found: {folder}')

        epochs = []
        for fname in os.listdir(folder):
            if fname.startswith(prefix):
                match = re.search(r'epoch-(\d+)', fname)
                if match:
                    epochs.append(int(match.group(1)))
        if not epochs:
            # Previously max() raised an opaque ValueError on an empty list.
            raise FileNotFoundError(f'No checkpoint matching {prefix}* in {folder}')

        return os.path.join(folder, f'{prefix}{max(epochs)}.pth')

    def predict(self, input_data):
        """
        Predict aerodynamic force and wind condition.

        :param input_data: array-like of shape (N, 12) or (12,)
        :return: dict with 'force' (ndarray) and 'wind_condition'
                 (string for a single sample, list of strings for a batch)
        """
        # Normalize the input to a 2-D (N, 12) array.
        input_data = np.asarray(input_data)
        if input_data.ndim == 1:
            input_data = input_data.reshape(1, -1)

        input_tensor = torch.from_numpy(input_data.astype(np.float64))

        with torch.no_grad():
            phi_output = self.model.phi(input_tensor)

            # Simplified adaptation: an identity matrix stands in for K-shot
            # adapted coefficients. Fix: match phi_output's dtype explicitly —
            # torch.eye defaults to float32, which made torch.mm raise a
            # dtype-mismatch error against the float64 features above.
            a = torch.eye(self.options['dim_a'], self.options['dim_y'],
                          dtype=phi_output.dtype)

            # Predicted aerodynamic force.
            force = torch.mm(phi_output, a).numpy()

            # Predicted wind-condition class (argmax of softmax probabilities).
            if self.model.h is not None:
                wind_prob = F.softmax(self.model.h(phi_output), dim=1).numpy()
                wind_condition = np.argmax(wind_prob, axis=1)
            else:
                wind_condition = np.zeros(len(input_data))

        # Map class indices to human-readable labels.
        wind_map = ['no_wind', 'light_wind', 'moderate_wind', 'strong_wind', 'gale_wind']
        wind_str = [wind_map[int(i)] for i in wind_condition]

        return {
            'force': force.squeeze(),
            'wind_condition': wind_str[0] if len(wind_str) == 1 else wind_str
        }


if __name__ == "__main__":
    # 测试代码
    model = DroneModelWrapper()
    test_input = np.random.randn(12)
    result = model.predict(test_input)
    print("测试结果:", result)
    