# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torch.optim as optim
from sklearn.metrics import accuracy_score, f1_score, recall_score, roc_auc_score
from tqdm import tqdm
import pandas as pd
from gwlsa.datasets import load_df_fromfile
from gwlsa_settings import net_params
from utils.general_utils import get_onehot, timer

class CustomDataset(Dataset):
    """Wraps aligned feature / label / one-hot / geo-id containers for DataLoader batching."""

    def __init__(self, x_data, y_data, y_onehot, geoid):
        # All four containers are expected to share the same first-dimension length.
        self.x_data = x_data
        self.y_data = y_data
        self.y_onehot = y_onehot
        self.geoid = geoid

    def __len__(self):
        """Return the total number of samples."""
        return len(self.x_data)

    def __getitem__(self, idx):
        """Return (features, raw label, one-hot label, geo id) for sample ``idx``."""
        return (
            self.x_data[idx, :],
            self.y_data[idx],
            self.y_onehot[idx, :],
            self.geoid[idx],
        )

# Network architecture definition
class NetDNN(nn.Module):
    """Fully connected classifier: fc1 -> (nb_mid_layers - 1) hidden layers -> fc_last.

    ReLU activations, optional dropout, and a sigmoid output (the caller pairs
    this with BCELoss on one-hot targets).
    """

    def __init__(self, input_shape, nb_mid_layers, nb_class, node_num, drop_out, drop_percentage):
        """
        Args:
            input_shape: int, 1-tuple, or 2-tuple describing the per-sample
                feature shape; it is flattened to a single input dimension.
            nb_mid_layers: total hidden layers (fc1 plus nb_mid_layers - 1 extras).
            nb_class: number of output units.
            node_num: width of every hidden layer.
            drop_out: whether to apply dropout after each hidden layer.
            drop_percentage: dropout probability used when drop_out is True.
        """
        super(NetDNN, self).__init__()
        if isinstance(input_shape, tuple) and len(input_shape) == 2:
            input_dim = input_shape[0] * input_shape[1]
        elif isinstance(input_shape, tuple) and len(input_shape) == 1:
            input_dim = input_shape[0]
        elif isinstance(input_shape, int):
            input_dim = input_shape
        else:
            # BUG FIX: an unsupported shape previously left input_dim unbound
            # and surfaced as a confusing NameError on the nn.Linear line below.
            raise ValueError(f"Unsupported input_shape: {input_shape!r}")
        self.fc1 = nn.Linear(input_dim, node_num)
        self.mid_layers = nn.ModuleList([nn.Linear(node_num, node_num) for _ in range(nb_mid_layers - 1)])
        self.dropout = nn.Dropout(p=drop_percentage) if drop_out else None
        self.flatten = nn.Flatten()
        self.fc_last = nn.Linear(node_num, nb_class)

    def forward(self, x):
        x = x.view(x.size(0), -1)  # flatten per-sample features
        x = torch.relu(self.fc1(x))
        if self.dropout:
            x = self.dropout(x)
        for layer in self.mid_layers:
            x = torch.relu(layer(x))
            if self.dropout:
                x = self.dropout(x)
        x = self.flatten(x)  # no-op here: x is already 2-D after view()
        # BUG FIX: this extra dropout was applied unconditionally and crashed
        # with TypeError when drop_out=False (self.dropout is None).
        # NOTE(review): when dropout IS enabled this applies it a second time
        # after the last hidden layer, matching the original behavior —
        # confirm whether the double application is intentional.
        if self.dropout:
            x = self.dropout(x)
        y_pred = torch.sigmoid(self.fc_last(x))
        return y_pred

# Training and evaluation
def _evaluate(model, loader, device):
    """Run ``model`` over ``loader``; return (true labels, argmax predictions, class-1 probabilities) as Python lists."""
    model.eval()
    y_true, preds, probs = [], [], []
    with torch.no_grad():
        for bx, by, _by_onehot, _bid in loader:
            outputs = model(bx.to(device))
            _, predicted = torch.max(outputs, 1)
            probs.extend(outputs[:, 1].cpu().numpy())
            preds.extend(predicted.cpu().numpy())
            y_true.extend(by.cpu().numpy())
    return y_true, preds, probs


def fit(x_train, y_train_r, y_train_onehot, geoId_train,
        x_val, y_val_r, y_val_onehot, geoId_val,
        x_test, y_test_r, y_test_onehot, geoId_test,
        nb_epochs=100, verbose=0,
        input_shape=(18, 1), nb_mid_layers=2,
        node_num=480,
        lr_rate=0.001,
        class_weight=None,
        bat_size=1000):
    """Train a NetDNN binary classifier and evaluate it on the test split.

    Trains on (x_train, y_train_onehot), prints validation metrics after each
    epoch, and computes final accuracy/F1/recall/AUC on the test split.

    Args:
        x_*: feature arrays; y_*_r: integer labels in {0, 1}; y_*_onehot:
            one-hot labels; geoId_*: per-sample ids carried through batching.
        nb_epochs: number of training epochs.
        verbose: non-zero enables the tqdm progress bar.
        input_shape: per-sample feature shape forwarded to NetDNN.
        nb_mid_layers, node_num: network depth and hidden-layer width.
        lr_rate: Adam learning rate.
        class_weight: optional per-class weights (length-2 sequence/tensor);
            None means equal weighting.
        bat_size: batch size for all three DataLoaders.

    Returns:
        (model, test_accuracy, test_f1, test_recall, test_auc)
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Convert inputs to float tensors; batches are moved to `device` per step.
    x_train = torch.tensor(x_train).float()
    y_train_onehot = torch.tensor(y_train_onehot).float()
    x_val = torch.tensor(x_val).float()
    y_val_onehot = torch.tensor(y_val_onehot).float()
    x_test = torch.tensor(x_test).float()
    y_test_onehot = torch.tensor(y_test_onehot).float()

    train_dataset = CustomDataset(x_train, y_train_r, y_train_onehot, geoId_train)
    val_dataset = CustomDataset(x_val, y_val_r, y_val_onehot, geoId_val)
    test_dataset = CustomDataset(x_test, y_test_r, y_test_onehot, geoId_test)

    train_loader = DataLoader(train_dataset, batch_size=bat_size, shuffle=True, num_workers=net_params['num_workers'])
    val_loader = DataLoader(val_dataset, batch_size=bat_size, shuffle=False, num_workers=net_params['num_workers'])
    test_loader = DataLoader(test_dataset, batch_size=bat_size, shuffle=False, num_workers=net_params['num_workers'])

    # Binary problem: nb_class=2, dropout enabled at p=0.5.
    # Moved to `device` once here instead of once per epoch.
    model = NetDNN(input_shape, nb_mid_layers, 2, node_num, True, 0.5).to(device)

    # BUG FIX: a non-None class_weight previously raised NameError because
    # class_weights was only assigned in the None branch.
    if class_weight is None:
        class_weights = torch.tensor([1.0, 1.0])
    else:
        class_weights = torch.as_tensor(class_weight, dtype=torch.float32)
    # A (2,) weight broadcasts over BCELoss's class dimension of the one-hot
    # targets, acting as per-class weights.
    criterion = torch.nn.BCELoss(reduction='sum', weight=class_weights.to(device))
    optimizer = optim.Adam(model.parameters(), lr=lr_rate)

    for epoch in range(nb_epochs):
        model.train()
        total_loss = 0.0
        with tqdm(total=len(train_loader), desc=f'Epoch {epoch+1}/{nb_epochs}', disable=not verbose) as pbar:
            for bx, by, by_onehot, bid in train_loader:
                bx, by_onehot = bx.to(device), by_onehot.to(device)
                outputs = model(bx)
                loss = criterion(outputs, by_onehot)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                total_loss += loss.item()
                pbar.update(1)
        # Mean of per-batch summed losses (reduction='sum' above).
        avg_loss = total_loss / len(train_loader)
        print(f'Epoch {epoch+1}, Train Loss: {avg_loss:.4f}')

        # Per-epoch validation metrics.
        val_y_r, val_preds, val_pred_probs = _evaluate(model, val_loader, device)
        val_accuracy = accuracy_score(val_y_r, val_preds)
        val_f1 = f1_score(val_y_r, val_preds, average='macro')
        val_recall = recall_score(val_y_r, val_preds, average='macro')
        val_auc = roc_auc_score(val_y_r, val_pred_probs)
        print(f'Epoch {epoch+1}, Val Accuracy: {val_accuracy:.4f}, Val F1: {val_f1:.4f}, Val Recall: {val_recall:.4f}, Val AUC: {val_auc:.4f}')

    # Final test metrics, scored against the untouched y_test_r labels.
    _, test_preds, test_pred_probs = _evaluate(model, test_loader, device)
    test_accuracy = accuracy_score(y_test_r, test_preds)
    test_f1 = f1_score(y_test_r, test_preds, average='macro')
    test_recall = recall_score(y_test_r, test_preds, average='macro')
    test_auc = roc_auc_score(y_test_r, test_pred_probs)
    print(f'Test Accuracy: {test_accuracy:.4f}, Test F1: {test_f1:.4f}, Test Recall: {test_recall:.4f}, Test AUC: {test_auc:.4f}')

    return model, test_accuracy, test_f1, test_recall, test_auc
def get_data():
    """Load the train/val/test splits and prepare (features, labels, one-hot labels, ids) for each.

    Returns:
        Four tuples — (all, train, val, test) — each of the form
        (x, y_raw, y_onehot, geo_ids).
    """
    train_df, val_df, test_df = load_df_fromfile(
        net_params['data_load_dir'],
        net_params['max_distance'],
        net_params['resolution']
    )
    all_df = pd.concat([train_df, val_df, test_df], ignore_index=False)

    x_cols = net_params['x_column_names']
    y_col = net_params['y_column_name']
    id_col = net_params['id_column']

    def split(df):
        # Extract features, raw labels, one-hot labels, and geo ids for one split.
        y = df[y_col].values
        return df[x_cols].values, y, get_onehot(y), df[id_col].values

    return split(all_df), split(train_df), split(val_df), split(test_df)

@timer
def run_exp():
    """Run one end-to-end experiment: load the data, train for 4 epochs, and report test metrics."""
    _all_split, train_split, val_split, test_split = get_data()
    x_train, y_train_r, y_train_onehot, geoId_train_r = train_split
    x_val, y_val_r, y_val_onehot, geoId_val_r = val_split
    x_test, y_test_r, y_test_onehot, geoId_test_r = test_split
    fit(x_train, y_train_r, y_train_onehot, geoId_train_r,
        x_val, y_val_r, y_val_onehot, geoId_val_r,
        x_test, y_test_r, y_test_onehot, geoId_test_r,
        nb_epochs=4,
        verbose=0,
        input_shape=x_test.shape[1:],
        nb_mid_layers=2,
        node_num=480,
        lr_rate=0.001,
        class_weight=None,
        bat_size=1000)



# Script entry point: run the full training experiment.
if __name__ == '__main__':
    run_exp()
