
import os
import math
import time
import random
import datetime
import warnings

warnings.filterwarnings('ignore')  # 忽略警告

# Scientific-computing and data-analysis libraries
import numpy as np
import pandas as pd

# Visualisation tools
import seaborn as sns
import matplotlib.pyplot as plt

# TensorFlow and related Keras modules
import tensorflow as tf
from tensorflow.keras import Sequential, regularizers, Model, Input
from tensorflow.keras.layers import Flatten, Dense, Conv1D, MaxPool1D, Dropout, AvgPool1D
from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler

# Tools for imbalanced-data handling and model selection
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import KFold
from sklearn.preprocessing import OneHotEncoder


# Memory-reduction helper: downcast columns to the narrowest sufficient dtype
def reduce_mem_usage(df):
    """Shrink *df*'s memory footprint by downcasting numeric columns.

    Each integer/float column is cast to the narrowest dtype whose limits
    contain the column's min/max; object columns become ``category``.
    The frame is modified in place and also returned. Memory usage before
    and after the optimisation is printed.

    Note: float16 keeps only ~3 significant decimal digits, so the downcast
    can lose precision — acceptable here for normalised signal amplitudes.
    """
    start_mem = df.memory_usage().sum() / 1024 ** 2
    print(f'优化前的数据集内存大小 {start_mem:.2f} MB')

    for col in df.columns:
        col_type = df[col].dtype
        if col_type != 'object':
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                # Inclusive bounds: a column whose extremes equal a dtype's
                # limits still fits that dtype (the original strict
                # comparisons needlessly promoted such columns).
                if c_min >= np.iinfo(np.int8).min and c_max <= np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min >= np.iinfo(np.int16).min and c_max <= np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min >= np.iinfo(np.int32).min and c_max <= np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                else:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min >= np.finfo(np.float16).min and c_max <= np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min >= np.finfo(np.float32).min and c_max <= np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
        else:
            df[col] = df[col].astype('category')

    end_mem = df.memory_usage().sum() / 1024 ** 2
    print(f'优化后的内存占用: {end_mem:.2f} MB')
    print(f'减少的百分比 {100 * (start_mem - end_mem) / start_mem:.1f}%\n')
    return df


# Data loading and preprocessing.
# Replace with the actual file paths (or Kaggle dataset paths):
#   train.csv  must contain columns id, heartbeat_signals, label
#   testA.csv  must contain columns id, heartbeat_signals
train = pd.read_csv('./train.csv')
test = pd.read_csv('./testA.csv')

# Expand each comma-separated signal string into one float per sample point.
train_list = [[row[0], *(float(v) for v in row[1].split(',')), row[2]]
              for row in train.values]
train = pd.DataFrame(np.array(train_list))
train.columns = ['id'] + [f's_{i}' for i in range(len(train_list[0]) - 2)] + ['label']
train = reduce_mem_usage(train)

# Same expansion for the test set (no label column here).
test_list = [[row[0], *(float(v) for v in row[1].split(','))]
             for row in test.values]
test = pd.DataFrame(np.array(test_list))
test.columns = ['id'] + [f's_{i}' for i in range(len(test_list[0]) - 1)]
test = reduce_mem_usage(test)

# Split features from labels.
y_train = train['label']
x_train = train.drop(['id', 'label'], axis=1)
X_test = test.drop(['id'], axis=1)

# Append a trailing channel axis so the data matches the Conv1D input format.
X_test = np.asarray(X_test)[:, :, np.newaxis]

# Balance the classes with SMOTE over-sampling before training.
smote = SMOTE(random_state=2021)
k_x_train, k_y_train = smote.fit_resample(x_train, y_train)
k_x_train = np.asarray(k_x_train)[:, :, np.newaxis]


# Evaluation metric
def abs_sum(y_pred, y_true):
    """Competition metric: total absolute error between *y_pred* and *y_true*.

    Accepts any equal-shaped array-likes and returns the sum of element-wise
    absolute differences. The original ``sum(sum(abs(...)))`` only worked
    for 2-D inputs (the outer ``sum`` cannot iterate a 0-d array); ``np.sum``
    handles any rank, including 1-D, with identical results for 2-D.
    """
    y_pred = np.asarray(y_pred)
    y_true = np.asarray(y_true)
    return np.sum(np.abs(y_pred - y_true))


# Model Net1
class Net1(Model):
    """Stack of dilated 1-D convolutions with a two-branch dense head.

    Input: (batch, 205, 1) heartbeat signals; output: softmax over 4 classes.
    """

    def __init__(self):
        super(Net1, self).__init__()
        # Convolutional trunk; dilation widens the receptive field cheaply.
        self.conv1 = Conv1D(16, 3, padding='same', activation='relu', input_shape=(205, 1))
        self.conv2 = Conv1D(32, 3, dilation_rate=2, padding='same', activation='relu')
        self.conv3 = Conv1D(64, 3, dilation_rate=2, padding='same', activation='relu')
        self.conv4 = Conv1D(64, 5, dilation_rate=2, padding='same', activation='relu')
        self.max_pool1 = MaxPool1D(3, 2, padding='same')
        self.conv5 = Conv1D(128, 5, dilation_rate=2, padding='same', activation='relu')
        self.conv6 = Conv1D(128, 5, dilation_rate=2, padding='same', activation='relu')
        self.max_pool2 = MaxPool1D(3, 2, padding='same')
        self.dropout = Dropout(0.5)
        self.flatten = Flatten()
        # Head: a wide ReLU branch plus a sigmoid bottleneck branch,
        # fused by addition before the softmax classifier.
        self.fc1 = Dense(256, activation='relu')
        self.fc21 = Dense(16, activation='relu')
        self.fc22 = Dense(256, activation='sigmoid')
        self.fc3 = Dense(4, activation='softmax')

    def call(self, x):
        # Apply the trunk sequentially.
        for layer in (self.conv1, self.conv2, self.conv3, self.conv4,
                      self.max_pool1, self.conv5, self.conv6,
                      self.max_pool2, self.dropout, self.flatten):
            x = layer(x)
        # Two parallel dense branches, combined additively.
        wide = self.fc1(x)
        gated = self.fc22(self.fc21(x))
        return self.fc3(wide + gated)


# Model Net2 (uses GeMPooling)
class GeMPooling(tf.keras.layers.Layer):
    """Generalised-mean pooling over the temporal axis (axis 1).

    With p=1 this reduces to average pooling; a trainable p interpolates
    between average (p=1) and max (p -> inf) pooling behaviour.
    """

    def __init__(self, p=1.0, train_p=False):
        super().__init__()
        self.eps = 1e-6  # lower clip bound keeps pow well-defined for non-positive inputs
        if train_p:
            self.p = tf.Variable(p, dtype=tf.float32)
        else:
            self.p = p

    def call(self, inputs):
        clipped = tf.clip_by_value(inputs, self.eps, tf.reduce_max(inputs))
        pooled = tf.reduce_mean(tf.pow(clipped, self.p), axis=1, keepdims=False)
        return tf.pow(pooled, 1. / self.p)


class Net2(Model):
    """Dilated 1-D CNN ending in GeM pooling, with a two-branch dense head.

    Input: (batch, 205, 1) signals; output: softmax over 4 classes.
    """

    def __init__(self):
        super(Net2, self).__init__()
        # Convolutional trunk with interleaved max-pool downsampling.
        self.conv1 = Conv1D(16, 3, padding='same', activation='relu', input_shape=(205, 1))
        self.conv2 = Conv1D(32, 3, dilation_rate=2, padding='same', activation='relu')
        self.conv3 = Conv1D(64, 3, dilation_rate=2, padding='same', activation='relu')
        self.max_pool1 = MaxPool1D(3, 2, padding='same')
        self.conv4 = Conv1D(64, 5, dilation_rate=2, padding='same', activation='relu')
        self.conv5 = Conv1D(128, 5, dilation_rate=2, padding='same', activation='relu')
        self.max_pool2 = MaxPool1D(3, 2, padding='same')
        self.conv6 = Conv1D(256, 5, dilation_rate=2, padding='same', activation='relu')
        self.conv7 = Conv1D(128, 7, dilation_rate=2, padding='same', activation='relu')
        self.gempool = GeMPooling()
        self.dropout1 = Dropout(0.5)
        self.flatten = Flatten()
        # Additively fused two-branch head, then 4-way softmax.
        self.fc1 = Dense(256, activation='relu')
        self.fc21 = Dense(16, activation='relu')
        self.fc22 = Dense(256, activation='sigmoid')
        self.fc3 = Dense(4, activation='softmax')

    def call(self, x):
        # Apply the trunk sequentially (GeM pooling collapses the time axis).
        for layer in (self.conv1, self.conv2, self.conv3, self.max_pool1,
                      self.conv4, self.conv5, self.max_pool2,
                      self.conv6, self.conv7, self.gempool,
                      self.dropout1, self.flatten):
            x = layer(x)
        wide = self.fc1(x)
        gated = self.fc22(self.fc21(x))
        return self.fc3(wide + gated)


# Model Net3 (dual max/average pooling strategy)
class Net3(Model):
    """Dilated 1-D CNN using dual (max + average) pooling stages.

    At each downsampling stage the max- and average-pooled feature maps are
    concatenated along the channel axis. Input: (batch, 205, 1) signals;
    output: softmax over 4 classes.
    """

    def __init__(self):
        super(Net3, self).__init__()
        self.conv1 = Conv1D(16, 3, padding='same', activation='relu', input_shape=(205, 1))
        self.conv2 = Conv1D(32, 3, dilation_rate=2, padding='same', activation='relu')
        self.conv3 = Conv1D(64, 3, dilation_rate=2, padding='same', activation='relu')
        self.conv4 = Conv1D(128, 3, dilation_rate=2, padding='same', activation='relu')
        self.conv5 = Conv1D(128, 5, dilation_rate=2, padding='same', activation='relu')
        # First dual-pooling stage.
        self.max_pool1 = MaxPool1D(3, 2, padding='same')
        self.avg_pool1 = AvgPool1D(3, 2, padding='same')
        self.conv6 = Conv1D(128, 5, dilation_rate=2, padding='same', activation='relu')
        self.conv7 = Conv1D(128, 5, dilation_rate=2, padding='same', activation='relu')
        # Second dual-pooling stage.
        self.max_pool2 = MaxPool1D(3, 2, padding='same')
        self.avg_pool2 = AvgPool1D(3, 2, padding='same')
        self.dropout = Dropout(0.5)
        self.flatten = Flatten()
        # Additively fused two-branch head, then 4-way softmax.
        self.fc1 = Dense(256, activation='relu')
        self.fc21 = Dense(16, activation='relu')
        self.fc22 = Dense(256, activation='sigmoid')
        self.fc3 = Dense(4, activation='softmax')

    def call(self, x):
        for layer in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            x = layer(x)
        # Stage 1: concatenate max- and average-pooled maps on the channel axis.
        x = tf.concat([self.max_pool1(x), self.avg_pool1(x)], axis=2)
        x = self.conv6(x)
        x = self.conv7(x)
        # Stage 2: same dual-pooling fusion.
        x = tf.concat([self.max_pool2(x), self.avg_pool2(x)], axis=2)
        x = self.dropout(x)
        x = self.flatten(x)
        wide = self.fc1(x)
        gated = self.fc22(self.fc21(x))
        return self.fc3(wide + gated)


# Training utilities
def step_decay(epoch):
    """Step-decay schedule: halve the learning rate every 10 epochs.

    Starts at 0.01 and returns the learning rate for *epoch* (0-based).
    """
    initial_lr, drop, epochs_drop = 0.01, 0.5, 10
    return initial_lr * drop ** (epoch // epochs_drop)


# Keras callback that applies the step_decay schedule at each epoch.
lr_scheduler = LearningRateScheduler(step_decay)


def train_model(model, x_train, y_train, batch_size=128, epochs=30, validation_split=0.2):
    """Compile *model* (Adam, sparse categorical cross-entropy) and fit it
    with the step-decay learning-rate callback. Returns the Keras History."""
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_split=validation_split,
                     callbacks=[lr_scheduler])


def train_model2(model, x_train, y_train, batch_size=128, epochs=30, validation_split=0.2):
    """Like train_model but without the LR-schedule callback: compile with
    Adam + sparse categorical cross-entropy and fit. Returns the History."""
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_split=validation_split)


# Instantiate and train the three models.
model1 = Net1()
model2 = Net2()
model3 = Net3()

# Net1 trains with the step-decay LR callback; Net2/Net3 use Adam defaults.
history1 = train_model(model1, k_x_train, k_y_train)
history2 = train_model2(model2, k_x_train, k_y_train)
history3 = train_model2(model3, k_x_train, k_y_train)

# Predict class probabilities on the test set with each model.
predictions1 = model1.predict(X_test)
predictions2 = model2.predict(X_test)
predictions3 = model3.predict(X_test)

# Ensemble by weighted averaging of the probability outputs (weights sum to 1).
predictions_weighted = 0.32 * predictions1 + 0.33 * predictions2 + 0.35 * predictions3

# Assemble the submission frame: id plus one probability column per class.
# NOTE(review): ids are assumed to run from 100000 — verify against testA.csv.
submit = pd.DataFrame({
    'id': range(100000, 120000),
    'label_0': predictions_weighted[:, 0],
    'label_1': predictions_weighted[:, 1],
    'label_2': predictions_weighted[:, 2],
    'label_3': predictions_weighted[:, 3],
})

# Post-processing, pass 1 (threshold method): rows with a confident top
# probability are snapped to hard 0/1 labels; the rest are collected in
# `others` for a second, tie-breaking pass.
threshold = 0.5
others = []
for index, row in submit.iterrows():
    # Use positional access (.iloc) throughout: `row[i]` with an integer key
    # on a string-labelled Series relies on a deprecated positional fallback
    # that newer pandas versions reject.
    row_max = max(row.iloc[1:])
    if row_max > threshold:
        for i in range(1, 5):
            submit.iloc[index, i] = 1 if row.iloc[i] > threshold else 0
    else:
        others.append(index)

# Post-processing, pass 2: resolve the low-confidence rows deferred above.
for idx in others:
    probs = submit.iloc[idx, 1:].values
    # Rank (probability, column) pairs from highest to lowest probability.
    ranked = sorted(((p, col) for col, p in enumerate(probs)), reverse=True)
    if ranked[0][0] - ranked[1][0] >= 0.04:
        # Clear enough winner: one-hot the top class.
        submit.iloc[idx, ranked[0][1] + 1] = 1
        submit.iloc[idx, [ranked[1][1] + 1, ranked[2][1] + 1, ranked[3][1] + 1]] = 0
    else:
        # Too close to call: keep the top-2 probabilities, zero the rest.
        submit.iloc[idx, [ranked[2][1] + 1, ranked[3][1] + 1]] = 0

# Save the submission with a timestamped filename so reruns don't overwrite.
submit.to_csv(f'submit_{datetime.datetime.now().strftime("%Y%m%d_%H%M%S")}.csv', index=False)