import os
import pandas as pd
from django.http import HttpResponse
from django.views import View
from django.conf import settings
from utils.network_structure import *
from imblearn.over_sampling import SMOTE
from django.shortcuts import render, redirect
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# Create train views here.
# Name of the most recently uploaded training CSV, kept as module-level
# shared state between the Upload view and execute_train.
# NOTE(review): this assumes a single-process, single-user deployment —
# concurrent uploads would race on this global; verify deployment model.
file_name = ''


# Upload and receive the training-set CSV.
class Upload(View):
    """Training workflow: upload a training-set CSV file.

    GET renders the upload form; POST validates the file extension,
    streams the upload to MEDIA_ROOT/code/<name>, and redirects to the
    confirmation page. The uploaded file name is stored in the
    module-level ``file_name`` for later use by ``execute_train``.
    """

    def get(self, request):
        # Render the empty upload form.
        return render(request, 'train/train-upload.html', context={'message': "上传训练集"})

    def post(self, request):
        global file_name
        try:
            csv_file = request.FILES.get('csv_file')
            file_name = csv_file.name
            # Robust extension check: the original `split('.')[1] != 'csv'`
            # raised IndexError for names without a dot and wrongly rejected
            # names such as "a.b.csv"; endswith handles both, case-insensitively.
            if not file_name.lower().endswith('.csv'):
                return render(request, 'train/train-upload.html', context={'message': '文件格式错误'})

            # os.path.join with separate components keeps the path portable
            # (the original hard-coded the Windows separator 'code\\').
            file_path = os.path.join(settings.MEDIA_ROOT, 'code', file_name)

            # Stream the upload to disk chunk by chunk to bound memory use.
            with open(file_path, 'wb+') as f:
                for chunk in csv_file.chunks():
                    f.write(chunk)

            return redirect('train:confirm')
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed; any upload failure falls back to the form.
            return render(request, 'train/train-upload.html', context={'message': '上传出错，请重新上传'})



# Run the training job and render the visualization page.
def execute_train(request):
    """Train a 1-D CNN on the bundled datasets plus the user-uploaded CSV.

    Loads the fixed train/validate CSVs and the file previously saved by
    ``Upload`` (module-level ``file_name``), cleans and balances the data,
    trains for ``num_epochs`` epochs, checkpoints the best model by
    validation loss, and renders per-epoch loss/F1 curves plus a label
    distribution pie chart.

    Hyperparameters (num_epochs, learning_rate, in_channels, out_channels,
    kernel_size, num_classes) and the helpers (MyDataset, DataLoader,
    DeviceDataLoader, OneDCNN, get_default_device, to_device) come from the
    ``utils.network_structure`` wildcard import at the top of the file.
    """
    # ---- Load the three datasets -------------------------------------
    data1 = pd.read_csv(os.path.join('static/code/', "train_10000.csv"))
    data2 = pd.read_csv(os.path.join('static/code/', "validate_1000.csv"))
    # Portable join (the original hard-coded the Windows separator 'code\\').
    data3 = pd.read_csv(os.path.join(settings.MEDIA_ROOT, 'code', file_name))

    # Features found to contribute little by a random-forest feature
    # selection step; dropped from every dataset.
    low_importance = [
        'feature64', 'feature32', 'feature54', 'feature88', 'feature65', 'feature92',
        'feature80', 'feature1', 'feature78', 'feature60', 'feature20', 'feature77',
        'feature57', 'feature100'
    ]

    # Shared cleaning: fill NaNs with 0 (idiomatic replacement for the
    # boolean-mask assignment `d[np.isnan(d)] = 0`), drop the id column,
    # drop low-importance features.
    cleaned = []
    for d in (data1, data2, data3):
        d = d.fillna(0)
        d = d.drop(['sample_id'], axis=1)
        d = d.drop(columns=low_importance)
        cleaned.append(d)
    data1, data2, data3 = cleaned

    # Label distribution of the uploaded file, for the pie chart.
    pie_data = data3['label'].value_counts().to_dict()

    # Some labels are heavily over-represented; down-sample every class to
    # `count` rows to balance the training set (labels are 0..5).
    grouped_data = data1.groupby('label')
    count = 400
    data1 = pd.concat(
        grouped_data.get_group(label).sample(n=count, random_state=42)
        for label in range(6)
    )

    # Mix the balanced training data with the validation set and the
    # uploaded data, then shuffle.
    data = pd.concat([data1, data2, data3], axis=0)
    df = data.sample(frac=1, random_state=42).reset_index(drop=True)

    X = df.values[:, :-1]  # features
    y = df.values[:, -1]   # labels (last column)

    # Standardize features to zero mean / unit variance.
    X_scaled = StandardScaler().fit_transform(X)

    # The original passed random_state=True, which sklearn interprets as
    # the integer seed 1; written explicitly for clarity (identical split).
    X_train, X_test, y_train, y_test = train_test_split(
        X_scaled, y, test_size=0.2, random_state=1)

    # Reshape to (samples, n_features, 1) for the 1-D CNN. Using shape[1]
    # generalizes the original hard-coded 93 to any feature count.
    n_features = X_train.shape[1]
    X_train = X_train.reshape(X_train.shape[0], n_features, 1).astype('float32')
    X_test = X_test.reshape(X_test.shape[0], n_features, 1).astype('float32')

    # Pick CPU or GPU for training.
    device = get_default_device()

    # Wrap the loaders so every batch is moved to the chosen device.
    train_dl = DeviceDataLoader(
        DataLoader(MyDataset(X_train, y_train), batch_size=20), device)
    val_dl = DeviceDataLoader(
        DataLoader(MyDataset(X_test, y_test), batch_size=20), device)

    # Build the model and optimizer.
    model = to_device(OneDCNN(num_classes, in_channels, out_channels, kernel_size), device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.001)

    best_loss = np.inf
    history = []
    for epoch in range(num_epochs):
        # Training phase.
        # NOTE(review): the model's train/eval mode is never switched here;
        # if OneDCNN uses dropout/batch-norm, confirm whether model.train()
        # should be called each epoch.
        train_f1 = []
        train_losses = []
        for batch in train_dl:
            f1, loss = model.training_step(batch)
            train_f1.append(f1)
            train_losses.append(loss)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        # Validation phase.
        result = evaluate(model, val_dl)
        result['train_f1'] = torch.stack(train_f1).mean().item()
        result['train_loss'] = torch.stack(train_losses).mean().item()

        # Checkpoint only when validation loss improves (portable path join,
        # original hard-coded 'model\\').
        if result['val_loss'] < best_loss:
            best_loss = result['val_loss']
            torch.save(model.state_dict(),
                       os.path.join(settings.MEDIA_ROOT, 'model', '1dcnn-select2.pth'))

        model.epoch_end(epoch, result)
        history.append(result)

    # ---- Prepare the visualization payload ---------------------------
    train_losses = [round(x['train_loss'], 2) for x in history]
    val_losses = [round(x['val_loss'], 2) for x in history]
    scores = [round(x['val_f1'], 2) for x in history]

    context = {
        'epochs': list(range(1, len(train_losses) + 1)),
        'train_losses': train_losses,
        'val_losses': val_losses,
        'scores': scores,
        'pie_data': pie_data,
    }

    # Render the curves and pie chart.
    return render(request, 'train/train-visualization.html', context)


# After each epoch, measure the model's performance on the test set.
@torch.no_grad()
def evaluate(model, val_loader):
    """Run the model over ``val_loader`` without gradient tracking and
    return the metrics aggregated by ``validation_epoch_end``."""
    batch_results = []
    for batch in val_loader:
        batch_results.append(model.validation_step(batch))
    return model.validation_epoch_end(batch_results)


# Serve the trained model for download.
def download(request):
    """Return the saved model checkpoint as a file-download response.

    Reads the checkpoint written by ``execute_train`` and streams it back
    with a Content-Disposition header so the browser opens a save dialog.
    """
    # Portable path join (the original hard-coded the Windows separator
    # 'model\\', which breaks on POSIX systems).
    filepath = os.path.join(settings.MEDIA_ROOT, 'model', '1dcnn-select2.pth')
    with open(filepath, 'rb') as f:
        filedata = f.read()

    # Generic binary content type plus attachment disposition triggers
    # the download dialog.
    response = HttpResponse(filedata, content_type='application/octet-stream')
    response['Content-Disposition'] = 'attachment; filename="1dcnn-select2.pth"'

    return response


# Ask the user to confirm before launching the training run.
def confirm(request):
    """Render the page that asks the user to confirm starting training."""
    # Quick server-side trace that the confirmation page was reached.
    print('确认训练')
    template = 'train/train-confirm.html'
    return render(request, template)

