from src.dataset import FudanDrowsyDataset
import torch
import torch.utils.data
from torch import nn
import numpy as np
from torch.nn import functional as F
from src import const
from src.utils import merge_const, get_train_test_df, Evaluator
from tensorboardX import SummaryWriter
import os
import shutil
from glob import glob
import random

# Load the experiment configuration and prepare output/log directories.
merge_const('src.conf.filter_sample')
# exist_ok=True replaces the race-prone `os.path.exists(...) is False` check.
os.makedirs('models', exist_ok=True)

train_df, test_df = get_train_test_df()
writer = SummaryWriter(const.TRAIN_DIR)

step = 0  # global optimizer-step counter, shared across all filtering iterations
criterion = nn.CrossEntropyLoss()

# Every training sample starts as usable; the filtering loop below flips this
# off for samples the freshly trained model is confident are mislabeled.
train_df['in_use'] = True

# Unshuffled loader over the FULL training set (dropped samples included),
# used after each training round to score every sample deterministically.
train_dataset_unshuffle = FudanDrowsyDataset(train_df, mode=const.VAL_DATASET_MODE)
train_dataloader_unshuffle = torch.utils.data.DataLoader(
    train_dataset_unshuffle, batch_size=const.VAL_BATCH_SIZE, shuffle=False, num_workers=4)

# Start each run with a clean directory for suspected-mislabeled images.
if os.path.exists('data/error/'):
    shutil.rmtree('data/error/')

# Iteratively: (1) train a fresh model on the currently kept samples,
# (2) score every sample, (3) drop samples whose predictions confidently
# contradict their label, subject to a per-video drop cap.
for iter_num in range(10):
    # --- Train a fresh model on the currently kept samples --------------
    net = const.USE_NET()
    net = net.to(const.device)
    learning_rate = const.LEARNING_RATE
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
    net.train()
    train_dataset = FudanDrowsyDataset(train_df[train_df['in_use'] == True], mode=const.TRAIN_DATASET_MODE)
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=const.BATCH_SIZE, shuffle=True, num_workers=4)
    total_step = len(train_dataloader)
    for epoch in range(3):
        for i, sample in enumerate(train_dataloader):
            step += 1
            for key in sample:
                sample[key] = sample[key].to(const.device)
            if sample['image'].shape[0] == 1:  # BatchNorm requires batch_size > 1 in train mode
                continue
            output = net(sample['image'])
            loss = criterion(output, sample['label'])

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (i + 1) % 100 == 0:
                writer.add_scalar('loss', loss.item(), step)
                writer.add_scalar('learning_rate', learning_rate, step)
                print('Step [{}/{}], Loss: {:.4f}'
                      .format(i + 1, total_step, loss.item()))

    # --- Score every training sample with the trained model -------------
    # BUGFIX: switch to eval mode so BatchNorm uses its running statistics;
    # the original scored in train mode, making per-sample predictions
    # depend on batch composition.  `net.train()` is re-set at the top of
    # the next iteration, so this is safe.
    net.eval()
    with torch.no_grad():
        evaluator = Evaluator(train_df)
        for i, sample in enumerate(train_dataloader_unshuffle):
            for key in sample:
                sample[key] = sample[key].to(const.device)
            output = net(sample['image'])
            evaluator.add(sample, output)
        ret = evaluator.evaluate()

    df = evaluator.all_info

    # Per-video drop cap: a 'normal' video may lose at most 10% of its
    # frames, any other label at most 50%.  Defined BEFORE `func`, which
    # closes over it (the original relied on late binding).
    can_drop = \
    train_df[['VideoID', 'Label']].groupby('VideoID').head(1).set_index('VideoID').join(
        train_df[['VideoID', 'in_use']].groupby('VideoID').sum() / \
        train_df[['VideoID', 'in_use']].groupby('VideoID').count()
    ).apply(
        lambda x: bool(((x.Label == 'normal') and (x.in_use >= 0.9)) or ((x.Label != 'normal') and (x.in_use >= 0.5))),
        axis=1,
    )

    def cp(origin_path, label, label_pred):
        # Copy a suspected-mislabeled image into data/error/Gt_<gt>_Pred_<pred>/.
        new_path = './data/error/Gt_%s_Pred_%s/' % (label, label_pred)
        base_name = os.path.basename(origin_path)
        os.makedirs(new_path, exist_ok=True)
        shutil.copy(origin_path, new_path + base_name)

    def func(row):
        # Mark `row` as not-in-use when the model confidently contradicts its label.
        # BUGFIX: the original tested `... is False`, which never matches the
        # numpy.bool_ values stored in a pandas bool column — the per-video
        # drop cap and the already-dropped guard were silently bypassed, and
        # the error-image copy below was never triggered.  Use truthiness.
        if (not row.in_use) or (not can_drop.loc[row.VideoID]):
            return row
        if row.Label == 'normal':
            if row.pred_1 >= 0.99 or row.pred_2 >= 0.99:
                row.in_use = False
        if row.Label == 'yawn':
            if row.pred_0 >= 0.7 or row.pred_2 >= 0.99:
                row.in_use = False
        if row.Label == 'fatigue':
            if row.pred_0 >= 0.6 or row.pred_1 >= 0.99:
                row.in_use = False
        if not row.in_use:
            label_pred = const.label_id2name[np.argmax([row.pred_0, row.pred_1, row.pred_2])]
            cp(row.CroppedPath, row.Label, label_pred)
        return row

    df = df.apply(func, axis=1)
    train_df['in_use'] = df['in_use']
    print(len(train_df[train_df['in_use'] == False]))


# Persist the final keep/drop decisions alongside the original annotations.
train_df.to_csv('./data/train_with_in_use.csv', index=False)

data_df = train_df[train_df['in_use'] == True]

# Rebuild data/filtered/ from scratch so stale copies never survive a rerun.
if os.path.exists('data/filtered/'):
    shutil.rmtree('data/filtered/')

# Copy every kept image into data/filtered/<Label>/.
for i, row in data_df.iterrows():
    new_dir = 'data/filtered/%s/' % row.Label
    # exist_ok=True replaces the per-iteration `exists(...) is False` check.
    os.makedirs(new_dir, exist_ok=True)
    shutil.copy(row.CroppedPath, new_dir + os.path.basename(row.CroppedPath))
