import os
import numpy as np
import pandas as pd
import torch
from torch import nn
from utils.ensemble_tools import class_to_idx_map, idx_to_class_map
from sklearn.metrics import confusion_matrix, f1_score, classification_report
from utils.ensemble_tools import class_to_idx_map, idx_to_class_map
from utils.ensemble_tools import simple_npy_predict, class_to_idx_map, simple_train_npy
from sklearn.model_selection import train_test_split
from models.other_layers import grouplinear_clf
from torch.optim import lr_scheduler
import torch.optim as optim
from torch.nn import CrossEntropyLoss


# Directory holding each base model's stage-1 out-of-fold predictions.
stage1_dir = '/media/gserver/models/tianwen/stacking'

# One sub-folder per base model whose predictions get stacked.
model_folders = ['resnet20_crop', 'xception_crop', 'xception_ori']


# Load each base model's stage-1 (out-of-fold) predictions and stack them
# into one array of shape (n_samples, n_models, n_classes). The sample count
# is derived from the label file instead of a hard-coded constant.
stage1_train_true = np.load(os.path.join(stage1_dir, model_folders[0], 'stage1-train_true.npy'))
n_samples = stage1_train_true.shape[0]
n_classes = 4  # assumes a 4-way classification task -- TODO confirm against model outputs
concat_score = np.zeros((n_samples, len(model_folders), n_classes), dtype=np.float32)

for i, folder in enumerate(model_folders):
    score = np.load(os.path.join(stage1_dir, folder, 'stage1-train_pred.npy'))
    concat_score[:, i] = score

    # Every model folder must carry byte-identical ground-truth labels;
    # np.array_equal also handles a shape mismatch gracefully (returns False)
    # where the original broadcast comparison would raise.
    labels = np.load(os.path.join(stage1_dir, folder, 'stage1-train_true.npy'))
    assert np.array_equal(labels, stage1_train_true)

print(concat_score.shape)

# Baseline ensemble: unweighted mean over models, then argmax over classes.
avg_score = np.mean(concat_score, axis=1)
avg_pred = np.argmax(avg_score, axis=1)

print(f1_score(stage1_train_true, avg_pred, average='macro'))


# Hold out a stratified 10% validation split of the stacked scores.
x_train, x_val, y_train, y_val = train_test_split(
    concat_score, stage1_train_true,
    test_size=0.1, random_state=42,
    stratify=stage1_train_true)

# Per-class sample counts over the training split.
classes, class_sample_count = np.unique(y_train, return_counts=True)

print(classes)
print(class_sample_count)
# Inverse-frequency integer class weights (majority class gets weight 1).
# `//` makes the floor division explicit and consistent across Py2/Py3
# (the original int(a/b) floors only under Py2 integer semantics).
weights = [int(class_sample_count.max() // c) for c in class_sample_count]
# weights[3]=1
print(weights)

print(x_train.shape)
print(x_val.shape)


# --- model prepare ---
resume = None  # optionally a checkpoint path to warm-start from

# Stacking head: per-group linear classifier over the 3 stacked model scores.
model = torch.nn.DataParallel(grouplinear_clf(group=3, classes=4, bias=False))
# model = nn.Linear(in_features=8, out_features=4, bias=False)
# model.weight.data = torch.from_numpy(np.zeros((4,8))+0.5).float()

if resume:
    model.load_state_dict(torch.load(resume))
model = model.cuda()

criterion = CrossEntropyLoss()
# Plain SGD, no weight decay; the scheduler halves the LR every 20 steps.
optimizer = optim.SGD(model.parameters(), lr=0.05, momentum=0.9, weight_decay=0)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)


# Train the stacking classifier; returns the best checkpoint's weights and
# its validation score (named best_acc, but reported as f1 below).
best_model_wts, best_acc = simple_train_npy(model, x_train, y_train, x_val, y_val,
                                            start_epoch=0, epoch_num=20,
                                            optimizer=optimizer,
                                            criterion=criterion,
                                            exp_lr_scheduler=exp_lr_scheduler,
                                            bs_train=32*3, bs_val=32*3, val_inter=200)

# %-format print is valid under both Py2 and Py3 (the original Py2-only
# `print a, b` statement breaks under Py3).
print('best f1: %s' % best_acc)

# Persist the best weights, tagging the filename with the validation score.
save_dir = './stage1-weights'
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
save_path = os.path.join(save_dir, 'linear-weights-%.5f' % best_acc)
torch.save(best_model_wts, save_path)

# Leave the in-memory model holding the best checkpoint's weights.
model.load_state_dict(best_model_wts)

