import pandas as pd
import numpy as np
import json
import torch
import torch.nn.functional as F
from tqdm import tqdm
import cv2
import os
import time
from random import shuffle
from collections import Counter
import torch.nn as nn
from torch.utils.data import DataLoader,Dataset
from torch.optim.lr_scheduler import StepLR,CosineAnnealingLR
import json
import albumentations as A
import torchvision.transforms as T
from sklearn.metrics import accuracy_score,f1_score
from pretrainedmodels import models

# Albumentations augmentation pipeline; applied in Mydataset.__getitem__ only
# when aug=True (i.e. training split). Operates on HWC uint8 images.
aug_transform = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.VerticalFlip(p=0.5),
    A.RandomRotate90(p=0.7),
    A.Downscale(scale_min=0.6,scale_max=0.8,p=0.1),  # simulate low-resolution inputs
    A.CoarseDropout(max_holes=3,max_width=3,max_height=3,min_holes=1,p=0.1),  # tiny cutout holes
    A.Transpose(),
    A.RandomResizedCrop(height=300,width=400,scale=(0.6,1),p=0.1),
    A.RandomBrightnessContrast(p=0.1),
])

# --- Training configuration ---
Model_Save_Patch = '../model_save/baseline'  # checkpoint directory ("Patch" is a typo for "Path"; kept for compatibility)
Record_File_Dir = 'record/baseline'          # metrics log file, appended to every epoch
Num_Epoch = 30
Batch_Size = 8
Image_Size = (400,300)   # (width, height) as expected by cv2.resize
Diff_Lr = False          # if True, backbone layers train at 0.1x the fc learning rate
accumlation_step = 4     # gradient-accumulation window (sic: "accumulation")
lr = 1e-4
loss_fun = nn.CrossEntropyLoss()

# 5-class SE-ResNet50 classifier.
# NOTE(review): pretrainedmodels usually requires pretrained='imagenet' with
# num_classes=1000; confirm this call succeeds with the installed version.
model = models.se_resnet50(num_classes=5,pretrained=True)

def make_label(csv_dir):
    """Build a shuffled 80/20 train/val split from a labels CSV.

    The CSV is expected to hold the image file name in column 0 and an
    integer class label in column 1. Writes two JSON files,
    ``label_file/train.json`` and ``label_file/val.json``, each a list of
    ``[image_name, int_label]`` pairs.

    Parameters
    ----------
    csv_dir : str
        Path to the labels CSV file.
    """
    csv_file = pd.read_csv(csv_dir)
    # Vectorized extraction instead of a per-row .iloc loop (one pandas
    # lookup instead of 2*n).
    name_list = [[row[0], int(row[1])] for row in csv_file.iloc[:, :2].values]
    shuffle(name_list)
    split = int(0.8 * len(name_list))
    train_list = name_list[:split]
    val_list = name_list[split:]
    # Ensure the output directory exists so json.dump does not fail on a
    # fresh checkout.
    os.makedirs('label_file', exist_ok=True)
    with open('label_file/train.json', 'w') as f:
        json.dump(train_list, f)
    with open('label_file/val.json', 'w') as f:
        json.dump(val_list, f)


class Mydataset(Dataset):
    """Dataset yielding (image_tensor, label) pairs.

    Reads a JSON list of ``[image_file_name, int_label]`` entries (as
    produced by ``make_label``) and loads images from ``data_path``.
    """

    def __init__(self, json_dir, data_path, aug=False, image_size=None):
        # List of [image_file_name, int_label] pairs.
        self.name_list = json.load(open(json_dir))
        self.data_path = data_path      # directory containing the image files
        self.image_size = image_size    # (width, height) for cv2.resize
        self.as_tensor = T.Compose([
            T.ToTensor(),               # HWC uint8 -> CHW float32 in [0, 1]
        ])
        self.aug = aug                  # apply the albumentations pipeline when True

    def __getitem__(self, index):
        name, label = self.name_list[index]
        # BUG FIX: the original called cv2.imread(self.data_path, name),
        # passing the file name as imread's integer `flags` argument and
        # never joining directory and file name. Build the real path.
        image_path = os.path.join(self.data_path, name)
        image = cv2.imread(image_path)
        if image is None:
            # imread returns None on failure; fail loudly instead of
            # crashing later inside cv2.resize with an opaque error.
            raise FileNotFoundError(image_path)
        image = cv2.resize(image, self.image_size)
        if self.aug:
            image = aug_transform(image=image)['image']
        return self.as_tensor(image), torch.tensor(label)

    def __len__(self):
        return len(self.name_list)


def val_fun(model, data_loader):
    """Evaluate ``model`` on ``data_loader``.

    Returns
    -------
    (acc, f1) : accuracy (float) and per-class F1 scores
        (``f1_score(..., average=None)``, a numpy array).
    """
    # BUG FIX: the original hard-coded .cuda(), crashing on CPU-only hosts.
    # Run on whatever device the model's parameters live on instead.
    device = next(model.parameters()).device

    pred_list = []
    label_list = []

    model.eval()
    with torch.no_grad():
        for batch_image, batch_label in tqdm(data_loader):
            batch_image = batch_image.float().to(device)
            logits = model(batch_image)
            # argmax over log-probabilities equals argmax over raw logits;
            # log_softmax kept to mirror the original computation exactly.
            pred = torch.argmax(F.log_softmax(logits, dim=1), dim=1)
            pred_list += pred.cpu().numpy().tolist()
            label_list += batch_label.numpy().tolist()
    acc = accuracy_score(label_list, pred_list)
    f1 = f1_score(label_list, pred_list, average=None)
    print('acc ------',acc,'--f1---------',f1)
    return acc, f1






def train(model):
    """Train ``model`` for ``Num_Epoch`` epochs with gradient accumulation.

    Side effects: creates ``Model_Save_Patch`` if missing, appends per-epoch
    metrics to ``Record_File_Dir``, saves ``new_.pth`` every epoch and
    ``best_.pth`` whenever validation accuracy improves.
    """
    best_result = 0
    if not os.path.exists(Model_Save_Patch):
        os.makedirs(Model_Save_Patch)
    train_dataSet = Mydataset(json_dir='label_file/train.json', data_path='../train_images', aug=True, image_size=Image_Size)
    train_data_loader = DataLoader(dataset=train_dataSet, batch_size=Batch_Size, shuffle=True,
                                   num_workers=6, pin_memory=False)
    val_dataSet = Mydataset(json_dir='label_file/val.json', data_path='../train_images', aug=False, image_size=Image_Size)
    val_data_loader = DataLoader(dataset=val_dataSet, batch_size=4, shuffle=True,
                                 num_workers=6, pin_memory=True)

    # Extra loader over the *training* split without augmentation, used to
    # report train accuracy under the same conditions as validation.
    val_dataSet_2 = Mydataset(json_dir='label_file/train.json', data_path='../train_images', aug=False, image_size=Image_Size)
    val_data_loader_2 = DataLoader(dataset=val_dataSet_2, batch_size=4, shuffle=True,
                                   num_workers=6, pin_memory=True)

    # BUG FIX: the model was never moved to the GPU even though the batches
    # are sent there below, which raises a device-mismatch error on CUDA
    # machines. nn.Module.cuda() moves the parameters in place.
    if torch.cuda.is_available():
        model.cuda()

    if Diff_Lr:
        # Backbone layers train at 0.1x the fc learning rate.
        fc_params = list(map(id, model.fc.parameters()))
        base_params = filter(lambda p: id(p) not in fc_params,
                             model.parameters())
        optimizer = torch.optim.Adam([{'params': base_params, 'lr': lr * 0.1}, {'params': model.fc.parameters()}], lr=lr, weight_decay=2e-5)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=2e-5)
    scheduler = CosineAnnealingLR(optimizer, T_max=Num_Epoch)
    total_step = len(train_data_loader)

    for epoch in range(Num_Epoch):
        # val_fun switches the model to eval(); switch back every epoch.
        model.train()
        start_time = time.time()

        print('epoch{}'.format(epoch+1))
        train_loss = 0
        optimizer.zero_grad()
        for step, (batch_image, batch_label) in enumerate(train_data_loader):
            if torch.cuda.is_available():
                batch_image = batch_image.float().cuda()
                batch_label = batch_label.long().cuda()
            pred = model(batch_image)
            loss = loss_fun(pred, batch_label)
            # BUG FIX (gradient accumulation): average the loss over the
            # window so the effective gradient matches a real batch of
            # Batch_Size * accumlation_step, and step at the *end* of each
            # window ((step + 1) % ...). The original stepped when
            # step % accumlation_step == 0, i.e. after a single micro-batch
            # at step 0, and silently dropped the final partial window.
            (loss / accumlation_step).backward()
            if (step + 1) % accumlation_step == 0:
                optimizer.step()
                optimizer.zero_grad()
            if step % 20 == 0 and epoch < 3:
                print('Step [{}/{}],Loss:{:.7f}'.format(step + 1, total_step, loss))
            train_loss += loss.item()
        # Flush gradients from a trailing partial accumulation window so
        # they are applied rather than leaking into the next epoch.
        if total_step % accumlation_step != 0:
            optimizer.step()
            optimizer.zero_grad()

        scheduler.step()
        train_loss = train_loss / total_step
        print('Epoch [{}/{}],Loss:{:.7f}'.format(epoch+1, Num_Epoch, train_loss))

        torch.cuda.empty_cache()
        val_Acc, val_f1 = val_fun(model, val_data_loader)
        print('--train_loss--', train_loss, '--val_Acc--', val_Acc, '--val_f1--', val_f1)
        # Context manager so the log handle is always closed (the original
        # leaked it if anything between open() and close() raised).
        with open(Record_File_Dir, 'a') as record_file:
            record_file.write(
                'epoch--' + str(epoch + 1) + '--train_loss--' + str(train_loss) + '--val_ACC--' + str(val_Acc) + '--val_f1--' + str(val_f1))
            record_file.write('\n')
            if epoch % 2 == 0:
                train_Acc, train_f1 = val_fun(model, val_data_loader_2)
                print('-------------------------------train_Acc--', train_Acc, '+++++++++++++++ train f1 +++++', train_f1)
                record_file.write(
                    'epoch--' + str(epoch + 1) + '--train_ACC--' + str(train_Acc) + '--train_f1--' + str(train_f1))
                record_file.write('\n')

        torch.cuda.empty_cache()

        torch.save(model.state_dict(), (Model_Save_Patch + '/' + 'new_' + '.pth'))
        if val_Acc > best_result:
            print('**************get better model with acc of *******************', val_Acc)
            torch.save(model.state_dict(), (Model_Save_Patch + '/' + 'best_' + '.pth'))
            best_result = val_Acc
        print('---------------------------------------------')

        if epoch < 3:
            print('use time -------', time.time() - start_time)
        torch.cuda.empty_cache()



if __name__ == '__main__':
    # Run make_label(...) once beforehand to (re)generate the train/val JSON splits.
    # make_label(csv_dir='../train.csv')
    train(model)