import torch
import numpy as np

from tqdm import tqdm
import torch
from torch.utils.data.dataset import Dataset, random_split
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, datasets
import cv2
from ResNet import resnet18,resnet101,resnet50



from PIL import Image, ImageDraw
from data import transforms_detect_train,transforms_detect_val
import imgaug.augmenters as iaa

def data_get(annotation_path):
    """Read the annotation file and return its raw lines (newlines kept)."""
    with open(annotation_path) as handle:
        return list(handle)


class data_Dataset(Dataset):
    """Scalar-regression dataset: each annotation line is '<filename>\\t<score>'.

    Images are loaded with OpenCV (BGR order) and resized to 224x224; the raw
    uint8 array is returned — normalization/augmentation happens later in the
    collate functions, not here.
    """

    def __init__(self, train_root, img_root, transform=''):
        # train_root: path to the annotation txt file.
        # img_root: directory prefix prepended to each image filename.
        self.data = data_get(train_root)
        self.img_root = img_root
        self.transform = transform  # kept for API compatibility; unused here

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        image_line = self.data[index].strip().split('\t')
        img_name = image_line[0]
        img_path = self.img_root + img_name
        label = float(image_line[1])

        img = cv2.imread(img_path)
        if img is None:
            # cv2.imread returns None on a missing/unreadable file; fail loudly
            # here instead of with the cryptic cv2.resize error that followed.
            raise FileNotFoundError('could not read image: {}'.format(img_path))
        img = cv2.resize(img, (224, 224))

        label = torch.tensor(label)
        return img, label, img_name

# Module-level transform pipelines built once at import time (train version
# presumably includes augmentation, val does not — defined in data.py; TODO
# confirm). The collate functions below close over these.
data_transform_train = transforms_detect_train(224)
data_transform_val = transforms_detect_val(224)

def VOC_dataset_collate1(batch):
    """Training collate: batch samples and apply the train transform pipeline.

    Each sample is an (image, target, name) triple from data_Dataset; images
    are stacked into one numpy array before the transform is applied.
    """
    imgs, labels, names = zip(*batch)
    batched = data_transform_train(np.array(imgs))
    return batched, torch.stack(labels), list(names)


def VOC_dataset_collate2(batch):
    """Validation collate: batch samples and apply the val transform pipeline.

    Mirrors VOC_dataset_collate1 but uses the (non-augmenting) val transform.
    """
    imgs, labels, names = zip(*batch)
    batched = data_transform_val(np.array(imgs))
    return batched, torch.stack(labels), list(names)



# def dog_loss(y, t):
#     loss = torch.abs(y - t).sum() / len(t)
#     return loss
#
# def huber_loss(y,t):
#     a=dog_loss(y,t)
#     t=1
#     if a<t:
#         loss=(a**2)/2
#     else:
#         loss=t*(a-t/2)
#     return loss

# Paths to the annotation files (tab-separated '<filename>\t<label>' lines)
# and the image directories those filenames are relative to.
annotations_train = r'./annotations/train.txt'
annotations_val = r'./annotations/val.txt'
train_root = r'./trainset/'
val_root = r'./valset/'
lr = 1e-4  # initial SGD learning rate (decayed by StepLR in train())

def train():
    """Fine-tune a ResNet-50 for scalar regression.

    The backbone is frozen and only a fresh 1-output fc head is trained with
    SmoothL1 loss. Each epoch saves the latest checkpoint, evaluates mean
    absolute error (MAE) on the validation set, writes per-image predictions
    to pred_result.txt, and keeps the best-MAE checkpoint.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print('using {} device.'.format(device))

    train_dataset = data_Dataset(annotations_train, train_root)
    val_dataset = data_Dataset(annotations_val, val_root)

    train_loader = DataLoader(train_dataset, shuffle=True, batch_size=4, pin_memory=False,
                              drop_last=False, num_workers=8, collate_fn=VOC_dataset_collate1)
    val_loader = DataLoader(val_dataset, shuffle=False, batch_size=32, pin_memory=True,
                            drop_last=False, collate_fn=VOC_dataset_collate2)

    model = resnet50()

    # SmoothL1 (Huber-style) loss: more robust to label outliers than MSE.
    loss_function = nn.SmoothL1Loss()

    # Load previous weights if present. A missing or incompatible checkpoint
    # is reported rather than silently swallowed (was a bare `except: pass`).
    try:
        model.load_state_dict(torch.load('./resnet50.pth'))
        print('load model')
    except (FileNotFoundError, RuntimeError) as e:
        print('no pretrained weights loaded: {}'.format(e))

    # Freeze the backbone; only the fc head created below (whose parameters
    # default to requires_grad=True) receives gradient updates.
    for param in model.parameters():
        param.requires_grad = False

    model.fc = torch.nn.Linear(2048, 1)
    model.to(device)

    optimizer = torch.optim.SGD(model.parameters(), lr, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)

    best_mae = 100  # lowest validation mean-MAE seen so far
    for epoch in range(1000):
        # ---- training pass ----
        pbar = tqdm(total=len(train_loader), unit='batches')
        pbar.set_description('Epoch {}/{}'.format(epoch, 1000))
        model.train()
        total_loss = 0
        num = 0
        for img, target, img_name in train_loader:
            optimizer.zero_grad()
            y = model(img.to(device))
            loss = loss_function(torch.squeeze(y), target.to(device))
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            num += 1
            pbar.set_postfix_str('Loss {:.4f}'.format(total_loss / num))
            pbar.update()
        pbar.close()
        torch.save(model.state_dict(), 'model_vesion1_latest.pth')

        # ---- validation pass ----
        pbar = tqdm(total=len(val_loader), unit='batches')
        pbar.set_description('Epoch val ')
        model.eval()
        num = 0
        MAE = 0
        with open('pred_result.txt', 'w') as f:
            with torch.no_grad():
                for img, target, img_name in val_loader:
                    y = model(img.to(device))
                    # abs() because the regression target is non-negative —
                    # presumably; TODO confirm against the label semantics.
                    y = torch.abs(torch.squeeze(y))
                    for pred_y, name in zip(y, img_name):
                        f.write(f'{name}\t{pred_y:.2f}\n')
                    MAE += torch.abs(y - target.to(device)).sum()
                    num += len(target)
                    pbar.update()

        # BUG FIX: compare the *mean* MAE (the value that is printed) against
        # the best so far. The original compared the summed MAE to 100, so the
        # "best" checkpoint was effectively never saved until the total error
        # over the whole val set dropped below 100.
        mean_mae = (MAE / num).item()
        print('MAE:' + str(mean_mae))
        if mean_mae < best_mae:
            torch.save(model.state_dict(), 'model_vesion1_best.pth')
            best_mae = mean_mae

        scheduler.step()
        print(optimizer.param_groups[0]['lr'])
        # Persist current LR so the run can be monitored/resumed externally.
        with open('lr.txt', 'w') as f:
            f.write(str(optimizer.param_groups[0]['lr']))
        pbar.close()

# Script entry point: run the full training loop.
if __name__ == '__main__':
    train()
