import collections
import math
import os
import shutil
import pandas as pd
import torch
import torchvision
from torch import nn
from d2l import torch as d2l
#
# data_dir = '/root/autodl-tmp/cifar-10'
#
# def read_csv_labels(fname):
#     with open(fname,'r') as f:
#         lines = f.readlines()[1:]
#     tokens = [l.rstrip().split(',') for l in lines]
#     return dict(((name,label) for name,label in tokens))
#
# labels = read_csv_labels(os.path.join(data_dir,'trainLabels.csv'))
#
# #@save
# def copyfile(filename, target_dir):
#     """将文件复制到目标目录"""
#     os.makedirs(target_dir, exist_ok=True)
#     shutil.copy(filename, target_dir)
#
# #@save
# def reorg_train_valid(data_dir, labels, valid_ratio):
#     """将验证集从原始的训练集中拆分出来"""
#     # 训练数据集中样本最少的类别中的样本数
#     n = collections.Counter(labels.values()).most_common()[-1][1]
#     # 验证集中每个类别的样本数
#     n_valid_per_label = max(1, math.floor(n * valid_ratio))
#     label_count = {}
#     for train_file in os.listdir(os.path.join(data_dir, 'train')):
#         label = labels[train_file.split('.')[0]]
#         fname = os.path.join(data_dir, 'train', train_file)
#         copyfile(fname, os.path.join(data_dir, 'train_valid_test',
#                                      'train_valid', label))
#         if label not in label_count or label_count[label] < n_valid_per_label:
#             copyfile(fname, os.path.join(data_dir, 'train_valid_test',
#                                          'valid', label))
#             label_count[label] = label_count.get(label, 0) + 1
#         else:
#             copyfile(fname, os.path.join(data_dir, 'train_valid_test',
#                                          'train', label))
#     return n_valid_per_label
#
# #@save
# def reorg_test(data_dir):
#     """在预测期间整理测试集，以方便读取"""
#     for test_file in os.listdir(os.path.join(data_dir, 'test')):
#         copyfile(os.path.join(data_dir, 'test', test_file),
#                  os.path.join(data_dir, 'train_valid_test', 'test',
#                               'unknown'))
#
# def reorg_cifar10_data(data_dir, valid_ratio):
#     labels = read_csv_labels(os.path.join(data_dir, 'trainLabels.csv'))
#     reorg_train_valid(data_dir, labels, valid_ratio)
#     reorg_test(data_dir)
#
# batch_size = 128
# valid_ratio = 0.1
# reorg_cifar10_data(data_dir, valid_ratio)
#
# transform_train = torchvision.transforms.Compose([
#     # 在高度和宽度上将图像放大到40像素的正方形
#     torchvision.transforms.Resize(45),
#     # 随机裁剪出一个高度和宽度均为40像素的正方形图像，
#     # 生成一个面积为原始图像面积0.64～1倍的小正方形，
#     # 然后将其缩放为高度和宽度均为32像素的正方形
#     torchvision.transforms.RandomResizedCrop(32, scale=(0.64, 1.0),
#                                                    ratio=(1.0, 1.0)),
#     torchvision.transforms.RandomHorizontalFlip(),
#     torchvision.transforms.ToTensor(),
#     # 标准化图像的每个通道
#     torchvision.transforms.Normalize([0.4914, 0.4822, 0.4465],
#                                      [0.2023, 0.1994, 0.2010])])
#
# transform_test = torchvision.transforms.Compose([
#     torchvision.transforms.ToTensor(),
#     torchvision.transforms.Normalize([0.4914, 0.4822, 0.4465],
#                                      [0.2023, 0.1994, 0.2010])])
#
# train_ds, train_valid_ds = [torchvision.datasets.ImageFolder(
#     os.path.join(data_dir, 'train_valid_test', folder),
#     transform=transform_train) for folder in ['train', 'train_valid']]
#
# valid_ds, test_ds = [torchvision.datasets.ImageFolder(
#     os.path.join(data_dir, 'train_valid_test', folder),
#     transform=transform_test) for folder in ['valid', 'test']]
#
# train_iter, train_valid_iter = [torch.utils.data.DataLoader(
#     dataset, batch_size, shuffle=True, drop_last=True)
#     for dataset in (train_ds, train_valid_ds)]
#
# valid_iter = torch.utils.data.DataLoader(valid_ds, batch_size, shuffle=False,
#                                          drop_last=True)
#
# test_iter = torch.utils.data.DataLoader(test_ds, batch_size, shuffle=False,
#                                         drop_last=False)
#
# def get_net():
#     net = torchvision.models.resnet18()
#     num_in = net.fc.in_features
#     net.fc = nn.Linear(num_in, 176)
#     return net
#
#
# loss = nn.CrossEntropyLoss(reduction="none")
#
# def train(net, train_iter, valid_iter, num_epochs, lr, wd, devices, lr_period,
#           lr_decay):
#     trainer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9,
#                               weight_decay=wd)
#     scheduler = torch.optim.lr_scheduler.StepLR(trainer, lr_period, lr_decay)
#     num_batches, timer = len(train_iter), d2l.Timer()
#     legend = ['train loss', 'train acc']
#     if valid_iter is not None:
#         legend.append('valid acc')
#     animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
#                             legend=legend)
#     net = nn.DataParallel(net, device_ids=devices).to(devices[0])
#     for epoch in range(num_epochs):
#         net.train()
#         metric = d2l.Accumulator(3)
#         for i, (features, labels) in enumerate(train_iter):
#             timer.start()
#             l, acc = d2l.train_batch_ch13(net, features, labels,
#                                           loss, trainer, devices)
#             metric.add(l, acc, labels.shape[0])
#             timer.stop()
#             if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
#                 animator.add(epoch + (i + 1) / num_batches,
#                              (metric[0] / metric[2], metric[1] / metric[2],
#                               None))
#         if valid_iter is not None:
#             valid_acc = d2l.evaluate_accuracy_gpu(net, valid_iter)
#             animator.add(epoch + 1, (None, None, valid_acc))
#         scheduler.step()
#     measures = (f'train loss {metric[0] / metric[2]:.3f}, '
#                 f'train acc {metric[1] / metric[2]:.3f}')
#     if valid_iter is not None:
#         measures += f', valid acc {valid_acc:.3f}'
#     print(measures + f'\n{metric[2] * num_epochs / timer.sum():.1f}'
#           f' examples/sec on {str(devices)}')
#
# devices, num_epochs, lr, wd = d2l.try_all_gpus(), 25, 2e-4, 5e-4
# lr_period, lr_decay, net = 4, 0.8, get_net()
# train(net, train_iter, valid_iter, num_epochs, lr, wd, devices, lr_period,
#       lr_decay)
# d2l.plt.show()
#
# net, preds = get_net(), []
# train(net, train_valid_iter, None, num_epochs, lr, wd, devices, lr_period,
#       lr_decay)
# d2l.plt.show()
#
# for X, _ in test_iter:
#     y_hat = net(X.to(devices[0]))
#     preds.extend(y_hat.argmax(dim=1).type(torch.int32).cpu().numpy())
# sorted_ids = list(range(1, len(test_ds) + 1))
# sorted_ids.sort(key=lambda x: str(x))
# df = pd.DataFrame({'id': sorted_ids, 'label': preds})
# df['label'] = df['label'].apply(lambda x: train_valid_ds.classes[x])
# df.to_csv('/root/autodl-tmp/cifar-10/submission.csv', index=False)

# Object detection

# d2l.set_figsize()
# img = d2l.plt.imread('../data/catdog.jpg')
# d2l.plt.imshow(img)
# d2l.plt.show()

def box_corner_to_center(boxes):
    """Convert boxes from (x1, y1, x2, y2) corner format to
    (center_x, center_y, width, height) format.

    Args:
        boxes: tensor of shape (n, 4), each row (x1, y1, x2, y2).

    Returns:
        tensor of shape (n, 4), each row (cx, cy, w, h).
    """
    # Bug fix: original used row slices `boxes[:0]`, `boxes[:1]`, ...
    # (which return (k, 4) sub-tensors, not columns); select columns
    # with `boxes[:, i]` instead.
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    cx = (x1 + x2) / 2
    cy = (y1 + y2) / 2
    w = x2 - x1
    h = y2 - y1
    # Re-assemble the four per-box scalars into an (n, 4) tensor.
    return torch.stack((cx, cy, w, h), dim=-1)

def box_center_to_corner(boxes):
    """Convert boxes from (center_x, center_y, width, height) format to
    (x1, y1, x2, y2) corner format.

    Args:
        boxes: tensor of shape (n, 4), each row (cx, cy, w, h).

    Returns:
        tensor of shape (n, 4), each row (x1, y1, x2, y2).
    """
    # Bug fix: original used row slices `boxes[:0]`, `boxes[:1]`, ...
    # instead of column selection `boxes[:, i]`.
    cx, cy, w, h = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    x1 = cx - 0.5 * w
    y1 = cy - 0.5 * h
    x2 = cx + 0.5 * w
    y2 = cy + 0.5 * h
    return torch.stack((x1, y1, x2, y2), dim=-1)

# "bbox" is short for bounding box; coordinates are (x1, y1, x2, y2).
dog_bbox = [60.0, 45.0, 378.0, 516.0]
cat_bbox = [400.0, 112.0, 655.0, 493.0]

boxes = torch.tensor((dog_bbox, cat_bbox))
# Round-trip check: corner -> center -> corner should reproduce the input.
roundtrip = box_center_to_corner(box_corner_to_center(boxes))
print(roundtrip == boxes)


