import os,cv2
import numpy as np
import xml.etree.ElementTree as ET
from dataset.dataset import dataset, collate_fn,dataset_test,change
import torch
from torch.nn import CrossEntropyLoss
import torch.utils.data as torchdata
from torch.autograd import Variable
from math import ceil
from dataset.data_aug import *
from resnet import *
import torch.utils.data as data

####### get the bbox #######
def Img2BBox(ImgsDir, BBoxAnnoDir):
    """Collect table bounding boxes from Pascal-VOC style XML annotations.

    Walks BBoxAnnoDir recursively, parses every ``*.xml`` file and gathers
    the boxes of objects whose ``name`` is "0" or "2" (the table classes).
    Box coordinates are clamped to the image extent recorded in the XML.

    Args:
        ImgsDir: kept for interface compatibility; not used here.
        BBoxAnnoDir: root directory of the XML annotation tree.

    Returns:
        dict mapping ``"<filename>.jpg"`` -> list of ``[xmin, ymin, xmax, ymax]``.
    """
    # Collect annotation files per-file by extension. The original code
    # tested only the FIRST filename of each directory ('xml' in x[2][0]),
    # which skipped whole directories and admitted non-XML files.
    bbox_xmls = []
    for dirpath, _dirnames, filenames in os.walk(BBoxAnnoDir):
        for fname in filenames:
            if fname.lower().endswith('.xml'):
                bbox_xmls.append(os.path.join(dirpath, fname))

    img2bbox = {}
    for xml_path in bbox_xmls:
        root = ET.parse(xml_path).getroot()
        # Annotation stores the basename without extension; images are .jpg.
        img_name = root.find('filename').text + '.jpg'
        size = root.find('size')
        w = int(size.find('width').text)
        h = int(size.find('height').text)
        for obj in root.findall('object'):
            if obj.find('name').text not in ('0', '2'):
                continue
            bndbox = obj.find('bndbox')
            # Clamp each corner into [0, w] x [0, h].
            xmin = max(0, int(bndbox.find('xmin').text))
            ymin = max(0, int(bndbox.find('ymin').text))
            xmax = min(w, int(bndbox.find('xmax').text))
            ymax = min(h, int(bndbox.find('ymax').text))
            img2bbox.setdefault(img_name, []).append([xmin, ymin, xmax, ymax])
    return img2bbox

class dataset_pred(data.Dataset):
    """Dataset over already-cropped, in-memory BGR images for prediction.

    Each item is converted BGR -> RGB, optionally passed through the
    supplied transform pipeline, and returned as a float tensor together
    with its (dummy) label.
    """

    def __init__(self, image, label, transforms=None):
        self.paths = image          # list of ndarray crops (BGR, HxWxC)
        self.labels = label         # one label per crop
        self.transforms = transforms

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, item):
        sample = cv2.cvtColor(self.paths[item], cv2.COLOR_BGR2RGB)
        if self.transforms is not None:
            sample = self.transforms(sample)
        # NOTE(review): output stays HxWxC; presumably the transforms or
        # collate_fn handle the CHW layout the model expects — confirm.
        return torch.from_numpy(sample).float(), self.labels[item]



#########   set the GPU   ###########
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

#########   the input path   #########
AllImgsDir = '/media/hszc/model/zxy/table/data/Image'
BBoxAnnoDir = "/media/hszc/model/zxy/table/data/Annotation/BboxAnnotation"

test_img_path ='/media/hszc/model/zxy/table/data/predictor/13/IMG_20190710_162116.jpg'

#########  crop image from bbox  ###########
# Look up the annotated bounding boxes for the test image and cut out one
# sub-image per box; each crop is classified independently below.
img_name = test_img_path.split('/')[-1]
count = 0
img_list = []
img2bbox = Img2BBox(AllImgsDir, BBoxAnnoDir)
try:
    bboxes = img2bbox[img_name]
except KeyError:
    print('未得到检测框数据！')
else:
    # Decode the source image once; the original re-read the same file
    # from disk for every bounding box.
    imgfile = cv2.imread(test_img_path)
    for bbox in bboxes:
        xmin, ymin, xmax, ymax = bbox
        crop_img = imgfile[ymin:ymax, xmin:xmax, :].copy()
        img_list.append(crop_img)
        count += 1
print('该图中共有'+str(count)+'个表:')

#########   the dataset    ###########
# Class index -> human-readable description (number of scale keypoints).
label_def={0:"有5个刻度关键点",
           1:"有6个刻度关键点",
           2:"有7个刻度关键点"}

# Inference-time preprocessing: pad/resize to 272x272 and normalize with
# ImageNet statistics. The original also applied RandomHflip() here, but
# random augmentation at prediction time makes the output of repeated runs
# non-deterministic, so it is dropped for inference.
test_transforms = Compose([
        ExpandBorder(size=(272,272), resize=True),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

# Dummy labels: the true class is unknown at prediction time, but the
# dataset/collate interface expects one label per image.
labels = [1] * len(img_list)

data_set = {}
data_set['test'] = dataset_pred(image=img_list, label=labels, transforms=test_transforms)

data_loader = {}
data_loader['test'] = torchdata.DataLoader(data_set['test'], batch_size=1, num_workers=4,
                                           shuffle=False, pin_memory=True, collate_fn=collate_fn)

#########  choose the model ###########
# ResNet-50 backbone (local resnet module), fine-tuned for 3-way
# classification; weights are restored from a training checkpoint.
model_name = 'resnet50-out'
model =resnet50(pretrained=True)
resume = '/media/hszc/model/zxy/table/predictor/model/resnet50/weights-10-210-[1.0000].pth'
# print('resuming finetune from %s'%resume)

# Replace the head: adaptive pooling tolerates variable input sizes, and
# the new fc layer emits 3 logits (one per keypoint-count class).
model.avgpool = torch.nn.AdaptiveAvgPool2d(output_size=1)
model.fc = torch.nn.Linear(model.fc.in_features,3)

# strict=False tolerates key mismatches between the checkpoint and the
# modified head. NOTE(review): it also silences genuine mismatches —
# consider logging the returned missing/unexpected keys.
model.load_state_dict(torch.load(resume),strict=False)
model = model.cuda()
model.eval()

##########   forward     ###########
# Run each cropped table image through the classifier and collect the
# arg-max class per crop.
test_preds = []

# Inference only: torch.no_grad() avoids building autograd graphs (the
# original tracked gradients needlessly). The deprecated Variable wrapper
# is a no-op since PyTorch 0.4 and has been removed, along with the unused
# labels->cuda conversion and the unused test_size computation.
with torch.no_grad():
    for batch_cnt_test, data_test in enumerate(data_loader['test']):
        inputs, labels = data_test
        inputs = inputs.cuda()

        outputs = model(inputs)

        # Some model variants return [main_logits, aux_logits]; average them.
        if isinstance(outputs, list):
            outputs = (outputs[0] + outputs[1]) / 2
        props, preds = torch.max(outputs, 1)

        # batch_size=1 -> exactly one prediction per batch.
        test_preds.append(preds.data.cpu().numpy()[0])

# Report one line per detected table.
if len(test_preds) == 1:
    print('该表'+label_def[test_preds[0]]+'!')
else:
    for i in range(len(test_preds)):
        # The original literal contained a stray zero-width space after
        # '第'; removed so the printed text is clean.
        print('第'+str(i+1)+'个表'+label_def[test_preds[i]]+'!')

