# coding=utf-8
import tensorflow as tf
from libs.config import config
import os
import glob
import xml.etree.ElementTree as ET
import numpy as np
from tensorflow.python.lib.io.tf_record import TFRecordCompressionType
from PIL import Image
import cv2
from libtiff import TIFF
from openslide import ImageSlide, open_slide
from unicodedata import normalize
import re
from openslide import OpenSlide, OpenSlideError
from openslide.deepzoom import DeepZoomGenerator

# Global command-line flags object; flag definitions come from the
# tf.app.flags declarations performed in libs.config (imported above).
FLAGS = tf.app.flags.FLAGS

def _get_dataset_filename(dataset_dir, shard_id, num_shards):
    output_filename = 'camelyon17-%s-of-%03d.tfrecord' % (shard_id, num_shards)
    return os.path.join(dataset_dir, output_filename)

def _int64_feature(values):
    """Wrap an int (or a tuple/list of ints) as a tf.train.Feature."""
    wrapped = [values] if not isinstance(values, (tuple, list)) else values
    return tf.train.Feature(int64_list=tf.train.Int64List(value=wrapped))

def _float_feature(values):
    """Wrap a float (or a tuple/list of floats) as a tf.train.Feature."""
    wrapped = [values] if not isinstance(values, (tuple, list)) else values
    return tf.train.Feature(float_list=tf.train.FloatList(value=wrapped))


def _bytes_feature(values):
    """Wrap a single bytes (or str) value as a tf.train.Feature.

    Unlike the int/float helpers, this always wraps exactly one value.
    ``str`` input is UTF-8 encoded first: tf.train.BytesList accepts only
    bytes in Python 3, so passing a str (e.g. ``ann.get("Name")`` in
    _load_annotation) previously raised a TypeError.
    """
    if isinstance(values, str):
        values = values.encode('utf-8')
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))

def slugify(text):
    """Return *text* as an ASCII slug.

    Lower-cases the input, strips accents via NFKD decomposition, drops any
    remaining non-ASCII characters, and collapses every run of characters
    outside [a-z0-9] into a single '-'.
    """
    lowered = text.lower()
    ascii_only = normalize('NFKD', lowered).encode('ascii', 'ignore').decode()
    return re.sub('[^a-z0-9]+', '-', ascii_only)

def _load_annotation(anno_path, dataset_dir, tfrecord_writer):
    """Parse one lesion-annotation XML and write one Example to the writer.

    The Example stores the number of annotated lesions ("nums") and their
    axis-aligned bounding boxes as a serialized float32 array ("bboxes").
    The matching whole-slide image is opened only to print its geometry.

    Args:
        anno_path: path to a ``patient_XXX_node_Y.xml`` annotation file.
        dataset_dir: dataset root; the matching slide is expected at
            ``<dataset_dir>/centre/<stem>.tif``.
        tfrecord_writer: an open tf.python_io.TFRecordWriter.

    Returns:
        None when the matching .tif slide does not exist; otherwise an
        (currently always empty) list, kept so callers can distinguish
        "skipped" from "processed" via an ``is not None`` check.
    """
    stem = os.path.splitext(os.path.basename(anno_path))[0]

    data_path = os.path.join(dataset_dir, "centre", stem + ".tif")
    if not tf.gfile.Exists(data_path):
        return None

    print(data_path)
    # Open the slide only to report its geometry; pixel data is not stored.
    osr = OpenSlide(data_path)
    w, h = osr.dimensions
    print(w, h)
    print(osr.level_dimensions)
    print(osr.level_downsamples)

    tree = ET.parse(anno_path)
    # ASAP annotation layout: Annotations/Annotation/Coordinates/Coordinate
    anns = tree.find("Annotations").findall("Annotation")
    num_objs = len(anns)

    bboxes = []
    for ann in anns:
        cds = ann.find("Coordinates").findall("Coordinate")
        # ``np.float`` was removed in NumPy 1.20+; the builtin ``float``
        # is the documented drop-in replacement (same float64 dtype).
        xs = np.asarray([cd.get("X") for cd in cds], dtype=float)
        ys = np.asarray([cd.get("Y") for cd in cds], dtype=float)
        # Axis-aligned bounding box of the lesion polygon.
        bboxes.append([np.min(xs), np.min(ys), np.max(xs), np.max(ys)])

    gt_boxes = np.asarray(bboxes, dtype=np.float32)
    print(gt_boxes)

    example = tf.train.Example(features=tf.train.Features(feature={
        "nums": _int64_feature(num_objs),
        # tobytes() replaces the deprecated ndarray.tostring().
        "bboxes": _bytes_feature(gt_boxes.tobytes())
    }))
    tfrecord_writer.write(example.SerializeToString())

    return []

def run():
    """Convert CAMELYON17 lesion annotations into per-patient TFRecord shards.

    For each of the 99 patients, globs all ``patient_XXX_node_*.xml`` files
    under ``<dataset_dir>/lesion_annotations`` and writes one ZLIB-compressed
    TFRecord shard per patient under ``<dataset_dir>/records``.
    """
    dataset_dir = FLAGS.dataset_dir
    record_dir = os.path.join(dataset_dir, 'records')
    if not tf.gfile.Exists(record_dir):
        tf.gfile.MakeDirs(record_dir)

    for i in range(99):
        # Patient ids are 1-based and zero-padded to 3 digits ("001".."099").
        patient_id = str(i + 1).zfill(3)
        pattern = os.path.join(dataset_dir, 'lesion_annotations',
                               "patient_%s_node_*.xml" % patient_id)
        filename_list = glob.glob(pattern)

        record_filename = _get_dataset_filename(record_dir, patient_id, 99)
        options = tf.python_io.TFRecordOptions(TFRecordCompressionType.ZLIB)
        # Context manager guarantees the writer is flushed and closed even if
        # an annotation fails to parse (the original leaked the writer).
        with tf.python_io.TFRecordWriter(record_filename, options=options) as tfrecord_writer:
            for filename in filename_list:
                _load_annotation(filename, dataset_dir, tfrecord_writer)

def main(_):
    """Entry point for tf.app.run(): validate flags and dispatch to run().

    Raises:
        ValueError: if --dataset_dir or --dataset_name is missing, or the
            dataset name is not 'CAMELYON17'.
    """
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')

    if not FLAGS.dataset_name:
        raise ValueError('You must supply the dataset name with --dataset_name')
    elif FLAGS.dataset_name == 'CAMELYON17':
        run()
    else:
        # Report the unrecognized *name*; the original mistakenly
        # interpolated FLAGS.dataset_dir into this message.
        raise ValueError('dataset_name [%s] was not recognized.' % FLAGS.dataset_name)


if __name__ == '__main__':
    # tf.app.run() parses command-line flags, then invokes main().
    tf.app.run()
