import torch
import torchvision
from torchvision import transforms
import numpy as np
import random
from torch import nn
from tqdm import trange, tqdm
from model.get_model import get_single_model,get_double_model,get_3d_model
import os
import copy
import argparse
import openslide
import time
import xml.etree.ElementTree as ET
from preprocess.testdataset_ndpi import TestDataset
import cv2




if __name__ == '__main__':
    # Classify every window_size x window_size tile of each .ndpi slide in a
    # directory, then write:
    #   * one downscaled JPEG preview per class (filled colour rectangles), and
    #   * an Aperio-style XML annotation file (rectangular regions for classes > 1),
    # next to the original slide.
    #
    # Usage:
    # python3 automated_segmentaition_ndpi.py --ndpi_path '/media/alex/FA5EB5A15EB556DB1/uterus/2022.3.15子宫确诊'
    parser = argparse.ArgumentParser(description='Ndpi to xml')
    # NOTE(review): original default was False (a bool) for a str option;
    # None is the conventional "not provided" sentinel for argparse.
    parser.add_argument('--ndpi_path', '-f', type=str, default=None,
                        help='Load ndpi from a .ndpi file')
    args = parser.parse_args()

    ndpi_dir = args.ndpi_path
    num_classes = 5
    window_size = 640        # side length (px) of each square tile fed to the model
    rescale_ratio = 40       # downscale factor for the final preview images
    pic1_color = (219, 218, 168)   # BGR fill colours for the four class previews
    pic2_color = (70, 96, 235)
    pic3_color = (107, 186, 253)
    pic4_color = (166, 190, 98)
    batch_size = 8

    pth_file = './pth/resnet18_0.pth'
    # Architecture name is encoded in the checkpoint filename: "<arch>_<id>.pth".
    modelname = os.path.split(pth_file)[1].split('_')[0]

    model = get_single_model(modelname, num_classes=num_classes)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print('运行设备为', device)
    model.load_state_dict(torch.load(pth_file, map_location=device))
    model = model.to(device)
    model.eval()
    image_transforms = transforms.Compose([transforms.ToTensor()])

    for ndpi_name in os.listdir(ndpi_dir):
        if not ndpi_name.endswith('ndpi'):
            continue
        start_pred_time = time.time()
        ndpi_path = os.path.join(ndpi_dir, ndpi_name)

        time1 = time.time()
        slide = openslide.open_slide(ndpi_path)
        print('opentime :', time.time() - time1)
        width, height = slide.dimensions
        print(width, height)

        # Top-left corners of all full tiles. The strict '<' drops tiles that
        # would overrun the right/bottom edge, so a partial border strip is
        # never classified (matches the original behaviour).
        image_index_list = [
            [tile_x, tile_y]
            for tile_x in range(0, width, window_size)
            for tile_y in range(0, height, window_size)
            if tile_x + window_size < width and tile_y + window_size < height
        ]

        testdataset = TestDataset(image_index_list, slide=slide, window_size=window_size)
        testloader = torch.utils.data.DataLoader(
            testdataset, batch_size=batch_size, shuffle=False, num_workers=6)

        scores = []    # predicted class index per tile, aligned with points_x/points_y
        points_x = []  # tile top-left x coordinates
        points_y = []  # tile top-left y coordinates
        print(f'共计{len(testloader)}个batch')
        # Inference only: no_grad avoids building autograd graphs per batch.
        with torch.no_grad():
            for test_img, index_xy in tqdm(testloader):
                test_outputs = model(test_img.to(device))
                result = np.argmax(
                    nn.functional.softmax(test_outputs, dim=1).cpu().detach().numpy(),
                    axis=1)
                scores.extend(result)
                index_x, index_y = index_xy
                points_x.extend(index_x.tolist())
                points_y.extend(index_y.tolist())

        slide.close()

        # Load the template XML and grab one element of each level
        # (Annotation/Regions/Region/Vertices/Vertex) to use as deep-copy
        # templates, then detach them so the output tree starts empty.
        xml_path = './module.xml'
        tree = ET.parse(xml_path)
        root = tree.getroot()
        annotation = list(root.iter('Annotation'))[0]
        regions = list(annotation.iter('Regions'))[0]
        region = list(annotation.iter('Region'))[0]
        Vertices = list(region.iter('Vertices'))[0]
        vertex = list(Vertices.iter('Vertex'))[0]

        root.remove(annotation)
        annotation.remove(regions)
        regions.remove(region)
        region.remove(Vertices)
        Vertices.remove(vertex)

        ID = 1  # running region id for the exported XML annotations

        result_pic1_name = ndpi_path[:-5] + f'_有染色_{modelname}.jpg'
        result_pic2_name = ndpi_path[:-5] + f'_细胞核_{modelname}.jpg'
        result_pic3_name = ndpi_path[:-5] + f'_细胞膜_{modelname}.jpg'
        result_pic4_name = ndpi_path[:-5] + f'_细胞浆_{modelname}.jpg'

        preview_shape = (height // rescale_ratio, width // rescale_ratio, 3)
        result_pic1 = np.full(preview_shape, 255, np.uint8)  # white canvases
        result_pic2 = np.full(preview_shape, 255, np.uint8)
        result_pic3 = np.full(preview_shape, 255, np.uint8)
        result_pic4 = np.full(preview_shape, 255, np.uint8)

        # class index -> (preview canvas, fill colour)
        previews = {
            1: (result_pic1, pic1_color),
            2: (result_pic2, pic2_color),
            3: (result_pic3, pic3_color),
            4: (result_pic4, pic4_color),
        }

        for score, px, py in zip(scores, points_x, points_y):
            if score <= 0:
                continue  # class 0 (background) produces no output
            x1, y1 = int(px), int(py)
            x2, y2 = x1 + window_size, y1 + window_size

            # Paint the tile onto the matching preview image (filled rectangle).
            canvas_color = previews.get(int(score))
            if canvas_color is not None:
                canvas, color = canvas_color
                cv2.rectangle(canvas,
                              (x1 // rescale_ratio, y1 // rescale_ratio),
                              (x2 // rescale_ratio, y2 // rescale_ratio),
                              color, thickness=-1)

            # Classes > 1 are additionally exported as rectangular XML regions.
            # (Deep copies happen only here — the originals are templates.)
            if score > 1:
                region_to_save = copy.deepcopy(region)
                Vertices_to_save = copy.deepcopy(Vertices)
                region_to_save.set('Id', str(ID))
                region_to_save.set('Text', str(score.item())[:5])
                region_to_save.set('DisplayId', str(ID))
                for vx, vy in ((x1, y1), (x2, y1), (x2, y2), (x1, y2)):
                    vertex_to_save = copy.deepcopy(vertex)
                    vertex_to_save.set('X', str(vx))
                    vertex_to_save.set('Y', str(vy))
                    Vertices_to_save.append(vertex_to_save)
                region_to_save.append(Vertices_to_save)
                regions.append(region_to_save)
                ID += 1

        annotation.append(regions)
        root.append(annotation)
        tree.write(ndpi_path[:-5] + '.xml')

        cv2.imwrite(result_pic1_name, result_pic1)
        cv2.imwrite(result_pic2_name, result_pic2)
        cv2.imwrite(result_pic3_name, result_pic3)
        cv2.imwrite(result_pic4_name, result_pic4)

        print(f'{ndpi_path} total_time', time.time() - start_pred_time)