import numpy as np
import pandas as pd
import argparse
from osgeo import gdal
from osgeo import ogr
from PIL import Image
from skimage import io
from skimage.io import imread, imshow
import matplotlib.pyplot as plt
import torch
import random  
import torch.utils.data as data
from torch.utils.data import Dataset,DataLoader
import torch.nn as nn
from torchvision.datasets import ImageFolder
import torch.nn.functional as F
import torchvision
import torch.optim as optim
import torchvision.transforms as transforms
from skimage.segmentation import slic
import numpy as np
from sklearn.cluster import KMeans, MiniBatchKMeans
from skimage.util import img_as_float
from skimage.segmentation import mark_boundaries
import os
import re
import copy

def progress_cb(complete, message, cb_data):
    """GDAL-style progress callback.

    Prints the percentage (no newline) at every 10% step, and `cb_data`
    (typically a dot) at every 3% step; other ticks print nothing.
    """
    pct = int(complete * 100)
    if pct % 10 == 0:
        print(f'{complete*100:.0f}', end='', flush=True)
    elif pct % 3 == 0:
        print(f'{cb_data}', end='', flush=True)

 
# Display / export segmentation results
def image_slic():
    """Segment a GeoTIFF with SLIC and export each superpixel.

    Saves the full label map as a PNG, then writes one georeferenced
    GTiff clip per superpixel, with pixels outside the superpixel
    zeroed out.

    NOTE(review): input path, output folder and segment count are
    hard-coded; presumably a one-off processing script.
    """
    file_path = r"E:\新建文件夹\G50F008021_2_0.tif"
    save_folder = r"E:\Yang\5"
    n_segments = 1000

    file_name = os.path.splitext(os.path.basename(file_path))[0]

    # Run SLIC on the float image; labels start at 1 by default.
    image = img_as_float(io.imread(file_path))
    label = slic(
        image,
        n_segments=n_segments,
        max_num_iter=10,
        sigma=5,
    )
    # Cast to uint16: PNG cannot store the int64 labels slic returns,
    # and n_segments may exceed the uint8 range.
    io.imsave(os.path.join(save_folder, f"{file_name}.png"),
              label.astype(np.uint16))

    gdal.AllRegister()
    dataset = gdal.Open(file_path)
    projection = dataset.GetProjection()
    geotransform = dataset.GetGeoTransform()
    bandcount = dataset.RasterCount
    datatype = dataset.GetRasterBand(1).DataType
    driver = gdal.GetDriverByName('GTiff')

    # Iterate over the label ids actually present: SLIC can merge/drop
    # segments, so assuming ids 1..n_segments would crash on a gap
    # (max() of an empty index array).
    for seg_id in np.unique(label):
        rows, cols = np.where(label == seg_id)
        # Inclusive bounding box; +1 so the last row/column of the
        # segment is kept (the original had a max-min off-by-one).
        y0, y1 = int(rows.min()), int(rows.max()) + 1
        x0, x1 = int(cols.min()), int(cols.max()) + 1

        # Keep the original 0-based naming scheme (label id minus one).
        path = os.path.join(save_folder, f"{file_name}_{seg_id - 1}.tif")
        temp_dataset = driver.Create(path, x1 - x0, y1 - y0,
                                     bandcount, datatype)
        temp_dataset.SetProjection(projection)
        # Shift the affine-transform origin to the clip window.
        temp_dataset.SetGeoTransform((
            geotransform[0] + x0 * geotransform[1],
            geotransform[1],
            geotransform[2],
            geotransform[3] + y0 * geotransform[5],
            geotransform[4],
            geotransform[5],
        ))

        # Pixels inside the bounding box but outside the superpixel.
        mask = label[y0:y1, x0:x1] != seg_id
        for index in range(1, bandcount + 1):
            # Read only the clip window instead of re-reading the whole
            # band for every segment.
            array = dataset.GetRasterBand(index).ReadAsArray(
                x0, y0, x1 - x0, y1 - y0)
            array[mask] = 0
            temp_dataset.GetRasterBand(index).WriteArray(array)
        temp_dataset.FlushCache()
        temp_dataset = None  # dereference to close the per-segment file

    dataset = None  # close the source dataset
    return

def image_kmeans(image_path=r"E:\Yang\image\G49F026048_0_1.tif", n_clusters=3):
    """Cluster the pixels of an RGB image with K-means and display
    the source image alongside the per-pixel cluster labels.

    image_path: path of the RGB image to cluster (defaults to the
        original hard-coded path, so existing callers are unaffected).
    n_clusters: number of K-means clusters.
    """
    image = imread(image_path)

    plt.figure("source")
    plt.subplot(111)
    plt.axis("off")
    imshow(image)

    # One row per pixel, one column per colour channel.
    pixels = pd.DataFrame({
        'Red_Channel': image[:, :, 0].flatten(),
        'Green_Channel': image[:, :, 1].flatten(),
        'Blue_Channel': image[:, :, 2].flatten(),
    })
    # Fixed random_state keeps the cluster assignment reproducible.
    kmeans = KMeans(n_clusters=n_clusters, random_state=42).fit(pixels)
    result = kmeans.labels_.reshape(image.shape[0], image.shape[1])

    plt.figure("result")
    plt.subplot(111)
    plt.axis("off")
    imshow(result, cmap='viridis')

    plt.show()
    return

# get train txt file 
# get train txt file
def create_train_txt(data_dir, class_dict, save_dir):
    """Write a shuffled ``train.txt`` listing "<image_path> <label>" lines.

    data_dir: root folder containing one sub-folder per class.
    class_dict: mapping from class folder name to integer class code.
    save_dir: folder where ``train.txt`` is written.
    Returns the path of the written txt file.
    """
    # Raw string: "\." in a non-raw literal is an invalid escape
    # (DeprecationWarning, SyntaxError in future Pythons). Compiled
    # once instead of re-parsed per file.
    image_re = re.compile(r".*\.(tif|jpg|png)$")
    lines = []
    for class_name in os.listdir(data_dir):
        class_code = class_dict[class_name]
        class_dir = os.path.join(data_dir, class_name)
        for image_name in os.listdir(class_dir):
            if image_re.match(image_name):
                image_path = os.path.join(class_dir, image_name)
                lines.append(f"{image_path} {class_code}\n")
    # Shuffle so training batches are not grouped by class.
    random.shuffle(lines)
    txt_path = os.path.join(save_dir, "train.txt")
    with open(txt_path, "w") as txt_file:
        txt_file.writelines(lines)
    return txt_path

# my dataset
# my dataset
class VeriDataset(data.Dataset):
    """Dataset that reads image names / labels / camera ids from a txt list.

    Training lists use "<name> <label>" per line; validation lists use
    just "<name>", with the label and camera id encoded in the name as
    '_'-separated tokens.
    """

    def __init__(self, data_dir, train_list, train_data_transform=None, is_train=True):
        '''
        data_dir: root directory of the image files
        train_list: txt file listing the images
        train_data_transform: optional image preprocessing
        is_train: True for the training list format, False for validation
        '''
        super(VeriDataset, self).__init__()
        self.is_train = is_train
        self.data_dir = data_dir
        self.train_data_transform = train_data_transform

        # `with` guarantees the list file is closed even if reading raises
        # (the original leaked the handle on error).
        with open(train_list, 'r') as f:
            lines = f.readlines()

        self.names = []
        self.labels = []
        self.cams = []

        if is_train:
            # "<name> <label>"; camera id is the second '_' token of the name
            for line in lines:
                parts = line.strip().split(' ')
                self.names.append(parts[0])
                self.labels.append(parts[1])
                self.cams.append(parts[0].split('_')[1])
        else:
            # validation format: "<label>_<cam>..." encoded in the name itself
            for line in lines:
                name = line.strip()
                self.names.append(name)
                self.labels.append(name.split('_')[0])
                self.cams.append(name.split('_')[1])

    def __getitem__(self, index):
        """Return (image, int label, camera id) for the given index."""
        image_path = os.path.join(self.data_dir, self.names[index])
        img = Image.open(image_path).convert('RGB')
        target = int(self.labels[index])
        camid = self.cams[index]

        if self.train_data_transform is not None:
            img = self.train_data_transform(img)

        return img, target, camid

    def __len__(self):
        """Return the number of listed images."""
        return len(self.names)

# download CIFAR10 train dataset
# download CIFAR10 train dataset
def get_dataset(dataset_path):
    """Return a DataLoader over the CIFAR-10 training split.

    dataset_path: directory where the CIFAR-10 archive is stored
        (downloaded on first use).
    """
    # Map PIL images to tensors normalized into [-1, 1].
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    trainset = torchvision.datasets.CIFAR10(
        root=dataset_path, train=True, download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=4, shuffle=True, num_workers=2)

    # NOTE(review): the original also built a test loader and a
    # `classes` tuple here and discarded both; that dead code has been
    # removed (only the train loader was ever returned).
    return trainloader

# load dataset
# load dataset
class LoadDataset:
    """Bundle DataLoaders for training, validation and prediction sets.

    The original ``__init__`` was a corrupted paste: it rebuilt the
    loaders twice, chained an assignment onto the undefined name
    ``DataLoadebatch_size``, called a DataLoader object as a function,
    and stored nothing on the instance. It now builds each loader once
    and exposes them as ``train_dl`` / ``val_dl`` / ``pred_dl``.
    """

    def __init__(self, train_ds, test_ds, pred_ds, batch_size=128):
        # Only the training loader shuffles; evaluation order is stable.
        self.train_dl = DataLoader(train_ds, batch_size, shuffle=True,
                                   num_workers=4, pin_memory=True)
        self.val_dl = DataLoader(test_ds, batch_size,
                                 num_workers=4, pin_memory=True)
        self.pred_dl = DataLoader(pred_ds, batch_size,
                                  num_workers=4, pin_memory=True)

    @staticmethod
    def train_transform():
        """Augmenting transform for training images (VGG takes 150x150)."""
        return transforms.Compose([
            transforms.Resize((150, 150)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((.5, .5, .5), (.5, .5, .5)),
        ])

    @staticmethod
    def test_transform():
        """Deterministic transform for test/validation data — no augmentation."""
        return transforms.Compose([
            transforms.Resize((150, 150)),
            transforms.ToTensor(),
            transforms.Normalize((.5, .5, .5), (.5, .5, .5)),
        ])

    def loader(self):
        """Build and return the (train, test, pred) ImageFolder datasets.

        Fixes in passing: the original called the nonexistent
        ``self.transform_train`` / ``self.transform_test`` (the methods
        are named ``train_transform`` / ``test_transform``), passed an
        uncalled method object as a transform, and discarded the
        datasets it built.
        """
        train_ds = ImageFolder(
            '../input/intel-image-classification/seg_train/seg_train',
            transform=self.train_transform())
        test_ds = ImageFolder(
            '../input/intel-image-classification/seg_test/seg_test',
            transform=self.test_transform())
        pred_ds = ImageFolder(
            '/kaggle/input/intel-image-classification/seg_pred/',
            transform=self.test_transform())
        return train_ds, test_ds, pred_ds

# build net
class Net(nn.Module):
    """LeNet-style CNN: two conv+pool stages, then three linear layers.

    Expects 3-channel input (32x32 for CIFAR-10) and outputs 10 class
    scores. Attribute names (conv1, pool, conv2, fc1..fc3) are kept so
    previously saved state_dicts remain loadable.
    """

    def __init__(self):
        super(Net, self).__init__()
        # 3 input channels -> 6 feature maps, 5x5 kernels.
        self.conv1 = nn.Conv2d(3, 6, 5)
        # One 2x2 max-pool module, reused after each conv stage.
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # For 32x32 input the maps after conv2+pool are 16 x 5 x 5.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)  # 10 output classes

    def forward(self, x):
        """Forward pass: conv/pool feature extractor, then FC head."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        # Collapse the 2-D feature maps so the linear layers can consume them.
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)

# train model
# train model
def train(trainloader, iteration, model_path):
    """Train a fresh Net with SGD + cross-entropy and save its weights.

    trainloader: DataLoader yielding (inputs, labels) batches.
    iteration: number of epochs to run.
    model_path: destination for the trained state_dict (.pth file).
    """
    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    for epoch in range(1, iteration + 1):
        running_loss = 0.0
        for batch_idx, (inputs, labels) in enumerate(trainloader):
            # Forward, then the usual zero-grad / backward / step cycle.
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            # Report the mean loss once every 2000 mini-batches.
            if batch_idx % 2000 == 1999:
                print(f'[{epoch},{batch_idx+1}] loss: {running_loss/2000}')
                running_loss = 0.0

    print('Finished Training')
    torch.save(net.state_dict(), model_path)  # .pth file
    return

# predict
# predict
def predict(testloader, model_path):
    """Load Net weights from model_path and evaluate on testloader.

    Prints the ground truth of the first batch, overall accuracy, and
    per-class accuracy.
    """
    # CIFAR-10 class names. The original referenced a global `classes`
    # that was never defined in this scope (it was a local variable of
    # get_dataset), which raised NameError.
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    dataiter = iter(testloader)
    # next(iterator): DataLoader iterators no longer expose a .next() method.
    images, labels = next(dataiter)

    # Show the first batch and its labels (iterate the actual batch
    # size instead of the hard-coded 4).
    imshow(torchvision.utils.make_grid(images))
    print('GroundTruth: ',
          ' '.join('%5s' % classes[labels[j]] for j in range(labels.size(0))))

    net = Net()
    net.load_state_dict(torch.load(model_path))

    correct = 0
    total = 0
    class_correct = [0.0] * 10
    class_total = [0.0] * 10
    # Single evaluation pass (the original iterated the loader twice to
    # compute overall and per-class accuracy separately).
    with torch.no_grad():
        for images, labels in testloader:
            outputs = net(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            # (predicted == labels) is already 1-D; no squeeze needed
            # (squeeze broke indexing for batch size 1).
            matches = (predicted == labels)
            correct += matches.sum().item()
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += matches[i].item()
                class_total[label] += 1

    print('Accuracy of the network on the %d test images: %d %%' % (
        total, 100 * correct / total))

    for i in range(10):
        # Guard against classes absent from the test set (zero division).
        if class_total[i]:
            print('Accuracy of %5s : %2d %%' % (
                classes[i], 100 * class_correct[i] / class_total[i]))
    return

# start
# start
def main():
    """Entry point: run the currently selected pipeline stage.

    The other stages are kept commented out below for reference.
    """
    # --- get train txt file ---
    # data_dir = r"E:\dataset"
    # class_dict = {"negative": 0, "positive": 1}
    # save_dir = r"E:\\"
    # create_train_txt(data_dir, class_dict, save_dir)

    # --- get dataset ---
    # dataset_path = r"E:\Yang\Project\RasterShapeToolBox-1\dataset"
    # trainloader = get_dataset(dataset_path)
    # print(trainloader)

    # --- train model ---
    # iteration = 2
    # model_path = r"E:\Yang\Project\RasterShapeToolBox-1\model\train_model_20220423.pth"
    # train(trainloader, iteration, model_path)

    # --- val model ---

    # --- predict model ---

    # Currently active stage:
    image_slic()
    # image_kmeans()
    return


# Script entry point: run main() only when this file is executed
# directly, not when it is imported as a module.
if __name__ == "__main__":
    main()
