import torch
import numpy as np
import cv2
import os
import torch.nn as nn
import torch.optim as optim
import csv
from PIL import Image

from torchvision import transforms, utils
from torch.utils.data import DataLoader, Dataset

# Dataset layout on disk: `data_path` is the root; train/ and test/ hold the
# .raw / .png images. Index files (train.txt / test.txt) are written into the root.
data_path = "/Users/WangZZ/code/20200610spine/data/"
train_path = "/Users/WangZZ/code/20200610spine/data/train"
test_path = "/Users/WangZZ/code/20200610spine/data/test"

def raw2png(filepath):
    """Convert every ``.raw`` image in *filepath* to a contrast-enhanced ``.png``.

    Each raw file is read as a flat 1024x1024 uint16 buffer (TODO: confirm this
    layout holds for all acquisitions), enhanced with CLAHE (clipLimit=2.0,
    8x8 tiles), and written next to the original with a ``.png`` extension.

    :param filepath: directory to scan (non-recursive).
    """
    # The enhancer is loop-invariant -- build it once instead of per file.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    for name in os.listdir(filepath):
        full_path = os.path.join(filepath, name)
        # `base` replaces the original local named `str`, which shadowed the builtin.
        base, ext = os.path.splitext(full_path)
        if os.path.isfile(full_path) and ext == ".raw":
            raw_img = np.fromfile(full_path, dtype='uint16').reshape(1024, 1024)
            enhanced = clahe.apply(raw_img)  # CLAHE supports 16-bit single-channel input
            cv2.imwrite(base + '.png', enhanced)

def generate_txt(datapath, txtpath):
    """Append one ``<png_path> <label>`` line to *txtpath* for every ``.png``
    file in *datapath*.

    The label is the 3rd-from-last character of the path without its extension
    (e.g. ``case3_a.png`` -> ``3``) -- assumes that naming convention; confirm
    against the actual dataset files.

    :param datapath: directory to scan (non-recursive).
    :param txtpath: index file to append to (created if missing).
    """
    # Open the index file once, instead of re-opening it for every image,
    # and sort the listing so the index is deterministic across runs.
    with open(txtpath, 'a') as out:
        for name in sorted(os.listdir(datapath)):
            full_path = os.path.join(datapath, name)
            base, ext = os.path.splitext(full_path)
            if ext == ".png":
                label = base[-3]
                out.write(full_path + ' ' + label + '\n')

# generate_txt(test_path,data_path + 'test.txt')


class LeNet(nn.Module):
    """LeNet-5 style CNN for single-channel 32x32 input, 5 output classes.

    Attribute names (conv1/conv2/fc1/fc2/fc3) are part of the checkpoint
    format -- they determine the state_dict keys.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        # Stage 1: 1x32x32 -> conv (5x5, pad 2 keeps spatial size) -> 6x32x32
        #          -> 2x2 max pool -> 6x16x16
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 6, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Stage 2: 6x16x16 -> conv (5x5, no pad) -> 16x12x12
        #          -> 2x2 max pool -> 16x6x6
        self.conv2 = nn.Sequential(
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Classifier head: 16*6*6 = 576 features -> 120 -> 84 -> 5 logits.
        self.fc1 = nn.Sequential(nn.Linear(16 * 6 * 6, 120), nn.ReLU())
        self.fc2 = nn.Sequential(nn.Linear(120, 84), nn.ReLU())
        self.fc3 = nn.Linear(84, 5)

    def forward(self, x):
        """Map a (B, 1, 32, 32) batch to (B, 5) raw class scores."""
        for stage in (self.conv1, self.conv2):
            x = stage(x)
        # Flatten to (B, 576) -- the fully connected layers expect 1-D features.
        x = x.view(x.size(0), -1)
        for head in (self.fc1, self.fc2, self.fc3):
            x = head(x)
        return x

class MyDataset(Dataset):
    """Dataset backed by a whitespace-separated index file.

    Each non-empty line of *txt* is ``<image_path> <int_label>``. Images are
    loaded lazily in ``__getitem__`` as 32x32 float32 tensors scaled to [0, 1].
    """

    def __init__(self, txt, transform=None, target_transform=None):
        """Parse the index file into ``self.imgs`` as (path, label) pairs.

        :param txt: path to the index file.
        :param transform: optional callable applied to the image tensor.
        :param target_transform: kept for interface compatibility (unused here).
        """
        super(MyDataset, self).__init__()

        img_list = []
        # Context manager closes the handle (the original leaked the open file).
        with open(txt, 'r') as fh:
            for line in fh:
                words = line.split()  # split() already discards the trailing newline
                if not words:
                    continue  # tolerate blank lines instead of raising IndexError
                img_list.append((words[0], int(words[1])))

        self.imgs = img_list  # list of (filepath, int label)
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        """Return ``(tensor, label)``; tensor is (32, 32) -- caller adds the channel dim."""
        filename, label = self.imgs[index]
        img = Image.open(filename)
        img = img.resize((32, 32), Image.NEAREST)
        # Assumes 16-bit source pixels -- TODO confirm; scale into [0, 1] float32.
        img = np.array(img, dtype=np.uint16)
        img = (img / 65535).astype('float32')
        img = torch.from_numpy(img)
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.imgs)

if __name__ == "__main__":
    # NOTE(review): this transform is never passed to MyDataset (the dataset
    # does its own resize + scaling in __getitem__); kept only for reference.
    transform = transforms.Compose([transforms.Resize(128), transforms.ToTensor()])

    train_data = MyDataset(txt=data_path + 'train.txt')
    test_data = MyDataset(txt=data_path + 'test.txt')

    # Shuffle the training set each epoch (the original used shuffle=False,
    # which trains on the same fixed order every epoch); keep test order fixed.
    train_loader = DataLoader(dataset=train_data, batch_size=1, shuffle=True)
    test_loader = DataLoader(dataset=test_data, batch_size=1, shuffle=False)

    print("Train data load success")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LeNet()
    print(model)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.002)
    model.to(device)

    epochs = 100
    steps = 0
    running_loss = 0.0
    train_loss = []  # per-epoch average losses

    # ---- training ----
    # Enable training mode BEFORE optimizing; the original called model.train()
    # at the END of each epoch, so the first epoch ran in the default mode.
    model.train()
    for epoch in range(epochs):
        for inputs, labels in train_loader:
            print("Steps: ", steps)
            steps += 1
            inputs, labels = inputs.to(device), labels.to(device)
            # Dataset yields (32, 32); DataLoader adds the batch dim -> (B, 32, 32).
            # Insert the channel axis at dim 1 so the CNN sees (B, 1, 32, 32);
            # the original unsqueeze(0) only worked because batch_size == 1.
            inputs = inputs.unsqueeze(1)
            optimizer.zero_grad()
            logps = model(inputs)  # (batch, classes); call the module, not .forward()
            loss = criterion(logps, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        # Average over batches -- the original printed running_loss/print_every
        # but appended running_loss/len(train_loader), two different numbers.
        epoch_loss = running_loss / len(train_loader)
        train_loss.append(epoch_loss)
        print(f"Epoch {epoch + 1} / {epochs}.."
              f"Train loss: {epoch_loss:.6f}..")
        running_loss = 0.0

    torch.save(model.state_dict(), "xray_cls_model.pth")

    # ---- evaluation ----
    model.load_state_dict(torch.load("xray_cls_model.pth"))
    model.eval()

    with torch.no_grad():
        for inputs, gt in test_loader:  # `inputs`, not `input` (builtin shadowing)
            inputs = inputs.to(device).unsqueeze(1)  # (1, 1, 32, 32)
            scores = model(inputs).cpu().numpy().tolist()
            # raw class scores, predicted class index, ground-truth label
            print(scores[0], scores[0].index(max(scores[0])), gt)

    # ---- TorchScript export ----
    # Trace on CPU: the example tensor below is a CPU tensor, and tracing a
    # CUDA-resident model with CPU inputs fails; a CPU trace is also the more
    # portable artifact for deployment.
    model.to("cpu")
    example = torch.rand(1, 1, 32, 32)
    traced_script_module = torch.jit.trace(model, example)

    output = traced_script_module(torch.ones(1, 1, 32, 32))
    print(output)

    traced_script_module.save("model_jit_trace.pt")


