import Net
import NumericalGradientDescent
import Loss
import torch
from torch import optim
from torch.utils.data import DataLoader
import datasetNew
import Visualization

class train:
    """Fit a set of plane models to a voxel surface point cloud.

    NOTE(review): relies on the project-local modules Net, Loss,
    datasetNew and Visualization; their contracts (ournet, Gloss,
    VoxedgeDataset, visualization) are assumed from usage here and
    should be confirmed against those files.
    """

    # Input point file; each non-empty line is "x,y,z,1" (homogeneous
    # integer coordinates).
    _DATA_PATH = 'data/surfacePoint.txt'

    def __init__(self):
        # Maximum number of planes to fit before giving up.
        self.uplimit = 1000
        # Fitted plane models, appended by mutiplanefit() and written
        # out by save().
        self.surface = []

        # Hyper-parameters for a single plane fit.
        self.epochs = 3000
        self.lr = 1e-4
        self.batch_size = 9402

        self.visual = Visualization.visualization()

        # result is a list of [x, y, z, 1] integer rows; adjust the
        # path above for your own data layout.
        result = self._read_surface_points(self._DATA_PATH)
        self.dataset = datasetNew.VoxedgeDataset(result)

    @staticmethod
    def _read_surface_points(path):
        """Parse a comma-separated point file into a list of int rows.

        Each non-empty line "x,y,z,1" becomes [x, y, z, 1]. Blank
        lines are skipped.
        """
        points = []
        with open(path, 'r') as f:
            for line in f:
                stripped = line.strip()
                if stripped:
                    points.append([int(a) for a in stripped.split(',')])
        return points

    def mutiplanefit(self):
        """Iteratively fit planes, then save all fitted parameters.

        Currently stops after the first plane (debug ``break``); the
        dataset-shrinking step is still TODO.
        """
        surfacenum = 0
        # TODO: the termination condition should also account for the
        # number of leftover points.
        while surfacenum < self.uplimit and len(self.dataset) > 0:
            print(f"ready to train NO.{surfacenum} surfacenum")
            tempmodel = self.singleplanefit()
            # Materialize the generator so the parameters are visible.
            print(list(tempmodel.named_parameters()))
            self.checkpoint(tempmodel)
            # Record the fitted plane so save() actually persists it
            # (previously skipped by a break placed before the append).
            self.surface.append(tempmodel)
            surfacenum += 1
            # TODO: dynamically shrink the dataset (e.g. a
            # dataset.adjust() hook removing points covered by
            # tempmodel) before fitting the next plane; until that
            # exists, only one plane is fitted.
            break

        self.save()

    def singleplanefit(self):
        """Train one plane model on the full dataset and return it."""
        loss_func = Loss.Gloss()
        model = Net.ournet()

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model.to(device)
        loss_func.to(device)
        dataloader = DataLoader(dataset=self.dataset, batch_size=self.batch_size, shuffle=True)
        optimizer = optim.Adam(model.parameters(), lr=self.lr)

        print('start train')
        for e in range(self.epochs):
            epoch_loss = 0
            model.train(True)
            count = 0
            for batch_idx, coordinate in enumerate(dataloader):
                coordinate = coordinate.to(device)
                # Count the actual batch size; the last batch may be
                # smaller than self.batch_size.
                count += coordinate.size(0)
                optimizer.zero_grad()
                res = model(coordinate)
                # The loss compares the model output against the first
                # parameter tensor of the net (the plane coefficients).
                # NOTE(review): .data detaches that tensor, so no
                # gradient flows through the second argument — confirm
                # this is intentional in Loss.Gloss.
                plane_params = next(model.parameters()).data
                loss = loss_func(res, plane_params)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()

                msg = '{}\tEpoch: {}:\t[{}/{}]\tepoch_loss: {:.6f}'.format(
                    '尝试性训练',
                    e + 1,
                    count,
                    len(self.dataset),
                    epoch_loss)
                print(msg)
                print(plane_params)
                print(optimizer.state_dict()['param_groups'][0]['lr'])
            print("epoch done")
        return model

    def save(self):
        """Write the fitted plane parameters to models/test.txt.

        Format: first line is the plane count, then one line per plane
        with its parameters separated by single spaces.
        """
        savePath = "models/test.txt"
        with open(savePath, 'w') as fp:
            fp.write(str(len(self.surface)))
            fp.write("\n")
            for plane in self.surface:
                # NOTE(review): paranum / getpara(0, j) come from
                # Net.ournet — confirm row 0 holds the plane params.
                for j in range(plane.paranum):
                    fp.write(str(plane.getpara(0, j)))
                    fp.write(" ")
                fp.write("\n")

        print("save done")

    def checkpoint(self, model):
        """Visualize the fitted plane against a dense unit-cube grid.

        Evaluates *model* on a 100x100x100 grid of homogeneous points
        in [0, 1)^3, collects those whose loss is below -10000 for a
        scatter plot, then draws the raw surface points for comparison.
        """
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print('checkpoint')
        loss_func = Loss.Gloss()
        # Plane coefficients do not change during evaluation, so fetch
        # them once instead of inside the triple loop.
        plane_params = next(model.parameters()).data
        visuallosspoint = []
        for i in range(100):
            for j in range(100):
                for k in range(100):
                    # Homogeneous coordinate (i, j, k) scaled into the
                    # unit cube.
                    temp = torch.tensor([[i / 100, j / 100, k / 100, 1.0]])
                    temp = temp.to(device)
                    res = model(temp)
                    loss = loss_func(res, plane_params)
                    # NOTE(review): the -10000 threshold selects points
                    # considered "on" the plane under Gloss — confirm
                    # the scale of that loss.
                    if loss < -10000:
                        visuallosspoint.append(temp.cpu().numpy().tolist())
            print(i)
        self.visual.losspoint(visuallosspoint)
        # Re-read the raw surface points, dropping the trailing
        # homogeneous 1, and show them alongside the loss points.
        res = [row[:-1] for row in self._read_surface_points(self._DATA_PATH)]
        self.visual.normal(res)