# This is a sample Python script.

# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import numpy as np
import torch
import torch.nn as nn
import Train
import Visualization
import Loss
import Net

def print_hi(name):
    """Print a greeting for *name* (leftover PyCharm template demo)."""
    greeting = f'Hi, {name}'
    print(greeting)

# Demo: forwarding arbitrary positional/keyword arguments to a function in Python
def test(func,*args,**kwargs):
    func(*args,**kwargs)

def noparafunc():
    """Parameterless demo function: prints a fixed message."""
    message = 'hello world'
    print(message)

def test2(func):
    func('fuck')

# Press the green button in the gutter to run the script.

def mytest():
    """Scratch/experiment driver.

    Exercises the demo helpers in this file, demos numpy matrix and torch
    tensor arithmetic, computes a point-to-plane distance, and finally reads
    an integer counter from ``fuck.txt``.

    Research notes (translated from the original Chinese TODOs):
    - TODO normalize the model
    - TODO drop the output layer and back-propagate directly on the summed
      16 nodes?
    - TODO two optimization goals: with all interior points positive,
      (1) keep exterior points from being classified positive, (2) ...;
      a plain weighted sum of the two losses cannot satisfy this (it is
      locally discontinuous)
    - TODO including interior point-to-plane distance (3) targets the
      best-fit problem; (3) is a weak constraint and is dropped, (2) is the
      strong constraint
    - TODO is this really two piecewise-differentiable segments? skip the
      analysis and just optimize with gradient descent
    - TODO the SGD outline is written; next, choose between the two loss
      designs (ours still lacks a proper optimization direction/loss)
    """
    print_hi('PyCharm')
    test(print_hi, 'test')
    noparafunc()
    test(noparafunc)
    #test2(noparafunc)
    test2(print_hi)
    print(3**3)
    # --- numpy matrix demos ------------------------------------------------
    # NOTE: np.mat/np.matrix is legacy numpy API, but it is kept here because
    # these demos rely on `*` meaning matrix multiplication, not elementwise.
    xyz = np.array([1., 2., 3.])
    tempmatrix = np.mat([xyz[0], xyz[1], xyz[2], 1])  # 1x4 homogeneous point
    tempmatrix2 = np.mat(xyz)
    tempmatrix3 = np.mat(np.random.rand(1, 4))
    print(tempmatrix)
    print(tempmatrix2)
    print(tempmatrix3)
    tempmatrix3[0, 2] += 0.1
    print(tempmatrix3.item(0, 2))
    tempmatrix4 = np.mat(np.random.rand(4, 1))
    tempmatrix5 = np.mat(np.random.rand(1, 4))
    print(tempmatrix4)
    print(tempmatrix5)
    print(tempmatrix4 * tempmatrix5)  # (4,1)x(1,4) -> 4x4 outer product
    print(tempmatrix5 * tempmatrix4)  # (1,4)x(4,1) -> 1x1 inner product
    tempmatrix6 = tempmatrix5 * tempmatrix4
    print(tempmatrix6[0, 0])
    # --- torch point-to-plane distance demo ---------------------------------
    plane = torch.rand(4, 1)  # plane coefficients (a, b, c, d)
    point = torch.rand(1, 4)  # homogeneous point (x, y, z, 1)
    print("plane")
    print(plane)
    print("point")
    print(point)
    # distance denominator: ||plane|| (NOTE(review): includes d in the norm;
    # the textbook formula uses only (a, b, c) — kept as the original wrote it)
    d_part_1 = torch.sqrt(torch.sum(torch.pow(plane, 2), 0))
    # distance numerator: |ax + by + cz + d|
    d_part_2 = torch.abs(torch.matmul(point, plane))
    # distance = numerator / denominator
    d = torch.div(d_part_2, d_part_1)
    print(d_part_1)
    print(d_part_2)
    print(d)
    # Read an integer counter from the first line of fuck.txt.
    # Fix: the original opened the file in binary mode and sliced the bytes'
    # repr() string with [2:-5] (after a redundant double .__str__() call),
    # which only parses correctly with CRLF line endings. Text mode plus
    # strip() is robust on any platform.
    with open("fuck.txt", "r") as fp:
        first_line = fp.readline().strip()
        print(first_line)
        num = int(first_line)
        print(num + 1)
        # echo the next four lines, mirroring the original debug prints
        for _ in range(4):
            print(fp.readline())

def getweight():
    """Inspect the parameters of a bias-free 4->1 linear layer.

    Prints the layer's state dict, its named-parameter list, the first
    parameter's name, and its raw weight tensor. Scratch code; returns None.
    """
    layer = nn.Linear(4, 1, False)
    print(layer.state_dict())
    named_params = list(layer.named_parameters())
    print(named_params)
    first_name, first_param = named_params[0]
    print(first_name)
    print(first_param.data)


def planevisual():
    """For each plane read from models/20plane.txt, sample a 50x50x50 grid
    over the unit cube, keep the points whose loss is below -20000, and dump
    them to data/<index>.txt for visualization.

    Each input line is expected to look like "[a,b,c,...]\\n"; the leading
    '[' and trailing ']\\n' are stripped before parsing the floats.
    """
    # Bug fix: the original computed `device` but then called .cuda()
    # unconditionally, crashing on CPU-only machines. Use .to(device) so the
    # code runs on both CPU and GPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    visual = Visualization.visualization()
    model = Net.ournet().to(device)
    count = 0
    with open("models/20plane.txt", 'r') as fp:
        for lines in fp.readlines():
            # Fix: renamed local `str` — it shadowed the builtin str().
            raw = lines[1:-2]  # strip '[' and ']\n'
            tempplane = list()
            for a in raw.strip().split(','):
                print(float(a))
                tempplane.append(float(a))
            model.setpara(tempplane)
            loss_func = Loss.Gloss()
            visuallosspoint = list()
            # Grid step of 2/100 = 0.02 over [0, 1)^3, homogeneous coordinate 1.
            for i in range(0, 100, 2):
                for j in range(0, 100, 2):
                    for k in range(0, 100, 2):
                        temp = torch.zeros(1, 4)
                        temp[0, 0] = float(i) / 100
                        temp[0, 1] = float(j) / 100
                        temp[0, 2] = float(k) / 100
                        temp[0, 3] = 1
                        temp = temp.to(device)
                        res = model(temp)
                        # Loss compares the model output against the first
                        # named parameter's data (the plane weights).
                        loss = loss_func(res, list(model.named_parameters())[0][1].data)
                        if (loss < -20000):
                            visuallosspoint.append((temp.cpu().numpy().tolist()))
            visual.losspoint(visuallosspoint, 'data/' + str(count) + '.txt')
            count = count + 1



if __name__ == '__main__':
    # Research notes (translated from the original Chinese TODOs):
    # TODO how do we move the surface initialization out of here?
    # TODO the gravitational-attraction loss has a flaw: if the plane starts
    #      too far from the object, convergence is very slow.
    # TODO unit-sphere initialization code.
    # TODO the loss form still needs changing; merely adding an epsilon
    #      will not help.
    # TODO random initialization in spherical coordinates is a good idea;
    #      revisit once the initialization code is finished.
    # TODO overall promising — try to get a single-plane best fit before
    #      Friday; forget about hyper-parameter tuning for now.
    # TODO new idea: would dropping the square let the plane climb out of
    #      the object by itself? No — the gradient direction is wrong.
    print("hello world")
    print(torch.cuda.is_available())
    planevisual()
    # testone = Train.train()
    # print("initialized successfully")
    # testone.mutiplanefit()
    # print('done')





# See PyCharm help at https://www.jetbrains.com/help/pycharm/
