import time
from PIL import Image
from torchvision import transforms 
import torch
import numpy as np
import re
import sys
import re
import random
from  torch import nn, optim, autograd
import math
from torch.autograd import grad as torch_grad
from run import cfg
import os
import socket
# Determine the last octet of this machine's outbound LAN IP. Connecting a
# UDP socket to a public address sends no packets; it only selects the
# outgoing interface so getsockname() returns the local address.
# Fix: the socket was previously never closed (resource leak).
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    s.connect(('8.8.8.8', 80))
    ip = s.getsockname()[0].split('.')[-1]
finally:
    s.close()

def tensor_to_PIL(tensor, name):
    """Convert a (1, C, H, W) or (C, H, W) tensor to a PIL image and save it
    under picture/ETH3D/<last component of cfg['folder']>/<name>.jpg.

    Returns the PIL image.
    """
    unloader = transforms.ToPILImage()
    image = unloader(tensor.cpu().clone().squeeze(0))
    folder = 'picture/ETH3D/' + str(cfg['folder'].split('/')[-1]) + '/'
    # exist_ok replaces the old bare try/except around makedirs, which also
    # swallowed real failures such as permission errors.
    os.makedirs(folder, exist_ok=True)
    image.save(folder + name + '.jpg')
    return image
def tensor_to_PIL1(tensor):
    """Convert a (possibly batched) image tensor to a PIL image without saving."""
    img = tensor.cpu().clone().squeeze(0)  # drop a leading batch dim if present
    return transforms.ToPILImage()(img)


def numpy_to_PIL(tensor, name):
    """Convert a numpy array to a PIL image and save it as picture/<name>.jpg.

    Returns the PIL image.
    """
    tensor = torch.from_numpy(tensor)
    unloader = transforms.ToPILImage()
    image = unloader(tensor.cpu().clone().squeeze(0))
    # Create the output folder if needed (consistent with tensor_to_PIL);
    # previously the save failed with FileNotFoundError when it was missing.
    os.makedirs('picture', exist_ok=True)
    image.save('picture/' + name + '.jpg')
    return image
def sec_to_hm(t):
    """Convert time in seconds to (hours, minutes, seconds).

    e.g. 10239 -> (2, 50, 39)
    """
    minutes, seconds = divmod(int(t), 60)
    hours, minutes = divmod(minutes, 60)
    return hours, minutes, seconds


def sec_to_hm_str(t):
    """Convert time in seconds to a nice string.

    e.g. 10239 -> '02h50m39s'
    """
    h, m, s = sec_to_hm(t)
    return f"{h:02d}h{m:02d}m{s:02d}s"
def readlines(filename):
    """Read a text file and return its lines as a list (newlines stripped)."""
    with open(filename, 'r') as handle:
        return handle.read().splitlines()
def preprocess(tensor1, num_scales):
    """Build a `num_scales`-level image pyramid from an image tensor.

    Level i is the input downscaled by 2**i. All levels are returned as
    tensors (via ToTensor on the RGB-converted PIL image).

    Fix: the original used a bare except as control flow to initialise the
    pyramid; any genuine resize error (e.g. a zero-sized level) was silently
    swallowed and a full-resolution image appended in its place. The first
    level is now created explicitly.
    """
    sizes = tensor1.size()
    height = sizes[-2]
    width = sizes[-1]
    interp = Image.ANTIALIAS  # high-quality downsampling (alias of LANCZOS)
    loader = transforms.Compose([transforms.ToTensor()])

    scales = []
    for i in range(num_scales):
        if i == 0:
            # Level 0 is the full-resolution image converted back to PIL.
            scales.append(tensor_to_PIL1(tensor1))
        else:
            s = 2 ** i
            resize = transforms.Resize((height // s, width // s), interpolation=interp)
            scales.append(resize(scales[i - 1]))
            # The previous level is final now: convert it to a tensor.
            scales[i - 1] = loader(scales[i - 1].convert('RGB'))
    scales[-1] = loader(scales[-1].convert('RGB'))
    return scales

def batchpreprocess(tensor1, num_scales, device=torch.device('cuda')):
    """Run `preprocess` on every image of a batch and stack per scale.

    Returns a list of length `num_scales`; element j is a tensor of shape
    (batch, C, H/2**j, W/2**j) moved to `device`.

    Fix: the original recomputed the full pyramid of every image once per
    scale (num_scales * batch `preprocess` calls) and kept appending the
    results to a growing list; each pyramid is now computed exactly once.
    """
    batch = tensor1.size()[0]
    per_image = [preprocess(tensor1[i], num_scales) for i in range(batch)]
    tscales = []
    for j in range(num_scales):
        level = torch.cat([per_image[i][j].unsqueeze(0) for i in range(batch)], 0)
        tscales.append(level.to(device))
    return tscales



def read_pfm(file: str):
    """Parse a PFM file into a float32 numpy array.

    Returns an array of shape (H, W, 3) for colour files ('PF') or
    (H, W) for greyscale files ('Pf'). PFM stores scanlines bottom-to-top,
    so the data is flipped vertically before returning.
    """
    with open(file, 'rb') as f:
        header = f.readline().rstrip()
        if header == b'PF':
            color = True
        elif header == b'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')

        dims = re.match(br'^(\d+)\s(\d+)\s$', f.readline())
        if not dims:
            raise Exception('Malformed PFM header.')
        width, height = (int(v) for v in dims.groups())

        # A negative scale marks a little-endian payload, positive big-endian.
        scale = float(f.readline().rstrip())
        endian = '<' if scale < 0 else '>'

        raw = np.fromfile(f, endian + 'f')
        shape = (height, width, 3) if color else (height, width)
        data = np.array(np.reshape(raw, shape)[::-1, ...], dtype=np.float32)
    return data


def save_pfm(filename, image, scale=1):
    """Write a float32 numpy image to `filename` in PFM format.

    image: (H, W, 3) colour, or (H, W) / (H, W, 1) greyscale, dtype float32.
    scale: written to the header; its sign encodes endianness (negative =
           little-endian).

    Raises Exception on a wrong dtype or unsupported shape.

    Fixes: the file handle was leaked whenever an exception was raised
    (no `with`/close on the error path), and invalid input still created or
    truncated the output file because validation happened after open().
    """
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')

    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')

    image = np.flipud(image)  # PFM scanlines run bottom-to-top

    endian = image.dtype.byteorder
    # Negative scale marks a little-endian payload.
    if endian == '<' or endian == '=' and sys.byteorder == 'little':
        scale = -scale

    with open(filename, "wb") as file:
        file.write('PF\n'.encode('utf-8') if color else 'Pf\n'.encode('utf-8'))
        file.write('{} {}\n'.format(image.shape[1], image.shape[0]).encode('utf-8'))
        file.write(('%f\n' % scale).encode('utf-8'))
        image.tofile(file)

def pil_loader(path):
    """Load the image at `path` and return it converted to RGB.

    The file is opened explicitly instead of handing the path to Image.open,
    which avoids a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as handle, Image.open(handle) as img:
        return img.convert('RGB')

def depth_loader(path):
    """Load the image at `path` and return it as a single-channel ('L') PIL image."""
    with open(path, 'rb') as handle, Image.open(handle) as img:
        return img.convert('L')

def dataset_divude(length):
    """Randomly split `length` sample indices 70/30 and save them as
    splits/train_files.npy and splits/test_files.npy.

    (The public name keeps the original spelling so callers keep working.)

    Fix: previously crashed with FileNotFoundError when the splits/ folder
    did not exist.
    """
    indices = np.arange(length)
    np.random.shuffle(indices)
    train_len = int(np.around(length * 0.7))
    os.makedirs('splits', exist_ok=True)
    np.save('splits/train_files.npy', np.array(indices[:train_len]))
    np.save('splits/test_files.npy', np.array(indices[train_len:]))

 
# dataset_divude(18300)


def cut_picture(M, dH=128, dW=128):    # batch==1
    """Cut one (1, C, H, W) image into dH x dW tiles, row-major order.

    Tile origins are clamped to the image border, so the last row/column of
    tiles overlaps its neighbour when H or W is not a multiple of dH/dW.

    Returns a tensor of shape (num_tiles, C, dH, dW).

    Fix: the width clamp used the wrong dimension (`w = H - dW` instead of
    `w = W - dW`), producing undersized tiles (and a cat failure that the
    old bare except then mishandled) whenever H != W and W was not a
    multiple of dW.
    """
    H = M.size()[-2]
    W = M.size()[-1]
    n_rows = math.ceil(H / dH)
    n_cols = math.ceil(W / dW)
    tiles = []
    for i in range(n_rows):
        h = min(dH * i, H - dH)  # clamp so the tile stays inside the image
        for j in range(n_cols):
            w = min(dW * j, W - dW)  # BUG FIX: was clamped against H
            tiles.append(M[:, :, h:h + dH, w:w + dW])
    return torch.cat(tiles, dim=0)


def cut_picture_batch(M, dH=128, dW=128):
    """Apply cut_picture to every image of batch M.

    Returns a tensor of shape (batch, cut, C, dH, dW).
    NOTE: the batch order is REVERSED relative to the input — this matches
    the original accumulation order, and joint_picture_batch reverses it
    back, so it is kept.
    """
    per_image = [cut_picture(M[b:b + 1, ...], dH, dW).unsqueeze(0)
                 for b in range(M.size()[0])]
    return torch.cat(per_image[::-1], dim=0)




def joint_picture(MS,H,W):   # input: (cut_num, 3, h, w) tiles
    """Stitch tiles produced by cut_picture back into one H x W image.

    MS holds x1*x2 tiles in row-major order (x1 = ceil(H/dH) rows,
    x2 = ceil(W/dW) columns). Border tiles that were clamped during cutting
    overlap their neighbours; the overlapped part is cropped off before
    concatenation so the result is exactly H x W.

    NOTE(review): the bare except blocks deliberately catch the NameError
    raised on the first use of `pic` / `pics` to initialise the row / image
    accumulators — but they would also hide genuine cat/size errors.
    """
    dH=MS.size()[-2]
    dW=MS.size()[-1]
    x1=math.ceil(H/dH)
    x2=math.ceil(W/dW)
    for i in range(0,x1*x2):
            try:
                w=pic.size()[-1]  # width stitched so far in the current row
                if w>W-dW:
                    # overlapping last column: append only the unseen right strip
                    pic=torch.cat([pic,MS[i:i+1,:,:,w-W+dW::]],axis=3)
                else:
                    pic=torch.cat([pic,MS[i:i+1,...]],axis=3)
            except:
                pic=MS[i:i+1,...]  # first tile of a row (`pic` undefined yet)
            if (i+1)%x2==0:  # a complete row of tiles has been stitched
                try:
                    h=pics.size()[-2]  # height stitched so far
                    if h>H-dH:
                       # overlapping last row: append only the unseen bottom strip
                       pics=torch.cat([pics,pic[...,h-H+dH::,:]],axis=2)
                    else:
                       pics=torch.cat([pics,pic],axis=2)
                except:
                    pics=pic  # first row (`pics` undefined yet)
                del pic  # reset so the next row re-initialises via the except path
    return pics
def joint_picture_batch(MS, H, W):
    """Reassemble every tiled image of the batch via joint_picture.

    MS: (batch, cut_num, C, dH, dW). Returns (batch, C, H, W).
    NOTE: the batch order of the result is reversed relative to the input
    (the original prepended each stitched image); this mirrors the reversal
    done by cut_picture_batch, so the two cancel out.
    """
    joined = [joint_picture(MS[k], H, W) for k in range(MS.size()[0])]
    return torch.cat(joined[::-1], axis=0)







def get_image_to_tensor_balanced(image_size=0):
    """Image transform: optional resize, ToTensor, then normalize with
    mean/std 0.5 per channel (maps pixel values into [-1, 1])."""
    pipeline = [transforms.Resize(image_size)] if image_size > 0 else []
    pipeline += [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
    return transforms.Compose(pipeline)


def get_mask_to_tensor():
    """Mask transform: ToTensor followed by an identity normalize (mean 0, std 1)."""
    steps = [transforms.ToTensor(), transforms.Normalize((0.0,), (1.0,))]
    return transforms.Compose(steps)
def read_cam_file(filename):  # eth3d
    """Parse an ETH3D-style camera file.

    Expected layout (0-based line numbers): lines 1-4 hold the 4x4
    extrinsic matrix, lines 7-9 the 3x3 intrinsic matrix, and line 11
    starts with depth_min and has depth_max as its fourth token.

    Returns:
        (intrinsics 4x4 homogeneous, extrinsics 4x4, depth_min, depth_max)

    Fix: np.fromstring(..., sep=' ') is deprecated; replaced with the
    documented str.split + np.array equivalent.
    """
    with open(filename) as f:
        lines = [line.rstrip() for line in f.readlines()]

    extrinsics = np.array(' '.join(lines[1:5]).split(), dtype=np.float32)
    extrinsics = extrinsics.reshape((4, 4))

    intrinsics = np.array(' '.join(lines[7:10]).split(), dtype=np.float32)
    intrinsics = intrinsics.reshape((3, 3))

    # Pad the 3x3 intrinsics to a 4x4 homogeneous matrix.
    intrinsics = np.vstack([intrinsics, [0, 0, 0]])      # vertical append
    intrinsics = np.hstack([intrinsics, [[0], [0], [0], [1]]])  # horizontal append

    depth_tokens = lines[11].split()
    depth_min = float(depth_tokens[0])
    depth_max = float(depth_tokens[3])
    return intrinsics, extrinsics, depth_min, depth_max
def read_cam_file1(source):    #shark 
    """Parse a Shark-format camera file: one 10-line record per camera.

    Within each record (offsets inside the record): line 0 is the camera
    name, lines 1-3 are three rows of matrix A (first 3 floats, right-padded
    with 0) and lines 6-8 are three rows of matrix C (4 floats). Row
    [0, 0, 0, 1] is appended to both so each matrix is 4 x 4.

    Returns:
        (CameraA, CameraC): dicts mapping camera name -> 4x4 torch.Tensor.
    """
    f = open(source, "r")
    cam=f.readlines()   # all lines; records are processed by position modulo 10
    f.close()
    cameraA=[]
    cameraC=[]
    CameraA={}
    CameraC={}
    for i in range(0,len(cam)):
        p=i//10
        if(i%10==0):
            # first line of a record: the camera name (newline stripped)
            cameraname=cam[p*10].replace('\n','')
        if(i%10==1 or i%10==2 or i%10==3):
            # a row of matrix A: first 3 floats, padded with a trailing 0
            camera=cam[i].replace('\n','').replace('\t',' ').split(' ')
            camera=list(map(float, camera[0:3]))
            camera.append(0)
            cameraA.append(camera)
            if(i%10==3):
                # third row read: close the matrix with [0, 0, 0, 1]
                cameraA.append([00.000000,-0.000000,0.000000,1])
                camA= torch.Tensor(cameraA)
                CameraA[cameraname]=camA
                cameraA=[]
        if(i%10==6 or i%10==7 or i%10==8 ):
            # a row of matrix C: first 4 floats as-is
            camera=cam[i].replace('\n','').replace('\t',' ').split(' ')
            camera=list(map(float, camera[0:4]))
            cameraC.append(camera)
            if(i%10==8):
                # third row read: close the matrix with [0, 0, 0, 1]
                cameraC.append([00.000000,-0.000000,0.000000,1.000000])
                camC= torch.Tensor(cameraC)
                CameraC[(cameraname)]=camC
                cameraC=[]
    # for key, ipt in  CameraA.items():
    #     CameraA[key] = ipt.unsqueeze(0)
    # for key, ipt in  CameraC.items():
    #     CameraC[key] = ipt.unsqueeze(0)

    return CameraA,CameraC   # output: name -> 4x4 tensor (1x4x4 only if the commented unsqueeze is enabled)
# read (file-parsing helpers end here)
def gradient_penalty(images, output, weight = 10):
    """WGAN-GP gradient penalty: weight * mean((||d output / d images||_2 - 1)^2),
    with the L2 norm taken per sample over all remaining dimensions."""
    batch_size = images.shape[0]
    ones = torch.ones(output.size(), device=images.device)
    grads = torch_grad(outputs=output, inputs=images, grad_outputs=ones,
                       create_graph=True, retain_graph=True, only_inputs=True)[0]
    per_sample_norm = grads.reshape(batch_size, -1).norm(2, dim=1)
    return weight * ((per_sample_norm - 1) ** 2).mean()


def upsampling(im, sx, sy):
    """Bilinearly resize `im` to (round(sx), round(sy)) with align_corners=True."""
    return nn.functional.interpolate(
        im, size=[round(sx), round(sy)], mode='bilinear', align_corners=True)


def generate_noise(size, num_samp=1, device='cpu', type='gaussian', scale=1):
    """Sample a noise tensor of shape (num_samp, size[0], size[1], size[2]).

    type='gaussian': draw at size/scale resolution, then upsample bilinearly.
    type='gaussian_mixture': sum of an N(5, 1) draw and an N(0, 1) draw.
    type='uniform': NOTE(review) — despite the name this draws from randn
    (normal), exactly as the original implementation did.
    An unrecognised `type` raises UnboundLocalError (unchanged behaviour).
    """
    channels, h, w = size[0], size[1], size[2]
    if type == 'gaussian':
        coarse = torch.randn(num_samp, channels, round(h / scale),
                             round(w / scale), device=device)
        noise = upsampling(coarse, h, w)
    elif type == 'gaussian_mixture':
        noise = (torch.randn(num_samp, channels, h, w, device=device) + 5
                 + torch.randn(num_samp, channels, h, w, device=device))
    elif type == 'uniform':
        noise = torch.randn(num_samp, channels, h, w, device=device)
    return noise
# a=generate_noise([1,2,128,100])
# print(a.size())
