from numba import cuda, float32
import numpy as np
import time
import numba
import math

from numpy.core.shape_base import block 

# Threads Per Block (per axis) for the shared-memory tiles in
# update_chunk_in_same_row_shared_memory. NOTE(review): floyd_gpu does not
# launch that kernel and sizes its own blocks with chunk_size instead, so this
# small value only matters for the shared-memory variant — confirm before reuse.
TPB=3

def floyd_cpu(arr,length):
    """In-place Floyd-Warshall over the first `length` rows/columns of `arr`.

    `arr` is a square 2-D indexable (list of lists or ndarray) holding edge
    weights; on return arr[i][j] is the cheapest path cost from i to j.
    Serves as the CPU reference for the GPU implementation below.
    """
    for pivot in range(length):
        for src in range(length):
            for dst in range(length):
                # Relax the path src -> pivot -> dst against the current best.
                candidate=arr[src][pivot]+arr[pivot][dst]
                if candidate<arr[src][dst]:
                    arr[src][dst]=candidate

def update_chunk_in_same_row_cpu(center_chunk,chunk,chunk_size):
    """CPU reference for the row-phase relaxation of blocked Floyd-Warshall.

    Relaxes every entry of `chunk` (a tile in the same tile-row as the pivot
    tile `center_chunk`) through each pivot index k, in place. Both arguments
    are chunk_size x chunk_size 2-D indexables.
    """
    for k in range(chunk_size):
        for row in range(chunk_size):
            for col in range(chunk_size):
                # Path row -> k (inside the pivot tile) -> col (inside chunk).
                relaxed=center_chunk[row][k]+chunk[k][col]
                if relaxed<chunk[row][col]:
                    chunk[row][col]=relaxed

@cuda.jit
def update_chunk_in_same_row_shared_memory(center_chunk,chunk,chunk_size):
    # Row-phase relaxation with both tiles staged in shared memory.
    # NOTE(review): assumes the block is launched with exactly (TPB, TPB)
    # threads and chunk_size <= TPB — there is no bounds guard here, and
    # floyd_gpu never launches this kernel; confirm launch config before use.
    t_block_x=cuda.threadIdx.x
    t_block_y=cuda.threadIdx.y

    # On-chip copies of the pivot tile and the tile being updated.
    center_chunk_shared=cuda.shared.array(shape=(TPB,TPB),dtype=float32)
    chunk_shared=cuda.shared.array(shape=(TPB,TPB),dtype=float32)

    # Each thread stages one element of each tile.
    chunk_shared[t_block_x][t_block_y]=chunk[t_block_x][t_block_y]
    center_chunk_shared[t_block_x][t_block_y]=center_chunk[t_block_x][t_block_y]
    
    for k in range(chunk_size):
        # Barrier so the staged (or previously updated) values are visible
        # to every thread before they are read below.
        cuda.syncthreads()
        # Relax the path x -> k -> y; the pivot tile copy is never updated,
        # while row k of chunk_shared may be rewritten by threads of this same
        # iteration — NOTE(review): that read/write overlap between the two
        # barriers appears to rely on the usual Floyd-Warshall benign-race
        # argument (row k is unchanged when diagonal costs are >= 0); confirm.
        temp=center_chunk_shared[t_block_x][k]+chunk_shared[k][t_block_y]
        if chunk_shared[t_block_x][t_block_y]>temp:
            chunk_shared[t_block_x][t_block_y]=temp
            # Write-through to global memory so the result outlives the kernel.
            chunk[t_block_x][t_block_y]=temp
        cuda.syncthreads()

@cuda.jit
def update_chunk_in_same_row(center_chunk_k,chunks,chunk_size,mod_by_chunk,intact_chunk_num):
    """Row phase of blocked Floyd-Warshall: relax every tile that lies in the
    same tile-row as the pivot tile (tile-row center_chunk_k) through the
    already-finalized pivot tile. One thread block per tile; thread (x, y)
    owns element (x, y) of its tile.
    """
    t_block_x=cuda.threadIdx.x
    t_block_y=cuda.threadIdx.y

    # blockIdx is uniform across the block, so every thread of a block takes
    # this branch together — no barrier divergence from this return.
    if center_chunk_k==cuda.blockIdx.y:
        return

    # Edge tiles (index >= intact_chunk_num) are only mod_by_chunk wide/tall.
    ty_limitation=mod_by_chunk if cuda.blockIdx.y>=intact_chunk_num else chunk_size
    tx_limitation=mod_by_chunk if center_chunk_k>=intact_chunk_num else chunk_size
    # FIX: the original returned here for out-of-range threads, so those
    # threads skipped the cuda.syncthreads() calls in the loop below — a
    # divergent barrier, which is undefined behavior in CUDA. Keep every
    # thread in the loop and guard the memory accesses instead.
    active=t_block_x<tx_limitation and t_block_y<ty_limitation

    # Views into the global matrix: pivot tile and the tile being updated.
    center_chunk=chunks[center_chunk_k*chunk_size:(center_chunk_k+1)*chunk_size,center_chunk_k*chunk_size:(center_chunk_k+1)*chunk_size]
    chunk=chunks[center_chunk_k*chunk_size:(center_chunk_k+1)*chunk_size,cuda.blockIdx.y*chunk_size:(cuda.blockIdx.y+1)*chunk_size]

    center_chunk_size=mod_by_chunk if center_chunk_k>=intact_chunk_num else chunk_size

    for k in range(center_chunk_size):
        cuda.syncthreads()
        if active:
            # Relax the path x -> k (pivot tile) -> y.
            temp=center_chunk[t_block_x][k]+chunk[k][t_block_y]
            if chunk[t_block_x][t_block_y]>temp:
                chunk[t_block_x][t_block_y]=temp
        cuda.syncthreads()

@cuda.jit
def update_chunk_in_same_column(center_chunk_k,chunks,chunk_size,mod_by_chunk,intact_chunk_num):
    """Column phase of blocked Floyd-Warshall: relax every tile in the same
    tile-column as the pivot tile (tile-column center_chunk_k) through the
    already-finalized pivot tile. One thread block per tile; thread (x, y)
    owns element (x, y) of its tile.
    """
    tx,ty=cuda.threadIdx.x,cuda.threadIdx.y

    # Uniform per-block condition — all threads return together, no divergence.
    if center_chunk_k==cuda.blockIdx.x:
        return

    # Edge tiles (index >= intact_chunk_num) are only mod_by_chunk wide/tall.
    tx_limitation=mod_by_chunk if cuda.blockIdx.x>=intact_chunk_num else chunk_size
    ty_limitation=mod_by_chunk if center_chunk_k>=intact_chunk_num else chunk_size
    # FIX: the original returned here for out-of-range threads, so those
    # threads skipped the cuda.syncthreads() calls in the loop below — a
    # divergent barrier, which is undefined behavior in CUDA. Keep every
    # thread in the loop and guard the memory accesses instead.
    active=tx<tx_limitation and ty<ty_limitation

    # Views into the global matrix: pivot tile and the tile being updated.
    center_chunk=chunks[center_chunk_k*chunk_size:(center_chunk_k+1)*chunk_size,center_chunk_k*chunk_size:(center_chunk_k+1)*chunk_size]
    chunk=chunks[cuda.blockIdx.x*chunk_size:(cuda.blockIdx.x+1)*chunk_size,center_chunk_k*chunk_size:(center_chunk_k+1)*chunk_size]

    center_chunk_size=mod_by_chunk if center_chunk_k>=intact_chunk_num else chunk_size

    for k in range(center_chunk_size):
        cuda.syncthreads()
        if active:
            # Relax the path x -> k (pivot tile) -> y.
            temp=chunk[tx][k]+center_chunk[k][ty]
            if chunk[tx][ty]>temp:
                chunk[tx][ty]=temp
        cuda.syncthreads()

@cuda.jit
def update_general_chunk(center_chunk_k,chunks,chunk_size,mod_by_chunk,intact_chunk_num):
    """Third phase of blocked Floyd-Warshall: relax every tile outside the
    pivot row and pivot column, using the tiles already updated by the row
    and column phases. One thread block per tile; thread (row, col) owns one
    element, so no intra-block synchronization is needed here.
    """
    row=cuda.threadIdx.x
    col=cuda.threadIdx.y
    block_row=cuda.blockIdx.x
    block_col=cuda.blockIdx.y

    # Pivot-row and pivot-column tiles were finished in earlier phases.
    if block_row==center_chunk_k or block_col==center_chunk_k:
        return

    # Edge tiles are only mod_by_chunk elements wide/tall; drop extra threads.
    row_limit=mod_by_chunk if block_row>=intact_chunk_num else chunk_size
    col_limit=mod_by_chunk if block_col>=intact_chunk_num else chunk_size
    if row>=row_limit or col>=col_limit:
        return

    # Views into the global matrix: the tile sharing this tile's row with the
    # pivot column, the tile sharing its column with the pivot row, and the
    # tile being updated.
    pivot_column_tile=chunks[block_row*chunk_size:(block_row+1)*chunk_size,center_chunk_k*chunk_size:(center_chunk_k+1)*chunk_size]
    pivot_row_tile=chunks[center_chunk_k*chunk_size:(center_chunk_k+1)*chunk_size,block_col*chunk_size:(block_col+1)*chunk_size]
    tile=chunks[block_row*chunk_size:(block_row+1)*chunk_size,block_col*chunk_size:(block_col+1)*chunk_size]

    pivot_size=mod_by_chunk if center_chunk_k>=intact_chunk_num else chunk_size

    # Accumulate the minimum locally and write back once.
    best=tile[row][col]
    for k in range(pivot_size):
        relaxed=pivot_column_tile[row][k]+pivot_row_tile[k][col]
        if relaxed<best:
            best=relaxed
    tile[row][col]=best

@cuda.jit
def update_center_chunk(center_chunk_k,chunks,chunk_size,mod_by_chunk,intact_chunk_num):
    """First phase of blocked Floyd-Warshall: run Floyd-Warshall on the pivot
    tile (center_chunk_k, center_chunk_k) in place. Launched as a single
    block; thread (tx, ty) owns element (tx, ty) of the tile.
    """
    center_chunk=chunks[center_chunk_k*chunk_size:(center_chunk_k+1)*chunk_size,center_chunk_k*chunk_size:(center_chunk_k+1)*chunk_size]

    # The last tile may be smaller than chunk_size.
    center_chunk_size = mod_by_chunk if center_chunk_k >= intact_chunk_num else chunk_size

    tx,ty=cuda.threadIdx.x,cuda.threadIdx.y
    # FIX: the original returned here for out-of-range threads, so those
    # threads skipped the cuda.syncthreads() calls in the loop below — a
    # divergent barrier, which is undefined behavior in CUDA. Keep every
    # thread in the loop and guard the memory accesses instead.
    active=tx<center_chunk_size and ty<center_chunk_size

    # Floyd-Warshall restricted to the pivot tile; barriers separate the
    # iterations so updates from step k are visible at step k+1.
    for k in range(center_chunk_size):
        cuda.syncthreads()
        if active:
            temp=center_chunk[tx][k]+center_chunk[k][ty]
            if center_chunk[tx][ty]>temp:
                center_chunk[tx][ty]=temp
        cuda.syncthreads()


def floyd_gpu(arr,chunk_size=32):
    """Blocked Floyd-Warshall all-pairs shortest path on the GPU.

    arr: square adjacency matrix of the graph (edge weights).
    chunk_size: side length of the square tile each thread block processes.
        Must satisfy chunk_size**2 <= 1024 (the CUDA limit on threads per
        block), i.e. chunk_size <= 32.

    Returns the updated matrix as a device array (call .copy_to_host() on the
    result to get it back on the CPU).

    Raises ValueError if chunk_size is out of range.
    """
    # Enforce the limit the parameter comment promises instead of failing
    # inside the kernel launch.
    if chunk_size<1 or chunk_size*chunk_size>1024:
        raise ValueError("chunk_size must be in 1..32 so that chunk_size**2<=1024 threads fit in a block")

    length=len(arr)
    # Integer ceil/floor division — avoids the float round-trip of int(x/y).
    chunk_num=(length+chunk_size-1)//chunk_size
    intact_chunk_num=length//chunk_size
    mod_by_chunk=length%chunk_size
    arr_d=cuda.to_device(arr)

    threads_per_block=(chunk_size,chunk_size)
    blocks_per_grid_center=(1,1)
    blocks_per_grid_row=(1,chunk_num)
    blocks_per_grid_column=(chunk_num,1)
    blocks_per_grid_general=(chunk_num,chunk_num)
    for k in range(chunk_num):
        cuda.synchronize()
        ## Phase 1: Floyd-Warshall on the pivot tile (k, k).
        update_center_chunk[blocks_per_grid_center,threads_per_block](k,arr_d,chunk_size,mod_by_chunk,intact_chunk_num)
        cuda.synchronize()

        ## Phase 2: relax the pivot tile-row and tile-column (independent of
        ## each other, so no synchronize between the two launches).
        update_chunk_in_same_column[blocks_per_grid_column,threads_per_block](k,arr_d,chunk_size,mod_by_chunk,intact_chunk_num)
        update_chunk_in_same_row[blocks_per_grid_row,threads_per_block](k,arr_d,chunk_size,mod_by_chunk,intact_chunk_num)
        cuda.synchronize()

        ## Phase 3: relax all remaining tiles using the phase-2 results.
        update_general_chunk[blocks_per_grid_general,threads_per_block](k,arr_d,chunk_size,mod_by_chunk,intact_chunk_num)
        cuda.synchronize()
        
    return arr_d