#include "dragon/core/context_cuda.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { #define LDG(x, i) __ldg(x + i) #define LDG2(x, i) convert::To<float>(__ldg(x + i)) template <typename T> __global__ void _ResizeNearest2dNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int w_out = yi % out_w; const int h_out = (yi / out_w) % out_h; const int c = (yi / out_w / out_h) % C; const int n = yi / out_w / out_h / C; const int h = min(int(h_out * scale_h), H - 1); const int w = min(int(w_out * scale_w), W - 1); y[yi] = LDG(x, (((n * C + c) * H + h) * W + w)); } } template <typename T> __global__ void _ResizeNearest2dNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int c = yi % C; const int w_out = (yi / C) % out_w; const int h_out = (yi / C / out_w) % out_h; const int n = yi / C / out_w / out_h; const int h = min(int(h_out * scale_h), H - 1); const int w = min(int(w_out * scale_w), W - 1); y[yi] = LDG(x, (((n * H + h) * W + w) * C + c)); } } template <typename T> __global__ void _ResizeNearest2dGradNCHW( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, float* dx) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int w_out = yi % out_w; const int h_out = (yi / out_w) % out_h; const int c = (yi / out_w / out_h) % C; const int n = yi / out_w / out_h / C; const int h = min(int(h_out * scale_h), H - 1); const int w = min(int(w_out * scale_w), W - 1); math::utils::AtomicAdd(&dx[((n * C + c) * H + h) * W + w], LDG2(dy, yi)); } } template <typename T> __global__ void _ResizeNearest2dGradNHWC( const int nthreads, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, float* dx) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { const int c = yi % C; const int w_out = (yi / C) % out_w; const int h_out = (yi / C / out_w) % out_h; const int n = yi / C / out_w / out_h; const int h = min(int(h_out * scale_h), H - 1); const int w = min(int(w_out * scale_w), W - 1); math::utils::AtomicAdd(&dx[((n * H + h) * W + w) * C + c], LDG2(dy, yi)); } } template <typename T> __global__ void _ResizeNearest3dNCHW( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const float scale_d, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { int tmp = yi / out_w; const int w_out = yi % out_w; const int h_out = tmp % out_h; tmp /= out_h; const int d_out = tmp % out_d; tmp /= out_d; const int c = tmp % C; const int n = tmp / C; const int d = min(int(d_out * scale_d), D - 1); const int h = min(int(h_out * scale_h), H - 1); const int w = min(int(w_out * scale_w), W - 1); y[yi] = LDG(x, (((n * C + c) * D + d) * H + h) * W + w); } } template <typename T> __global__ void _ResizeNearest3dNHWC( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const float scale_d, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { int tmp = yi / C; const int c = yi % C; const int w_out = tmp 
% out_w; tmp /= out_w; const int h_out = tmp % out_h; tmp /= out_h; const int d_out = tmp % out_d; const int n = tmp / out_d; const int d = min(int(d_out * scale_d), D - 1); const int h = min(int(h_out * scale_h), H - 1); const int w = min(int(w_out * scale_w), W - 1); y[yi] = LDG(x, (((n * D + d) * H + h) * W + w) * C + c); } } template <typename T> __global__ void _ResizeNearest3dGradNCHW( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const float scale_d, const float scale_h, const float scale_w, const T* dy, float* dx) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { int tmp = yi / out_w; const int w_out = yi % out_w; const int h_out = tmp % out_h; tmp /= out_h; const int d_out = tmp % out_d; tmp /= out_d; const int c = tmp % C; const int n = tmp / C; const int d = min(int(d_out * scale_d), D - 1); const int h = min(int(h_out * scale_h), H - 1); const int w = min(int(w_out * scale_w), W - 1); math::utils::AtomicAdd( &dx[(((n * C + c) * D + d) * H + h) * W + w], LDG2(dy, yi)); } } template <typename T> __global__ void _ResizeNearest3dGradNHWC( const int nthreads, const int C, const int D, const int H, const int W, const int out_d, const int out_h, const int out_w, const float scale_d, const float scale_h, const float scale_w, const T* dy, float* dx) { CUDA_1D_KERNEL_LOOP(yi, nthreads) { int tmp = yi / C; const int c = yi % C; const int w_out = tmp % out_w; tmp /= out_w; const int h_out = tmp % out_h; tmp /= out_h; const int d_out = tmp % out_d; const int n = tmp / out_d; const int d = min(int(d_out * scale_d), D - 1); const int h = min(int(h_out * scale_h), H - 1); const int w = min(int(w_out * scale_w), W - 1); math::utils::AtomicAdd( &dx[(((n * D + d) * H + h) * W + w) * C + c], LDG2(dy, yi)); } } #undef LDG #undef LDG2 } // namespace /* ------------------- Launcher Separator ------------------- */ #define DISPATCH_RESIZE_KERNEL(name, T, kBlocks, kThreads, ...) 
\ if (data_format == "NCHW") { \ name##NCHW<<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else if (data_format == "NHWC") { \ name##NHWC<<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else { \ LOG(FATAL) << "Unknown DataFormat: " << data_format; \ } #define DEFINE_KERNEL_LAUNCHER(name, kBackward, InputT, OutputT) \ template <> \ void name<InputT, CUDAContext>( \ const int N, \ const int C, \ const int H, \ const int W, \ const int out_h, \ const int out_w, \ const string& data_format, \ const InputT* x, \ OutputT* y, \ CUDAContext* ctx) { \ auto nthreads = N * C * out_h * out_w; \ if (kBackward) { \ math::Set(N* C* H* W, convert::To<OutputT>(0.f), y, ctx); \ } \ DISPATCH_RESIZE_KERNEL( \ _##name, \ math::ScalarType<InputT>::type, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ H, \ W, \ out_h, \ out_w, \ (float)H / (float)out_h, \ (float)W / (float)out_w, \ reinterpret_cast<const math::ScalarType<InputT>::type*>(x), \ reinterpret_cast<math::ScalarType<OutputT>::type*>(y)); \ } DEFINE_KERNEL_LAUNCHER(ResizeNearest2d, false, uint8_t, uint8_t); DEFINE_KERNEL_LAUNCHER(ResizeNearest2d, false, int8_t, int8_t); DEFINE_KERNEL_LAUNCHER(ResizeNearest2d, false, int, int); DEFINE_KERNEL_LAUNCHER(ResizeNearest2d, false, int64_t, int64_t); DEFINE_KERNEL_LAUNCHER(ResizeNearest2d, false, float16, float16); DEFINE_KERNEL_LAUNCHER(ResizeNearest2d, false, float, float); DEFINE_KERNEL_LAUNCHER(ResizeNearest2d, false, double, double); DEFINE_KERNEL_LAUNCHER(ResizeNearest2dGrad, true, float16, float); // Grad DEFINE_KERNEL_LAUNCHER(ResizeNearest2dGrad, true, float, float); // Grad DEFINE_KERNEL_LAUNCHER(ResizeNearest2dGrad, true, double, float); // Grad #undef DEFINE_KERNEL_LAUNCHER #define DEFINE_KERNEL_LAUNCHER(name, kBackward, InputT, OutputT) \ template <> \ void name<InputT, CUDAContext>( \ const int N, \ const int C, \ const int D, \ const int H, \ const int W, \ const int out_d, \ const int out_h, \ const int out_w, \ const string& data_format, \ const InputT* x, \ OutputT* y, \ CUDAContext* ctx) { \ auto nthreads = N * C * out_d * out_h * out_w; \ if (kBackward) { \ math::Set(N* C* D* H* W, convert::To<OutputT>(0.f), y, ctx); \ } \ DISPATCH_RESIZE_KERNEL( \ _##name, \ math::ScalarType<InputT>::type, \ CUDA_BLOCKS(nthreads), \ CUDA_THREADS, \ nthreads, \ C, \ D, \ H, \ W, \ out_d, \ out_h, \ out_w, \ (float)D / (float)out_d, \ (float)H / (float)out_h, \ (float)W / (float)out_w, \ reinterpret_cast<const math::ScalarType<InputT>::type*>(x), \ reinterpret_cast<math::ScalarType<OutputT>::type*>(y)); \ } DEFINE_KERNEL_LAUNCHER(ResizeNearest3d, false, uint8_t, uint8_t); DEFINE_KERNEL_LAUNCHER(ResizeNearest3d, false, int8_t, int8_t); DEFINE_KERNEL_LAUNCHER(ResizeNearest3d, false, int, int); DEFINE_KERNEL_LAUNCHER(ResizeNearest3d, false, int64_t, int64_t); DEFINE_KERNEL_LAUNCHER(ResizeNearest3d, false, float16, float16); DEFINE_KERNEL_LAUNCHER(ResizeNearest3d, false, float, float); DEFINE_KERNEL_LAUNCHER(ResizeNearest3d, false, double, double); DEFINE_KERNEL_LAUNCHER(ResizeNearest3dGrad, true, float16, float); // Grad DEFINE_KERNEL_LAUNCHER(ResizeNearest3dGrad, true, float, float); // Grad DEFINE_KERNEL_LAUNCHER(ResizeNearest3dGrad, true, double, float); // Grad #undef DEFINE_KERNEL_LAUNCHER #undef DISPATCH_RESIZE_KERNEL } // namespace kernels } // namespace dragon #endif // USE_CUDA
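// The kernels above are pure index arithmetic: the launcher passes
// scale = in_size / out_size per axis, and every output pixel reads the
// clamped source pixel min(int(out_coord * scale), in_size - 1). Below is a
// minimal host-side reference of the NCHW 2d path (plain C++; the function
// name and std::vector interface are illustrative, not part of Dragon's API).

#include <algorithm>
#include <vector>

template <typename T>
void ResizeNearest2dNCHWRef(int N, int C, int H, int W, int out_h, int out_w,
                            const std::vector<T>& x, std::vector<T>& y) {
  const float scale_h = (float)H / (float)out_h;  // same ratios as the launcher
  const float scale_w = (float)W / (float)out_w;
  y.resize((size_t)N * C * out_h * out_w);
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int h_out = 0; h_out < out_h; ++h_out)
        for (int w_out = 0; w_out < out_w; ++w_out) {
          const int h = std::min(int(h_out * scale_h), H - 1);  // clamp row
          const int w = std::min(int(w_out * scale_w), W - 1);  // clamp col
          y[(((size_t)n * C + c) * out_h + h_out) * out_w + w_out] =
              x[(((size_t)n * C + c) * H + h) * W + w];
        }
}

// The min(..., H - 1) / min(..., W - 1) clamp keeps the read in bounds when
// floating-point rounding pushes int(out_coord * scale) up to the input extent.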
/* ----------------------------- file boundary ----------------------------- */
#include <ops/declarable/helpers/sru.h>
#include <array/NDArrayFactory.h>
#include <helpers/PointersManager.h>
#include <helpers/MmulHelper.h>

namespace sd {
namespace ops {
namespace helpers {

//////////////////////////////////////////////////////////////////////////
static FORCEINLINE NDArray activation(const NDArray& arr) {
    // return (const_cast<NDArray<T>&>(arr)).template transform<simdOps::Tanh<T>>();
    auto result = NDArray(&arr, false, arr.getContext());
    (const_cast<NDArray&>(arr)).applyTransform(transform::Tanh, result);
    return result;
}

//////////////////////////////////////////////////////////////////////////
static FORCEINLINE NDArray sigmoid(const NDArray& arr) {
    return (const_cast<NDArray&>(arr)).transform(transform::Sigmoid);
}

//////////////////////////////////////////////////////////////////////////
void sruCell(sd::LaunchContext* context, const NDArray* x, const NDArray* c0, const NDArray* w, const NDArray* b, NDArray* h, NDArray* c) {

    // x   input [bS x inSize], bS - batch size, inSize - number of features
    // c0  previous cell state c [bS x inSize], that is at previous time step t-1
    // w   weights [inSize x 3*inSize]
    // b   biases [2*inSize]
    // h   current cell output [bS x inSize], that is at current time step t
    // c   current cell state [bS x inSize], that is at current time step t

    const int inSize = x->sizeAt(1);  // inSize - number of features

    auto z = mmul(*x, *w);  // [bS x 3*inSize]

    // forget gate = sigmoid(x*Wf + bf)
    auto f = sigmoid(z({0,0, inSize, 2*inSize}) + (*b)({0, inSize}));

    // reset gate = sigmoid(x*Wr + br)
    auto r = sigmoid(z({0,0, 2*inSize, 3*inSize}) + (*b)({inSize, 2*inSize}));

    // ◦ means element-wise product or so called Hadamard product
    // current cell state = f◦c0 + (1 - f)◦(x*Wc)
    c->assign(f * (*c0) + (1.f - f) * z({0,0, 0, inSize}));
    // *c = f*(*c0 - z({},{0, inSize})) + z({{},{0, inSize}});

    // current cell output = r◦activation(c) + (1 - r)◦x
    h->assign(r * activation(*c) + (1.f - r) * (*x));
    // *h = r * (activation<T>(c) - *x) + *x;
}

//////////////////////////////////////////////////////////////////////////
void sruTimeLoop(sd::LaunchContext* context, const NDArray* x, const NDArray* c0, const NDArray* w, const NDArray* b, NDArray* h, NDArray* c) {

    // x   input [bS x inSize x time]
    // c0  initial cell state (at time step = 0) [bS x inSize]
    // w   weights, [3*inSize x inSize]
    // b   biases, [2*inSize]
    // h   cell outputs [bS x inSize x time]
    // c   cell states [bS x inSize x time]

    auto wT = w->transpose();  // [3*inSize x inSize] -> [inSize x 3*inSize]

    const int time = x->sizeAt(2);

    NDArray ct_1(*c0);

    // loop through time steps
    for (int t = 0; t < time; ++t) {
        auto xt = (*x)({0,0, 0,0, t,t+1});
        auto ht = (*h)({0,0, 0,0, t,t+1});
        auto ct = (*c)({0,0, 0,0, t,t+1});

        helpers::sruCell(context, &xt, &ct_1, &wT, b, &ht, &ct);
        ct_1.assign(ct);
    }
}

//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void sruBICuda(const void* vx, const Nd4jLong* xShapeInfo,
                                 const void* vwi, const Nd4jLong* wiShapeInfo,
                                 const void* vb, const Nd4jLong* bShapeInfo,
                                 const void* vc0, const Nd4jLong* c0ShapeInfo,
                                 const void* vmask, const Nd4jLong* maskShapeInfo,
                                 void* vht, const Nd4jLong* htShapeInfo,
                                 void* vct, const Nd4jLong* ctShapeInfo) {

    // inputs:
    // x    [time, bS, 2*K]
    // wi   [time, bS, 6*K], wi = mmul(x, weights);
    // b    [4*K]
    // c0   [bS, 2*K]
    // mask [bS, 2*K], optional

    // outputs:
    // ht   [time, bS, 2*K]
    // ct   [time, bS, 2*K]

    const auto x    = reinterpret_cast<const T*>(vx);
    const auto wi   = reinterpret_cast<const T*>(vwi);
    const auto b    = reinterpret_cast<const T*>(vb);
    const auto c0   = reinterpret_cast<const T*>(vc0);
    const auto mask = reinterpret_cast<const T*>(vmask);
    auto ht = reinterpret_cast<T*>(vht);
    auto ct = reinterpret_cast<T*>(vct);

    const int rank = 3;

    __shared__ int time, K, *sharedMem;
    __shared__ Nd4jLong len, totalThreads;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<int*>(shmem);

        time = xShapeInfo[1];
        K = xShapeInfo[3] / 2;
        len = xShapeInfo[2] * xShapeInfo[3];  // 2*K*bS
        totalThreads = gridDim.x * blockDim.x;
    }
    __syncthreads();

    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    auto coords = sharedMem + threadIdx.x * rank;

    if (tid >= len)
        return;

    shape::index2coords(tid, rank - 1, xShapeInfo + 2, coords + 1);  // loop through last two dimensions of x : {bS, 2*K}

    const auto maskOffst = mask ? shape::getOffset(maskShapeInfo, coords + 1) : 0;
    const auto c0Offset  = shape::getOffset(c0ShapeInfo, coords + 1);
    const auto bFOffset  = shape::getOffset(bShapeInfo, coords + 2);
    const auto bROffset  = bFOffset + 2 * K * bShapeInfo[2];  // 2*K*b_stride

    const T maskVal = mask ? mask[maskOffst] : static_cast<T>(1);
    const T bF = b[bFOffset];
    const T bR = b[bROffset];
    T c0Val = c0[c0Offset];

    const bool flip = coords[2] >= K;

    if (flip)
        coords[0] = time - 1;
    else
        coords[0] = 0;

    auto xOffset  = shape::getOffset(xShapeInfo, coords);
    auto htOffset = shape::getOffset(htShapeInfo, coords);
    auto ctOffset = shape::getOffset(ctShapeInfo, coords);

    coords[2] *= 3;

    auto wiOffset0 = shape::getOffset(wiShapeInfo, coords);
    auto wiOffset1 = wiOffset0 + wiShapeInfo[rank + 3];  // add last stride
    auto wiOffset2 = wiOffset1 + wiShapeInfo[rank + 3];  // add last stride

    // time loop
    for (uint t = 0; t < time; ++t) {
        // evaluate sigmoids
        T ft = (1.f) / (1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset1] + bF)));
        T rt = (1.f) / (1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset2] + bR)));

        c0Val = (c0Val - wi[wiOffset0]) * ft + wi[wiOffset0];
        ct[ctOffset] = c0Val;
        T val = sd::math::nd4j_tanh<T, T>(c0Val);

        T xVal = x[xOffset];
        ht[htOffset] = (val * maskVal - xVal) * rt + xVal;

        if (flip) {
            xOffset   -= xShapeInfo[rank + 1];  // first stride, corresponds to time step
            htOffset  -= htShapeInfo[rank + 1];
            ctOffset  -= ctShapeInfo[rank + 1];
            wiOffset0 -= wiShapeInfo[rank + 1];
            wiOffset1 -= wiShapeInfo[rank + 1];
            wiOffset2 -= wiShapeInfo[rank + 1];
        } else {
            xOffset   += xShapeInfo[rank + 1];  // first stride, corresponds to time step
            htOffset  += htShapeInfo[rank + 1];
            ctOffset  += ctShapeInfo[rank + 1];
            wiOffset0 += wiShapeInfo[rank + 1];
            wiOffset1 += wiShapeInfo[rank + 1];
            wiOffset2 += wiShapeInfo[rank + 1];
        }
    }
}

//////////////////////////////////////////////////////////////////////////
template <typename T>
static void sruBICudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t* stream,
                              const void* vx, const Nd4jLong* xShapeInfo,
                              const void* vwi, const Nd4jLong* wiShapeInfo,
                              const void* vb, const Nd4jLong* bShapeInfo,
                              const void* vc0, const Nd4jLong* c0ShapeInfo,
                              const void* vmask, const Nd4jLong* maskShapeInfo,
                              void* vht, const Nd4jLong* htShapeInfo,
                              void* vct, const Nd4jLong* ctShapeInfo) {

    sruBICuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vwi, wiShapeInfo, vb, bShapeInfo, vc0, c0ShapeInfo, vmask, maskShapeInfo, vht, htShapeInfo, vct, ctShapeInfo);
}

//////////////////////////////////////////////////////////////////////////
void sruBI(sd::LaunchContext* context, NDArray* x, const NDArray* w, const NDArray* b, const NDArray* c0, const NDArray* mask, NDArray* ht, NDArray* ct) {

    // x = x * mask
    if (mask)
        x->applyBroadcast(broadcast::Multiply, {1, 2}, *mask, *x);  // apply mask

    // U = x * w
    NDArray wi = mmul(*x, *w);  // U [time x bS x 6*K]

    PointersManager manager(context, "sru_bi");

    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    const int blocksPerGrid = (x->sizeAt(1) * x->sizeAt(2) + threadsPerBlock - 1) / threadsPerBlock;  // loop through last two dimensions of x array -> bS, 2*K
    const int sharedMem = threadsPerBlock * sizeof(int) * x->rankOf() + 128;

    NDArray::prepareSpecialUse({ht, ct}, {x, &wi, b, c0, mask});
    BUILD_SINGLE_SELECTOR(x->dataType(), sruBICudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), x->specialBuffer(), x->specialShapeInfo(), wi.specialBuffer(), wi.specialShapeInfo(), b->specialBuffer(), b->specialShapeInfo(), c0->specialBuffer(), c0->specialShapeInfo(), mask ? mask->specialBuffer() : nullptr, mask ? mask->specialShapeInfo() : nullptr, ht->specialBuffer(), ht->specialShapeInfo(), ct->specialBuffer(), ct->specialShapeInfo()), FLOAT_TYPES);
    NDArray::registerSpecialUse({ht, ct}, {x, &wi, b, c0, mask});

    manager.synchronize();
}

//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void sruBIBPCuda(const void* vx, const Nd4jLong* xShapeInfo,
                                   const void* vwi, const Nd4jLong* wiShapeInfo,
                                   const void* vb, const Nd4jLong* bShapeInfo,
                                   const void* vc0, const Nd4jLong* c0ShapeInfo,
                                   const void* vmask, const Nd4jLong* maskShapeInfo,
                                   const void* vct, const Nd4jLong* ctShapeInfo,
                                   const void* vgradHt, const Nd4jLong* gradHtShapeInfo,
                                   const void* vgradCt, const Nd4jLong* gradCtShapeInfo,
                                   void* vgradI, const Nd4jLong* gradIShapeInfo,
                                   void* vgradWi, const Nd4jLong* gradWiShapeInfo,
                                   void* vgradB, const Nd4jLong* gradBShapeInfo,
                                   void* vgradC0, const Nd4jLong* gradC0ShapeInfo) {

    // inputs:
    // x      [time, bS, 2*K]
    // wi     [time, bS, 6*K], wi = mmul(x, weights);
    // b      [4*K]
    // c0     [bS, 2*K]
    // mask   [bS, 2*K], optional
    // ct     [time, bS, 2*K]
    // gradHt [time, bS, 2*K]
    // gradCt [bS, 2*K]

    // outputs:
    // gradI  [time, bS, 2*K]
    // gradWi [time, 2*K, 6*K]
    // gradB  [bS, 4*K]
    // gradC0 [bS, 2*K]

    const auto x      = reinterpret_cast<const T*>(vx);
    const auto wi     = reinterpret_cast<const T*>(vwi);
    const auto b      = reinterpret_cast<const T*>(vb);
    const auto c0     = reinterpret_cast<const T*>(vc0);
    const auto mask   = reinterpret_cast<const T*>(vmask);
    const auto ct     = reinterpret_cast<const T*>(vct);
    const auto gradHt = reinterpret_cast<const T*>(vgradHt);
    const auto gradCt = reinterpret_cast<const T*>(vgradCt);

    auto gradI  = reinterpret_cast<T*>(vgradI);
    auto gradWi = reinterpret_cast<T*>(vgradWi);
    auto gradB  = reinterpret_cast<T*>(vgradB);
    auto gradC0 = reinterpret_cast<T*>(vgradC0);

    const int rank = 3;

    __shared__ int time, K, *sharedMem;
    __shared__ Nd4jLong len, totalThreads;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<int*>(shmem);

        time = xShapeInfo[1];
        K = xShapeInfo[3] / 2;
        len = xShapeInfo[2] * xShapeInfo[3];  // 2*K*bS
        totalThreads = gridDim.x * blockDim.x;
    }
    __syncthreads();

    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    auto coords = sharedMem + threadIdx.x * rank;

    if (tid >= len)
        return;

    shape::index2coords(tid, rank - 1, xShapeInfo + 2, coords + 1);  // loop through last two dimensions of x : {bS, 2*K}

    const auto maskOffst    = mask ? shape::getOffset(maskShapeInfo, coords + 1) : 0;
    const auto c0Offset     = shape::getOffset(c0ShapeInfo, coords + 1);
    const auto gradCtOffset = shape::getOffset(gradCtShapeInfo, coords + 1);
    const auto gradC0Offset = shape::getOffset(gradC0ShapeInfo, coords + 1);
    const auto bFOffset     = shape::getOffset(bShapeInfo, coords + 2);
    const auto bROffset     = bFOffset + 2 * K * bShapeInfo[2];  // 2*K*b_stride

    // const auto gradBFOffset = shape::getOffset(gradBShapeInfo, coords + 1);
    const auto gradBFOffset = coords[1] * gradBShapeInfo[3] / 2 + coords[2] * gradBShapeInfo[4];
    const auto gradBROffset = gradBFOffset + gradBShapeInfo[3];

    const bool flip = coords[2] >= K;

    if (flip)
        coords[0] = 0;
    else
        coords[0] = time - 1;

    auto xOffset      = shape::getOffset(xShapeInfo, coords);
    auto ctOffset     = shape::getOffset(ctShapeInfo, coords);
    auto gradIOffset  = shape::getOffset(gradIShapeInfo, coords);
    auto gradHtOffset = shape::getOffset(gradHtShapeInfo, coords);

    coords[2] *= 3;

    auto gradWiOffset0 = shape::getOffset(gradWiShapeInfo, coords);
    auto gradWiOffset1 = gradWiOffset0 + gradWiShapeInfo[rank + 3];  // add last stride
    auto gradWiOffset2 = gradWiOffset1 + gradWiShapeInfo[rank + 3];  // add last stride

    auto wiOffset0 = shape::getOffset(wiShapeInfo, coords);
    auto wiOffset1 = wiOffset0 + wiShapeInfo[rank + 3];  // add last stride
    auto wiOffset2 = wiOffset1 + wiShapeInfo[rank + 3];  // add last stride

    const T xVal    = x[xOffset];
    const T maskVal = mask ? mask[maskOffst] : static_cast<T>(1);
    const T c0Val   = c0[c0Offset];
    const T bF      = b[bFOffset];
    const T bR      = b[bROffset];
    T gradCtVal     = gradCt[gradCtOffset];
    T gbF = 0.f;
    T gbR = 0.f;

    // time loop
    for (uint t = 0; t < time; ++t) {
        // evaluate sigmoids
        T ft = (1.f) / (1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset1] + bF)));
        T rt = (1.f) / (1.f + sd::math::nd4j_exp<T, T>(-(wi[wiOffset2] + bR)));
        T val = sd::math::nd4j_tanh<T, T>(ct[ctOffset]);

        T prevVal;
        if (t < time - 1)
            prevVal = ct[ctOffset += flip ? ctShapeInfo[rank + 1] : -ctShapeInfo[rank + 1]];
        else
            prevVal = c0Val;

        // grad wrt input
        gradI[gradIOffset] = gradHt[gradHtOffset] - gradHt[gradHtOffset] * rt;

        // grad wrt rt, wiR and bR
        T grt = gradHt[gradHtOffset] * (val * maskVal - x[xOffset]) * (rt - rt * rt);
        gradWi[gradWiOffset2] = grt;
        gbR += grt;

        // grad wrt state
        T gradC0Val = gradHt[gradHtOffset] * maskVal * (rt - rt * val * val) + gradCtVal;

        // grad wrt wi0
        gradWi[gradWiOffset0] = gradC0Val - gradC0Val * ft;

        // grad wrt ft, wi1, and bF
        T gft = gradC0Val * (prevVal - wi[wiOffset0]) * (ft - ft * ft);
        gradWi[gradWiOffset1] = gft;
        gbF += gft;

        // grad wrt c_previous
        gradCtVal = gradC0Val * ft;

        if (flip) {
            xOffset       += xShapeInfo[rank + 1];  // first stride, corresponds to time step
            gradHtOffset  += gradHtShapeInfo[rank + 1];
            gradIOffset   += gradIShapeInfo[rank + 1];
            wiOffset0     += wiShapeInfo[rank + 1];
            wiOffset1     += wiShapeInfo[rank + 1];
            wiOffset2     += wiShapeInfo[rank + 1];
            gradWiOffset0 += gradWiShapeInfo[rank + 1];
            gradWiOffset1 += gradWiShapeInfo[rank + 1];
            gradWiOffset2 += gradWiShapeInfo[rank + 1];
        } else {
            xOffset       -= xShapeInfo[rank + 1];  // first stride, corresponds to time step
            gradHtOffset  -= gradHtShapeInfo[rank + 1];
            gradIOffset   -= gradIShapeInfo[rank + 1];
            wiOffset0     -= wiShapeInfo[rank + 1];
            wiOffset1     -= wiShapeInfo[rank + 1];
            wiOffset2     -= wiShapeInfo[rank + 1];
            gradWiOffset0 -= gradWiShapeInfo[rank + 1];
            gradWiOffset1 -= gradWiShapeInfo[rank + 1];
            gradWiOffset2 -= gradWiShapeInfo[rank + 1];
        }
    }

    gradB[gradBFOffset]  = gbF;
    gradB[gradBROffset]  = gbR;
    gradC0[gradC0Offset] = gradCtVal;
}

//////////////////////////////////////////////////////////////////////////
template <typename T>
static void sruBIBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t* stream,
                                const void* vx, const Nd4jLong* xShapeInfo,
                                const void* vwi, const Nd4jLong* wiShapeInfo,
                                const void* vb, const Nd4jLong* bShapeInfo,
                                const void* vc0, const Nd4jLong* c0ShapeInfo,
                                const void* vmask, const Nd4jLong* maskShapeInfo,
                                const void* vct, const Nd4jLong* ctShapeInfo,
                                const void* vgradHt, const Nd4jLong* gradHtShapeInfo,
                                const void* vgradCt, const Nd4jLong* gradCtShapeInfo,
                                void* vgradI, const Nd4jLong* gradIShapeInfo,
                                void* vgradWi, const Nd4jLong* gradWiShapeInfo,
                                void* vgradB, const Nd4jLong* gradBShapeInfo,
                                void* vgradC0, const Nd4jLong* gradC0ShapeInfo) {

    sruBIBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vwi, wiShapeInfo, vb, bShapeInfo, vc0, c0ShapeInfo, vmask, maskShapeInfo, vct, ctShapeInfo, vgradHt, gradHtShapeInfo, vgradCt, gradCtShapeInfo, vgradI, gradIShapeInfo, vgradWi, gradWiShapeInfo, vgradB, gradBShapeInfo, vgradC0, gradC0ShapeInfo);
}

BUILD_SINGLE_TEMPLATE(template void sruBIBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vwi, const Nd4jLong* wiShapeInfo, const void* vb, const Nd4jLong* bShapeInfo, const void* vc0, const Nd4jLong* c0ShapeInfo, const void* vmask, const Nd4jLong* maskShapeInfo, const void* vct, const Nd4jLong* ctShapeInfo, const void* vgradHt, const Nd4jLong* gradHtShapeInfo, const void* vgradCt, const Nd4jLong* gradCtShapeInfo, void* vgradI, const Nd4jLong* gradIShapeInfo, void* vgradWi, const Nd4jLong* gradWiShapeInfo, void* vgradB, const Nd4jLong* gradBShapeInfo, void* vgradC0, const Nd4jLong* gradC0ShapeInfo), FLOAT_TYPES);

//////////////////////////////////////////////////////////////////////////
void sruBIBP(sd::LaunchContext* context, NDArray* x, const NDArray* w, const NDArray* b, const NDArray* c0, const NDArray* ct,
             const NDArray* gradCt, const NDArray* gradHt, const NDArray* mask,
             NDArray* gradI, NDArray* gradW, NDArray* gradB, NDArray* gradC0) {

    // x = x * mask
    if (mask)
        x->applyBroadcast(broadcast::Multiply, {1, 2}, *mask, *x);  // apply mask

    // U = x * w
    NDArray wi = mmul(*x, *w);  // U [time x bS x 6*K]

    const int time = x->sizeAt(0);
    const int bS   = x->sizeAt(1);
    const int K    = x->sizeAt(2) / 2;

    NDArray gradBias(x->ordering(), {bS, 4*K},       x->dataType(), context);
    NDArray gradWi  (x->ordering(), {time, bS, 6*K}, x->dataType(), context);

    PointersManager manager(context, "sru_bi_bp");

    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    const int blocksPerGrid = (x->sizeAt(1) * x->sizeAt(2) + threadsPerBlock - 1) / threadsPerBlock;  // loop through last two dimensions of x array -> bS, 2*K
    const int sharedMem = threadsPerBlock * sizeof(int) * x->rankOf() + 128;

    NDArray::prepareSpecialUse({gradI, &gradWi, &gradBias, gradC0}, {x, &wi, b, c0, ct, gradCt, gradHt, mask});
    BUILD_SINGLE_SELECTOR(x->dataType(), sruBIBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), x->specialBuffer(), x->specialShapeInfo(), wi.specialBuffer(), wi.specialShapeInfo(), b->specialBuffer(), b->specialShapeInfo(), c0->specialBuffer(), c0->specialShapeInfo(), mask ? mask->specialBuffer() : nullptr, mask ? mask->specialShapeInfo() : nullptr, ct->specialBuffer(), ct->specialShapeInfo(), gradHt->specialBuffer(), gradHt->specialShapeInfo(), gradCt->specialBuffer(), gradCt->specialShapeInfo(), gradI->specialBuffer(), gradI->specialShapeInfo(), gradWi.specialBuffer(), gradWi.specialShapeInfo(), gradBias.specialBuffer(), gradBias.specialShapeInfo(), gradC0->specialBuffer(), gradC0->specialShapeInfo()), FLOAT_TYPES);
    NDArray::registerSpecialUse({gradI, &gradWi, &gradBias, gradC0}, {x, &wi, b, c0, ct, gradCt, gradHt, mask});

    manager.synchronize();

    // gradB
    gradBias.reduceAlongDimension(reduce::Sum, *gradB, {0});  // [4*K]

    // gradW
    x->permutei({0, 2, 1});  // [time, bS, 2*K] -> [time, 2*K, bS]
    MmulHelper::mmul(x, &gradWi, gradW, 1., 0.);  // [time, 2*K, bS] x [time, bS, 6*K] = [time, 2*K, 6*K]
}

}  // namespace helpers
}  // namespace ops
}  // namespace sd
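// Both sruCell and the fused sruBICuda kernel above implement the same
// per-feature recurrence; the kernel merely refactors it to save multiplies
// (c = (c_prev - z) * f + z equals f * c_prev + (1 - f) * z, and
// h = (tanh(c) - x) * r + x equals r * tanh(c) + (1 - r) * x). A scalar
// sketch of one step (plain C++ with hypothetical names; masking and the
// bidirectional flip are left out):

#include <cmath>

struct SruStep { float c; float h; };

// One SRU step for a single feature. z, zf, zr are the three slices of
// wi = x*W for this feature; bf, br are the matching bias entries.
inline SruStep sru_step(float x, float c_prev, float z, float zf, float zr,
                        float bf, float br) {
    const float f = 1.f / (1.f + std::exp(-(zf + bf)));  // forget gate
    const float r = 1.f / (1.f + std::exp(-(zr + br)));  // reset gate
    const float c = f * c_prev + (1.f - f) * z;          // new cell state
    const float h = r * std::tanh(c) + (1.f - r) * x;    // highway output
    return {c, h};
}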
/* ----------------------------- file boundary ----------------------------- */
// -----------------------------------------------------------------------------------------
// NVEnc by rigaya
// -----------------------------------------------------------------------------------------
//
// The MIT License
//
// Copyright (c) 2014-2016 rigaya
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
// ------------------------------------------------------------------------------------------

#include <map>
#include <array>
#include "convert_csp.h"
#include "NVEncFilterDenoiseKnn.h"
#include "NVEncParam.h"
#pragma warning (push)
#pragma warning (disable: 4819)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#pragma warning (pop)
#include "rgy_cuda_util_kernel.h"

static const int KNN_RADIUS_MAX = 5;

template<typename Type, int knn_radius, int bit_depth>
__global__ void kernel_denoise_knn(
    uint8_t *__restrict__ pDst, const int dstPitch, const int dstWidth, const int dstHeight,
    cudaTextureObject_t texSrc,
    const float strength, const float lerpC, const float weight_threshold, const float lerp_threshold) {
    const float knn_window_area = (float)((2 * knn_radius + 1) * (2 * knn_radius + 1));
    const float inv_knn_window_area = 1.0f / knn_window_area;
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix < dstWidth && iy < dstHeight) {
        const float x = (float)ix + 0.5f;
        const float y = (float)iy + 0.5f;

        float fCount = 0.0f;
        float sumWeights = 0.0f;
        float sum = 0.0f;
        float center = (float)tex2D<Type>(texSrc, x, y) * (1.0f / (1<<bit_depth));

        #pragma unroll
        for (int i = -knn_radius; i <= knn_radius; i++) {
            #pragma unroll
            for (int j = -knn_radius; j <= knn_radius; j++) {
                float clrIJ = (float)tex2D<Type>(texSrc, x + (float)j, y + (float)i) * (1.0f / (1<<bit_depth));
                float distanceIJ = (center - clrIJ) * (center - clrIJ);

                float weightIJ = __expf(-(distanceIJ * strength + (float)(i * i + j * j) * inv_knn_window_area));

                sum += clrIJ * weightIJ;
                sumWeights += weightIJ;
                fCount += (weightIJ > weight_threshold) ? inv_knn_window_area : 0;
            }
        }
        float lerpQ = (fCount > lerp_threshold) ? lerpC : 1.0f - lerpC;

        Type *ptr = (Type *)(pDst + iy * dstPitch + ix * sizeof(Type));
        ptr[0] = (Type)(lerpf(sum * __frcp_rn(sumWeights), center, lerpQ) * (1<<bit_depth));
    }
}

template<typename Type, int bit_depth>
void denoise_knn(uint8_t *pDst, const int dstPitch, const int dstWidth, const int dstHeight,
    cudaTextureObject_t texSrc,
    int radius, const float strength, const float lerpC, const float weight_threshold, const float lerp_threshold,
    cudaStream_t stream) {
    dim3 blockSize(64, 16);
    dim3 gridSize(divCeil(dstWidth, blockSize.x), divCeil(dstHeight, blockSize.y));
    switch (radius) {
    case 1:
        kernel_denoise_knn<Type, 1, bit_depth><<<gridSize, blockSize, 0, stream>>>(
            pDst, dstPitch, dstWidth, dstHeight, texSrc, 1.0f / (strength * strength), lerpC, weight_threshold, lerp_threshold);
        break;
    case 2:
        kernel_denoise_knn<Type, 2, bit_depth><<<gridSize, blockSize, 0, stream>>>(
            pDst, dstPitch, dstWidth, dstHeight, texSrc, 1.0f / (strength * strength), lerpC, weight_threshold, lerp_threshold);
        break;
    case 3:
        kernel_denoise_knn<Type, 3, bit_depth><<<gridSize, blockSize, 0, stream>>>(
            pDst, dstPitch, dstWidth, dstHeight, texSrc, 1.0f / (strength * strength), lerpC, weight_threshold, lerp_threshold);
        break;
    case 4:
        kernel_denoise_knn<Type, 4, bit_depth><<<gridSize, blockSize, 0, stream>>>(
            pDst, dstPitch, dstWidth, dstHeight, texSrc, 1.0f / (strength * strength), lerpC, weight_threshold, lerp_threshold);
        break;
    case 5:
        // radius 5 uses more registers, so reduce the thread count per block
        blockSize = dim3(32, 16);
        gridSize = dim3(divCeil(dstWidth, blockSize.x), divCeil(dstHeight, blockSize.y));
        kernel_denoise_knn<Type, 5, bit_depth><<<gridSize, blockSize, 0, stream>>>(
            pDst, dstPitch, dstWidth, dstHeight, texSrc, 1.0f / (strength * strength), lerpC, weight_threshold, lerp_threshold);
        break;
    default:
        break;
    }
}

template<typename Type>
cudaError_t textureCreateDenoiseKnn(cudaTextureObject_t& tex, cudaTextureFilterMode filterMode, cudaTextureReadMode readMode, uint8_t *ptr, int pitch, int width, int height) {
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr = ptr;
    resDesc.res.pitch2D.pitchInBytes = pitch;
    resDesc.res.pitch2D.width = width;
    resDesc.res.pitch2D.height = height;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc<Type>();

    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode = filterMode;
    texDesc.readMode = readMode;
    texDesc.normalizedCoords = 0;

    return cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);
}

template<typename Type, int bit_depth>
static cudaError_t denoise_knn_plane(RGYFrameInfo *pOutputFrame, const RGYFrameInfo *pInputFrame,
    int radius, const float strength, const float lerpC, const float weight_threshold, const float lerp_threshold,
    cudaStream_t stream) {
    cudaTextureObject_t texSrc = 0;
    auto cudaerr = textureCreateDenoiseKnn<Type>(texSrc, cudaFilterModePoint, cudaReadModeElementType, pInputFrame->ptr, pInputFrame->pitch, pInputFrame->width, pInputFrame->height);
    if (cudaerr != cudaSuccess) {
        return cudaerr;
    }
    denoise_knn<Type, bit_depth>((uint8_t *)pOutputFrame->ptr, pOutputFrame->pitch, pOutputFrame->width, pOutputFrame->height,
        texSrc, radius, strength, lerpC, weight_threshold, lerp_threshold, stream);
    cudaerr = cudaGetLastError();
    if (cudaerr != cudaSuccess) {
        return cudaerr;
    }
    cudaerr = cudaDestroyTextureObject(texSrc);
    if (cudaerr != cudaSuccess) {
        return cudaerr;
    }
    return cudaerr;
}

template<typename Type, int bit_depth>
static cudaError_t denoise_knn_frame(RGYFrameInfo *pOutputFrame, const RGYFrameInfo *pInputFrame,
    int radius, const float strength, const float lerpC, const float weight_threshold, const float lerp_threshold,
    cudaStream_t stream) {
    cudaError_t cudaerr = cudaSuccess;
    const auto planeInputY = getPlane(pInputFrame, RGY_PLANE_Y);
    const auto planeInputU = getPlane(pInputFrame, RGY_PLANE_U);
    const auto planeInputV = getPlane(pInputFrame, RGY_PLANE_V);
    auto planeOutputY = getPlane(pOutputFrame, RGY_PLANE_Y);
    auto planeOutputU = getPlane(pOutputFrame, RGY_PLANE_U);
    auto planeOutputV = getPlane(pOutputFrame, RGY_PLANE_V);
    cudaerr = denoise_knn_plane<Type, bit_depth>(&planeOutputY, &planeInputY, radius, strength, lerpC, weight_threshold, lerp_threshold, stream);
    if (cudaerr != cudaSuccess) {
        return cudaerr;
    }
    cudaerr = denoise_knn_plane<Type, bit_depth>(&planeOutputU, &planeInputU, radius, strength, lerpC, weight_threshold, lerp_threshold, stream);
    if (cudaerr != cudaSuccess) {
        return cudaerr;
    }
    cudaerr = denoise_knn_plane<Type, bit_depth>(&planeOutputV, &planeInputV, radius, strength, lerpC, weight_threshold, lerp_threshold, stream);
    if (cudaerr != cudaSuccess) {
        return cudaerr;
    }
    return cudaerr;
}

NVEncFilterDenoiseKnn::NVEncFilterDenoiseKnn() : m_bInterlacedWarn(false) {
    m_sFilterName = _T("knn");
}

NVEncFilterDenoiseKnn::~NVEncFilterDenoiseKnn() {
    close();
}

RGY_ERR NVEncFilterDenoiseKnn::init(shared_ptr<NVEncFilterParam> pParam, shared_ptr<RGYLog> pPrintMes) {
    RGY_ERR sts = RGY_ERR_NONE;
    m_pPrintMes = pPrintMes;
    auto pKnnParam = std::dynamic_pointer_cast<NVEncFilterParamDenoiseKnn>(pParam);
    if (!pKnnParam) {
        AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    // parameter check
    if (pKnnParam->frameOut.height <= 0 || pKnnParam->frameOut.width <= 0) {
        AddMessage(RGY_LOG_ERROR, _T("Invalid parameter.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    if (pKnnParam->knn.radius <= 0) {
        AddMessage(RGY_LOG_ERROR, _T("radius must be a positive value.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    if (pKnnParam->knn.radius > KNN_RADIUS_MAX) {
        AddMessage(RGY_LOG_ERROR, _T("radius must be <= %d.\n"), KNN_RADIUS_MAX);
        return RGY_ERR_INVALID_PARAM;
    }
    if (pKnnParam->knn.strength < 0.0 || 1.0 < pKnnParam->knn.strength) {
        AddMessage(RGY_LOG_ERROR, _T("strength should be 0.0 - 1.0.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    if (pKnnParam->knn.lerpC < 0.0 || 1.0 < pKnnParam->knn.lerpC) {
        AddMessage(RGY_LOG_ERROR, _T("lerpC should be 0.0 - 1.0.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    if (pKnnParam->knn.lerp_threshold < 0.0 || 1.0 < pKnnParam->knn.lerp_threshold) {
        AddMessage(RGY_LOG_ERROR, _T("th_lerp should be 0.0 - 1.0.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    if (pKnnParam->knn.weight_threshold < 0.0 || 1.0 < pKnnParam->knn.weight_threshold) {
        AddMessage(RGY_LOG_ERROR, _T("th_weight should be 0.0 - 1.0.\n"));
        return RGY_ERR_INVALID_PARAM;
    }

    auto cudaerr = AllocFrameBuf(pKnnParam->frameOut, 1);
    if (cudaerr != cudaSuccess) {
        AddMessage(RGY_LOG_ERROR, _T("failed to allocate memory: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str());
        return RGY_ERR_MEMORY_ALLOC;
    }
    pKnnParam->frameOut.pitch = m_pFrameBuf[0]->frame.pitch;

    setFilterInfo(pParam->print());
    m_pParam = pParam;
    return sts;
}

tstring NVEncFilterParamDenoiseKnn::print() const {
    return knn.print();
}

RGY_ERR NVEncFilterDenoiseKnn::run_filter(const RGYFrameInfo *pInputFrame, RGYFrameInfo **ppOutputFrames, int *pOutputFrameNum, cudaStream_t stream) {
    RGY_ERR sts = RGY_ERR_NONE;
    if (pInputFrame->ptr == nullptr) {
        return sts;
    }
    *pOutputFrameNum = 1;
    if (ppOutputFrames[0] == nullptr) {
        auto pOutFrame = m_pFrameBuf[m_nFrameIdx].get();
        ppOutputFrames[0] = &pOutFrame->frame;
        m_nFrameIdx = (m_nFrameIdx + 1) % m_pFrameBuf.size();
    }
    ppOutputFrames[0]->picstruct = pInputFrame->picstruct;
    if (interlaced(*pInputFrame)) {
        return filter_as_interlaced_pair(pInputFrame, ppOutputFrames[0], cudaStreamDefault);
    }
    const auto memcpyKind = getCudaMemcpyKind(pInputFrame->deivce_mem, ppOutputFrames[0]->deivce_mem);
    if (memcpyKind != cudaMemcpyDeviceToDevice) {
        AddMessage(RGY_LOG_ERROR, _T("only supported on device memory.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    if (m_pParam->frameOut.csp != m_pParam->frameIn.csp) {
        AddMessage(RGY_LOG_ERROR, _T("csp does not match.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    auto pKnnParam = std::dynamic_pointer_cast<NVEncFilterParamDenoiseKnn>(m_pParam);
    if (!pKnnParam) {
        AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n"));
        return RGY_ERR_INVALID_PARAM;
    }
    static const std::map<RGY_CSP, decltype(denoise_knn_frame<uint8_t, 8>)*> denoise_list = {
        { RGY_CSP_YV12,      denoise_knn_frame<uint8_t,   8> },
        { RGY_CSP_YV12_16,   denoise_knn_frame<uint16_t, 16> },
        { RGY_CSP_YUV444,    denoise_knn_frame<uint8_t,   8> },
        { RGY_CSP_YUV444_16, denoise_knn_frame<uint16_t, 16> },
    };
    if (denoise_list.count(pInputFrame->csp) == 0) {
        AddMessage(RGY_LOG_ERROR, _T("unsupported csp %s.\n"), RGY_CSP_NAMES[pInputFrame->csp]);
        return RGY_ERR_UNSUPPORTED;
    }
    denoise_list.at(pInputFrame->csp)(ppOutputFrames[0], pInputFrame,
        pKnnParam->knn.radius, pKnnParam->knn.strength, pKnnParam->knn.lerpC, pKnnParam->knn.weight_threshold, pKnnParam->knn.lerp_threshold, stream);
    auto cudaerr = cudaGetLastError();
    if (cudaerr != cudaSuccess) {
        AddMessage(RGY_LOG_ERROR, _T("error at knn(%s): %s.\n"),
            RGY_CSP_NAMES[pInputFrame->csp],
            char_to_tstring(cudaGetErrorString(cudaerr)).c_str());
        return RGY_ERR_CUDA;
    }
    return sts;
}

void NVEncFilterDenoiseKnn::close() {
    m_pFrameBuf.clear();
    m_bInterlacedWarn = false;
}
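// The weight the kernel assigns each neighbor separates into an intensity
// term and a spatial term: intensities are normalized to [0, 1], their
// squared difference is scaled by the 1/(strength*strength) that denoise_knn
// passes in as 'strength', and the squared pixel distance is normalized by
// the window area. A scalar sketch of that weight (plain C++; the name and
// signature are illustrative, the CUDA kernel uses __expf and texture reads):

#include <cmath>

inline float knn_weight(float center, float neighbor, int i, int j,
                        float strength_inv2, int radius) {
    const float inv_window_area =
        1.0f / (float)((2 * radius + 1) * (2 * radius + 1));
    const float dist2 = (center - neighbor) * (center - neighbor);  // intensity term
    return std::exp(-(dist2 * strength_inv2 +
                      (float)(i * i + j * j) * inv_window_area));   // + spatial term
}

// The final write then lerps the weighted average toward the center value:
// lerpQ is lerpC when enough neighbors exceeded weight_threshold
// (fCount > lerp_threshold), else 1 - lerpC.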
/* ----------------------------- file boundary ----------------------------- */
* \file * cub::AgentSelectIf implements a stateful abstraction of CUDA thread blocks for participating in device-wide select. */ #pragma once #include <iterator> #include "single_pass_scan_operators.cuh" #include "../block/block_load.cuh" #include "../block/block_store.cuh" #include "../block/block_scan.cuh" #include "../block/block_exchange.cuh" #include "../block/block_discontinuity.cuh" #include "../config.cuh" #include "../grid/grid_queue.cuh" #include "../iterator/cache_modified_input_iterator.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Tuning policy types ******************************************************************************/ /** * Parameterizable tuning policy type for AgentSelectIf */ template < int _BLOCK_THREADS, ///< Threads per thread block int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use struct AgentSelectIfPolicy { enum { BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) }; static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use }; /****************************************************************************** * Thread block abstractions ******************************************************************************/ /** * \brief AgentSelectIf implements a stateful abstraction of CUDA thread blocks for participating in device-wide selection * * Performs functor-based selection if SelectOpT functor type != NullType * Otherwise performs flag-based selection if FlagsInputIterator's value type != NullType * Otherwise performs discontinuity selection (keep unique) */ template < typename AgentSelectIfPolicyT, ///< Parameterized AgentSelectIfPolicy tuning policy type typename InputIteratorT, ///< Random-access input iterator type for selection items typename FlagsInputIteratorT, ///< Random-access input iterator type for selections (NullType* if a selection functor or discontinuity flagging is to be used for selection) typename SelectedOutputIteratorT, ///< Random-access input iterator type for selection_flags items typename SelectOpT, ///< Selection operator type (NullType if selections or discontinuity flagging is to be used for selection) typename EqualityOpT, ///< Equality operator type (NullType if selection functor or selections is to be used for selection) typename OffsetT, ///< Signed integer type for global offsets bool KEEP_REJECTS> ///< Whether or not we push rejected items to the back of the output struct AgentSelectIf { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<SelectedOutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value 
type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<SelectedOutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type // The flag value type typedef typename std::iterator_traits<FlagsInputIteratorT>::value_type FlagT; // Tile status descriptor interface type typedef ScanTileState<OffsetT> ScanTileStateT; // Constants enum { USE_SELECT_OP, USE_SELECT_FLAGS, USE_DISCONTINUITY, BLOCK_THREADS = AgentSelectIfPolicyT::BLOCK_THREADS, ITEMS_PER_THREAD = AgentSelectIfPolicyT::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, TWO_PHASE_SCATTER = (ITEMS_PER_THREAD > 1), SELECT_METHOD = (!Equals<SelectOpT, NullType>::VALUE) ? USE_SELECT_OP : (!Equals<FlagT, NullType>::VALUE) ? USE_SELECT_FLAGS : USE_DISCONTINUITY }; // Cache-modified Input iterator wrapper type (for applying cache modifier) for items typedef typename If<IsPointer<InputIteratorT>::VALUE, CacheModifiedInputIterator<AgentSelectIfPolicyT::LOAD_MODIFIER, InputT, OffsetT>, // Wrap the native input pointer with CacheModifiedValuesInputIterator InputIteratorT>::Type // Directly use the supplied input iterator type WrappedInputIteratorT; // Cache-modified Input iterator wrapper type (for applying cache modifier) for values typedef typename If<IsPointer<FlagsInputIteratorT>::VALUE, CacheModifiedInputIterator<AgentSelectIfPolicyT::LOAD_MODIFIER, FlagT, OffsetT>, // Wrap the native input pointer with CacheModifiedValuesInputIterator FlagsInputIteratorT>::Type // Directly use the supplied input iterator type WrappedFlagsInputIteratorT; // Parameterized BlockLoad type for input data typedef BlockLoad< OutputT, BLOCK_THREADS, ITEMS_PER_THREAD, AgentSelectIfPolicyT::LOAD_ALGORITHM> BlockLoadT; // Parameterized BlockLoad type for flags typedef BlockLoad< FlagT, BLOCK_THREADS, ITEMS_PER_THREAD, AgentSelectIfPolicyT::LOAD_ALGORITHM> BlockLoadFlags; // Parameterized BlockDiscontinuity type for items typedef BlockDiscontinuity< OutputT, BLOCK_THREADS> BlockDiscontinuityT; // Parameterized BlockScan type typedef BlockScan< OffsetT, BLOCK_THREADS, AgentSelectIfPolicyT::SCAN_ALGORITHM> BlockScanT; // Callback type for obtaining tile prefix during block scan typedef TilePrefixCallbackOp< OffsetT, cub::Sum, ScanTileStateT> TilePrefixCallbackOpT; // Item exchange type typedef OutputT ItemExchangeT[TILE_ITEMS]; // Shared memory type for this thread block union _TempStorage { struct ScanStorage { typename BlockScanT::TempStorage scan; // Smem needed for tile scanning typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback typename BlockDiscontinuityT::TempStorage discontinuity; // Smem needed for discontinuity detection } scan_storage; // Smem needed for loading items typename BlockLoadT::TempStorage load_items; // Smem needed for loading values typename BlockLoadFlags::TempStorage load_flags; // Smem needed for compacting items (allows non POD items in this union) Uninitialized<ItemExchangeT> raw_exchange; }; // Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- _TempStorage& temp_storage; ///< Reference to temp_storage WrappedInputIteratorT d_in; ///< Input items SelectedOutputIteratorT d_selected_out; ///< Unique output items WrappedFlagsInputIteratorT d_flags_in; 
///< Input selection flags (if applicable) InequalityWrapper<EqualityOpT> inequality_op; ///< T inequality operator SelectOpT select_op; ///< Selection operator OffsetT num_items; ///< Total number of input items //--------------------------------------------------------------------- // Constructor //--------------------------------------------------------------------- // Constructor __device__ __forceinline__ AgentSelectIf( TempStorage &temp_storage, ///< Reference to temp_storage InputIteratorT d_in, ///< Input data FlagsInputIteratorT d_flags_in, ///< Input selection flags (if applicable) SelectedOutputIteratorT d_selected_out, ///< Output data SelectOpT select_op, ///< Selection operator EqualityOpT equality_op, ///< Equality operator OffsetT num_items) ///< Total number of input items : temp_storage(temp_storage.Alias()), d_in(d_in), d_flags_in(d_flags_in), d_selected_out(d_selected_out), select_op(select_op), inequality_op(equality_op), num_items(num_items) {} //--------------------------------------------------------------------- // Utility methods for initializing the selections //--------------------------------------------------------------------- /** * Initialize selections (specialized for selection operator) */ template <bool IS_FIRST_TILE, bool IS_LAST_TILE> __device__ __forceinline__ void InitializeSelections( OffsetT /*tile_offset*/, OffsetT num_tile_items, OutputT (&items)[ITEMS_PER_THREAD], OffsetT (&selection_flags)[ITEMS_PER_THREAD], Int2Type<USE_SELECT_OP> /*select_method*/) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { // Out-of-bounds items are selection_flags selection_flags[ITEM] = 1; if (!IS_LAST_TILE || (OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM < num_tile_items)) selection_flags[ITEM] = select_op(items[ITEM]); } } /** * Initialize selections (specialized for valid flags) */ template <bool IS_FIRST_TILE, bool IS_LAST_TILE> __device__ __forceinline__ void InitializeSelections( OffsetT tile_offset, OffsetT num_tile_items, OutputT (&/*items*/)[ITEMS_PER_THREAD], OffsetT (&selection_flags)[ITEMS_PER_THREAD], Int2Type<USE_SELECT_FLAGS> /*select_method*/) { CTA_SYNC(); FlagT flags[ITEMS_PER_THREAD]; if (IS_LAST_TILE) { // Out-of-bounds items are selection_flags BlockLoadFlags(temp_storage.load_flags).Load(d_flags_in + tile_offset, flags, num_tile_items, 1); } else { BlockLoadFlags(temp_storage.load_flags).Load(d_flags_in + tile_offset, flags); } // Convert flag type to selection_flags type #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { selection_flags[ITEM] = flags[ITEM]; } } /** * Initialize selections (specialized for discontinuity detection) */ template <bool IS_FIRST_TILE, bool IS_LAST_TILE> __device__ __forceinline__ void InitializeSelections( OffsetT tile_offset, OffsetT num_tile_items, OutputT (&items)[ITEMS_PER_THREAD], OffsetT (&selection_flags)[ITEMS_PER_THREAD], Int2Type<USE_DISCONTINUITY> /*select_method*/) { if (IS_FIRST_TILE) { CTA_SYNC(); // Set head selection_flags. 
First tile sets the first flag for the first item BlockDiscontinuityT(temp_storage.scan_storage.discontinuity).FlagHeads(selection_flags, items, inequality_op); } else { OutputT tile_predecessor; if (threadIdx.x == 0) tile_predecessor = d_in[tile_offset - 1]; CTA_SYNC(); BlockDiscontinuityT(temp_storage.scan_storage.discontinuity).FlagHeads(selection_flags, items, inequality_op, tile_predecessor); } // Set selection flags for out-of-bounds items #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { // Set selection_flags for out-of-bounds items if ((IS_LAST_TILE) && (OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM >= num_tile_items)) selection_flags[ITEM] = 1; } } //--------------------------------------------------------------------- // Scatter utility methods //--------------------------------------------------------------------- /** * Scatter flagged items to output offsets (specialized for direct scattering) */ template <bool IS_LAST_TILE, bool IS_FIRST_TILE> __device__ __forceinline__ void ScatterDirect( OutputT (&items)[ITEMS_PER_THREAD], OffsetT (&selection_flags)[ITEMS_PER_THREAD], OffsetT (&selection_indices)[ITEMS_PER_THREAD], OffsetT num_selections) { // Scatter flagged items #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { if (selection_flags[ITEM]) { if ((!IS_LAST_TILE) || selection_indices[ITEM] < num_selections) { d_selected_out[selection_indices[ITEM]] = items[ITEM]; } } } } /** * Scatter flagged items to output offsets (specialized for two-phase scattering) */ template <bool IS_LAST_TILE, bool IS_FIRST_TILE> __device__ __forceinline__ void ScatterTwoPhase( OutputT (&items)[ITEMS_PER_THREAD], OffsetT (&selection_flags)[ITEMS_PER_THREAD], OffsetT (&selection_indices)[ITEMS_PER_THREAD], int /*num_tile_items*/, ///< Number of valid items in this tile int num_tile_selections, ///< Number of selections in this tile OffsetT num_selections_prefix, ///< Total number of selections prior to this tile OffsetT /*num_rejected_prefix*/, ///< Total number of rejections prior to this tile Int2Type<false> /*is_keep_rejects*/) ///< Marker type indicating whether to keep rejected items in the second partition { CTA_SYNC(); // Compact and scatter items #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { int local_scatter_offset = selection_indices[ITEM] - num_selections_prefix; if (selection_flags[ITEM]) { temp_storage.raw_exchange.Alias()[local_scatter_offset] = items[ITEM]; } } CTA_SYNC(); for (int item = threadIdx.x; item < num_tile_selections; item += BLOCK_THREADS) { d_selected_out[num_selections_prefix + item] = temp_storage.raw_exchange.Alias()[item]; } } /** * Scatter flagged items to output offsets (specialized for two-phase scattering) */ template <bool IS_LAST_TILE, bool IS_FIRST_TILE> __device__ __forceinline__ void ScatterTwoPhase( OutputT (&items)[ITEMS_PER_THREAD], OffsetT (&selection_flags)[ITEMS_PER_THREAD], OffsetT (&selection_indices)[ITEMS_PER_THREAD], int num_tile_items, ///< Number of valid items in this tile int num_tile_selections, ///< Number of selections in this tile OffsetT num_selections_prefix, ///< Total number of selections prior to this tile OffsetT num_rejected_prefix, ///< Total number of rejections prior to this tile Int2Type<true> /*is_keep_rejects*/) ///< Marker type indicating whether to keep rejected items in the second partition { CTA_SYNC(); int tile_num_rejections = num_tile_items - num_tile_selections; // Scatter items to shared memory (rejections first) #pragma unroll for (int ITEM = 0; ITEM < 
ITEMS_PER_THREAD; ++ITEM) { int item_idx = (threadIdx.x * ITEMS_PER_THREAD) + ITEM; int local_selection_idx = selection_indices[ITEM] - num_selections_prefix; int local_rejection_idx = item_idx - local_selection_idx; int local_scatter_offset = (selection_flags[ITEM]) ? tile_num_rejections + local_selection_idx : local_rejection_idx; temp_storage.raw_exchange.Alias()[local_scatter_offset] = items[ITEM]; } CTA_SYNC(); // Gather items from shared memory and scatter to global #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { int item_idx = (ITEM * BLOCK_THREADS) + threadIdx.x; int rejection_idx = item_idx; int selection_idx = item_idx - tile_num_rejections; OffsetT scatter_offset = (item_idx < tile_num_rejections) ? num_items - num_rejected_prefix - rejection_idx - 1 : num_selections_prefix + selection_idx; OutputT item = temp_storage.raw_exchange.Alias()[item_idx]; if (!IS_LAST_TILE || (item_idx < num_tile_items)) { d_selected_out[scatter_offset] = item; } } } /** * Scatter flagged items */ template <bool IS_LAST_TILE, bool IS_FIRST_TILE> __device__ __forceinline__ void Scatter( OutputT (&items)[ITEMS_PER_THREAD], OffsetT (&selection_flags)[ITEMS_PER_THREAD], OffsetT (&selection_indices)[ITEMS_PER_THREAD], int num_tile_items, ///< Number of valid items in this tile int num_tile_selections, ///< Number of selections in this tile OffsetT num_selections_prefix, ///< Total number of selections prior to this tile OffsetT num_rejected_prefix, ///< Total number of rejections prior to this tile OffsetT num_selections) ///< Total number of selections including this tile { // Do a two-phase scatter if (a) keeping both partitions or (b) two-phase is enabled and the average number of selection_flags items per thread is greater than one if (KEEP_REJECTS || (TWO_PHASE_SCATTER && (num_tile_selections > BLOCK_THREADS))) { ScatterTwoPhase<IS_LAST_TILE, IS_FIRST_TILE>( items, selection_flags, selection_indices, num_tile_items, num_tile_selections, num_selections_prefix, num_rejected_prefix, Int2Type<KEEP_REJECTS>()); } else { ScatterDirect<IS_LAST_TILE, IS_FIRST_TILE>( items, selection_flags, selection_indices, num_selections); } } //--------------------------------------------------------------------- // Cooperatively scan a device-wide sequence of tiles with other CTAs //--------------------------------------------------------------------- /** * Process first tile of input (dynamic chained scan). 
Returns the running count of selections (including this tile) */ template <bool IS_LAST_TILE> __device__ __forceinline__ OffsetT ConsumeFirstTile( int num_tile_items, ///< Number of input items comprising this tile OffsetT tile_offset, ///< Tile offset ScanTileStateT& tile_state) ///< Global tile state descriptor { OutputT items[ITEMS_PER_THREAD]; OffsetT selection_flags[ITEMS_PER_THREAD]; OffsetT selection_indices[ITEMS_PER_THREAD]; // Load items if (IS_LAST_TILE) BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items, num_tile_items); else BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items); // Initialize selection_flags InitializeSelections<true, IS_LAST_TILE>( tile_offset, num_tile_items, items, selection_flags, Int2Type<SELECT_METHOD>()); CTA_SYNC(); // Exclusive scan of selection_flags OffsetT num_tile_selections; BlockScanT(temp_storage.scan_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_tile_selections); if (threadIdx.x == 0) { // Update tile status if this is not the last tile if (!IS_LAST_TILE) tile_state.SetInclusive(0, num_tile_selections); } // Discount any out-of-bounds selections if (IS_LAST_TILE) num_tile_selections -= (TILE_ITEMS - num_tile_items); // Scatter flagged items Scatter<IS_LAST_TILE, true>( items, selection_flags, selection_indices, num_tile_items, num_tile_selections, 0, 0, num_tile_selections); return num_tile_selections; } /** * Process subsequent tile of input (dynamic chained scan). Returns the running count of selections (including this tile) */ template <bool IS_LAST_TILE> __device__ __forceinline__ OffsetT ConsumeSubsequentTile( int num_tile_items, ///< Number of input items comprising this tile int tile_idx, ///< Tile index OffsetT tile_offset, ///< Tile offset ScanTileStateT& tile_state) ///< Global tile state descriptor { OutputT items[ITEMS_PER_THREAD]; OffsetT selection_flags[ITEMS_PER_THREAD]; OffsetT selection_indices[ITEMS_PER_THREAD]; // Load items if (IS_LAST_TILE) BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items, num_tile_items); else BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items); // Initialize selection_flags InitializeSelections<false, IS_LAST_TILE>( tile_offset, num_tile_items, items, selection_flags, Int2Type<SELECT_METHOD>()); CTA_SYNC(); // Exclusive scan of values and selection_flags TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.scan_storage.prefix, cub::Sum(), tile_idx); BlockScanT(temp_storage.scan_storage.scan).ExclusiveSum(selection_flags, selection_indices, prefix_op); OffsetT num_tile_selections = prefix_op.GetBlockAggregate(); OffsetT num_selections = prefix_op.GetInclusivePrefix(); OffsetT num_selections_prefix = prefix_op.GetExclusivePrefix(); OffsetT num_rejected_prefix = (tile_idx * TILE_ITEMS) - num_selections_prefix; // Discount any out-of-bounds selections if (IS_LAST_TILE) { int num_discount = TILE_ITEMS - num_tile_items; num_selections -= num_discount; num_tile_selections -= num_discount; } // Scatter flagged items Scatter<IS_LAST_TILE, false>( items, selection_flags, selection_indices, num_tile_items, num_tile_selections, num_selections_prefix, num_rejected_prefix, num_selections); return num_selections; } /** * Process a tile of input */ template <bool IS_LAST_TILE> __device__ __forceinline__ OffsetT ConsumeTile( int num_tile_items, ///< Number of input items comprising this tile int tile_idx, ///< Tile index OffsetT tile_offset, ///< Tile offset ScanTileStateT& tile_state) ///< Global tile state descriptor { OffsetT 
num_selections; if (tile_idx == 0) { num_selections = ConsumeFirstTile<IS_LAST_TILE>(num_tile_items, tile_offset, tile_state); } else { num_selections = ConsumeSubsequentTile<IS_LAST_TILE>(num_tile_items, tile_idx, tile_offset, tile_state); } return num_selections; } /** * Scan tiles of items as part of a dynamic chained scan */ template <typename NumSelectedIteratorT> ///< Output iterator type for recording the number of items selected __device__ __forceinline__ void ConsumeRange( int num_tiles, ///< Total number of input tiles ScanTileStateT& tile_state, ///< Global tile state descriptor NumSelectedIteratorT d_num_selected_out) ///< Output total number of items selected { // Blocks are launched in increasing order, so just assign one tile per block int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index OffsetT tile_offset = tile_idx * TILE_ITEMS; // Global offset for the current tile if (tile_idx < num_tiles - 1) { // Not the last tile (full) ConsumeTile<false>(TILE_ITEMS, tile_idx, tile_offset, tile_state); } else { // The last tile (possibly partially-full) OffsetT num_remaining = num_items - tile_offset; OffsetT num_selections = ConsumeTile<true>(num_remaining, tile_idx, tile_offset, tile_state); if (threadIdx.x == 0) { // Output the total number of items selected *d_num_selected_out = num_selections; } } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
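// The agent above is the per-CTA workhorse behind cub's device-level selection
// entry points; it is not launched directly by user code. A minimal host-side
// sketch of how such an agent is typically driven, using the public
// cub::DeviceSelect::Flagged API (the buffer names here are illustrative
// assumptions, not part of this file):
#include <cub/cub.cuh>

void SelectFlaggedSketch(const int *d_in, const char *d_flags, int *d_out,
                         int *d_num_selected_out, int num_items)
{
    void *d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;
    // First call only computes the required temporary storage size.
    cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes,
                               d_in, d_flags, d_out, d_num_selected_out,
                               num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    // Second call runs the selection; internally each thread block consumes
    // one tile via an agent like the one defined above.
    cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes,
                               d_in, d_flags, d_out, d_num_selected_out,
                               num_items);
    cudaFree(d_temp_storage);
}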
#include "cupoch/geometry/pointcloud.h" #include "cupoch/integration/integrate_functor.h" #include "cupoch/integration/marching_cubes_const.h" #include "cupoch/integration/scalable_tsdfvolume.h" #include "cupoch/utility/console.h" #include "cupoch/utility/platform.h" #include "cupoch/utility/range.h" namespace cupoch { namespace integration { typedef stdgpu::unordered_map<Eigen::Vector3i, ScalableTSDFVolume::VolumeUnit<>, utility::hash_eigen<Eigen::Vector3i>> VolumeUnitsMap; class ScalableTSDFVolume::VolumeUnitsImpl { public: VolumeUnitsMap volume_units_; }; namespace { struct scalable_integrate_functor : public integrate_functor { scalable_integrate_functor( float fx, float fy, float cx, float cy, const Eigen::Matrix4f &extrinsic, float voxel_length, float sdf_trunc, float safe_width, float safe_height, int resolution, const uint8_t *color, const uint8_t *depth, const uint8_t *depth_to_camera_distance_multiplier, int width, int num_of_channels, TSDFVolumeColorType color_type, VolumeUnitsMap volume_units) : integrate_functor(fx, fy, cx, cy, extrinsic, voxel_length, sdf_trunc, safe_width, safe_height, resolution, color, depth, depth_to_camera_distance_multiplier, width, num_of_channels, color_type), volume_units_(volume_units){}; VolumeUnitsMap volume_units_; __device__ void operator()(size_t idx) { int res2 = resolution_ * resolution_; int res3 = res2 * resolution_; int n_v = idx / res3; int xyz = idx % res3; int x = xyz / res2; int yz = xyz % res2; int y = yz / resolution_; int z = yz % resolution_; auto &tsdfvol = (volume_units_.begin() + n_v)->second; if (tsdfvol.is_initialized_) { ComputeTSDF(tsdfvol.voxels_[xyz], tsdfvol.origin_, x, y, z); } } }; __global__ void OpenVolumeUnitKernel(const Eigen::Vector3f *points, float sdf_trunc, float volume_unit_length, int n, VolumeUnitsMap volume_units) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > n) return; auto min_bound = LocateVolumeUnit(points[idx] - Eigen::Vector3f::Constant(sdf_trunc), volume_unit_length); auto max_bound = LocateVolumeUnit(points[idx] + Eigen::Vector3f::Constant(sdf_trunc), volume_unit_length); for (auto x = min_bound(0); x <= max_bound(0); x++) { for (auto y = min_bound(1); y <= max_bound(1); y++) { for (auto z = min_bound(2); z <= max_bound(2); z++) { Eigen::Vector3i loc = Eigen::Vector3i(x, y, z); if (!volume_units.contains(loc)) { volume_units.emplace(loc, ScalableTSDFVolume::VolumeUnit<>( loc.cast<float>() * volume_unit_length)); } } } } } struct extract_pointcloud_functor { extract_pointcloud_functor( const VolumeUnitsMap &volume_units, const stdgpu::device_indexed_range<const VolumeUnitsMap::value_type> &range, int resolution, float voxel_length, float volume_unit_length, TSDFVolumeColorType color_type, Eigen::Vector3f *points, Eigen::Vector3f *normals, Eigen::Vector3f *colors) : volume_units_(volume_units), range_(range), resolution_(resolution), voxel_length_(voxel_length), half_voxel_length_(0.5 * voxel_length_), volume_unit_length_(volume_unit_length), color_type_(color_type), points_(points), normals_(normals), colors_(colors){}; const VolumeUnitsMap volume_units_; const stdgpu::device_indexed_range<const VolumeUnitsMap::value_type> range_; const int resolution_; const float voxel_length_; const float half_voxel_length_; const float volume_unit_length_; const TSDFVolumeColorType color_type_; Eigen::Vector3f *points_; Eigen::Vector3f *normals_; Eigen::Vector3f *colors_; __device__ Eigen::Vector3f GetNormalAt(const Eigen::Vector3f &p) { Eigen::Vector3f n; const float half_gap = 0.99 * 
voxel_length_; for (int i = 0; i < 3; i++) { Eigen::Vector3f p0 = p; p0(i) -= half_gap; Eigen::Vector3f p1 = p; p1(i) += half_gap; n(i) = GetTSDFAt(p1) - GetTSDFAt(p0); } return n.normalized(); } __device__ float GetTSDFAt(const Eigen::Vector3f &p) { Eigen::Vector3f p_locate = p - Eigen::Vector3f(0.5, 0.5, 0.5) * voxel_length_; Eigen::Vector3i index0 = LocateVolumeUnit(p_locate, volume_unit_length_); auto unit_itr = volume_units_.find(index0); if (unit_itr == volume_units_.end()) { return 0.0; } const auto &volume0 = unit_itr->second; Eigen::Vector3i idx0; Eigen::Vector3f p_grid = (p_locate - index0.cast<float>() * volume_unit_length_) / voxel_length_; for (int i = 0; i < 3; i++) { idx0(i) = (int)floorf(p_grid(i)); if (idx0(i) < 0) idx0(i) = 0; if (idx0(i) >= resolution_) idx0(i) = resolution_ - 1; } Eigen::Vector3f r = p_grid - idx0.cast<float>(); float f[8]; for (int i = 0; i < 8; i++) { Eigen::Vector3i index1 = index0; Eigen::Vector3i idx1 = idx0 + Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]); if (idx1(0) < resolution_ && idx1(1) < resolution_ && idx1(2) < resolution_) { f[i] = volume0.voxels_[IndexOf(idx1, resolution_)].tsdf_; } else { for (int j = 0; j < 3; j++) { if (idx1(j) >= resolution_) { idx1(j) -= resolution_; index1(j) += 1; } } auto unit_itr1 = volume_units_.find(index1); if (unit_itr1 == volume_units_.end()) { f[i] = 0.0f; } else { const auto &volume1 = unit_itr1->second; f[i] = volume1.voxels_[IndexOf(idx1, resolution_)].tsdf_; } } } return (1 - r(0)) * ((1 - r(1)) * ((1 - r(2)) * f[0] + r(2) * f[4]) + r(1) * ((1 - r(2)) * f[3] + r(2) * f[7])) + r(0) * ((1 - r(1)) * ((1 - r(2)) * f[1] + r(2) * f[5]) + r(1) * ((1 - r(2)) * f[2] + r(2) * f[6])); } __device__ void operator()(const size_t idx) { int res2 = resolution_ * resolution_; int res3 = res2 * resolution_; int n_v = idx / res3; int xyz = idx % res3; int x = xyz / res2; int yz = xyz % res2; int y = yz / resolution_; int z = yz % resolution_; const auto pair_val = *(range_.begin() + n_v); const auto &index0 = pair_val.first; const auto &volume0 = pair_val.second; Eigen::Vector3i idx0(x, y, z); float w0 = volume0.voxels_[IndexOf(idx0, resolution_)].weight_; float f0 = volume0.voxels_[IndexOf(idx0, resolution_)].tsdf_; Eigen::Vector3f c0 = Eigen::Vector3f::Zero(); if (color_type_ != TSDFVolumeColorType::NoColor) c0 = volume0.voxels_[IndexOf(idx0, resolution_)].color_; if (w0 != 0.0f && f0 < 0.98f && f0 >= -0.98f) { Eigen::Vector3f p0 = Eigen::Vector3f(half_voxel_length_ + voxel_length_ * x, half_voxel_length_ + voxel_length_ * y, half_voxel_length_ + voxel_length_ * z) + index0.cast<float>() * volume_unit_length_; float w1, f1; Eigen::Vector3f c1; for (int i = 0; i < 3; i++) { Eigen::Vector3f p1 = p0; Eigen::Vector3i idx1 = idx0; Eigen::Vector3i index1 = index0; p1(i) += voxel_length_; idx1(i) += 1; if (idx1(i) < resolution_) { w1 = volume0.voxels_[IndexOf(idx1, resolution_)].weight_; f1 = volume0.voxels_[IndexOf(idx1, resolution_)].tsdf_; if (color_type_ != TSDFVolumeColorType::NoColor) c1 = volume0.voxels_[IndexOf(idx1, resolution_)].color_; } else { idx1(i) -= resolution_; index1(i) += 1; auto unit_itr = volume_units_.find(index1); if (unit_itr == volume_units_.end()) { w1 = 0.0f; f1 = 0.0f; } else { const auto &volume1 = unit_itr->second; w1 = volume1.voxels_[IndexOf(idx1, resolution_)] .weight_; f1 = volume1.voxels_[IndexOf(idx1, resolution_)].tsdf_; if (color_type_ != TSDFVolumeColorType::NoColor) c1 = volume1.voxels_[IndexOf(idx1, resolution_)] .color_; } } if (w1 != 0.0f && f1 < 0.98f && f1 >= -0.98f && f0 
* f1 < 0) { float r0 = abs(f0); float r1 = abs(f1); Eigen::Vector3f p = p0; p(i) = (p0(i) * r1 + p1(i) * r0) / (r0 + r1); points_[idx * 3 + i] = p; if (color_type_ == TSDFVolumeColorType::RGB8) { colors_[idx * 3 + i] = ((c0 * r1 + c1 * r0) / (r0 + r1) / 255.0f); } else if (color_type_ == TSDFVolumeColorType::Gray32) { colors_[idx * 3 + i] = ((c0 * r1 + c1 * r0) / (r0 + r1)); } // has_normal normals_[idx * 3 + i] = GetNormalAt(p); } } } } }; } // namespace ScalableTSDFVolume::ScalableTSDFVolume(float voxel_length, float sdf_trunc, TSDFVolumeColorType color_type, int depth_sampling_stride /* = 4*/, int map_size /* = 1000*/) : TSDFVolume(voxel_length, sdf_trunc, color_type), volume_unit_length_(voxel_length * VolumeUnit<>::GetResolution()), resolution_(VolumeUnit<>::GetResolution()), volume_unit_voxel_num_(VolumeUnit<>::GetVoxelNum()), depth_sampling_stride_(depth_sampling_stride) { impl_ = std::make_shared<VolumeUnitsImpl>(); impl_->volume_units_ = VolumeUnitsMap::createDeviceObject(map_size); } ScalableTSDFVolume::~ScalableTSDFVolume() { VolumeUnitsMap::destroyDeviceObject(impl_->volume_units_); } void ScalableTSDFVolume::Reset() { impl_->volume_units_.clear(); } void ScalableTSDFVolume::Integrate( const geometry::RGBDImage &image, const camera::PinholeCameraIntrinsic &intrinsic, const Eigen::Matrix4f &extrinsic) { if ((image.depth_.num_of_channels_ != 1) || (image.depth_.bytes_per_channel_ != 4) || (image.depth_.width_ != intrinsic.width_) || (image.depth_.height_ != intrinsic.height_) || (color_type_ == TSDFVolumeColorType::RGB8 && image.color_.num_of_channels_ != 3) || (color_type_ == TSDFVolumeColorType::RGB8 && image.color_.bytes_per_channel_ != 1) || (color_type_ == TSDFVolumeColorType::Gray32 && image.color_.num_of_channels_ != 1) || (color_type_ == TSDFVolumeColorType::Gray32 && image.color_.bytes_per_channel_ != 4) || (color_type_ != TSDFVolumeColorType::NoColor && image.color_.width_ != intrinsic.width_) || (color_type_ != TSDFVolumeColorType::NoColor && image.color_.height_ != intrinsic.height_)) { utility::LogError( "[ScalableTSDFVolume::Integrate] Unsupported image format."); } auto depth2cameradistance = geometry::Image::CreateDepthToCameraDistanceMultiplierFloatImage( intrinsic); auto pointcloud = geometry::PointCloud::CreateFromDepthImage( image.depth_, intrinsic, extrinsic, 1000.0, 1000.0, depth_sampling_stride_); size_t n_points = pointcloud->points_.size(); const dim3 threads(32); const dim3 blocks((n_points + threads.x - 1) / threads.x); OpenVolumeUnitKernel<<<blocks, threads>>>( thrust::raw_pointer_cast(pointcloud->points_.data()), sdf_trunc_, volume_unit_length_, n_points, impl_->volume_units_); cudaSafeCall(cudaDeviceSynchronize()); cudaSafeCall(cudaGetLastError()); IntegrateWithDepthToCameraDistanceMultiplier(image, intrinsic, extrinsic, *depth2cameradistance); } std::shared_ptr<geometry::PointCloud> ScalableTSDFVolume::ExtractPointCloud() { auto pointcloud = std::make_shared<geometry::PointCloud>(); size_t n_total = impl_->volume_units_.size() * volume_unit_voxel_num_; const Eigen::Vector3f nanvec = Eigen::Vector3f::Constant(std::numeric_limits<float>::quiet_NaN()); pointcloud->points_.resize(3 * n_total, nanvec); pointcloud->normals_.resize(3 * n_total, nanvec); pointcloud->colors_.resize(3 * n_total, nanvec); extract_pointcloud_functor func( impl_->volume_units_, impl_->volume_units_.device_range(), resolution_, voxel_length_, volume_unit_length_, color_type_, thrust::raw_pointer_cast(pointcloud->points_.data()), 
thrust::raw_pointer_cast(pointcloud->normals_.data()), thrust::raw_pointer_cast(pointcloud->colors_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_total), func); pointcloud->RemoveNoneFinitePoints(true, true); return pointcloud; } std::shared_ptr<geometry::TriangleMesh> ScalableTSDFVolume::ExtractTriangleMesh() { utility::LogError( "ScalableTSDFVolume::ExtractTriangleMesh is not implemented"); auto mesh = std::make_shared<geometry::TriangleMesh>(); return mesh; } void ScalableTSDFVolume::IntegrateWithDepthToCameraDistanceMultiplier( const geometry::RGBDImage &image, const camera::PinholeCameraIntrinsic &intrinsic, const Eigen::Matrix4f &extrinsic, const geometry::Image &depth_to_camera_distance_multiplier) { const float fx = intrinsic.GetFocalLength().first; const float fy = intrinsic.GetFocalLength().second; const float cx = intrinsic.GetPrincipalPoint().first; const float cy = intrinsic.GetPrincipalPoint().second; const float safe_width = intrinsic.width_ - 0.0001f; const float safe_height = intrinsic.height_ - 0.0001f; scalable_integrate_functor func( fx, fy, cx, cy, extrinsic, voxel_length_, sdf_trunc_, safe_width, safe_height, resolution_, thrust::raw_pointer_cast(image.color_.data_.data()), thrust::raw_pointer_cast(image.depth_.data_.data()), thrust::raw_pointer_cast( depth_to_camera_distance_multiplier.data_.data()), image.depth_.width_, image.color_.num_of_channels_, color_type_, impl_->volume_units_); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>( impl_->volume_units_.max_size() * VolumeUnit<>::GetVoxelNum()), func); } } // namespace integration } // namespace cupoch
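// A minimal end-to-end usage sketch for the volume defined above (an
// illustration, not part of this file): `rgbd`, `intrinsic`, and `extrinsic`
// are assumed to be a valid cupoch RGBD frame, camera model, and camera pose;
// the voxel and truncation sizes are illustrative.
std::shared_ptr<cupoch::geometry::PointCloud> IntegrateOneFrameSketch(
        const cupoch::geometry::RGBDImage &rgbd,
        const cupoch::camera::PinholeCameraIntrinsic &intrinsic,
        const Eigen::Matrix4f &extrinsic) {
    // 4 mm voxels with a 4 cm truncation band; volume units are created
    // on demand by OpenVolumeUnitKernel during Integrate().
    cupoch::integration::ScalableTSDFVolume volume(
            0.004f, 0.04f, cupoch::integration::TSDFVolumeColorType::RGB8);
    volume.Integrate(rgbd, intrinsic, extrinsic);
    // Zero-crossing extraction; NaN-padded entries are removed internally.
    return volume.ExtractPointCloud();
}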
//Includes for IntelliSense #define _SIZE_T_DEFINED #ifndef __CUDACC__ #define __CUDACC__ #endif #ifndef __cplusplus #define __cplusplus #endif extern "C" { //kernel code __global__ void VectorInputDiffKernel( float *input, int inputSize, float *referenceVector, int maxCells, float *difference ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < maxCells * inputSize) { difference[threadId] = input[threadId % inputSize] - referenceVector[threadId]; } } __global__ void ComputeDistanceKernel( int inputSize, float *distance, float *dimensionWeight, int maxCells, float *difference ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < maxCells) { float sum = 0.00f; float value; for(int i = 0; i < inputSize; i++) { value = difference[threadId * inputSize + i]; sum += dimensionWeight[i] * value*value; } distance[threadId] = sqrtf(sum); } } __global__ void AddLocalErrorKernel( int s1, float *distance, float *localError ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < 1) { localError[s1] += distance[s1] * distance[s1]; } } __global__ void AddUtilityKernel( int s1, int s2, float *distance, float *utility ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < 1) { utility[s1] += distance[s2] - distance[s1]; } } __global__ void AdaptWinningFractionKernel( int s1, float *winningFraction, int *winningCount, float bParam, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < maxCells) { winningFraction[threadId] = winningFraction[threadId] + bParam * ((float)(threadId == s1) - winningFraction[threadId]); winningCount[threadId] = winningCount[threadId] + (threadId == s1) * 1; } } __global__ void ComputeBiasTermKernel( float *biasTerm, float cFactor, float *winningFraction, int activeCells, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < maxCells) { biasTerm[threadId] = cFactor * ( 1.00f / activeCells - winningFraction[threadId]); } } __global__ void ComputeBiasedDistanceKernel( float *distance, float *biasedDistance, float *biasTerm, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < maxCells) { biasedDistance[threadId] = distance[threadId] + biasTerm[threadId]; } } __global__ void CreateAndRefreshConnectionKernel( int s1, int s2, int *connection, int *age, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < 1) { connection[s1 * maxCells + s2] = 1; age[s1 * maxCells + s2] = 0; connection[s2 * maxCells + s1] = 1; age[s2 * maxCells + s1] = 0; } } __global__ void AdaptRefVectorKernel( int cell, float *referenceVector, float oldErrorFraction, float youngErrorFraction, float decayFactor, int *winningCount, float *difference, int inputSize ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < inputSize) { float errorFraction = (youngErrorFraction - oldErrorFraction) * expf( - decayFactor * winningCount[cell] ) + oldErrorFraction; referenceVector[cell * inputSize + threadId] += errorFraction * difference[cell * inputSize + threadId]; } } __global__ void IncrementConnectionAgeKernel( int cell, int *connection, int *age, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < maxCells) { if(connection[cell * maxCells + threadId] == 1) { age[cell * maxCells + threadId] += 1; age[threadId * maxCells + cell] += 1; } } } __global__ void RemoveEdgesKernel( int *connection, int *age, int maxAge, int *activityFlag, float *winningFraction, int *winningCount, float *utility, float *localError, int *neuronAge, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < maxCells) { if(activityFlag[threadId] == 1) { neuronAge[threadId] = neuronAge[threadId] + 1; // TO DO : GET RID OF IFs & ELSEs int activeConnections = 0; int connId; for(int c = 0; c < maxCells; c++) { connId = threadId * maxCells + c; if(connection[connId] == 1) { if(age[connId] <= maxAge) { activeConnections++; } else { connection[connId] = 0; age[connId] = 0; } } } if(activeConnections == 0) { activityFlag[threadId] = 0; localError[threadId] = 0.00f; neuronAge[threadId] = 0; winningFraction[threadId] = 0.00f; winningCount[threadId] = 0; utility[threadId] = 0.00f; } } } } __global__ void RemoveNodeByUtilityKernel( int *connectionMatrix, int *connectionAge, int *activityFlag, float *utility, float utilityConstant, float *localError, int *neuronAge, float *winningFraction, int *winningCount, float maxError, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < maxCells) { if(activityFlag[threadId] == 1) { if(utility[threadId] > 0.00f) { if( maxError / utility[threadId] > utilityConstant ) { activityFlag[threadId] = 0; localError[threadId] = 0.00f; neuronAge[threadId] = 0; winningFraction[threadId] = 0.00f; winningCount[threadId] = 0; utility[threadId] = 0.00f; for(int n = 0; n < maxCells; n++) { connectionMatrix[threadId * maxCells + n] = 0; connectionAge[threadId * maxCells + n] = 0; connectionMatrix[n * maxCells + threadId] = 0; connectionAge[n * maxCells + threadId] = 0; } } } } } } __global__ void InterpolateVectorKernel( int r, int q, int f, int inputSize, float *referenceVector ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < inputSize) { referenceVector[r * inputSize + threadId] = 0.50f * (referenceVector[q * inputSize + threadId] + referenceVector[f * inputSize + threadId]); } } __global__ void NewNodeConnectionKernel( int f, int q, int r, int *activityFlag, int *connection, int *age, float *localError, float alfa, int maxCells, float errorFraction ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < 1) { activityFlag[r] = 1; connection[q * maxCells + f] = 0; age[q * maxCells + f] = 0; connection[f * maxCells + q] = 0; age[f * maxCells + q] = 0; connection[q * maxCells + r] = 1; age[q * maxCells + r] = 0; connection[r * maxCells + q] = 1; age[r * maxCells + q] = 0; connection[f * maxCells + r] = 1; age[f * maxCells + r] = 0; connection[r * maxCells + f] = 1; age[r * maxCells + f] = 0; localError[q] -= alfa * localError[q]; localError[f] -= alfa * localError[f]; localError[r] = errorFraction * (localError[q] + localError[f]); } } __global__ void AddAndRefreshConnectionKernel( int node1, int node2, int *activityFlag, int *connection, int *age, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < 1) { activityFlag[node1] = 1; activityFlag[node2] = 1; connection[node1 * maxCells + node2] = 1; age[node1 * maxCells + node2] = 0; connection[node2 * maxCells + node1] = 1; age[node2 * maxCells + node1] = 0; } } __global__ void TwoNodesDifferenceKernel( int nodeOne, int nodeTwo, int vectorLength, float *referenceVector, float *twoNodesDifference ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < vectorLength) { twoNodesDifference[threadId] = referenceVector[nodeOne * vectorLength + threadId] - referenceVector[nodeTwo * vectorLength + threadId]; } } __global__ void TwoNodesDistanceKernel( float *twoNodesDifference, float *twoNodesDistance, int vectorLength ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < 1) { float sum = 0.00f; float value; for(int i = 0; i < vectorLength; i++) { value = twoNodesDifference[threadId * vectorLength + i]; sum += value*value; } twoNodesDistance[threadId] = sqrtf(sum); } } __global__ void CopyVectorKernel( float *from, int fromOffset, float *to, int toOffset, int vectorSize ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < vectorSize) { to[threadId + toOffset] = from[threadId + fromOffset]; } } __global__ void DecreaseErrorAndUtilityKernel( float *localError, float *utility, int *activityFlag, int maxCells, float beta ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; if(threadId < maxCells) { if(activityFlag[threadId] == 1) { localError[threadId] -= beta * localError[threadId]; utility[threadId] -= beta * utility[threadId]; } } } __global__ void ComputeErrorPerWinningKernel( float *localError, int *winningCount, float *errorPerWinning, int *activityFlag, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid + blockDim.x*blockIdx.x //blocks preceding current block + threadIdx.x; // TO DO: GET RID OF IF-ELSE if(threadId < maxCells) { if(activityFlag[threadId] == 1) { if(winningCount[threadId] != 0) { errorPerWinning[threadId] = localError[threadId] / (float)winningCount[threadId]; } else { errorPerWinning[threadId] = 0.00f; } } } } }
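// Host-side launch sketch for the kernels above (not part of this file; the
// wrapper and buffer names are illustrative assumptions). The kernels flatten
// a 2D grid as blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x +
// threadIdx.x, so a plain 1D grid (gridDim.y == 1) reduces to the familiar
// blockIdx.x*blockDim.x + threadIdx.x and is the simplest valid configuration.
void LaunchComputeDistanceSketch(int inputSize, float *d_distance,
                                 float *d_dimensionWeight, int maxCells,
                                 float *d_difference)
{
    const int threadsPerBlock = 256;
    // One thread per cell; round the block count up.
    const int blocks = (maxCells + threadsPerBlock - 1) / threadsPerBlock;
    ComputeDistanceKernel<<<dim3(blocks, 1), dim3(threadsPerBlock)>>>(
        inputSize, d_distance, d_dimensionWeight, maxCells, d_difference);
}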
#include "anonymouslib_cuda.h" #include "mmio.h" using namespace std; #ifndef VALUE_TYPE #define VALUE_TYPE double #endif #ifndef NUM_RUN #define NUM_RUN 1000 #endif int call_anonymouslib(int m, int n, int nnzA, int *csrRowPtrA, int *csrColIdxA, VALUE_TYPE *csrValA, VALUE_TYPE *x, VALUE_TYPE *y, VALUE_TYPE alpha) { int err = 0; cudaError_t err_cuda = cudaSuccess; // set device int device_id = 0; cudaSetDevice(device_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device_id); cout << "Device [" << device_id << "] " << deviceProp.name << ", " << " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << endl; double gb = getB<int, VALUE_TYPE>(m, nnzA); double gflop = getFLOP<int>(nnzA); // Define pointers of matrix A, vector x and y int *d_csrRowPtrA; int *d_csrColIdxA; VALUE_TYPE *d_csrValA; VALUE_TYPE *d_x; VALUE_TYPE *d_y; // Matrix A checkCudaErrors(cudaMalloc((void **)&d_csrRowPtrA, (m+1) * sizeof(int))); checkCudaErrors(cudaMalloc((void **)&d_csrColIdxA, nnzA * sizeof(int))); checkCudaErrors(cudaMalloc((void **)&d_csrValA, nnzA * sizeof(VALUE_TYPE))); checkCudaErrors(cudaMemcpy(d_csrRowPtrA, csrRowPtrA, (m+1) * sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_csrColIdxA, csrColIdxA, nnzA * sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_csrValA, csrValA, nnzA * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); // Vector x checkCudaErrors(cudaMalloc((void **)&d_x, n * sizeof(VALUE_TYPE))); checkCudaErrors(cudaMemcpy(d_x, x, n * sizeof(VALUE_TYPE), cudaMemcpyHostToDevice)); // Vector y checkCudaErrors(cudaMalloc((void **)&d_y, m * sizeof(VALUE_TYPE))); checkCudaErrors(cudaMemset(d_y, 0, m * sizeof(VALUE_TYPE))); anonymouslibHandle<int, unsigned int, VALUE_TYPE> A(m, n); err = A.inputCSR(nnzA, d_csrRowPtrA, d_csrColIdxA, d_csrValA); //cout << "inputCSR err = " << err << endl; err = A.setX(d_x); // you only need to do it once! //cout << "setX err = " << err << endl; A.setSigma(ANONYMOUSLIB_AUTO_TUNED_SIGMA); // warmup device A.warmup(); anonymouslib_timer asCSR5_timer; asCSR5_timer.start(); err = A.asCSR5(); cout << "CSR->CSR5 time = " << asCSR5_timer.stop() << " ms." << endl; //cout << "asCSR5 err = " << err << endl; // check correctness by running 1 time err = A.spmv(alpha, d_y); //cout << "spmv err = " << err << endl; checkCudaErrors(cudaMemcpy(y, d_y, m * sizeof(VALUE_TYPE), cudaMemcpyDeviceToHost)); // warm up by running 50 times if (NUM_RUN) { for (int i = 0; i < 50; i++) err = A.spmv(alpha, d_y); } err_cuda = cudaDeviceSynchronize(); anonymouslib_timer CSR5Spmv_timer; CSR5Spmv_timer.start(); // time spmv by running NUM_RUN times for (int i = 0; i < NUM_RUN; i++) err = A.spmv(alpha, d_y); err_cuda = cudaDeviceSynchronize(); double CSR5Spmv_time = CSR5Spmv_timer.stop() / (double)NUM_RUN; if (NUM_RUN) cout << "CSR5-based SpMV time = " << CSR5Spmv_time << " ms. Bandwidth = " << gb/(1.0e+6 * CSR5Spmv_time) << " GB/s. GFlops = " << gflop/(1.0e+6 * CSR5Spmv_time) << " GFlops." 
<< endl; A.destroy(); checkCudaErrors(cudaFree(d_csrRowPtrA)); checkCudaErrors(cudaFree(d_csrColIdxA)); checkCudaErrors(cudaFree(d_csrValA)); checkCudaErrors(cudaFree(d_x)); checkCudaErrors(cudaFree(d_y)); return err; } int main(int argc, char ** argv) { int m, n, nnzA; int *csrRowPtrA; int *csrColIdxA; VALUE_TYPE *csrValA; // report precision of floating-point cout << "------------------------------------------------------" << endl; char *precision; if (sizeof(VALUE_TYPE) == 4) { precision = "32-bit Single Precision"; } else if (sizeof(VALUE_TYPE) == 8) { precision = "64-bit Double Precision"; } else { cout << "Wrong precision. Program exit!" << endl; return 0; } cout << "PRECISION = " << precision << endl; cout << "------------------------------------------------------" << endl; //ex: ./spmv webbase-1M.mtx int argi = 1; char *filename; if(argc > argi) { filename = argv[argi]; argi++; } cout << "--------------" << filename << "--------------" << endl; // read matrix from mtx file int ret_code; MM_typecode matcode; FILE *f; int nnzA_mtx_report; int isInteger = 0, isReal = 0, isPattern = 0, isSymmetric = 0; // load matrix if ((f = fopen(filename, "r")) == NULL) return -1; if (mm_read_banner(f, &matcode) != 0) { cout << "Could not process Matrix Market banner." << endl; return -2; } if ( mm_is_complex( matcode ) ) { cout <<"Sorry, data type 'COMPLEX' is not supported. " << endl; return -3; } if ( mm_is_pattern( matcode ) ) { isPattern = 1; /*cout << "type = Pattern" << endl;*/ } if ( mm_is_real ( matcode) ) { isReal = 1; /*cout << "type = real" << endl;*/ } if ( mm_is_integer ( matcode ) ) { isInteger = 1; /*cout << "type = integer" << endl;*/ } /* find out size of sparse matrix .... */ ret_code = mm_read_mtx_crd_size(f, &m, &n, &nnzA_mtx_report); if (ret_code != 0) return -4; if ( mm_is_symmetric( matcode ) || mm_is_hermitian( matcode ) ) { isSymmetric = 1; //cout << "symmetric = true" << endl; } else { //cout << "symmetric = false" << endl; } int *csrRowPtrA_counter = (int *)malloc((m+1) * sizeof(int)); memset(csrRowPtrA_counter, 0, (m+1) * sizeof(int)); int *csrRowIdxA_tmp = (int *)malloc(nnzA_mtx_report * sizeof(int)); int *csrColIdxA_tmp = (int *)malloc(nnzA_mtx_report * sizeof(int)); VALUE_TYPE *csrValA_tmp = (VALUE_TYPE *)malloc(nnzA_mtx_report * sizeof(VALUE_TYPE)); /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 
136 lines 13-15) */ for (int i = 0; i < nnzA_mtx_report; i++) { int idxi, idxj; double fval; int ival; if (isReal) fscanf(f, "%d %d %lg\n", &idxi, &idxj, &fval); else if (isInteger) { fscanf(f, "%d %d %d\n", &idxi, &idxj, &ival); fval = ival; } else if (isPattern) { fscanf(f, "%d %d\n", &idxi, &idxj); fval = 1.0; } // adjust from 1-based to 0-based idxi--; idxj--; csrRowPtrA_counter[idxi]++; csrRowIdxA_tmp[i] = idxi; csrColIdxA_tmp[i] = idxj; csrValA_tmp[i] = fval; } if (f != stdin) fclose(f); if (isSymmetric) { for (int i = 0; i < nnzA_mtx_report; i++) { if (csrRowIdxA_tmp[i] != csrColIdxA_tmp[i]) csrRowPtrA_counter[csrColIdxA_tmp[i]]++; } } // exclusive scan for csrRowPtrA_counter int old_val, new_val; old_val = csrRowPtrA_counter[0]; csrRowPtrA_counter[0] = 0; for (int i = 1; i <= m; i++) { new_val = csrRowPtrA_counter[i]; csrRowPtrA_counter[i] = old_val + csrRowPtrA_counter[i-1]; old_val = new_val; } nnzA = csrRowPtrA_counter[m]; csrRowPtrA = (int *)malloc((m+1) * sizeof(int)); memcpy(csrRowPtrA, csrRowPtrA_counter, (m+1) * sizeof(int)); memset(csrRowPtrA_counter, 0, (m+1) * sizeof(int)); csrColIdxA = (int *)malloc(nnzA * sizeof(int)); csrValA = (VALUE_TYPE *)malloc(nnzA * sizeof(VALUE_TYPE)); if (isSymmetric) { for (int i = 0; i < nnzA_mtx_report; i++) { if (csrRowIdxA_tmp[i] != csrColIdxA_tmp[i]) { int offset = csrRowPtrA[csrRowIdxA_tmp[i]] + csrRowPtrA_counter[csrRowIdxA_tmp[i]]; csrColIdxA[offset] = csrColIdxA_tmp[i]; csrValA[offset] = csrValA_tmp[i]; csrRowPtrA_counter[csrRowIdxA_tmp[i]]++; offset = csrRowPtrA[csrColIdxA_tmp[i]] + csrRowPtrA_counter[csrColIdxA_tmp[i]]; csrColIdxA[offset] = csrRowIdxA_tmp[i]; csrValA[offset] = csrValA_tmp[i]; csrRowPtrA_counter[csrColIdxA_tmp[i]]++; } else { int offset = csrRowPtrA[csrRowIdxA_tmp[i]] + csrRowPtrA_counter[csrRowIdxA_tmp[i]]; csrColIdxA[offset] = csrColIdxA_tmp[i]; csrValA[offset] = csrValA_tmp[i]; csrRowPtrA_counter[csrRowIdxA_tmp[i]]++; } } } else { for (int i = 0; i < nnzA_mtx_report; i++) { int offset = csrRowPtrA[csrRowIdxA_tmp[i]] + csrRowPtrA_counter[csrRowIdxA_tmp[i]]; csrColIdxA[offset] = csrColIdxA_tmp[i]; csrValA[offset] = csrValA_tmp[i]; csrRowPtrA_counter[csrRowIdxA_tmp[i]]++; } } // free tmp space free(csrColIdxA_tmp); free(csrValA_tmp); free(csrRowIdxA_tmp); free(csrRowPtrA_counter); srand(time(NULL)); // set csrValA to 1, easy for checking floating-point results for (int i = 0; i < nnzA; i++) { csrValA[i] = rand() % 10; } cout << " ( " << m << ", " << n << " ) nnz = " << nnzA << endl; VALUE_TYPE *x = (VALUE_TYPE *)malloc(n * sizeof(VALUE_TYPE)); for (int i = 0; i < n; i++) x[i] = rand() % 10; VALUE_TYPE *y = (VALUE_TYPE *)malloc(m * sizeof(VALUE_TYPE)); VALUE_TYPE *y_ref = (VALUE_TYPE *)malloc(m * sizeof(VALUE_TYPE)); double gb = getB<int, VALUE_TYPE>(m, nnzA); double gflop = getFLOP<int>(nnzA); VALUE_TYPE alpha = 1.0; // compute reference results on a cpu core anonymouslib_timer ref_timer; ref_timer.start(); int ref_iter = 1; for (int iter = 0; iter < ref_iter; iter++) { for (int i = 0; i < m; i++) { VALUE_TYPE sum = 0; for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++) sum += x[csrColIdxA[j]] * csrValA[j] * alpha; y_ref[i] = sum; } } double ref_time = ref_timer.stop() / (double)ref_iter; cout << "cpu sequential time = " << ref_time << " ms. Bandwidth = " << gb/(1.0e+6 * ref_time) << " GB/s. GFlops = " << gflop/(1.0e+6 * ref_time) << " GFlops." 
<< endl << endl; // launch compute call_anonymouslib(m, n, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y, alpha); // compare reference and anonymouslib results int error_count = 0; for (int i = 0; i < m; i++) if (abs(y_ref[i] - y[i]) > 0.01 * abs(y_ref[i])) { error_count++; // cout << "ROW [ " << i << " ], NNZ SPAN: " // << csrRowPtrA[i] << " - " // << csrRowPtrA[i+1] // << "\t ref = " << y_ref[i] // << ", \t csr5 = " << y[i] // << ", \t error = " << y_ref[i] - y[i] // << endl; // break; // //if (abs(y_ref[i] - y[i]) > 0.00001) // // cout << ", \t error = " << y_ref[i] - y[i] << endl; // //else // // cout << ". \t CORRECT!" << endl; } if (error_count == 0) cout << "Check... PASS!" << endl; else cout << "Check... NO PASS! #Error = " << error_count << " out of " << m << " entries." << endl; cout << "------------------------------------------------------" << endl; free(csrRowPtrA); free(csrColIdxA); free(csrValA); free(x); free(y); free(y_ref); return 0; }
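// For reference: the bandwidth/GFlops numbers printed above divide getB/getFLOP
// by the measured time. A common cost model for CSR SpMV, which these helpers
// plausibly implement (an assumption; the real definitions live in the
// anonymouslib headers), is sketched below.
template <typename iT, typename vT>
double EstimateCsrSpmvBytes(int m, int n, int nnz)
{
    // Row pointer + column indices, plus one value read per nonzero,
    // one x read per column, and one y write per row.
    return (double)((m + 1) + nnz) * sizeof(iT)
         + (double)(nnz + n + m) * sizeof(vT);
}

double EstimateCsrSpmvFlops(int nnz)
{
    // One multiply and one add per stored nonzero.
    return 2.0 * (double)nnz;
}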
#include "grid_sample_kernel_util.h" namespace oneflow { class CudnnGridSampleDesc final { public: OF_DISALLOW_COPY_AND_MOVE(CudnnGridSampleDesc); CudnnGridSampleDesc(DataType data_type, const ShapeView& shape) { std::vector<int> tensor_dim({shape.ptr(), shape.ptr() + shape.NumAxes()}); OF_CUDNN_CHECK(cudnnCreateSpatialTransformerDescriptor(&val_)); OF_CUDNN_CHECK(cudnnSetSpatialTransformerNdDescriptor(val_, CUDNN_SAMPLER_BILINEAR, GetCudnnDataType(data_type), shape.NumAxes(), tensor_dim.data())); } ~CudnnGridSampleDesc() { OF_CUDNN_CHECK(cudnnDestroySpatialTransformerDescriptor(val_)); } const cudnnSpatialTransformerDescriptor_t& Get() const { return val_; } private: cudnnSpatialTransformerDescriptor_t val_; }; template<typename T> struct CudnnGridSampleKernelUtil { static bool CanRunWithCudnn(user_op::KernelComputeContext* ctx) { if (ctx->Attr<std::string>("interpolation_mode") != "bilinear" || ctx->Attr<std::string>("padding_mode") != "zeros" || !ctx->Attr<bool>("align_corners")) { return false; } const ShapeView& input_shape = ctx->Tensor4ArgNameAndIndex("input", 0)->shape(); if (input_shape.NumAxes() != 4 || input_shape.At(1) > 1024) { return false; } return true; } static void ForwardCompute(user_op::KernelComputeContext* ctx) { const user_op::Tensor* input = ctx->Tensor4ArgNameAndIndex("input", 0); const user_op::Tensor* grid = ctx->Tensor4ArgNameAndIndex("grid", 0); user_op::Tensor* output = ctx->Tensor4ArgNameAndIndex("output", 0); const ShapeView& input_shape = input->shape(); const ShapeView& output_shape = output->shape(); const DataType dtype = input->data_type(); CudnnTensorDesc input_desc(dtype, input_shape, "channels_first"); CudnnTensorDesc output_desc(dtype, output_shape, "channels_first"); CudnnGridSampleDesc transfomer_desc(dtype, output_shape); OF_CUDNN_CHECK(cudnnSpatialTfSamplerForward( ctx->device_ctx()->cudnn_handle(), transfomer_desc.Get(), CudnnSPOnePtr<T>(), input_desc.Get(), input->dptr(), grid->dptr(), CudnnSPZeroPtr<T>(), output_desc.Get(), output->mut_dptr())); } static void BackwardCompute(user_op::KernelComputeContext* ctx) { const user_op::Tensor* doutput = ctx->Tensor4ArgNameAndIndex("doutput", 0); const user_op::Tensor* input = ctx->Tensor4ArgNameAndIndex("input", 0); const user_op::Tensor* grid = ctx->Tensor4ArgNameAndIndex("grid", 0); user_op::Tensor* dinput = ctx->Tensor4ArgNameAndIndex("dinput", 0); user_op::Tensor* dgrid = ctx->Tensor4ArgNameAndIndex("dgrid", 0); const ShapeView& input_shape = input->shape(); const ShapeView& output_shape = doutput->shape(); const ShapeView& dinput_shape = dinput->shape(); const DataType dtype = input->data_type(); CudnnTensorDesc input_desc(dtype, input_shape, "channels_first"); CudnnTensorDesc output_desc(dtype, output_shape, "channels_first"); CudnnTensorDesc dinput_desc(dtype, dinput_shape, "channels_first"); CudnnGridSampleDesc transfomer_desc(dtype, output_shape); OF_CUDNN_CHECK(cudnnSpatialTfSamplerBackward( ctx->device_ctx()->cudnn_handle(), transfomer_desc.Get(), CudnnSPOnePtr<T>(), input_desc.Get(), input->dptr(), CudnnSPZeroPtr<T>(), dinput_desc.Get(), dinput->mut_dptr(), CudnnSPOnePtr<T>(), output_desc.Get(), doutput->dptr(), grid->dptr(), CudnnSPZeroPtr<T>(), dgrid->mut_dptr())); } }; template<typename data_type, typename index_type> __launch_bounds__(256) __global__ void CUDAGridSampler4DKernel(const index_type nthreads, const data_type* input_ptr, const data_type* grid_ptr, data_type* output_ptr, index_type N, index_type C, index_type inp_H, index_type inp_W, index_type out_H, index_type out_W, const 
GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, const bool align_corners) { GridSampler4DKernel(nthreads, input_ptr, grid_ptr, output_ptr, N, C, inp_H, inp_W, out_H, out_W, interpolation_mode, padding_mode, align_corners); } template<typename data_type, typename index_type> __launch_bounds__(512) __global__ void CUDAGridSampler5DKernel(const index_type nthreads, const data_type* input_ptr, const data_type* grid_ptr, data_type* output_ptr, index_type N, index_type C, index_type inp_D, index_type inp_H, index_type inp_W, index_type out_D, index_type out_H, index_type out_W, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, const bool align_corners) { GridSampler5DKernel(nthreads, input_ptr, grid_ptr, output_ptr, N, C, inp_D, inp_H, inp_W, out_D, out_H, out_W, interpolation_mode, padding_mode, align_corners); } template<typename data_type, typename index_type> __launch_bounds__(256) __global__ void CUDAGridSampler4DBackwardKernel( const index_type nthreads, const data_type* grad_output_ptr, const data_type* input_ptr, const data_type* grid_ptr, data_type* grad_input_ptr, data_type* grad_grid_ptr, index_type N, index_type C, index_type inp_H, index_type inp_W, index_type out_H, index_type out_W, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, const bool align_corners, const index_type grad_input_memory_span) { GridSampler4DBackwardKernel(nthreads, grad_output_ptr, input_ptr, grid_ptr, grad_input_ptr, grad_grid_ptr, N, C, inp_H, inp_W, out_H, out_W, interpolation_mode, padding_mode, align_corners, grad_input_memory_span); } template<typename data_type, typename index_type> __launch_bounds__(256) __global__ void CUDAGridSampler5DBackwardKernel( const index_type nthreads, const data_type* grad_output_ptr, const data_type* input_ptr, const data_type* grid_ptr, data_type* grad_input_ptr, data_type* grad_grid_ptr, index_type N, index_type C, index_type inp_D, index_type inp_H, index_type inp_W, index_type out_D, index_type out_H, index_type out_W, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, const bool align_corners, const index_type grad_input_memory_span) { GridSampler5DBackwardKernel(nthreads, grad_output_ptr, input_ptr, grid_ptr, grad_input_ptr, grad_grid_ptr, N, C, inp_D, inp_H, inp_W, out_D, out_H, out_W, interpolation_mode, padding_mode, align_corners, grad_input_memory_span); } template<typename data_type, typename index_type> struct GridSampleKernelUtil<DeviceType::kGPU, data_type, index_type> final { static void Forward4D(user_op::KernelComputeContext* ctx, const user_op::Tensor* input, const user_op::Tensor* grid, user_op::Tensor* output, GridSamplerInterpolation interpolation, GridSamplerPadding padding, const bool align_corners, const ShapeView& input_shape, const ShapeView& grid_shape, const ShapeView& output_shape, int64_t count) { if (CudnnGridSampleKernelUtil<data_type>::CanRunWithCudnn(ctx) && CanUse32BitIndex({input_shape, grid_shape, output_shape})) { return CudnnGridSampleKernelUtil<data_type>::ForwardCompute(ctx); } CUDAGridSampler4DKernel<data_type, index_type> <<<GridSampleGetBlocks(count, 256), 256, 0, ctx->device_ctx()->cuda_stream()>>>( count, input->dptr<data_type>(), grid->dptr<data_type>(), output->mut_dptr<data_type>(), input_shape.At(0), input_shape.At(1), input_shape.At(2), input_shape.At(3), output_shape.At(2), output_shape.At(3), interpolation, padding, align_corners); } static void 
Forward5D(user_op::KernelComputeContext* ctx, const user_op::Tensor* input, const user_op::Tensor* grid, user_op::Tensor* output, GridSamplerInterpolation interpolation, GridSamplerPadding padding, const bool align_corners, const ShapeView& input_shape, const ShapeView& grid_shape, const ShapeView& output_shape, int64_t count) { CUDAGridSampler5DKernel<data_type, index_type> <<<GridSampleGetBlocks(count, 512), 512, 0, ctx->device_ctx()->cuda_stream()>>>( count, input->dptr<data_type>(), grid->dptr<data_type>(), output->mut_dptr<data_type>(), input_shape.At(0), input_shape.At(1), input_shape.At(2), input_shape.At(3), input_shape.At(4), output_shape.At(2), output_shape.At(3), output_shape.At(4), interpolation, padding, align_corners); } static void Backward4D(user_op::KernelComputeContext* ctx, const user_op::Tensor* doutput, const user_op::Tensor* input, const user_op::Tensor* grid, user_op::Tensor* dinput, user_op::Tensor* dgrid, GridSamplerInterpolation interpolation, GridSamplerPadding padding, const bool align_corners, const ShapeView& input_shape, const ShapeView& grid_shape, const ShapeView& output_shape, int64_t count) { if (CudnnGridSampleKernelUtil<data_type>::CanRunWithCudnn(ctx) && CanUse32BitIndex({input_shape, grid_shape, output_shape})) { return CudnnGridSampleKernelUtil<data_type>::BackwardCompute(ctx); } CUDAGridSampler4DBackwardKernel<data_type, index_type> <<<GridSampleGetBlocks(count, 256), 256, 0, ctx->device_ctx()->cuda_stream()>>>( count, doutput->dptr<data_type>(), input->dptr<data_type>(), grid->dptr<data_type>(), dinput->mut_dptr<data_type>(), dgrid->mut_dptr<data_type>(), input_shape.At(0), input_shape.At(1), input_shape.At(2), input_shape.At(3), output_shape.At(2), output_shape.At(3), interpolation, padding, align_corners, input_shape.elem_cnt()); } static void Backward5D(user_op::KernelComputeContext* ctx, const user_op::Tensor* doutput, const user_op::Tensor* input, const user_op::Tensor* grid, user_op::Tensor* dinput, user_op::Tensor* dgrid, GridSamplerInterpolation interpolation, GridSamplerPadding padding, const bool align_corners, const ShapeView& input_shape, const ShapeView& grid_shape, const ShapeView& output_shape, int64_t count) { CUDAGridSampler5DBackwardKernel<data_type, index_type> <<<GridSampleGetBlocks(count, 256), 256, 0, ctx->device_ctx()->cuda_stream()>>>( count, doutput->dptr<data_type>(), input->dptr<data_type>(), grid->dptr<data_type>(), dinput->mut_dptr<data_type>(), dgrid->mut_dptr<data_type>(), input_shape.At(0), input_shape.At(1), input_shape.At(2), input_shape.At(3), input_shape.At(4), output_shape.At(2), output_shape.At(3), output_shape.At(4), interpolation, padding, align_corners, input_shape.elem_cnt()); } }; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_GRID_SAMPLE_KERNEL_UTIL, (DeviceType::kGPU), FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ); } // namespace oneflow
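// For reference, each output element of the 4D forward path above is a
// bilinear sample of one input plane at a grid-supplied location. A simplified
// device-side sketch of that math (standard grid_sample convention with
// align_corners = true and zero padding; an illustration, not the actual body
// of GridSampler4DKernel):
__device__ __forceinline__ float TapSketch(const float* plane, int inp_H,
                                           int inp_W, int y, int x) {
  // Zero padding: out-of-range taps contribute nothing.
  return (x >= 0 && x < inp_W && y >= 0 && y < inp_H) ? plane[y * inp_W + x]
                                                      : 0.f;
}

__device__ float BilinearSampleSketch(const float* plane, int inp_H, int inp_W,
                                      float gx, float gy) {
  // Unnormalize: [-1, 1] -> [0, size - 1] (align_corners = true convention).
  float x = (gx + 1.f) * 0.5f * (inp_W - 1);
  float y = (gy + 1.f) * 0.5f * (inp_H - 1);
  int x0 = (int)floorf(x);
  int y0 = (int)floorf(y);
  float wx = x - x0;
  float wy = y - y0;
  // Weighted sum of the four surrounding taps.
  return (1.f - wy) * ((1.f - wx) * TapSketch(plane, inp_H, inp_W, y0, x0)
                     + wx * TapSketch(plane, inp_H, inp_W, y0, x0 + 1))
       + wy * ((1.f - wx) * TapSketch(plane, inp_H, inp_W, y0 + 1, x0)
             + wx * TapSketch(plane, inp_H, inp_W, y0 + 1, x0 + 1));
}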
#ifndef IDIVUP #define IDIVUP(i,j) ((i+j-1)/j) #endif /// /// Constructor for cuAmpcorParameter class /// also sets the default/initial values of various parameters /// cuAmpcorParameter::cuAmpcorParameter() { // default settings // will be changed if they are set by python scripts algorithm = 0; //0 freq; 1 time deviceID = 0; nStreams = 1; derampMethod = 1; windowSizeWidthRaw = 64; windowSizeHeightRaw = 64; halfSearchRangeDownRaw = 20; halfSearchRangeAcrossRaw = 20; skipSampleAcrossRaw = 64; skipSampleDownRaw = 64; rawDataOversamplingFactor = 2; zoomWindowSize = 16; oversamplingFactor = 16; oversamplingMethod = 0; referenceImageName = "reference.slc"; referenceImageWidth = 1000; referenceImageHeight = 1000; secondaryImageName = "secondary.slc"; secondaryImageWidth = 1000; secondaryImageHeight = 1000; offsetImageName = "DenseOffset.off"; grossOffsetImageName = "GrossOffset.off"; snrImageName = "snr.snr"; covImageName = "cov.cov"; numberWindowDown = 1; numberWindowAcross = 1; numberWindowDownInChunk = 1; numberWindowAcrossInChunk = 1; referenceStartPixelDown0 = 0; referenceStartPixelAcross0 = 0; corrStatWindowSize = 21; // 10*2+1 as in RIOPAC useMmap = 1; // use mmap mmapSizeInGB = 1; mergeGrossOffset = 0; // default to separate gross offset } /** * To determine other process parameters after reading essential parameters from python */ void cuAmpcorParameter::setupParameters() { // Size to extract the raw correlation surface for snr/cov corrRawZoomInHeight = std::min(corrStatWindowSize, 2*halfSearchRangeDownRaw+1); corrRawZoomInWidth = std::min(corrStatWindowSize, 2*halfSearchRangeAcrossRaw+1); // Size to extract the resampled correlation surface for oversampling // users should use 16 for zoomWindowSize, no need to multiply by 2 // zoomWindowSize *= rawDataOversamplingFactor; //8 * 2 // to check the search range int corrSurfaceActualSize = std::min(halfSearchRangeAcrossRaw, halfSearchRangeDownRaw)* 2*rawDataOversamplingFactor; zoomWindowSize = std::min(zoomWindowSize, corrSurfaceActualSize); halfZoomWindowSizeRaw = zoomWindowSize/(2*rawDataOversamplingFactor); // 8*2/(2*2) = 4 windowSizeWidth = windowSizeWidthRaw*rawDataOversamplingFactor; // windowSizeHeight = windowSizeHeightRaw*rawDataOversamplingFactor; searchWindowSizeWidthRaw = windowSizeWidthRaw + 2*halfSearchRangeAcrossRaw; searchWindowSizeHeightRaw = windowSizeHeightRaw + 2*halfSearchRangeDownRaw; searchWindowSizeWidthRawZoomIn = windowSizeWidthRaw + 2*halfZoomWindowSizeRaw; searchWindowSizeHeightRawZoomIn = windowSizeHeightRaw + 2*halfZoomWindowSizeRaw; searchWindowSizeWidth = searchWindowSizeWidthRawZoomIn*rawDataOversamplingFactor; searchWindowSizeHeight = searchWindowSizeHeightRawZoomIn*rawDataOversamplingFactor; numberWindows = numberWindowDown*numberWindowAcross; if(numberWindows <=0) { fprintf(stderr, "Incorrect number of windows!
(%d, %d)\n", numberWindowDown, numberWindowAcross); exit(EXIT_FAILURE); } numberChunkDown = IDIVUP(numberWindowDown, numberWindowDownInChunk); numberChunkAcross = IDIVUP(numberWindowAcross, numberWindowAcrossInChunk); numberChunks = numberChunkDown*numberChunkAcross; allocateArrays(); } void cuAmpcorParameter::allocateArrays() { int arraySize = numberWindows*sizeof(int); grossOffsetDown = (int *)malloc(arraySize); grossOffsetAcross = (int *)malloc(arraySize); referenceStartPixelDown = (int *)malloc(arraySize); referenceStartPixelAcross = (int *)malloc(arraySize); secondaryStartPixelDown = (int *)malloc(arraySize); secondaryStartPixelAcross = (int *)malloc(arraySize); int arraySizeChunk = numberChunks*sizeof(int); referenceChunkStartPixelDown = (int *)malloc(arraySizeChunk); referenceChunkStartPixelAcross = (int *)malloc(arraySizeChunk); secondaryChunkStartPixelDown = (int *)malloc(arraySizeChunk); secondaryChunkStartPixelAcross = (int *)malloc(arraySizeChunk); referenceChunkHeight = (int *)malloc(arraySizeChunk); referenceChunkWidth = (int *)malloc(arraySizeChunk); secondaryChunkHeight = (int *)malloc(arraySizeChunk); secondaryChunkWidth = (int *)malloc(arraySizeChunk); } void cuAmpcorParameter::deallocateArrays() { free(grossOffsetDown); free(grossOffsetAcross); free(referenceStartPixelDown); free(referenceStartPixelAcross); free(secondaryStartPixelDown); free(secondaryStartPixelAcross); free(referenceChunkStartPixelDown); free(referenceChunkStartPixelAcross); free(secondaryChunkStartPixelDown); free(secondaryChunkStartPixelAcross); free(referenceChunkHeight); free(referenceChunkWidth); free(secondaryChunkHeight); free(secondaryChunkWidth); } /// Set starting pixels for reference and secondary windows from arrays /// set also gross offsets between reference and secondary windows /// void cuAmpcorParameter::setStartPixels(int *mStartD, int *mStartA, int *gOffsetD, int *gOffsetA) { for(int i=0; i<numberWindows; i++) { referenceStartPixelDown[i] = mStartD[i]; grossOffsetDown[i] = gOffsetD[i]; secondaryStartPixelDown[i] = referenceStartPixelDown[i] + grossOffsetDown[i] - halfSearchRangeDownRaw; referenceStartPixelAcross[i] = mStartA[i]; grossOffsetAcross[i] = gOffsetA[i]; secondaryStartPixelAcross[i] = referenceStartPixelAcross[i] + grossOffsetAcross[i] - halfSearchRangeAcrossRaw; } setChunkStartPixels(); } /// set starting pixels for each window with a varying gross offset void cuAmpcorParameter::setStartPixels(int mStartD, int mStartA, int *gOffsetD, int *gOffsetA) { for(int row=0; row<numberWindowDown; row++) { for(int col = 0; col < numberWindowAcross; col++) { int i = row*numberWindowAcross + col; referenceStartPixelDown[i] = mStartD + row*skipSampleDownRaw; grossOffsetDown[i] = gOffsetD[i]; secondaryStartPixelDown[i] = referenceStartPixelDown[i] + grossOffsetDown[i] - halfSearchRangeDownRaw; referenceStartPixelAcross[i] = mStartA + col*skipSampleAcrossRaw; grossOffsetAcross[i] = gOffsetA[i]; secondaryStartPixelAcross[i] = referenceStartPixelAcross[i] + grossOffsetAcross[i] - halfSearchRangeAcrossRaw; } } setChunkStartPixels(); } /// set starting pixels for each window with a constant gross offset void cuAmpcorParameter::setStartPixels(int mStartD, int mStartA, int gOffsetD, int gOffsetA) { for(int row=0; row<numberWindowDown; row++) { for(int col = 0; col < numberWindowAcross; col++) { int i = row*numberWindowAcross + col; referenceStartPixelDown[i] = mStartD + row*skipSampleDownRaw; grossOffsetDown[i] = gOffsetD; secondaryStartPixelDown[i] = referenceStartPixelDown[i] + 
grossOffsetDown[i] - halfSearchRangeDownRaw; referenceStartPixelAcross[i] = mStartA + col*skipSampleAcrossRaw; grossOffsetAcross[i] = gOffsetA; secondaryStartPixelAcross[i] = referenceStartPixelAcross[i] + grossOffsetAcross[i] - halfSearchRangeAcrossRaw; } } setChunkStartPixels(); } /// set starting pixels for each chunk void cuAmpcorParameter::setChunkStartPixels() { maxReferenceChunkHeight = 0; maxReferenceChunkWidth = 0; maxSecondaryChunkHeight = 0; maxSecondaryChunkWidth = 0; for(int ichunk=0; ichunk <numberChunkDown; ichunk++) { for (int jchunk =0; jchunk<numberChunkAcross; jchunk++) { int idxChunk = ichunk*numberChunkAcross+jchunk; int mChunkSD = referenceImageHeight; int mChunkSA = referenceImageWidth; int mChunkED = 0; int mChunkEA = 0; int sChunkSD = secondaryImageHeight; int sChunkSA = secondaryImageWidth; int sChunkED = 0; int sChunkEA = 0; int numberWindowDownInChunkRun = numberWindowDownInChunk; int numberWindowAcrossInChunkRun = numberWindowAcrossInChunk; // modify the number of windows in last chunk if(ichunk == numberChunkDown -1) numberWindowDownInChunkRun = numberWindowDown - numberWindowDownInChunk*(numberChunkDown -1); if(jchunk == numberChunkAcross -1) numberWindowAcrossInChunkRun = numberWindowAcross - numberWindowAcrossInChunk*(numberChunkAcross -1); for(int i=0; i<numberWindowDownInChunkRun; i++) { for(int j=0; j<numberWindowAcrossInChunkRun; j++) { int idxWindow = (ichunk*numberWindowDownInChunk+i)*numberWindowAcross + (jchunk*numberWindowAcrossInChunk+j); int vpixel = referenceStartPixelDown[idxWindow]; if(mChunkSD > vpixel) mChunkSD = vpixel; if(mChunkED < vpixel) mChunkED = vpixel; vpixel = referenceStartPixelAcross[idxWindow]; if(mChunkSA > vpixel) mChunkSA = vpixel; if(mChunkEA < vpixel) mChunkEA = vpixel; vpixel = secondaryStartPixelDown[idxWindow]; if(sChunkSD > vpixel) sChunkSD = vpixel; if(sChunkED < vpixel) sChunkED = vpixel; vpixel = secondaryStartPixelAcross[idxWindow]; if(sChunkSA > vpixel) sChunkSA = vpixel; if(sChunkEA < vpixel) sChunkEA = vpixel; } } referenceChunkStartPixelDown[idxChunk] = mChunkSD; referenceChunkStartPixelAcross[idxChunk] = mChunkSA; secondaryChunkStartPixelDown[idxChunk] = sChunkSD; secondaryChunkStartPixelAcross[idxChunk] = sChunkSA; referenceChunkHeight[idxChunk] = mChunkED - mChunkSD + windowSizeHeightRaw; referenceChunkWidth[idxChunk] = mChunkEA - mChunkSA + windowSizeWidthRaw; secondaryChunkHeight[idxChunk] = sChunkED - sChunkSD + searchWindowSizeHeightRaw; secondaryChunkWidth[idxChunk] = sChunkEA - sChunkSA + searchWindowSizeWidthRaw; if(maxReferenceChunkHeight < referenceChunkHeight[idxChunk]) maxReferenceChunkHeight = referenceChunkHeight[idxChunk]; if(maxReferenceChunkWidth < referenceChunkWidth[idxChunk] ) maxReferenceChunkWidth = referenceChunkWidth[idxChunk]; if(maxSecondaryChunkHeight < secondaryChunkHeight[idxChunk]) maxSecondaryChunkHeight = secondaryChunkHeight[idxChunk]; if(maxSecondaryChunkWidth < secondaryChunkWidth[idxChunk] ) maxSecondaryChunkWidth = secondaryChunkWidth[idxChunk]; } } } /// check whether reference and secondary windows are within the image range void cuAmpcorParameter::checkPixelInImageRange() { int endPixel; for(int row=0; row<numberWindowDown; row++) { for(int col = 0; col < numberWindowAcross; col++) { int i = row*numberWindowAcross + col; if(referenceStartPixelDown[i] <0) { fprintf(stderr, "Reference Window start pixel out of range in Down, window (%d,%d), pixel %d\n", row, col, referenceStartPixelDown[i]); exit(EXIT_FAILURE); //or raise range error } if(referenceStartPixelAcross[i] <0) { fprintf(stderr, "Reference Window start pixel out of range in Across, window (%d,%d), pixel %d\n", row, col, referenceStartPixelAcross[i]); exit(EXIT_FAILURE); } endPixel = referenceStartPixelDown[i] + windowSizeHeightRaw; if(endPixel >= referenceImageHeight) { fprintf(stderr, "Reference Window end pixel out of range in Down, window (%d,%d), pixel %d\n", row, col, endPixel); exit(EXIT_FAILURE); } endPixel = referenceStartPixelAcross[i] + windowSizeWidthRaw; if(endPixel >= referenceImageWidth) { fprintf(stderr, "Reference Window end pixel out of range in Across, window (%d,%d), pixel %d\n", row, col, endPixel); exit(EXIT_FAILURE); } //secondary if(secondaryStartPixelDown[i] <0) { fprintf(stderr, "Secondary Window start pixel out of range in Down, window (%d,%d), pixel %d\n", row, col, secondaryStartPixelDown[i]); exit(EXIT_FAILURE); } if(secondaryStartPixelAcross[i] <0) { fprintf(stderr, "Secondary Window start pixel out of range in Across, window (%d,%d), pixel %d\n", row, col, secondaryStartPixelAcross[i]); exit(EXIT_FAILURE); } endPixel = secondaryStartPixelDown[i] + searchWindowSizeHeightRaw; if(endPixel >= secondaryImageHeight) { fprintf(stderr, "Secondary Window end pixel out of range in Down, window (%d,%d), pixel %d\n", row, col, endPixel); exit(EXIT_FAILURE); } endPixel = secondaryStartPixelAcross[i] + searchWindowSizeWidthRaw; if(endPixel >= secondaryImageWidth) { fprintf(stderr, "Secondary Window end pixel out of range in Across, window (%d,%d), pixel %d\n", row, col, endPixel); exit(EXIT_FAILURE); } } } } cuAmpcorParameter::~cuAmpcorParameter() { deallocateArrays(); } // end of file
#include <THC/THCDeviceUtils.cuh> #include <THC/THCGeneral.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/TensorAccessor.h> #if defined(__HIP_PLATFORM_HCC__) constexpr int WARP_SIZE = 64; #else constexpr int WARP_SIZE = 32; #endif // The maximum number of threads in a block #if defined(__HIP_PLATFORM_HCC__) constexpr int MAX_BLOCK_SIZE = 256; #else constexpr int MAX_BLOCK_SIZE = 512; #endif // Number of threads in a block given an input size up to MAX_BLOCK_SIZE static int getNumThreads(int nElem) { #if defined(__HIP_PLATFORM_HCC__) int threadSizes[5] = { 16, 32, 64, 128, MAX_BLOCK_SIZE }; #else int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; #endif for (int i = 0; i != 5; ++i) { if (nElem <= threadSizes[i]) { return threadSizes[i]; } } return MAX_BLOCK_SIZE; } // Returns the index of the most significant 1 bit in `val`. __device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } template <typename scalar_t, typename accscalar_t> struct Float2 { accscalar_t v1, v2; __device__ Float2() {} __device__ Float2(scalar_t v1, scalar_t v2) : v1(static_cast<accscalar_t>(v1)), v2(static_cast<accscalar_t>(v2)) {} __device__ Float2(int v) : v1(static_cast<accscalar_t>(v)), v2(static_cast<accscalar_t>(v)) {} __device__ Float2& operator+=(const Float2& a) { v1 += a.v1; v2 += a.v2; return *this; } }; template <typename scalar_t, typename accscalar_t, typename PTA> struct SumOp { __device__ SumOp(const PTA& t) : tensor(t) {} __device__ __forceinline__ accscalar_t operator()(int batch, int plane, int n) { return static_cast<accscalar_t>(tensor[batch][plane][n]); } const PTA& tensor; }; template <typename scalar_t, typename accscalar_t, typename PTA> struct VarOp { __device__ VarOp(accscalar_t m, const PTA& t) : mean(m), tensor(t) {} __device__ __forceinline__ accscalar_t operator()(int batch, int plane, int n) { accscalar_t val = tensor[batch][plane][n]; return (val - mean) * (val - mean); } const accscalar_t mean; const PTA& tensor; }; template <typename scalar_t, typename accscalar_t, typename PTA> struct GradOp { __device__ GradOp(accscalar_t m, const PTA& i, const PTA& g) : mean(m), input(i), grad_output(g) {} __device__ __forceinline__ Float2<scalar_t, accscalar_t> operator()(int batch, int plane, int n) { accscalar_t g = grad_output[batch][plane][n]; accscalar_t c = static_cast<accscalar_t>(input[batch][plane][n]) - mean; return Float2<scalar_t, accscalar_t>(g, g * c); } const accscalar_t mean; const PTA& input; const PTA& grad_output; }; // Sum across all threads within a warp template <typename T> static __device__ __forceinline__ T warpSum(T val) { for (int i = 0; i < getMSB(WARP_SIZE); ++i) { val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); } return val; } template <typename scalar_t, typename accscalar_t> static __device__ __forceinline__ Float2<scalar_t, accscalar_t> warpSum(Float2<scalar_t, accscalar_t> value) { value.v1 = warpSum(value.v1); value.v2 = warpSum(value.v2); return value; } // Sum across (batch, x/y/z) applying Op() pointwise // this works by first having each thread sum its part // of the data. Then there is a double-shuffling reduction. // First each warp (of WARP_SIZE threads) uses warpSum to reduce its // data to the "warp leader", who writes its value into shared memory. // Then a single warp reads the remaining (at most WARP_SIZE) items // and reduces them using another warpSum.
// The implicit assumption is that there are no more // than WARP_SIZE**2 threads. template<typename scalar_t, typename Op, typename PTA> __device__ scalar_t reduce(Op op, PTA tensor, int plane) { // first the reductions each thread does separately scalar_t sum = static_cast<scalar_t>(0); for (int batch = threadIdx.y; batch < tensor.size(0); batch += blockDim.y) { for (int x = threadIdx.x; x < tensor.size(2); x += blockDim.x) { sum += op(batch, plane, x); } } // first warpSum to get one value per thread to // one value per warp sum = warpSum(sum); // this writes each warps item into shared memory // there are at most WARP_SIZE items left because // there are at most WARP_SIZE**2 threads at the beginning __shared__ scalar_t shared[WARP_SIZE]; __syncthreads(); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (tid % WARP_SIZE == 0) { shared[tid / WARP_SIZE] = sum; } if (tid >= blockDim.x * blockDim.y / WARP_SIZE && tid < WARP_SIZE) { // zero out the other entries in shared shared[tid] = (scalar_t)0; } __syncthreads(); // now have a second warpSum to reduce the intermediate values // from shared memory to a single number. The very first // thread writes it to shared memory. if (tid / WARP_SIZE == 0) { sum = warpSum(shared[tid]); if (tid == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole grad_input return shared[0]; } template <typename scalar_t, typename accscalar_t, bool train, typename index_t> __global__ void batch_norm_transform_input_kernel( const at::PackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, at::PackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> output, const at::PackedTensorAccessor<typename std::conditional<train, accscalar_t, scalar_t>::type, 1, at::RestrictPtrTraits, index_t> mean_, const at::PackedTensorAccessor<typename std::conditional<train, accscalar_t, scalar_t>::type, 1, at::RestrictPtrTraits, index_t> var_or_std, const at::PackedTensorAccessor<scalar_t, 1, at::RestrictPtrTraits, index_t> weight, const at::PackedTensorAccessor<scalar_t, 1, at::RestrictPtrTraits, index_t> bias, accscalar_t epsilon) { index_t plane = blockIdx.x; if (plane >= input.size(1)) { return; } accscalar_t gamma = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : static_cast<accscalar_t>(1); accscalar_t beta = bias.size(0) > 0 ? 
static_cast<accscalar_t>(bias[plane]) : static_cast<accscalar_t>(0); accscalar_t mean = static_cast<accscalar_t>(mean_[plane]); accscalar_t invstd = 1.0 / var_or_std[plane]; index_t bs = input.size(0); index_t fs = input.size(2); index_t bstep = blockDim.y * gridDim.y; for (index_t batch = threadIdx.y + blockIdx.y * blockDim.y; batch < bs; batch += bstep) { auto o = output[batch][plane]; auto i = input[batch][plane]; for (index_t feature = threadIdx.x; feature < fs; feature += blockDim.x) { o[feature] = static_cast<scalar_t>(gamma * (i[feature] - mean) * invstd + beta); } } } template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void batch_norm_collect_statistics_kernel( const at::PackedTensorAccessor<scalar_t, 3, at::RestrictPtrTraits, index_t> input, at::PackedTensorAccessor<accscalar_t, 1, at::RestrictPtrTraits, index_t> save_mean, at::PackedTensorAccessor<accscalar_t, 1, at::RestrictPtrTraits, index_t> save_mean2) { __shared__ int shared_n[2 * 2 * WARP_SIZE + WARP_SIZE]; int plane = blockIdx.x; int N = input.size(0) * input.size(2); int tid = threadIdx.x + threadIdx.y * blockDim.x; // Compute the mean and variance across (batch, x/y/z) // this uses the Welford (in the for loop)/parallel algorithm (to sum across the block) // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_Online_algorithm // and the parallel algorithm on the same page. // We use two shuffles to reduce across the entire block. // https://devblogs.nvidia.com/faster-parallel-reductions-kepler/ has a description. accscalar_t* shared_avg_var = (accscalar_t*) &shared_n[WARP_SIZE]; // first the reductions each thread does separately accscalar_t avg = 0; accscalar_t avg2 = 0; int n = 0; for (int batch = threadIdx.y; batch < input.size(0); batch += blockDim.y) { for (int x = threadIdx.x; x < input.size(2); x += blockDim.x) { accscalar_t v = input[batch][plane][x]; accscalar_t d1 = v - avg; accscalar_t d2 = (v * v) - avg2; n++; avg += d1 / n; avg2 += d2 / n; } } // first warpSum to get one value per thread to // one value per warp for (int i = 0; i < getMSB(WARP_SIZE); ++i) { accscalar_t o_avg = WARP_SHFL_XOR(avg, 1 << i, WARP_SIZE); accscalar_t o_avg2 = WARP_SHFL_XOR(avg2, 1 << i, WARP_SIZE); int o_n = WARP_SHFL_XOR(n, 1 << i, WARP_SIZE); accscalar_t factor = 1.0 / fmaxf(1.0, n+o_n); // var_n += WARP_SHFL_XOR(var_n, 1 << i, WARP_SIZE) + (avg - o_avg) * (avg - o_avg) * n * o_n * factor; avg2 = (n * avg2 + o_n * o_avg2) * factor; avg = (n * avg + o_n * o_avg) * factor; n += o_n; } // this writes each warps item into shared memory // there are at most WARP_SIZE items left because // there are at most WARP_SIZE**2 threads at the beginning __syncthreads(); if (tid % WARP_SIZE == 0) { shared_n[tid / WARP_SIZE] = n; shared_avg_var[tid / WARP_SIZE * 2] = avg; shared_avg_var[tid / WARP_SIZE * 2 + 1] = avg2; } __syncthreads(); // now have a second warpSum to reduce the intermediate values // from shared memory to a single number. The very first // thread writes it to shared memory. if (tid < WARP_SIZE) { n = (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_n[tid] : 0); avg = (tid < blockDim.x * blockDim.y / WARP_SIZE ? shared_avg_var[2 * tid] : 0); avg2 = (tid < blockDim.x * blockDim.y / WARP_SIZE ? 
shared_avg_var[2 * tid + 1] : 0); } for (int i = 0; i < getMSB(WARP_SIZE); ++i) { accscalar_t o_avg = WARP_SHFL_XOR(avg, 1 << i, WARP_SIZE); accscalar_t o_avg2 = WARP_SHFL_XOR(avg2, 1 << i, WARP_SIZE); int o_n = WARP_SHFL_XOR(n, 1 << i, WARP_SIZE); accscalar_t factor = 1.0 / fmaxf(1.0, n+o_n); // var_n += WARP_SHFL_XOR(var_n, 1 << i, WARP_SIZE) + (avg - o_avg) * (avg - o_avg) * n * o_n * factor; avg2 = (n * avg2 + o_n * o_avg2) * factor; avg = (n * avg + o_n * o_avg) * factor; n += o_n; } // Save the mean, variance, and moving averages if (tid == 0) { /* accscalar_t invstd = 0; if (var_n != static_cast<accscalar_t>(0) || epsilon != static_cast<accscalar_t>(0)) { invstd = static_cast<accscalar_t>(1) / device_sqrt(var_n / N + epsilon); } */ save_mean[plane] = avg; save_mean2[plane] = avg2; } } template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void batch_norm_collect_grad_statistics_kernel( const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> input, const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_output, at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_weight, at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_bias, at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_ex, at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_exs, const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> weight, const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_mean, const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_std, accscalar_t epsilon, accscalar_t cf) { index_t plane = blockIdx.x; index_t N = grad_output.size(0) * grad_output.size(2); accscalar_t mean, invstd; mean = save_mean[plane]; invstd = 1.0 / save_std[plane]; /* if (train) { mean = save_mean[plane]; invstd = 1.0 / save_std[plane]; } else { mean = static_cast<accscalar_t>(running_mean[plane]); invstd = static_cast<accscalar_t>(1) / device_sqrt(static_cast<accscalar_t>(running_var[plane]) + epsilon); } */ accscalar_t weight_val = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : accscalar_t(1); // accscalar_t norm = accscalar_t(1) / N; // Compute two values across (batch, x/y/z) in one pass: // 1. Sum(grad_output) // 2. 
DotProduct(input - mean, grad_output) GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>> g(mean, input, grad_output); Float2<scalar_t, accscalar_t> res = reduce<Float2<scalar_t, accscalar_t>, GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>>>(g, grad_output, plane); accscalar_t grad_output_sum = res.v1; accscalar_t dot_p = res.v2; /* accscalar_t grad_mean = grad_output_sum * norm; accscalar_t proj_scale = dot_p * norm * invstd * invstd; accscalar_t grad_scale = invstd * weight_val; if (grad_input.data() != NULL) { for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) { for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) { scalar_t go = grad_output[batch][plane][x]; if (train) { scalar_t inp = input[batch][plane][x]; accscalar_t proj = (inp - mean) * proj_scale; grad_input[batch][plane][x] = static_cast<scalar_t>((go - proj - grad_mean) * grad_scale); } else { grad_input[batch][plane][x] = static_cast<scalar_t>(go * grad_scale); } } } } */ if (threadIdx.x == 0) { grad_exs[plane] = static_cast<scalar_t>(dot_p * weight_val * (-0.5) * pow(invstd, 3) * cf); grad_ex[plane] = static_cast<scalar_t>(grad_output_sum * weight_val * (-1.0) * invstd + \ dot_p * weight_val * pow(invstd, 3) * mean * cf); } if (grad_weight.size(0) > 0) { if (threadIdx.x == 0) { // printf("dot_p = %f, invstd = %f\n", dot_p, invstd); grad_weight[plane] = static_cast<scalar_t>(dot_p * invstd); } } if (grad_bias.size(0) > 0) { if (threadIdx.x == 0) { grad_bias[plane] = static_cast<scalar_t>(grad_output_sum); } } } template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void batch_norm_backward_kernel( const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> input, const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_output, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_input, at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_weight, at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_bias, const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> weight, const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> running_mean, const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> running_var, const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_mean, const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_invstd, bool train, accscalar_t epsilon) { index_t plane = blockIdx.x; index_t N = grad_output.size(0) * grad_output.size(2); accscalar_t mean, invstd; if (train) { mean = save_mean[plane]; invstd = save_invstd[plane]; } else { mean = static_cast<accscalar_t>(running_mean[plane]); invstd = static_cast<accscalar_t>(1) / device_sqrt(static_cast<accscalar_t>(running_var[plane]) + epsilon); } accscalar_t weight_val = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : accscalar_t(1); accscalar_t norm = accscalar_t(1) / N; // Compute two values across (batch, x/y/z) in one pass: // 1. Sum(grad_output) // 2. 
DotProduct(input - mean, grad_output) GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>> g(mean, input, grad_output); Float2<scalar_t, accscalar_t> res = reduce<Float2<scalar_t, accscalar_t>, GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>>>(g, grad_output, plane); accscalar_t grad_output_sum = res.v1; accscalar_t dot_p = res.v2; accscalar_t grad_mean = grad_output_sum * norm; accscalar_t proj_scale = dot_p * norm * invstd * invstd; accscalar_t grad_scale = invstd * weight_val; if (grad_input.data() != NULL) { for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) { for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) { scalar_t go = grad_output[batch][plane][x]; if (train) { scalar_t inp = input[batch][plane][x]; accscalar_t proj = (inp - mean) * proj_scale; grad_input[batch][plane][x] = static_cast<scalar_t>((go - proj - grad_mean) * grad_scale); } else { grad_input[batch][plane][x] = static_cast<scalar_t>(go * grad_scale); } } } } if (grad_weight.size(0) > 0) { if (threadIdx.x == 0) { grad_weight[plane] = static_cast<scalar_t>(dot_p * invstd); } } if (grad_bias.size(0) > 0) { if (threadIdx.x == 0) { grad_bias[plane] = static_cast<scalar_t>(grad_output_sum); } } } template <typename scalar_t, int64_t dim, template <typename U> class PtrTraits = at::DefaultPtrTraits, typename index_t = int64_t> static at::PackedTensorAccessor<scalar_t, dim, PtrTraits, index_t> packed_accessor_or_dummy(const at::Tensor& t) { if (! t.defined()) { const std::vector<index_t> zeros(dim); return at::PackedTensorAccessor<scalar_t, dim, PtrTraits, index_t>(nullptr, zeros.data(), zeros.data()); } return t.packed_accessor<scalar_t, dim, PtrTraits, index_t>(); } std::vector<at::Tensor> batch_norm_collect_statistics_cuda( const at::Tensor input) { // const auto batch_size = input.size(0); const auto channel_size = input.size(1); // const auto dim_size = input.size(2); auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions auto ex = at::empty({channel_size}, input.options()); auto exs = at::empty({channel_size}, input.options()); auto stream = at::cuda::getCurrentCUDAStream(); const dim3 blocks(input_reshaped.size(1)); int tf = getNumThreads(input_reshaped.size(2)); dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE/tf)); AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_collect_statistics_cuda", ([&] { using accscalar_t = at::acc_type<scalar_t, true>; if (at::cuda::detail::canUse32BitIndexMath(input)) { batch_norm_collect_statistics_kernel<scalar_t, accscalar_t, int32_t><<<blocks, threads, 0, stream>>>( input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(), ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>(), exs.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>()); } else { batch_norm_collect_statistics_kernel<scalar_t, accscalar_t, int64_t><<<blocks, threads, 0, stream>>>( input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int64_t>(), ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>(), exs.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>()); } })); THCudaCheck(cudaGetLastError()); return {ex, exs}; } at::Tensor batch_norm_transform_input_cuda( const at::Tensor input, const at::Tensor gamma, const at::Tensor beta, const at::Tensor ex, const at::Tensor exs, float eps, float cf) { const auto channel_size = 
input.size(1); auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions auto output_reshaped = at::empty_like(input_reshaped); auto std = (cf * (exs - ex * ex) + eps).sqrt(); auto stream = at::cuda::getCurrentCUDAStream(); int tf = std::max<int>(getNumThreads(input_reshaped.size(2)/4), std::min<int>(getNumThreads(input_reshaped.size(2)), 64)); int tb = std::max<int>(64/tf, 1); dim3 blocks_trans(input_reshaped.size(1), std::max<int>(1, std::min<int>((256*1024)/input_reshaped.size(1), (input_reshaped.size(0)+tb-1)/tb))); dim3 threads_trans(tf, tb); AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_transform_input_cuda", ([&] { using accscalar_t = at::acc_type<scalar_t, true>; if (at::cuda::detail::canUse32BitIndexMath(input)) { batch_norm_transform_input_kernel<scalar_t, accscalar_t, true, int32_t><<<blocks_trans, threads_trans, 0, stream>>>( input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(), output_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int32_t>(), ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>(), std.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int32_t>(), packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int32_t>(gamma), packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int32_t>(beta), eps); } else { batch_norm_transform_input_kernel<scalar_t, accscalar_t, true, int64_t><<<blocks_trans, threads_trans, 0, stream>>>( input_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int64_t>(), output_reshaped.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, int64_t>(), ex.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>(), std.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, int64_t>(), packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int64_t>(gamma), packed_accessor_or_dummy<scalar_t, 1, at::RestrictPtrTraits, int64_t>(beta), eps); } })); THCudaCheck(cudaGetLastError()); return output_reshaped.view(input.sizes()); } std::vector<at::Tensor> batch_norm_collect_grad_statistics_cuda( const at::Tensor input, const at::Tensor grad_output, const at::Tensor weight, const at::Tensor ex, const at::Tensor exs, float eps, float cf) { const auto channel_size = input.size(1); auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions auto grad_output_reshaped = grad_output.reshape(input_reshaped.sizes()); auto std = (cf * (exs - ex * ex) + eps).sqrt(); auto grad_weight = at::empty_like(weight); auto grad_bias = at::empty_like(weight); auto grad_ex = at::empty_like(ex); auto grad_exs = at::empty_like(exs); auto stream = at::cuda::getCurrentCUDAStream(); const dim3 blocks(input_reshaped.size(1)); int tf = getNumThreads(input_reshaped.size(2)); dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE/tf)); AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_collect_grad_statistics_cuda", ([&] { using accscalar_t = at::acc_type<scalar_t, true>; if (at::cuda::detail::canUse32BitIndexMath(input)) { batch_norm_collect_grad_statistics_kernel<scalar_t, accscalar_t, int32_t><<<blocks, threads, 0, stream>>>( input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(), grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_weight), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_bias), packed_accessor_or_dummy<scalar_t, 
1, at::DefaultPtrTraits, int32_t>(grad_ex), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(grad_exs), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(weight), packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(ex), packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(std), eps, cf); } else { batch_norm_collect_grad_statistics_kernel<scalar_t, accscalar_t, int64_t><<<blocks, threads, 0, stream>>>( input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(), grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_weight), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_bias), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_ex), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(grad_exs), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(weight), packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(ex), packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(std), eps, cf); } })); THCudaCheck(cudaGetLastError()); return {grad_weight, grad_bias, grad_ex, grad_exs}; } template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void batch_norm_input_backward_kernel( const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> input, const at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_output, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t> grad_input, const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_ex, const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> grad_exs, const at::PackedTensorAccessor<scalar_t, 1, at::DefaultPtrTraits, index_t> weight, const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_mean, const at::PackedTensorAccessor<accscalar_t, 1, at::DefaultPtrTraits, index_t> save_invstd, accscalar_t epsilon) { index_t plane = blockIdx.x; index_t N = grad_output.size(0) * grad_output.size(2); // accscalar_t mean, invstd; // mean = save_mean[plane]; accscalar_t invstd; invstd = 1.0 / save_invstd[plane]; accscalar_t weight_val = weight.size(0) > 0 ? static_cast<accscalar_t>(weight[plane]) : accscalar_t(1); accscalar_t norm = accscalar_t(1) / N; /* // Compute two values across (batch, x/y/z) in one pass: // 1. Sum(grad_output) // 2. 
DotProduct(input - mean, grad_output) GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>> g(mean, input, grad_output); Float2<scalar_t, accscalar_t> res = reduce<Float2<scalar_t, accscalar_t>, GradOp<scalar_t, accscalar_t, at::PackedTensorAccessor<scalar_t, 3, at::DefaultPtrTraits, index_t>>>(g, grad_output, plane); accscalar_t grad_output_sum = res.v1; accscalar_t dot_p = res.v2; accscalar_t grad_mean = grad_output_sum * norm; accscalar_t proj_scale = dot_p * norm * invstd * invstd; accscalar_t grad_scale = invstd * weight_val; */ if (grad_input.data() != NULL) { for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) { for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) { grad_input[batch][plane][x] = static_cast<scalar_t>(grad_output[batch][plane][x] * invstd * weight_val + grad_exs[plane] * 2.0 * input[batch][plane][x] * norm + \ grad_ex[plane] * norm); } } } } at::Tensor batch_norm_input_backward_cuda( const at::Tensor input, const at::Tensor grad_output, const at::Tensor weight, const at::Tensor ex, const at::Tensor exs, const at::Tensor grad_ex, const at::Tensor grad_exs, float eps, float cf) { auto input_reshaped = input.reshape({input.size(0), input.size(1), -1}); // internally we merge the feature dimensions auto grad_output_reshaped = grad_output.reshape(input_reshaped.sizes()); auto std = (cf * (exs - ex * ex) + eps).sqrt(); auto grad_input = at::empty_like(input); auto grad_input_reshaped = grad_input.view(input_reshaped.sizes()); auto stream = at::cuda::getCurrentCUDAStream(); const dim3 blocks(input_reshaped.size(1)); int tf = getNumThreads(input_reshaped.size(2)); dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE/tf)); AT_DISPATCH_FLOATING_TYPES(input.type(), "batch_norm_input_backward_cuda", ([&] { using accscalar_t = at::acc_type<scalar_t, true>; if (at::cuda::detail::canUse32BitIndexMath(input)) { batch_norm_input_backward_kernel<scalar_t, accscalar_t, int32_t><<<blocks, threads, 0, stream>>>( input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(), grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int32_t>(), packed_accessor_or_dummy<scalar_t, 3, at::DefaultPtrTraits, int32_t>(grad_input_reshaped), grad_ex.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int32_t>(), grad_exs.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int32_t>(), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int32_t>(weight), packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(ex), packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int32_t>(std), eps); } else { batch_norm_input_backward_kernel<scalar_t, accscalar_t, int64_t><<<blocks, threads, 0, stream>>>( input_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(), grad_output_reshaped.packed_accessor<scalar_t, 3, at::DefaultPtrTraits, int64_t>(), packed_accessor_or_dummy<scalar_t, 3, at::DefaultPtrTraits, int64_t>(grad_input_reshaped), grad_ex.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int64_t>(), grad_exs.packed_accessor<scalar_t, 1, at::DefaultPtrTraits, int64_t>(), packed_accessor_or_dummy<scalar_t, 1, at::DefaultPtrTraits, int64_t>(weight), packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(ex), packed_accessor_or_dummy<accscalar_t, 1, at::DefaultPtrTraits, int64_t>(std), eps); } })); THCudaCheck(cudaGetLastError()); return grad_input; }
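// ---------------------------------------------------------------------------
// Sketch: every host wrapper above turns the raw per-channel moments
// (ex = E[x], exs = E[x^2]) produced by batch_norm_collect_statistics_cuda
// into a standard deviation as std = (cf * (exs - ex * ex) + eps).sqrt().
// cf is a caller-supplied correction factor (e.g. N/(N-1) for an unbiased
// variance -- an assumption; the source never fixes its value). A scalar
// host-side illustration, not part of the original extension:
#include <cmath>
static inline float channel_std(float ex, float exs, float eps, float cf)
{
    // variance from the first and second moments, plus epsilon for stability
    return std::sqrt(cf * (exs - ex * ex) + eps);
}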
//#include <cstdio> #include <stdio.h> #include <vector> #include <algorithm> #include <math.h> #include <omp.h> #include <cuda.h> #include <cuda_runtime_api.h> using namespace std; // texture<float, 2, cudaReadModeElementType> inTex; texture<float4, 2, cudaReadModeElementType> inTex; // texture<float, cudaTextureType1D, cudaReadModeElementType> inTex; const int TILE_DIM = 32; const int BLOCK_ROWS = 8; const int N_THREADS = 64; const int N_BLOCKS = 64; __global__ void copy_mem(unsigned char *source, unsigned char *render) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) for (int channel = 0; channel < 3; channel ++ ) render[3*((y+j)*width + x) + channel] = source[3 * ((y+j)*width + x) + channel]; } __global__ void set_depth(unsigned int *depth) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) depth[(y+j)*width + x] = 65535; } __global__ void char_to_int(int * img2, unsigned char * img) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) img2[(y+j)*width + x] = img[3*((y+j)*width + x) + 0] * 256 * 256 + img[3*((y+j)*width + x) + 1] * 256 + img[3*((y+j)*width + x) + 2]; } __global__ void int_to_char(int * img2, unsigned char * img) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { img[3*((y+j)*width + x)] = img2[(y+j)*width + x] / (256*256); img[3*((y+j)*width + x)+1] = img2[(y+j)*width + x] / 256 % 256; img[3*((y+j)*width + x)+2] = img2[(y+j)*width + x] % 256; } } __global__ void to3d_point(float *depth, float *points3d) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; int h = w / 2; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; float depth_point = depth[ih*w + iw] * 128.0; float phi = ((float)(ih) + 0.5) / float(h) * M_PI; float theta = ((float)(iw) + 0.5) / float(w) * 2 * M_PI + M_PI; points3d[(ih * w + iw) * 4 + 0] = depth_point * sin(phi) * cos(theta); points3d[(ih * w + iw) * 4 + 1] = depth_point * sin(phi) * sin(theta); points3d[(ih * w + iw) * 4 + 2] = depth_point * cos(phi); points3d[(ih * w + iw) * 4 + 3] = 1; } } __global__ void transform(float *points3d_after, float *points3d, float * transformation_matrix) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; for (int ic = 0; ic < 3; ic ++) { points3d_after[(ih * w + iw) * 3 + ic] = points3d[(ih * w + iw) * 4 + 0] * transformation_matrix[4 * ic + 0] + points3d[(ih * w + iw) * 4 + 1] * transformation_matrix[4 * ic + 1] + points3d[(ih * w + iw) * 4 + 2] * transformation_matrix[4 * ic + 2] + points3d[(ih * w + iw) * 4 + 3] * transformation_matrix[4 * ic + 3]; } } } __global__ void transform2d(float *points3d_after, float *points3d_polar) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; float x = points3d_after[(ih * w + iw) * 3 + 0]; float y = points3d_after[(ih 
* w + iw) * 3 + 1];
    float z = points3d_after[(ih * w + iw) * 3 + 2];
    points3d_polar[(ih * w + iw) * 3 + 0] = sqrt(x * x + y * y + z * z);
    points3d_polar[(ih * w + iw) * 3 + 1] = atan2(y, x);
    points3d_polar[(ih * w + iw) * 3 + 2] = atan2(sqrt(x * x + y * y), z);
  }
}

__global__ void render_depth(float *points3d_polar, unsigned int * depth_render)
{
  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  int w = gridDim.x * TILE_DIM;
  int h = w / 2;
  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
  {
    int iw = x;
    int ih = y + j;
    int tx = round((points3d_polar[(ih * w + iw) * 3 + 1] + M_PI)/(2*M_PI) * w - 0.5);
    int ty = round((points3d_polar[(ih * w + iw) * 3 + 2])/M_PI * h - 0.5);
    int this_depth = (int)(512 * points3d_polar[(ih * w + iw) * 3 + 0]);
    atomicMin(&depth_render[(ty * w + tx)], this_depth);
  }
}

__global__ void render_final(float *points3d_polar, int * depth_render, int * img, int * render)
{
  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  int w = gridDim.x * TILE_DIM;
  int h = w / 2;
  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
  {
    int iw = x;
    int ih = y + j;
    int tx = round((points3d_polar[(ih * w + iw) * 3 + 1] + M_PI)/(2*M_PI) * w - 0.5);
    int ty = round((points3d_polar[(ih * w + iw) * 3 + 2])/M_PI * h - 0.5);
    int this_depth = (int)(12800/128 * points3d_polar[(ih * w + iw) * 3 + 0]);
    int delta = this_depth - depth_render[(ty * w + tx)];
    //printf("%d %d\n", this_depth, depth_render[(ty * w + tx)]);
    if ((y > h/8) && (y < h*7/8))
      if ((delta > -10) && (delta < 10) && (this_depth < 10000)) {
        render[(ty * w + tx)] = img[(ih * w + iw)];
      }
  }
}

__global__ void projectCubeMapToERImage(float *dst, float * src, uint * idxs, size_t count)
{
  int n_to_do = count / ( gridDim.x * blockDim.x);
  int start = (blockIdx.x * blockDim.x + threadIdx.x) * n_to_do;
  //printf("x: %d w: %d | %d %d (%d)(%d)\n", blockIdx.x, threadIdx.x, gridDim.x, blockDim.x, start, n_to_do);
  for (int j = 0; j < n_to_do; j++) {
    dst[start + j] = src[idxs[start + j]];
  }
}

__global__ void readTextureToCubeMapBuffer(float * dst, size_t width, size_t height)
{
  unsigned int n_to_do = height * width / (blockDim.x * gridDim.x);
  int start = (blockIdx.x * blockDim.x + threadIdx.x) * n_to_do;
  // printf("Block (%i) thread (%i); n_to_do (%d); start (%d) | (%d, %d)\n",
  //        blockIdx.x, threadIdx.x,
  //        n_to_do,
  //        start, width, height);
  for (int j = start; j < start + n_to_do; j++) {
    int x_val = (j%width);
    int y_val = (j/width);
    float4 temp = tex2D(inTex, x_val, y_val);
    dst[j] = temp.z;
  }
  // printf("DONE (%i, %i);\n", blockIdx.x, threadIdx.x);
}

extern "C"{

/* Convenience function to print any GPU errors */
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
  if (code != cudaSuccess)
  {
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}

float * allocateBufferOnGPU(size_t count)
{
  float *d_dst;
  const int dst_mem_size = count*sizeof(float);
  cudaMalloc((void **)&d_dst, dst_mem_size);
  return d_dst;
}

void projectCubeMapToEquirectangular(float * dst, float * d_src, uint *d_idx, size_t count, size_t src_size)
{
  /* First call "d_idx = moveToGPU(cubeIdxToEqui, count)" */
  // Declare vars
  const int dstMemSize = count*sizeof(float);
  float *d_dst;
  // Create buffer for the equirectangular img on gpu
  cudaMalloc((void **)&d_dst, dstMemSize);
  cudaMemcpy(d_dst, dst, dstMemSize, cudaMemcpyHostToDevice);
  // Do cube -> equirectangular
projection projectCubeMapToERImage<<< N_BLOCKS, N_THREADS >>>(d_dst, d_src, d_idx, count); // Copy back to cpu cudaMemcpy(dst, d_dst, dstMemSize, cudaMemcpyDeviceToHost); cudaFree(d_dst); cudaDeviceSynchronize(); } void fillBlue(float * dst, cudaArray_t src, size_t offset, size_t w, size_t h) { /* Fills the buffer at *dst with the contents at src + offset (a h x w texture)*/ // --- Dims dim3 dimBlock(N_BLOCKS); dim3 dimGrid(N_THREADS); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat); // Set the texture parameters inTex.normalized = false; cudaBindTextureToArray(inTex, src, channelDesc); readTextureToCubeMapBuffer<<< dimBlock, dimGrid >>>(dst + offset, w, h); } uint * copyToGPU(uint * cubeMapIdxToEqui, size_t count) { /* Copies the given array to device */ uint *d_idx; const int idxsMemSize = count*sizeof(uint); cudaMalloc((void **)&d_idx, idxsMemSize); cudaMemcpy(d_idx, cubeMapIdxToEqui, idxsMemSize, cudaMemcpyHostToDevice); return d_idx; } void render(int h,int w,unsigned char * img, float * depth,float * pose, unsigned char * render, int * depth_render) { //int ih, iw, i, ic; const int nx = w; const int ny = h; const int depth_mem_size = nx*ny*sizeof(float); const int frame_mem_size = nx*ny*sizeof(unsigned char) * 3; dim3 dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1); dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1); unsigned char *d_img, *d_render; float *d_depth, *d_pose; int *d_depth_render; float *d_3dpoint, *d_3dpoint_after, *d_3dpoint_polar; int *d_render2, *d_img2; cudaMalloc((void **)&d_img, frame_mem_size); cudaMalloc((void **)&d_render, frame_mem_size); cudaMalloc((void **)&d_depth, depth_mem_size); cudaMalloc((void **)&d_depth_render, nx * ny * sizeof(int)); cudaMalloc((void **)&d_3dpoint, depth_mem_size * 4); cudaMalloc((void **)&d_3dpoint_after, depth_mem_size * 4); cudaMalloc((void **)&d_3dpoint_polar, depth_mem_size * 4); cudaMalloc((void **)&d_pose, sizeof(float) * 16); cudaMalloc((void **)&d_render2, nx * ny * sizeof(int)); cudaMalloc((void **)&d_img2, nx * ny * sizeof(int)); cudaMemcpy(d_depth_render, depth_render, nx * ny * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_pose, pose, sizeof(float) * 16, cudaMemcpyHostToDevice); cudaMemcpy(d_img, img, frame_mem_size, cudaMemcpyHostToDevice); cudaMemcpy(d_depth, depth, depth_mem_size, cudaMemcpyHostToDevice); cudaMemcpy(d_render, render, frame_mem_size, cudaMemcpyHostToDevice); cudaMemset(d_render2, 0, nx * ny * sizeof(int)); cudaMemset(d_img2, 0, nx * ny * sizeof(int)); cudaMemset(d_3dpoint, 0, depth_mem_size * 4); cudaMemset(d_3dpoint_after, 0, depth_mem_size * 4); to3d_point<<< dimGrid, dimBlock >>>(d_depth, d_3dpoint); transform<<< dimGrid, dimBlock >>>(d_3dpoint_after, d_3dpoint, d_pose); transform2d<<<dimGrid, dimBlock>>>(d_3dpoint_after, d_3dpoint_polar); char_to_int <<< dimGrid, dimBlock >>> (d_img2, d_img); char_to_int <<< dimGrid, dimBlock >>> (d_render2, d_render); //render_depth <<< dimGrid, dimBlock >>> (d_3dpoint_polar, d_depth_render); render_final <<< dimGrid, dimBlock >>> (d_3dpoint_polar, d_depth_render, d_img2, d_render2); int_to_char <<< dimGrid, dimBlock >>> (d_render2, d_render); cudaMemcpy(render, d_render, frame_mem_size, cudaMemcpyDeviceToHost); //cudaMemcpy(depth_render, d_depth_render, nx * ny * sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaFree(d_img); cudaFree(d_depth); cudaFree(d_render2); cudaFree(d_img2); cudaFree(d_render); cudaFree(d_depth_render); cudaFree(d_3dpoint); cudaFree(d_3dpoint_after); cudaFree(d_3dpoint_polar); cudaFree(d_pose); } 
}//extern "C"
#include <nvbio/basic/timer.h>
#include <nvbio/basic/shared_pointer.h>
#include <nvbio/io/sequence/sequence.h>
#include <nvbio/basic/dna.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <zlib/zlib.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>

using namespace nvbio;

bool to_ascii(const char* reads_name, void* output_file, void* output_index, const io::QualityEncoding qencoding, const io::SequenceEncoding flags)
{
    log_visible(stderr, "opening read file \"%s\"\n", reads_name);
    SharedPointer<nvbio::io::SequenceDataStream> read_data_file(
        nvbio::io::open_sequence_file(reads_name, qencoding, uint32(-1), uint32(-1), flags) );

    if (read_data_file == NULL || read_data_file->is_ok() == false)
    {
        log_error(stderr, " failed opening file \"%s\"\n", reads_name);
        return false;
    }

    const uint32 batch_size = 512*1024;

    std::vector<char>   char_read( 1024*1024 );
    std::vector<uint64> index( 512*1024 + 1u );

    uint64 offset  = 0u;
    uint32 n_reads = 0;

    io::SequenceDataHost h_read_data;

    // loop through all read batches
    while (1)
    {
        // load a new batch of reads
        if (io::next( DNA_N, &h_read_data, read_data_file.get(), batch_size ) == 0)
            break;

        const io::SequenceDataAccess<DNA_N> h_read_access( h_read_data );

        // loop through all reads
        for (uint32 i = 0; i < h_read_data.size(); ++i)
        {
            const io::SequenceDataAccess<DNA_N>::sequence_string read = h_read_access.get_read(i);

            dna_to_string( read, read.length(), &char_read[0] );

            char_read[ read.length() ] = '\n';

            const uint32 n_written = (uint32)gzwrite( output_file, &char_read[0], sizeof(char) * (read.length()+1) );
            if (n_written < read.length()+1)
            {
                log_error( stderr, "unable to write to output\n");
                return false;
            }
        }

        if (output_index)
        {
            // collect the sequence offsets
            for (uint32 i = 0; i < h_read_data.size(); ++i)
                index[i] = offset + h_read_data.sequence_index()[i+1];

            // write the sequence offsets to the index stream
            gzwrite( output_index, &index[0], sizeof(uint64) * h_read_data.size() );
        }

        // update the global sequence offset
        offset += h_read_data.bps();

        // update the global number of output reads
        n_reads += h_read_data.size();

        const uint64 n_bytes = gzoffset( output_file );
        log_verbose(stderr,"\r %u reads (%.2fGB - %.2fB/read - %.2fB/bp) ",
            n_reads,
            float( n_bytes ) / float(1024*1024*1024),
            float(n_bytes)/float(n_reads),
            float(n_bytes)/float(offset));
    }
    log_verbose_cont(stderr,"\n");
    return true;
}

template <uint32 SYMBOL_SIZE>
bool to_packed(const char* reads_name, void* output_file, void* output_index, const io::QualityEncoding qencoding, const io::SequenceEncoding flags)
{
    log_visible(stderr, "opening read file \"%s\"\n", reads_name);
    SharedPointer<nvbio::io::SequenceDataStream> read_data_file(
        nvbio::io::open_sequence_file(reads_name, qencoding, uint32(-1), uint32(-1), flags) );

    if (read_data_file == NULL || read_data_file->is_ok() == false)
    {
        log_error(stderr, " failed opening file \"%s\"\n", reads_name);
        return false;
    }

    static const uint32 SYMBOLS_PER_WORD = 32u / SYMBOL_SIZE;

    const uint32 batch_size = 512*1024;

    uint32 n_reads = 0;

    io::SequenceDataHost h_read_data;

    typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,true> packed_stream_type;

    std::vector<uint32> words( 1024*1024 );
    std::vector<uint64> index( 512*1024 + 1u );

    uint32 rem    = 0u;
    uint64 offset = 0u;

    // loop through all read batches
    while (1)
    {
        // load a new batch of reads
        if (io::next( DNA_N, &h_read_data, read_data_file.get(), batch_size ) == 0)
            break;

        // reserve enough storage
        words.resize( h_read_data.words() + 1u );

        packed_stream_type packed_reads( &words[0] );

        const io::SequenceDataAccess<DNA_N> h_read_access( h_read_data );

        nvbio::assign( h_read_access.bps(), h_read_access.sequence_stream(), packed_reads + rem );

        // write all whole words
        const uint32 n_bps       = h_read_access.bps() + rem;
        const uint32 whole_words = n_bps / SYMBOLS_PER_WORD;
        gzwrite( output_file, &words[0], sizeof(uint32) * whole_words );

        // save the last non-whole word
        words[0] = words[ whole_words ];

        // save the number of unwritten symbols left
        rem = n_bps & (SYMBOLS_PER_WORD-1);

        if (output_index)
        {
            // collect the sequence offsets
            for (uint32 i = 0; i < h_read_data.size(); ++i)
                index[i] = offset + h_read_data.sequence_index()[i+1];

            // write the sequence offsets to the index stream
            gzwrite( output_index, &index[0], sizeof(uint64) * h_read_data.size() );
        }

        // update the global sequence offset
        offset += h_read_data.bps();

        // update the global number of output reads
        n_reads += h_read_data.size();

        const uint64 n_bytes = gzoffset( output_file );
        log_verbose(stderr,"\r %u reads (%.2fGB - %.2fB/read - %.2fB/bp) ",
            n_reads,
            float( n_bytes ) / float(1024*1024*1024),
            float(n_bytes)/float(n_reads),
            float(n_bytes)/float(offset));
    }

    // flush the trailing partially-filled word, if any
    if (rem)
        gzwrite( output_file, &words[0], sizeof(uint32) );

    log_verbose_cont(stderr,"\n");
    return true;
}

enum Format { ASCII_FORMAT = 0u, PACKED2_FORMAT = 1u, PACKED4_FORMAT = 2u, };

int main(int argc, char* argv[])
{
    if (argc < 2)
    {
        log_info(stderr, "nvExtractReads [options] input output\n");
        log_info(stderr, "  extract a set of reads to a plain ASCII or packed file with one read per line (.txt)\n\n");
        log_info(stderr, "options:\n");
        log_info(stderr, "  --verbosity\n");
        log_info(stderr, "  -F  | --skip-forward    skip forward strand\n");
        log_info(stderr, "  -R  | --skip-reverse    skip reverse strand\n");
        log_info(stderr, "  -a  | --ascii           ASCII output\n");
        log_info(stderr, "  -p2 | --packed-2        2-bits packed output\n");
        log_info(stderr, "  -p4 | --packed-4        4-bits packed output\n");
        log_info(stderr, "  -i  | --idx string      save an index file\n");
        exit(0);
    }

    const char* reads_name = argv[argc-2];
    const char* out_name   = argv[argc-1];
    const char* idx_name   = NULL;
    bool forward = true;
    bool reverse = true;
    Format format = ASCII_FORMAT;
    io::QualityEncoding qencoding = io::Phred33;

    for (int i = 0; i < argc - 2; ++i)
    {
        if (strcmp( argv[i], "-verbosity" ) == 0 ||
            strcmp( argv[i], "--verbosity" ) == 0)
        {
            set_verbosity( Verbosity( atoi( argv[++i] ) ) );
        }
        else if (strcmp( argv[i], "-F" ) == 0 ||
                 strcmp( argv[i], "--skip-forward" ) == 0)  // skip forward strand
        {
            forward = false;
        }
        else if (strcmp( argv[i], "-R" ) == 0 ||
                 strcmp( argv[i], "--skip-reverse" ) == 0)  // skip reverse strand
        {
            reverse = false;
        }
        else if (strcmp( argv[i], "-a" ) == 0 ||
                 strcmp( argv[i], "--ascii" ) == 0)         // ASCII format
        {
            format = ASCII_FORMAT;
        }
        else if (strcmp( argv[i], "-p2" ) == 0 ||
                 strcmp( argv[i], "--packed-2" ) == 0)      // 2-bits packed
        {
            format = PACKED2_FORMAT;
        }
        else if (strcmp( argv[i], "-p4" ) == 0 ||
                 strcmp( argv[i], "--packed-4" ) == 0)      // 4-bits packed
        {
            format = PACKED4_FORMAT;
        }
        else if (strcmp( argv[i], "-i" ) == 0 ||
                 strcmp( argv[i], "--idx" ) == 0)           // index file
        {
            idx_name = argv[++i];
        }
    }

    std::string out_string = out_name;

    // parse out file extension; look for .fastq.gz, .fastq suffixes
    uint32 len = uint32( strlen(out_name) );
    bool is_gzipped = false;

    // do we have a .gz suffix?
    if (len >= strlen(".gz"))
    {
        if (strcmp(&out_name[len - strlen(".gz")], ".gz") == 0)
        {
            is_gzipped = true;
            len = uint32(len - strlen(".gz"));
        }
    }

    void* output_file  = NULL;
    void* output_index = NULL;

    if (format == ASCII_FORMAT)
    {
        // open a plain ASCII file
        output_file = gzopen( out_name, is_gzipped ? "w1R" : "w" );
    }
    else
    {
        // open a binary file
        output_file = gzopen( out_name, is_gzipped ? "wb1R" : "wbT" );
    }
    if (output_file == NULL)
    {
        log_error(stderr, " failed opening file \"%s\"\n", out_name);
        return 1;
    }

    if (idx_name)
    {
        // the index is written with gzwrite and closed with gzclose, so it
        // must be opened as a gzip stream
        output_index = gzopen( idx_name, "wb" );
        if (output_index == NULL)
        {
            log_error(stderr, " failed opening file \"%s\"\n", idx_name);
            return 1;
        }
    }

    log_visible(stderr,"nvExtractReads... started\n");

    uint32 encoding_flags = 0u;
    if (forward) encoding_flags |= io::FORWARD;
    if (reverse) encoding_flags |= io::REVERSE_COMPLEMENT;

    bool success = false;
    switch (format)
    {
    case ASCII_FORMAT:   success = to_ascii( reads_name, output_file, output_index, qencoding, io::SequenceEncoding(encoding_flags) ); break;
    case PACKED2_FORMAT: success = to_packed<2u>( reads_name, output_file, output_index, qencoding, io::SequenceEncoding(encoding_flags) ); break;
    case PACKED4_FORMAT: success = to_packed<4u>( reads_name, output_file, output_index, qencoding, io::SequenceEncoding(encoding_flags) ); break;
    }

    if (output_file)  gzclose( output_file );
    if (output_index) gzclose( output_index );

    log_visible(stderr,"nvExtractReads... done\n");
    return success ? 0u : 1u;
}
using namespace Yolo; namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(const int cudaThread /*= 512*/):mThreadCount(cudaThread) { mClassCount = CLASS_NUM; mYoloKernel.clear(); mYoloKernel.push_back(yolo1); mYoloKernel.push_back(yolo2); mYoloKernel.push_back(yolo3); mKernelCount = mYoloKernel.size(); } YoloLayerPlugin::~YoloLayerPlugin() { if(mInputBuffer) CUDA_CHECK(cudaFreeHost(mInputBuffer)); if(mOutputBuffer) CUDA_CHECK(cudaFreeHost(mOutputBuffer)); } // create the plugin at runtime from a byte stream YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount*sizeof(YoloKernel); memcpy(mYoloKernel.data(),d,kernelSize); d += kernelSize; assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); auto kernelSize = mKernelCount*sizeof(YoloKernel); memcpy(d,mYoloKernel.data(),kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size(); } int YoloLayerPlugin::initialize() { int totalCount = 0; for(const auto& yolo : mYoloKernel) totalCount += (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT; CUDA_CHECK(cudaHostAlloc(&mInputBuffer, totalCount * sizeof(float), cudaHostAllocDefault)); totalCount = 0;//detection count for(const auto& yolo : mYoloKernel) totalCount += yolo.width*yolo.height * CHECK_COUNT; CUDA_CHECK(cudaHostAlloc(&mOutputBuffer, sizeof(float) + totalCount * sizeof(Detection), cudaHostAllocDefault)); return 0; } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { //output the result to channel int totalCount = 0; for(const auto& yolo : mYoloKernel) totalCount += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float); return Dims3(totalCount + 1, 1, 1); } void YoloLayerPlugin::forwardCpu(const float*const * inputs, float* outputs, cudaStream_t stream,int batchSize) { auto Logist = [=](float data){ return 1./(1. 
+ exp(-data)); }; int totalOutputCount = 0; int i = 0; int totalCount = 0; for(const auto& yolo : mYoloKernel) { totalOutputCount += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float); totalCount += (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT; ++ i; } for (int idx = 0; idx < batchSize;idx++) { i = 0; float* inputData = (float *)mInputBuffer;// + idx *totalCount; //if create more batch size for(const auto& yolo : mYoloKernel) { int size = (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT; CUDA_CHECK(cudaMemcpyAsync(inputData, (float *)inputs[i] + idx * size, size * sizeof(float), cudaMemcpyDeviceToHost, stream)); inputData += size; ++ i; } CUDA_CHECK(cudaStreamSynchronize(stream)); inputData = (float *)mInputBuffer ;//+ idx *totalCount; //if create more batch size std::vector <Detection> result; for (const auto& yolo : mYoloKernel) { int stride = yolo.width*yolo.height; for (int j = 0;j < stride ;++j) { for (int k = 0;k < CHECK_COUNT; ++k ) { int beginIdx = (LOCATIONS + 1 + mClassCount)* stride *k + j; int objIndex = beginIdx + LOCATIONS*stride; //check obj float objProb = Logist(inputData[objIndex]); if(objProb <= IGNORE_THRESH) continue; //classes int classId = -1; float maxProb = IGNORE_THRESH; for (int c = 0;c< mClassCount;++c){ float cProb = Logist(inputData[beginIdx + (5 + c) * stride]) * objProb; if(cProb > maxProb){ maxProb = cProb; classId = c; } } if(classId >= 0) { Detection det; int row = j / yolo.width; int cols = j % yolo.width; //Location det.bbox[0] = (cols + Logist(inputData[beginIdx]))/ yolo.width; det.bbox[1] = (row + Logist(inputData[beginIdx+stride]))/ yolo.height; det.bbox[2] = exp(inputData[beginIdx+2*stride]) * yolo.anchors[2*k]; det.bbox[3] = exp(inputData[beginIdx+3*stride]) * yolo.anchors[2*k + 1]; det.classId = classId; det.prob = maxProb; result.emplace_back(det); } } } inputData += (LOCATIONS + 1 + mClassCount) * stride * CHECK_COUNT; } int detCount =result.size(); auto data = (float *)mOutputBuffer;// + idx*(totalOutputCount + 1); //if create more batch size float * begin = data; //copy count; data[0] = (float)detCount; data++; //copy result memcpy(data,result.data(),result.size()*sizeof(Detection)); //(count + det result) CUDA_CHECK(cudaMemcpyAsync(outputs, begin,sizeof(float) + result.size()*sizeof(Detection), cudaMemcpyHostToDevice, stream)); outputs += totalOutputCount + 1; } }; __device__ float Logist(float data){ return 1./(1. 
+ exp(-data)); }; __global__ void CalDetection(const float *input, float *output,int noElements, int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int stride = yoloWidth*yoloHeight; int bnIdx = idx / stride; int curIdx = idx - stride*bnIdx; const float* curInput = input + bnIdx* ((LOCATIONS + 1 + classes) * stride * CHECK_COUNT); for (int k = 0;k < CHECK_COUNT; ++k ) { int beginIdx = (LOCATIONS + 1 + classes)* stride *k + curIdx; int objIndex = beginIdx + LOCATIONS*stride; //check objectness float objProb = Logist(curInput[objIndex]); if(objProb <= IGNORE_THRESH) continue; int row = curIdx / yoloWidth; int cols = curIdx % yoloWidth; //classes int classId = -1; float maxProb = IGNORE_THRESH; for (int c = 0;c<classes;++c){ float cProb = Logist(curInput[beginIdx + (5 + c) * stride]) * objProb; if(cProb > maxProb){ maxProb = cProb; classId = c; } } if(classId >= 0) { float *curOutput = output + bnIdx*outputElem; int resCount = (int)atomicAdd(curOutput,1); char* data = (char * )curOutput + sizeof(float) + resCount*sizeof(Detection); Detection* det = (Detection*)(data); //Location det->bbox[0] = (cols + Logist(curInput[beginIdx]))/ yoloWidth; det->bbox[1] = (row + Logist(curInput[beginIdx+stride]))/ yoloHeight; det->bbox[2] = exp(curInput[beginIdx+2*stride]) * anchors[2*k]; det->bbox[3] = exp(curInput[beginIdx+3*stride]) * anchors[2*k + 1]; det->classId = classId; det->prob = maxProb; } } } void YoloLayerPlugin::forwardGpu(const float *const * inputs,float * output,cudaStream_t stream,int batchSize) { void* devAnchor; size_t AnchorLen = sizeof(float)* CHECK_COUNT*2; CUDA_CHECK(cudaMalloc(&devAnchor,AnchorLen)); int outputElem = 1; for (unsigned int i = 0;i< mYoloKernel.size();++i) { const auto& yolo = mYoloKernel[i]; outputElem += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float); } for(int idx = 0 ;idx < batchSize;++idx) CUDA_CHECK(cudaMemset(output + idx*outputElem, 0, sizeof(float))); int numElem = 0; for (unsigned int i = 0;i< mYoloKernel.size();++i) { const auto& yolo = mYoloKernel[i]; numElem = yolo.width*yolo.height*batchSize; CUDA_CHECK(cudaMemcpy(devAnchor,yolo.anchors,AnchorLen,cudaMemcpyHostToDevice)); CalDetection<<< (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount>>> (inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount ,outputElem); } CUDA_CHECK(cudaFree(devAnchor)); } int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream) { //assert(batchSize == 1); //GPU //CUDA_CHECK(cudaStreamSynchronize(stream)); forwardGpu((const float *const *)inputs,(float *)outputs[0],stream,batchSize); //CPU //forwardCpu((const float *const *)inputs,(float *)outputs[0],stream,batchSize); return 0; }; }
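// ---------------------------------------------------------------------------
// Sketch: per batch item the plugin emits outputElem floats laid out as
// [detection count, Detection, Detection, ...] -- CalDetection atomically
// increments the leading float and appends one Detection record behind it.
// A host-side decode, assuming the buffer was already copied back from the
// device; the function name is illustrative, Detection comes from the Yolo
// headers used above:
#include <vector>
#include <cstring>
static void decodeYoloOutput(const float* hostOutput, std::vector<Detection>& dets)
{
    const int count = (int)hostOutput[0];   // number of detections written
    dets.resize(count);
    // detections start one float past the counter, exactly as CalDetection writes them
    memcpy(dets.data(), hostOutput + 1, count * sizeof(Detection));
}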
#include "blockMatchingKernel.h" #include "_reg_ReadWriteImage.h" #include "_reg_tools.h" #include <vector> #include "_reg_maths.h" //#define USE_TEST_KERNEL //////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////// /* * before it was in the file _reg_blockMatching_kernels.cu * * * Created by Marc Modat and Pankaj Daga on 24/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ // Some parameters that we need for the kernel execution. // The caller is supposed to ensure that the values are set // Number of blocks in each dimension __device__ __constant__ int3 c_BlockDim; __device__ __constant__ uint3 c_ImageSize; // Transformation matrix from nifti header __device__ __constant__ float4 t_m_a; __device__ __constant__ float4 t_m_b; __device__ __constant__ float4 t_m_c; #define BLOCK_WIDTH 4 #define BLOCK_SIZE 64 #define OVERLAP_SIZE 3 #define STEP_SIZE 1 texture<float, 1, cudaReadModeElementType> referenceImageArray_texture; texture<float, 1, cudaReadModeElementType> warpedImageArray_texture; texture<int, 1, cudaReadModeElementType> totalBlock_texture; /* *************************************************************** */ template<class DTYPE> __inline__ __device__ void reg2D_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out) { out[0] = (DTYPE)((double)mat[0 * 4 + 0] * (double)in[0] + (double)mat[0 * 4 + 1] * (double)in[1] + (double)mat[0 * 4 + 2] * 0 + (double)mat[0 * 4 + 3]); out[1] = (DTYPE)((double)mat[1 * 4 + 0] * (double)in[0] + (double)mat[1 * 4 + 1] * (double)in[1] + (double)mat[1 * 4 + 2] * 0 + (double)mat[1 * 4 + 3]); return; } template<class DTYPE> __device__ __inline__ void reg_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out) { out[0] = (DTYPE)((double)mat[0 * 4 + 0] * (double)in[0] + (double)mat[0 * 4 + 1] * (double)in[1] + (double)mat[0 * 4 + 2] * (double)in[2] + (double)mat[0 * 4 + 3]); out[1] = (DTYPE)((double)mat[1 * 4 + 0] * (double)in[0] + (double)mat[1 * 4 + 1] * (double)in[1] + (double)mat[1 * 4 + 2] * (double)in[2] + (double)mat[1 * 4 + 3]); out[2] = (DTYPE)((double)mat[2 * 4 + 0] * (double)in[0] + (double)mat[2 * 4 + 1] * (double)in[1] + (double)mat[2 * 4 + 2] * (double)in[2] + (double)mat[2 * 4 + 3]); return; } // Apply the transformation matrix __device__ inline void apply_affine(const float4 &pt, float * result) { float4 mat = t_m_a; result[0] = (mat.x * pt.x) + (mat.y * pt.y) + (mat.z * pt.z) + (mat.w); mat = t_m_b; result[1] = (mat.x * pt.x) + (mat.y * pt.y) + (mat.z * pt.z) + (mat.w); mat = t_m_c; result[2] = (mat.x * pt.x) + (mat.y * pt.y) + (mat.z * pt.z) + (mat.w); } /* *************************************************************** */ __inline__ __device__ float blockReduce2DSum(float val, int tid) { static __shared__ float shared[16]; shared[tid] = val; __syncthreads(); for (unsigned int i = 8; i > 0; i >>= 1){ if (tid < i) { shared[tid] += shared[tid + i]; } __syncthreads(); } return shared[0]; } /* *************************************************************** */ __inline__ __device__ float blockReduceSum(float val, int tid) { static __shared__ float shared[64]; shared[tid] = val; __syncthreads(); for (unsigned int i = 32; i > 0; i >>= 1){ if (tid < i) { shared[tid] += shared[tid + i]; } __syncthreads(); } return shared[0]; } /* *************************************************************** */ __global__ void 
blockMatchingKernel2D(float *warpedPosition,
                      float *referencePosition,
                      int *mask,
                      float* referenceMatrix_xyz,
                      unsigned int *definedBlock)
{
   extern __shared__ float sWarpedValues[];
   // Compute the current block index
   const unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x;

   const int currentBlockIndex = tex1Dfetch(totalBlock_texture, bid);
   if (currentBlockIndex > -1) {
      const unsigned int idy = threadIdx.x;
      const unsigned int idx = threadIdx.y;
      const unsigned int tid = idy * 4 + idx;
      const unsigned int xImage = blockIdx.x * 4 + idx;
      const unsigned int yImage = blockIdx.y * 4 + idy;

      // populate shared memory with resultImageArray's values
      for (int y=-1; y<2; ++y) {
         const int yImageIn = yImage + y * 4;
         for (int x=-1; x<2; ++x) {
            const int xImageIn = xImage + x * 4;

            const int sharedIndex = ((y+1)*4+idy)*12+(x+1)*4+idx;

            const int indexXYIn = yImageIn * c_ImageSize.x + xImageIn;

            const bool valid =
               (xImageIn > -1 && xImageIn < (int)c_ImageSize.x) &&
               (yImageIn > -1 && yImageIn < (int)c_ImageSize.y);
            sWarpedValues[sharedIndex] = (valid && mask[indexXYIn] > -1) ?
               tex1Dfetch(warpedImageArray_texture, indexXYIn) : nanf("sNaN");
         }
      }

      // for most cases we need this out of the loop:
      // the value if the voxel is in bounds and in the mask, NaN otherwise
      const unsigned long voxIndex = yImage * c_ImageSize.x + xImage;
      const bool referenceInBounds = xImage < c_ImageSize.x && yImage < c_ImageSize.y;
      float rReferenceValue = (referenceInBounds && mask[voxIndex] > -1) ?
         tex1Dfetch(referenceImageArray_texture, voxIndex) : nanf("sNaN");
      const bool finiteReference = isfinite(rReferenceValue);
      rReferenceValue = finiteReference ? rReferenceValue : 0.f;
      const unsigned int referenceSize = __syncthreads_count(finiteReference);

      float bestDisplacement[2] = {nanf("sNaN"), 0.0f};
      float bestCC = 0.0;

      if (referenceSize > 8) {
         // the target values must remain constant throughout the block matching process
         const float referenceMean = __fdividef(blockReduce2DSum(rReferenceValue, tid), referenceSize);
         const float referenceTemp = finiteReference ? rReferenceValue - referenceMean : 0.f;
         const float referenceVar  = blockReduce2DSum(referenceTemp * referenceTemp, tid);

         // iteration over the result blocks (block matching part)
         for (unsigned int y=1; y<8; ++y) {
            for (unsigned int x=1; x<8; ++x) {

               const unsigned int sharedIndex = ( y + idy ) * 12 + x + idx;
               const float rWarpedValue = sWarpedValues[sharedIndex];
               const bool overlap = isfinite(rWarpedValue) && finiteReference;
               const unsigned int currentWarpedSize = __syncthreads_count(overlap);

               if (currentWarpedSize > 8) {

                  // the reference values must remain intact at each loop, so please do not touch this!
                  float newreferenceTemp = referenceTemp;
                  float newreferenceVar  = referenceVar;
                  if (currentWarpedSize != referenceSize){
                     const float newReferenceValue = overlap ? rReferenceValue : 0.0f;
                     const float newReferenceMean = __fdividef(blockReduce2DSum(newReferenceValue, tid), currentWarpedSize);
                     newreferenceTemp = overlap ? newReferenceValue - newReferenceMean : 0.0f;
                     newreferenceVar  = blockReduce2DSum(newreferenceTemp * newreferenceTemp, tid);
                  }

                  const float rChecked   = overlap ? rWarpedValue : 0.0f;
                  const float warpedMean = __fdividef(blockReduce2DSum(rChecked, tid), currentWarpedSize);
                  const float warpedTemp = overlap ? rChecked - warpedMean : 0.0f;
                  const float warpedVar  = blockReduce2DSum(warpedTemp * warpedTemp, tid);

                  const float sumTargetResult = blockReduce2DSum((newreferenceTemp)* (warpedTemp), tid);
                  const float localCC = (newreferenceVar * warpedVar) > 0.0 ?
fabs((sumTargetResult) / sqrt(newreferenceVar * warpedVar)) : 0.0; if (tid == 0 && localCC > bestCC) { bestCC = localCC + 1.0e-7f; bestDisplacement[0] = x - 4.f; bestDisplacement[1] = y - 4.f; } } } } if (tid==0){ const unsigned int posIdx = 2 * currentBlockIndex; const float referencePosition_temp[2] = {(float)xImage, (float)yImage}; bestDisplacement[0] += referencePosition_temp[0]; bestDisplacement[1] += referencePosition_temp[1]; reg2D_mat44_mul_cuda<float>(referenceMatrix_xyz, referencePosition_temp, &referencePosition[posIdx]); reg2D_mat44_mul_cuda<float>(referenceMatrix_xyz, bestDisplacement, &warpedPosition[posIdx]); if (isfinite(bestDisplacement[0])) { atomicAdd(definedBlock, 1); } } } } /* *************************************************************** */ #ifdef USE_TEST_KERNEL __inline__ __device__ float2 REDUCE_TEST(float* sData, float data, unsigned int tid) { sData[tid] = data; __syncthreads(); const bool secondHalf = tid > 63; for (unsigned int i = 32; i > 0; i >>= 1){ if (tid < i) sData[tid] += sData[tid + i]; if (secondHalf && tid < 64 + i) sData[tid] += sData[tid + i]; __syncthreads(); } const float2 temp = make_float2(sData[0], sData[64]); __syncthreads(); return temp; } /* *************************************************************** */ __global__ void blockMatchingKernel3D(float *warpedPosition, float *referencePosition, int *mask, float* referenceMatrix_xyz, unsigned int *definedBlock) { extern __shared__ float sWarpedValues[]; float *sData = &sWarpedValues[12*12*16]; // Compute the current block index const unsigned int bid0 = (2*blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x; const unsigned int bid1 = bid0 + gridDim.x * gridDim.y; int currentBlockIndex[2] = {tex1Dfetch(totalBlock_texture, bid0), tex1Dfetch(totalBlock_texture, bid1)}; currentBlockIndex[1] = (2*blockIdx.z+1)<c_BlockDim.z ? currentBlockIndex[1] : -1; if (currentBlockIndex[0] > -1 || currentBlockIndex[1] > -1) { const unsigned int idx = threadIdx.x; const unsigned int idy = threadIdx.y; const unsigned int idz = threadIdx.z; const unsigned int tid = (idz*4+idy)*4+idx; const unsigned int xImage = blockIdx.x * 4 + idx; const unsigned int yImage = blockIdx.y * 4 + idy; const unsigned int zImage = blockIdx.z * 8 + idz; //populate shared memory with resultImageArray's values for (int z=-1 ; z<2; z+=2) { const int zImageIn = zImage + z * 4; for (int y=-1; y<2; ++y) { const int yImageIn = yImage + y * 4; for (int x=-1; x<2; ++x) { const int xImageIn = xImage + x * 4; const int sharedIndex = (((z+1)*4+idz)*12+(y+1)*4+idy)*12+(x+1)*4+idx; const unsigned int indexXYZIn = xImageIn + c_ImageSize.x * (yImageIn + zImageIn * c_ImageSize.y); const bool valid = (xImageIn > -1 && xImageIn < (int)c_ImageSize.x) && (yImageIn > -1 && yImageIn < (int)c_ImageSize.y) && (zImageIn > -1 && zImageIn < (int)c_ImageSize.z); sWarpedValues[sharedIndex] = (valid && mask[indexXYZIn] > -1) ? tex1Dfetch(warpedImageArray_texture, indexXYZIn) : nanf("sNaN"); } } } const unsigned int voxIndex = ( zImage * c_ImageSize.y + yImage ) * c_ImageSize.x + xImage; const bool referenceInBounds = xImage < c_ImageSize.x && yImage < c_ImageSize.y && zImage < c_ImageSize.z; float rReferenceValue = (referenceInBounds && mask[voxIndex] > -1) ? tex1Dfetch(referenceImageArray_texture, voxIndex) : nanf("sNaN"); const bool finiteReference = isfinite(rReferenceValue); rReferenceValue = finiteReference ? rReferenceValue : 0.f; float2 tempVal = REDUCE_TEST(sData, finiteReference ? 
1.0f : 0.0f, tid); const uint2 referenceSize = make_uint2((uint)tempVal.x, (uint)tempVal.y); float2 bestValue = make_float2(0.f, 0.f); float bestDisp[2][3]; bestDisp[0][0] = bestDisp[1][0] = nanf("sNaN"); if (referenceSize.x > 32 || referenceSize.y > 32) { float2 referenceMean=REDUCE_TEST(sData, rReferenceValue, tid); referenceMean.x /= (float)referenceSize.x; referenceMean.y /= (float)referenceSize.y; float referenceTemp; if(tid>63) referenceTemp = finiteReference ? rReferenceValue - referenceMean.y : 0.f; else referenceTemp = finiteReference ? rReferenceValue - referenceMean.x : 0.f; float2 referenceVar = REDUCE_TEST(sData, referenceTemp*referenceTemp, tid); // iteration over the result blocks (block matching part) for (unsigned int z=1; z<8; ++z) { for (unsigned int y=1; y<8; ++y) { for (unsigned int x=1; x<8; ++x) { const unsigned int sharedIndex = ( (z+idz) * 12 + y + idy ) * 12 + x + idx; const float rWarpedValue = sWarpedValues[sharedIndex]; const bool overlap = isfinite(rWarpedValue) && finiteReference; tempVal = REDUCE_TEST(sData, overlap ? 1.0f : 0.0f, tid); const uint2 currentWarpedSize = make_uint2((uint)tempVal.x, (uint)tempVal.y); if (currentWarpedSize.x > 32 || currentWarpedSize.y > 32) { float newreferenceTemp = referenceTemp; float2 newreferenceVar = referenceVar; if (currentWarpedSize.x!=referenceSize.x || currentWarpedSize.y!=referenceSize.y){ const float newReferenceValue = overlap ? rReferenceValue : 0.0f; float2 newReferenceMean = REDUCE_TEST(sData, newReferenceValue, tid); newReferenceMean.x /= (float)currentWarpedSize.x; newReferenceMean.y /= (float)currentWarpedSize.y; // recompute the zero-mean reference over the current overlap; referenceTemp itself must stay intact for later iterations if(tid>63) newreferenceTemp = overlap ? newReferenceValue - newReferenceMean.y : 0.f; else newreferenceTemp = overlap ? newReferenceValue - newReferenceMean.x : 0.f; newreferenceVar = REDUCE_TEST(sData, newreferenceTemp * newreferenceTemp, tid); } const float rChecked = overlap ? rWarpedValue : 0.0f; float2 warpedMean = REDUCE_TEST(sData, rChecked, tid); warpedMean.x /= (float)currentWarpedSize.x; warpedMean.y /= (float)currentWarpedSize.y; float warpedTemp; if(tid>63) warpedTemp = overlap ? rChecked - warpedMean.y : 0.f; else warpedTemp = overlap ? 
rChecked - warpedMean.x : 0.f; const float2 warpedVar = REDUCE_TEST(sData, warpedTemp*warpedTemp, tid); const float2 sumTargetResult = REDUCE_TEST(sData, newreferenceTemp*warpedTemp, tid); if (tid==0 && currentWarpedSize.x > 32 ){ const float localCC = fabs(sumTargetResult.x * rsqrtf(newreferenceVar.x * warpedVar.x)); if(localCC > bestValue.x) { bestValue.x = localCC; bestDisp[0][0] = x - 4.f; bestDisp[0][1] = y - 4.f; bestDisp[0][2] = z - 4.f; } } if (tid==64 && currentWarpedSize.y > 32 ){ const float localCC = fabs(sumTargetResult.y * rsqrtf(newreferenceVar.y * warpedVar.y)); if(localCC > bestValue.y) { bestValue.y = localCC; bestDisp[1][0] = x - 4.f; bestDisp[1][1] = y - 4.f; bestDisp[1][2] = z - 4.f; } } __syncthreads(); } } } } } if(tid==0 && currentBlockIndex[0]>-1){ const unsigned int posIdx = 3 * currentBlockIndex[0]; warpedPosition[posIdx] = NAN; if (isfinite(bestDisp[0][0])){ const float referencePosition_temp[3] = { (float)xImage, (float)yImage, (float)zImage}; bestDisp[0][0] += referencePosition_temp[0]; bestDisp[0][1] += referencePosition_temp[1]; bestDisp[0][2] += referencePosition_temp[2]; reg_mat44_mul_cuda<float>(referenceMatrix_xyz, referencePosition_temp, &referencePosition[posIdx]); reg_mat44_mul_cuda<float>(referenceMatrix_xyz, bestDisp[0], &warpedPosition[posIdx]); atomicAdd(definedBlock, 1); } } if(tid==64 && currentBlockIndex[1]>-1){ const unsigned int posIdx = 3 * currentBlockIndex[1]; warpedPosition[posIdx] = NAN; if (isfinite(bestDisp[1][0])){ const float referencePosition_temp[3] = {(float)xImage, (float)yImage, (float)zImage}; bestDisp[1][0] += referencePosition_temp[0]; bestDisp[1][1] += referencePosition_temp[1]; bestDisp[1][2] += referencePosition_temp[2]; reg_mat44_mul_cuda<float>(referenceMatrix_xyz, referencePosition_temp, &referencePosition[posIdx]); reg_mat44_mul_cuda<float>(referenceMatrix_xyz, bestDisp[1], &warpedPosition[posIdx]); atomicAdd(definedBlock, 1); } } } } #else /* *************************************************************** */ __global__ void blockMatchingKernel3D(float *warpedPosition, float *referencePosition, int *mask, float* referenceMatrix_xyz, unsigned int *definedBlock) { extern __shared__ float sWarpedValues[]; // Compute the current block index const unsigned int bid = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x ; const int currentBlockIndex = tex1Dfetch(totalBlock_texture, bid); if (currentBlockIndex > -1) { const unsigned int idx = threadIdx.x; const unsigned int idy = threadIdx.y; const unsigned int idz = threadIdx.z; const unsigned int tid = (idz*4+idy)*4+idx; const unsigned int xImage = blockIdx.x * 4 + idx; const unsigned int yImage = blockIdx.y * 4 + idy; const unsigned int zImage = blockIdx.z * 4 + idz; //populate shared memory with resultImageArray's values for (int z=-1 ; z<2; ++z) { const int zImageIn = zImage + z * 4; for (int y=-1; y<2; ++y) { const int yImageIn = yImage + y * 4; for (int x=-1; x<2; ++x) { const int xImageIn = xImage + x * 4; const int sharedIndex = (((z+1)*4+idz)*12+(y+1)*4+idy)*12+(x+1)*4+idx; const unsigned int indexXYZIn = xImageIn + c_ImageSize.x * (yImageIn + zImageIn * c_ImageSize.y); const bool valid = (xImageIn > -1 && xImageIn < (int)c_ImageSize.x) && (yImageIn > -1 && yImageIn < (int)c_ImageSize.y) && (zImageIn > -1 && zImageIn < (int)c_ImageSize.z); sWarpedValues[sharedIndex] = (valid && mask[indexXYZIn] > -1) ? 
tex1Dfetch(warpedImageArray_texture, indexXYZIn) : nanf("sNaN"); //for some reason the mask here creates problems } } } // for most cases we need this out of the loop: the reference value if the block is a full 4x4x4, NaN otherwise const unsigned int voxIndex = ( zImage * c_ImageSize.y + yImage ) * c_ImageSize.x + xImage; const bool referenceInBounds = xImage < c_ImageSize.x && yImage < c_ImageSize.y && zImage < c_ImageSize.z; float rReferenceValue = (referenceInBounds && mask[voxIndex] > -1) ? tex1Dfetch(referenceImageArray_texture, voxIndex) : nanf("sNaN"); const bool finiteReference = isfinite(rReferenceValue); rReferenceValue = finiteReference ? rReferenceValue : 0.f; const unsigned int referenceSize = __syncthreads_count(finiteReference); float bestDisplacement[3] = {nanf("sNaN"), 0.0f, 0.0f }; float bestCC = 0.0f; if (referenceSize > 32) { //the reference values must remain constant throughout the block matching process const float referenceMean = __fdividef(blockReduceSum(rReferenceValue, tid), referenceSize); const float referenceTemp = finiteReference ? rReferenceValue - referenceMean : 0.f; const float referenceVar = blockReduceSum(referenceTemp * referenceTemp, tid); // iteration over the result blocks (block matching part) for (unsigned int z=1; z<8; ++z) { for (unsigned int y=1; y<8; ++y) { for (unsigned int x=1; x<8; ++x) { const unsigned int sharedIndex = ( (z+idz) * 12 + y + idy ) * 12 + x + idx; const float rWarpedValue = sWarpedValues[sharedIndex]; const bool overlap = isfinite(rWarpedValue) && finiteReference; const unsigned int currentWarpedSize = __syncthreads_count(overlap); if (currentWarpedSize > 32) { //the reference values must remain intact at each loop, so please do not touch this! float newreferenceTemp = referenceTemp; float newreferenceVar = referenceVar; if (currentWarpedSize != referenceSize){ const float newReferenceValue = overlap ? rReferenceValue : 0.0f; const float newReferenceMean = __fdividef(blockReduceSum(newReferenceValue, tid), currentWarpedSize); newreferenceTemp = overlap ? newReferenceValue - newReferenceMean : 0.0f; newreferenceVar = blockReduceSum(newreferenceTemp * newreferenceTemp, tid); } const float rChecked = overlap ? rWarpedValue : 0.0f; const float warpedMean = __fdividef(blockReduceSum(rChecked, tid), currentWarpedSize); const float warpedTemp = overlap ? rChecked - warpedMean : 0.0f; const float warpedVar = blockReduceSum(warpedTemp * warpedTemp, tid); const float sumTargetResult = blockReduceSum((newreferenceTemp)* (warpedTemp), tid); const float localCC = (newreferenceVar * warpedVar) > 0.0 ? 
fabs((sumTargetResult) / sqrt(newreferenceVar * warpedVar)) : 0.0; if (tid == 0 && localCC > bestCC) { bestCC = localCC + 1.0e-7f; bestDisplacement[0] = x - 4.f; bestDisplacement[1] = y - 4.f; bestDisplacement[2] = z - 4.f; } } } } } if (tid==0) { const unsigned int posIdx = 3 * currentBlockIndex; const float referencePosition_temp[3] = { (float)xImage, (float)yImage, (float)zImage }; bestDisplacement[0] += referencePosition_temp[0]; bestDisplacement[1] += referencePosition_temp[1]; bestDisplacement[2] += referencePosition_temp[2]; reg_mat44_mul_cuda<float>(referenceMatrix_xyz, referencePosition_temp, &referencePosition[posIdx]); reg_mat44_mul_cuda<float>(referenceMatrix_xyz, bestDisplacement, &warpedPosition[posIdx]); if (isfinite(bestDisplacement[0])) { atomicAdd(definedBlock, 1); } } } } #endif /* *************************************************************** */ void block_matching_method_gpu(nifti_image *targetImage, _reg_blockMatchingParam *params, float **targetImageArray_d, float **resultImageArray_d, float **referencePosition_d, float **warpedPosition_d, int **totalBlock_d, int **mask_d, float** referenceMat_d) { // Copy some required parameters over to the device uint3 imageSize = make_uint3(targetImage->nx, targetImage->ny, targetImage->nz); uint3 blockSize = make_uint3(params->blockNumber[0], params->blockNumber[1], params->blockNumber[2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ImageSize,&imageSize,sizeof(uint3))); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_BlockDim,&blockSize,sizeof(uint3))); // Texture binding const unsigned int numBlocks = params->blockNumber[0] * params->blockNumber[1] * params->blockNumber[2]; NR_CUDA_SAFE_CALL(cudaBindTexture(0, referenceImageArray_texture, *targetImageArray_d, targetImage->nvox * sizeof(float))); NR_CUDA_SAFE_CALL(cudaBindTexture(0, warpedImageArray_texture, *resultImageArray_d, targetImage->nvox * sizeof(float))); NR_CUDA_SAFE_CALL(cudaBindTexture(0, totalBlock_texture, *totalBlock_d, numBlocks * sizeof(int))); unsigned int *definedBlock_d; unsigned int *definedBlock_h = (unsigned int*) malloc(sizeof(unsigned int)); *definedBlock_h = 0; NR_CUDA_SAFE_CALL(cudaMalloc((void** )(&definedBlock_d), sizeof(unsigned int))); NR_CUDA_SAFE_CALL(cudaMemcpy(definedBlock_d, definedBlock_h, sizeof(unsigned int), cudaMemcpyHostToDevice)); if (params->stepSize!=1 || params->voxelCaptureRange!=3){ reg_print_msg_error("The block matching CUDA kernel only supports a step size of 1 and a voxel capture range of 3"); reg_exit(); } #ifdef USE_TEST_KERNEL dim3 BlockDims1D(4,4,8); dim3 BlocksGrid3D( params->blockNumber[0], params->blockNumber[1], (unsigned int)reg_ceil((float)params->blockNumber[2]/2.f)); unsigned int sMem = (128 + 4*3 * 4*3 * 4*4) * sizeof(float); #else dim3 BlockDims1D(4,4,4); dim3 BlocksGrid3D( params->blockNumber[0], params->blockNumber[1], params->blockNumber[2]); unsigned int sMem = (64 + 4*3 * 4*3 * 4*3) * sizeof(float); // (3*4)^3 #endif if (targetImage->nz == 1){ BlockDims1D.z=1; BlocksGrid3D.z=1; sMem = (16 + 144) * sizeof(float); // (3*4)^2 blockMatchingKernel2D <<<BlocksGrid3D, BlockDims1D, sMem>>>(*warpedPosition_d, *referencePosition_d, *mask_d, *referenceMat_d, definedBlock_d); } else { blockMatchingKernel3D <<<BlocksGrid3D, BlockDims1D, sMem>>>(*warpedPosition_d, *referencePosition_d, *mask_d, *referenceMat_d, definedBlock_d); } #ifndef NDEBUG NR_CUDA_CHECK_KERNEL(BlocksGrid3D, BlockDims1D); #else NR_CUDA_SAFE_CALL(cudaThreadSynchronize()); #endif NR_CUDA_SAFE_CALL(cudaMemcpy((void * )definedBlock_h, (void * )definedBlock_d, sizeof(unsigned int), 
cudaMemcpyDeviceToHost)); params->definedActiveBlockNumber = *definedBlock_h; NR_CUDA_SAFE_CALL(cudaUnbindTexture(referenceImageArray_texture)); NR_CUDA_SAFE_CALL(cudaUnbindTexture(warpedImageArray_texture)); NR_CUDA_SAFE_CALL(cudaUnbindTexture(totalBlock_texture)); free(definedBlock_h); cudaFree(definedBlock_d); } /* *************************************************************** */ #endif //_REG_BLOCKMATCHING_GPU_CU
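/* *************************************************************** */
/* A minimal host-side reference of the similarity score used by the kernels
 * above, which rank each candidate displacement by the absolute normalised
 * cross-correlation (NCC) over the voxels where both blocks are finite. This
 * is a sketch for unit-testing the CUDA path only; the helper name
 * ncc_reference and the flat block layout are illustrative assumptions, not
 * part of the NiftyReg API. */
#include <cmath>
#include <cstddef>
static float ncc_reference(const float *ref, const float *warped, size_t n)
{
   double sumR = 0., sumW = 0.;
   size_t count = 0; // voxels where both blocks are defined (finite)
   for (size_t i = 0; i < n; ++i)
      if (std::isfinite(ref[i]) && std::isfinite(warped[i])) {
         sumR += ref[i]; sumW += warped[i]; ++count;
      }
   if (count == 0) return 0.f;
   const double meanR = sumR / count, meanW = sumW / count;
   double varR = 0., varW = 0., cov = 0.;
   for (size_t i = 0; i < n; ++i)
      if (std::isfinite(ref[i]) && std::isfinite(warped[i])) {
         const double r = ref[i] - meanR, w = warped[i] - meanW;
         varR += r * r; varW += w * w; cov += r * w;
      }
   // Degenerate (constant) blocks score zero, as in the kernels above
   return (varR * varW > 0.) ? (float)(std::fabs(cov) / std::sqrt(varR * varW)) : 0.f;
}
/* As in the device code, the reference statistics are recomputed over the
 * current overlap set, so a shrinking overlap changes both means and variances. */
/* *************************************************************** */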
#include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/pad.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/variable.hpp> namespace nbla { using cuda::Index_t; struct AxisParam { Index_t x_stride; Index_t y_stride; Index_t y_shape; struct { Index_t first; Index_t second; } pad; }; namespace pad_constant_impl { template <typename T, int DIMENSIONS> __inline__ __device__ void d_pad_forward(const Index_t y_idx, const T *x, T *y, const int ndim, const AxisParam *params, const T val) { const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim; Index_t y_tmp = y_idx; Index_t x_idx = 0; #pragma unroll for (int axis = 0; axis < NDIM; axis++) { const auto &param = params[axis]; const auto axis_idx = y_tmp / param.y_stride; y_tmp -= axis_idx * param.y_stride; if ((axis_idx < param.pad.first) || (axis_idx >= param.y_shape - param.pad.second)) { y[y_idx] = val; return; } x_idx += (axis_idx - param.pad.first) * param.x_stride; } y[y_idx] = x[x_idx]; } template <typename T, int DIMENSIONS = 0> __global__ void pad_forward(const Index_t size, const T *x, T *y, const int ndim, const AxisParam *params, const T constant_value) { extern __shared__ AxisParam shared[]; const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim; if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) { auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x]; reinterpret_cast<int *>(shared)[threadIdx.x] = tmp; } __syncthreads(); NBLA_CUDA_KERNEL_LOOP(i, size) { d_pad_forward<T, DIMENSIONS>(i, x, y, ndim, shared, constant_value); } } template <typename T, bool ACCUMULATE, int DIMENSIONS> __inline__ __device__ void d_pad_backward(const Index_t y_idx, const T *dy, T *dx, const int ndim, const AxisParam *params) { const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim; Index_t y_tmp = y_idx; Index_t x_idx = 0; #pragma unroll for (int axis = 0; axis < NDIM; axis++) { const auto &param = params[axis]; const auto axis_idx = y_tmp / param.y_stride; y_tmp -= axis_idx * param.y_stride; if ((axis_idx < param.pad.first) || (axis_idx >= param.y_shape - param.pad.second)) { return; } x_idx += (axis_idx - param.pad.first) * param.x_stride; } dx[x_idx] = ACCUMULATE ? dx[x_idx] + dy[y_idx] : dy[y_idx]; } template <typename T, int DIMENSIONS = 0, bool ACCUMULATE = false> __global__ void pad_backward(const Index_t size, const T *dy, T *dx, const int ndim, const AxisParam *params) { extern __shared__ AxisParam shared[]; const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim; if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) { auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x]; reinterpret_cast<int *>(shared)[threadIdx.x] = tmp; } __syncthreads(); NBLA_CUDA_KERNEL_LOOP(i, size) { d_pad_backward<T, ACCUMULATE, DIMENSIONS>(i, dy, dx, ndim, shared); } } } // namespace pad_constant_impl namespace pad_reflect_impl { __inline__ __device__ Index_t reflect_index(Index_t idx, Index_t len) { return len > 0 ? std::abs(((idx / len) & 1) * len - (idx % len)) : 0; } template <typename T, int DIMENSIONS> __inline__ __device__ void d_pad_reflect_forward(const Index_t y_idx, const T *x, T *y, const int ndim, const AxisParam *params) { const int NDIM = DIMENSIONS > 0 ? 
DIMENSIONS : ndim; Index_t y_tmp = y_idx; Index_t x_idx = 0; #pragma unroll for (int axis = 0; axis < NDIM; axis++) { const auto &param = params[axis]; const auto axis_idx = y_tmp / param.y_stride; y_tmp -= axis_idx * param.y_stride; const auto src_len = param.y_shape - param.pad.first - param.pad.second; Index_t src_axis_idx = std::abs(axis_idx - param.pad.first); const auto src_axis_reflect_idx = reflect_index(src_axis_idx, src_len - 1); x_idx += src_axis_reflect_idx * param.x_stride; } y[y_idx] = x[x_idx]; } template <typename T, int DIMENSIONS = 0> __global__ void pad_reflect_forward(const Index_t size, const T *x, T *y, const int ndim, const AxisParam *params) { extern __shared__ AxisParam shared[]; const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim; if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) { auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x]; reinterpret_cast<int *>(shared)[threadIdx.x] = tmp; } __syncthreads(); NBLA_CUDA_KERNEL_LOOP(i, size) { d_pad_reflect_forward<T, DIMENSIONS>(i, x, y, ndim, shared); } } template <typename T, int DIMENSIONS> __inline__ __device__ void d_pad_reflect_backward(const Index_t y_idx, const T *dy, T *dx, const int ndim, const AxisParam *params) { const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim; Index_t y_tmp = y_idx; Index_t x_idx = 0; #pragma unroll for (int axis = 0; axis < NDIM; axis++) { const auto &param = params[axis]; const auto axis_idx = y_tmp / param.y_stride; y_tmp -= axis_idx * param.y_stride; const auto dst_len = param.y_shape - param.pad.first - param.pad.second; Index_t dst_axis_idx = std::abs(axis_idx - param.pad.first); const auto dst_axis_reflect_idx = reflect_index(dst_axis_idx, dst_len - 1); x_idx += dst_axis_reflect_idx * param.x_stride; } atomic_add(&dx[x_idx], dy[y_idx]); } template <typename T, int DIMENSIONS = 0> __global__ void pad_reflect_backward(const Index_t size, const T *dy, T *dx, const int ndim, const AxisParam *params) { extern __shared__ AxisParam shared[]; const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim; if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) { auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x]; reinterpret_cast<int *>(shared)[threadIdx.x] = tmp; } __syncthreads(); NBLA_CUDA_KERNEL_LOOP(i, size) { d_pad_reflect_backward<T, DIMENSIONS>(i, dy, dx, ndim, shared); } } } // namespace pad_reflect_impl namespace pad_repeat_impl { template <typename T, int DIMENSIONS> __inline__ __device__ void d_pad_repeat_forward(const Index_t y_idx, const T *x, T *y, const int ndim, const AxisParam *params) { const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim; Index_t y_tmp = y_idx; Index_t x_idx = 0; #pragma unroll for (int axis = 0; axis < NDIM; axis++) { const auto &param = params[axis]; const auto axis_idx = y_tmp / param.y_stride; y_tmp -= axis_idx * param.y_stride; int src_max_idx = param.y_shape - param.pad.first - param.pad.second - 1; x_idx += min(src_max_idx, max(0, static_cast<int>(axis_idx - param.pad.first))) * param.x_stride; } y[y_idx] = x[x_idx]; } template <typename T, int DIMENSIONS = 0> __global__ void pad_repeat_forward(const Index_t size, const T *x, T *y, const int ndim, const AxisParam *params) { extern __shared__ AxisParam shared[]; const int NDIM = DIMENSIONS > 0 ? 
DIMENSIONS : ndim; if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) { auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x]; reinterpret_cast<int *>(shared)[threadIdx.x] = tmp; } __syncthreads(); NBLA_CUDA_KERNEL_LOOP(i, size) { d_pad_repeat_forward<T, DIMENSIONS>(i, x, y, ndim, shared); } } template <typename T, int DIMENSIONS> __inline__ __device__ void d_pad_repeat_backward(const Index_t y_idx, const T *dy, T *dx, const int ndim, const AxisParam *params) { const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim; Index_t y_tmp = y_idx; Index_t x_idx = 0; #pragma unroll for (int axis = 0; axis < NDIM; axis++) { const auto &param = params[axis]; const auto axis_idx = y_tmp / param.y_stride; y_tmp -= axis_idx * param.y_stride; int dst_max_idx = param.y_shape - param.pad.first - param.pad.second - 1; x_idx += min(dst_max_idx, max(0, static_cast<int>(axis_idx - param.pad.first))) * param.x_stride; } atomic_add(&dx[x_idx], dy[y_idx]); } template <typename T, int DIMENSIONS = 0> __global__ void pad_repeat_backward(const Index_t size, const T *dy, T *dx, const int ndim, const AxisParam *params) { extern __shared__ AxisParam shared[]; const int NDIM = DIMENSIONS > 0 ? DIMENSIONS : ndim; if (threadIdx.x < NDIM * sizeof(AxisParam) / sizeof(int)) { auto tmp = reinterpret_cast<const int *>(params)[threadIdx.x]; reinterpret_cast<int *>(shared)[threadIdx.x] = tmp; } __syncthreads(); NBLA_CUDA_KERNEL_LOOP(i, size) { d_pad_repeat_backward<T, DIMENSIONS>(i, dy, dx, ndim, shared); } } } // namespace pad_repeat_impl template <typename T> void PadCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { Pad<T>::setup_impl(inputs, outputs); cuda_set_device(this->device_); Variable &x_var = *inputs[0]; Variable &y_var = *outputs[0]; std::vector<AxisParam> h_params; h_params.reserve(this->padding_.size()); for (int axis = 0; axis < this->padding_.size(); axis++) { AxisParam axis_param; axis_param.x_stride = this->x_stride_.at(axis); axis_param.y_stride = this->y_stride_.at(axis); axis_param.y_shape = this->y_shape_.at(axis); axis_param.pad.first = this->padding_.at(axis).first; axis_param.pad.second = this->padding_.at(axis).second; h_params.push_back(axis_param); } auto bytes = h_params.size() * sizeof(AxisParam); this->parameter_memory_.reshape(Shape_t{static_cast<Size_t>(bytes)}, true); auto d_params = this->parameter_memory_.cast(get_dtype<char>(), this->ctx_, true) ->template pointer<AxisParam>(); NBLA_CUDA_CHECK( cudaMemcpy(d_params, h_params.data(), bytes, cudaMemcpyHostToDevice)); } template <typename T> void PadCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); Variable &x_var = *inputs[0]; Variable &y_var = *outputs[0]; const auto y_size = y_var.size(); const auto ndim = this->padding_.size(); auto x = x_var.get_data_pointer<Tcu>(this->ctx_); auto y = y_var.cast_data_and_get_pointer<Tcu>(this->ctx_, true); auto threads = 128; auto blocks = cuda_get_blocks_by_size(y_var.size()); auto shared = this->parameter_memory_.size(); auto params = this->parameter_memory_.get(get_dtype<char>(), this->ctx_) ->template const_pointer<AxisParam>(); if (this->pad_mode_ == this->PAD_CONSTANT) { using pad_constant_impl::pad_forward; auto cvalue = this->constant_value_; void (*kernel)(const Index_t, const Tcu *, Tcu *, const int, const AxisParam *, const Tcu); if (ndim == 1) { kernel = pad_forward<Tcu, 1>; } else if (ndim == 2) { kernel = pad_forward<Tcu, 2>; } else if (ndim == 3) { kernel = pad_forward<Tcu, 3>; } else if (ndim == 4) { 
kernel = pad_forward<Tcu, 4>; } else { kernel = pad_forward<Tcu>; } kernel<<<blocks, threads, shared>>>(y_size, x, y, ndim, params, cvalue); NBLA_CUDA_KERNEL_CHECK(); } else if (this->pad_mode_ == this->PAD_REFLECT) { using namespace pad_reflect_impl; void (*kernel)(const Index_t, const Tcu *, Tcu *, const int, const AxisParam *); if (ndim == 1) { kernel = pad_reflect_forward<Tcu, 1>; } else if (ndim == 2) { kernel = pad_reflect_forward<Tcu, 2>; } else if (ndim == 3) { kernel = pad_reflect_forward<Tcu, 3>; } else if (ndim == 4) { kernel = pad_reflect_forward<Tcu, 4>; } else { kernel = pad_reflect_forward<Tcu>; } kernel<<<blocks, threads, shared>>>(y_size, x, y, ndim, params); NBLA_CUDA_KERNEL_CHECK(); } else if (this->pad_mode_ == this->PAD_REPEAT) { using pad_repeat_impl::pad_repeat_forward; void (*kernel)(const Index_t, const Tcu *, Tcu *, const int, const AxisParam *); if (ndim == 1) { kernel = pad_repeat_forward<Tcu, 1>; } else if (ndim == 2) { kernel = pad_repeat_forward<Tcu, 2>; } else if (ndim == 3) { kernel = pad_repeat_forward<Tcu, 3>; } else if (ndim == 4) { kernel = pad_repeat_forward<Tcu, 4>; } else { kernel = pad_repeat_forward<Tcu>; } kernel<<<blocks, threads, shared>>>(y_size, x, y, ndim, params); NBLA_CUDA_KERNEL_CHECK(); } } template <typename T> void PadCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum_gradient) { if (propagate_down[0]) { cuda_set_device(this->device_); auto accum = accum_gradient[0]; Variable &x_var = *inputs[0]; Variable &y_var = *outputs[0]; const auto ndim = this->padding_.size(); auto dy = y_var.get_grad_pointer<Tcu>(this->ctx_); if (this->pad_mode_ == this->PAD_CONSTANT) { using namespace pad_constant_impl; auto dx = x_var.cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum); auto threads = 128; auto blocks = cuda_get_blocks_by_size(y_var.size()); auto shared = this->parameter_memory_.size(); auto params = this->parameter_memory_.get(get_dtype<char>(), this->ctx_) ->template const_pointer<AxisParam>(); void (*kernel)(const Index_t, const Tcu *, Tcu *, const int, const AxisParam *); if (ndim == 1) { kernel = accum ? pad_backward<Tcu, 1, true> : pad_backward<Tcu, 1>; } else if (ndim == 2) { kernel = accum ? pad_backward<Tcu, 2, true> : pad_backward<Tcu, 2>; } else if (ndim == 3) { kernel = accum ? pad_backward<Tcu, 3, true> : pad_backward<Tcu, 3>; } else if (ndim == 4) { kernel = accum ? pad_backward<Tcu, 4, true> : pad_backward<Tcu, 4>; } else { kernel = accum ? 
pad_backward<Tcu, 0, true> : pad_backward<Tcu>; } kernel<<<blocks, threads, shared>>>(y_var.size(), dy, dx, ndim, params); NBLA_CUDA_KERNEL_CHECK(); } else if (this->pad_mode_ == this->PAD_REFLECT) { using namespace pad_reflect_impl; if (!accum) { x_var.grad()->zero(); } auto dx = x_var.cast_grad_and_get_pointer<Tcu>(this->ctx_, false); auto threads = 128; auto blocks = cuda_get_blocks_by_size(y_var.size()); auto shared = this->parameter_memory_.size(); auto params = this->parameter_memory_.get(get_dtype<char>(), this->ctx_) ->template const_pointer<AxisParam>(); void (*kernel)(const Index_t, const Tcu *, Tcu *, const int, const AxisParam *); if (ndim == 1) { kernel = pad_reflect_backward<Tcu, 1>; } else if (ndim == 2) { kernel = pad_reflect_backward<Tcu, 2>; } else if (ndim == 3) { kernel = pad_reflect_backward<Tcu, 3>; } else if (ndim == 4) { kernel = pad_reflect_backward<Tcu, 4>; } else { kernel = pad_reflect_backward<Tcu>; } kernel<<<blocks, threads, shared>>>(y_var.size(), dy, dx, ndim, params); NBLA_CUDA_KERNEL_CHECK(); } else if (this->pad_mode_ == this->PAD_REPEAT) { using namespace pad_repeat_impl; if (!accum) { x_var.grad()->zero(); } auto dx = x_var.cast_grad_and_get_pointer<Tcu>(this->ctx_, false); auto threads = 128; auto blocks = cuda_get_blocks_by_size(y_var.size()); auto shared = this->parameter_memory_.size(); auto params = this->parameter_memory_.get(get_dtype<char>(), this->ctx_) ->template const_pointer<AxisParam>(); void (*kernel)(const Index_t, const Tcu *, Tcu *, const int, const AxisParam *); if (ndim == 1) { kernel = pad_repeat_backward<Tcu, 1>; } else if (ndim == 2) { kernel = pad_repeat_backward<Tcu, 2>; } else if (ndim == 3) { kernel = pad_repeat_backward<Tcu, 3>; } else if (ndim == 4) { kernel = pad_repeat_backward<Tcu, 4>; } else { kernel = pad_repeat_backward<Tcu>; } kernel<<<blocks, threads, shared>>>(y_var.size(), dy, dx, ndim, params); NBLA_CUDA_KERNEL_CHECK(); } } } } // namespace nbla
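/* The reflect_index() device helper above folds a non-negative offset into
 * [0, len] as a triangle wave, which is what gives PAD_REFLECT its mirrored
 * border without repeating the edge sample (the callers pre-apply std::abs
 * for offsets left of the source). A host-side sketch of the same mapping;
 * the name reflect_index_host and the demo values are illustrative only. */
#include <cstdio>
#include <cstdlib>
static long reflect_index_host(long idx, long len) // len = source length - 1
{
  return len > 0 ? std::labs(((idx / len) & 1) * len - (idx % len)) : 0;
}
int main()
{
  // For a source axis of length 4 (len == 3), offsets 0..9 map to
  // 0 1 2 3 2 1 0 1 2 3 -- the index pattern PAD_REFLECT gathers from.
  for (long i = 0; i < 10; ++i) printf("%ld ", reflect_index_host(i, 3));
  printf("\n");
  return 0;
}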
typedef uint32_t sph_u32; #define C32(x) (x) #define T32(x) (x) #define INPUT_BLOCK_ADD do { \ B0 = T32(B0 + M0); \ B1 = T32(B1 + M1); \ B2 = T32(B2 + M2); \ B3 = T32(B3 + M3); \ B4 = T32(B4 + M4); \ B5 = T32(B5 + M5); \ B6 = T32(B6 + M6); \ B7 = T32(B7 + M7); \ B8 = T32(B8 + M8); \ B9 = T32(B9 + M9); \ BA = T32(BA + MA); \ BB = T32(BB + MB); \ BC = T32(BC + MC); \ BD = T32(BD + MD); \ BE = T32(BE + ME); \ BF = T32(BF + MF); \ } while (0) #define INPUT_BLOCK_SUB do { \ C0 = T32(C0 - M0); \ C1 = T32(C1 - M1); \ C2 = T32(C2 - M2); \ C3 = T32(C3 - M3); \ C4 = T32(C4 - M4); \ C5 = T32(C5 - M5); \ C6 = T32(C6 - M6); \ C7 = T32(C7 - M7); \ C8 = T32(C8 - M8); \ C9 = T32(C9 - M9); \ CA = T32(CA - MA); \ CB = T32(CB - MB); \ CC = T32(CC - MC); \ CD = T32(CD - MD); \ CE = T32(CE - ME); \ CF = T32(CF - MF); \ } while (0) #define XOR_W do { \ A00 ^= Wlow; \ A01 ^= Whigh; \ } while (0) #define SWAP(v1, v2) do { \ sph_u32 tmp = (v1); \ (v1) = (v2); \ (v2) = tmp; \ } while (0) #define SWAP_BC do { \ SWAP(B0, C0); \ SWAP(B1, C1); \ SWAP(B2, C2); \ SWAP(B3, C3); \ SWAP(B4, C4); \ SWAP(B5, C5); \ SWAP(B6, C6); \ SWAP(B7, C7); \ SWAP(B8, C8); \ SWAP(B9, C9); \ SWAP(BA, CA); \ SWAP(BB, CB); \ SWAP(BC, CC); \ SWAP(BD, CD); \ SWAP(BE, CE); \ SWAP(BF, CF); \ } while (0) #define PERM_ELT(xa0, xa1, xb0, xb1, xb2, xb3, xc, xm) do { \ xa0 = T32((xa0 \ ^ (((xa1 << 15) | (xa1 >> 17)) * 5U) \ ^ xc) * 3U) \ ^ xb1 ^ (xb2 & ~xb3) ^ xm; \ xb0 = T32(~(((xb0 << 1) | (xb0 >> 31)) ^ xa0)); \ } while (0) #define PERM_STEP_0 do { \ PERM_ELT(A00, A0B, B0, BD, B9, B6, C8, M0); \ PERM_ELT(A01, A00, B1, BE, BA, B7, C7, M1); \ PERM_ELT(A02, A01, B2, BF, BB, B8, C6, M2); \ PERM_ELT(A03, A02, B3, B0, BC, B9, C5, M3); \ PERM_ELT(A04, A03, B4, B1, BD, BA, C4, M4); \ PERM_ELT(A05, A04, B5, B2, BE, BB, C3, M5); \ PERM_ELT(A06, A05, B6, B3, BF, BC, C2, M6); \ PERM_ELT(A07, A06, B7, B4, B0, BD, C1, M7); \ PERM_ELT(A08, A07, B8, B5, B1, BE, C0, M8); \ PERM_ELT(A09, A08, B9, B6, B2, BF, CF, M9); \ PERM_ELT(A0A, A09, BA, B7, B3, B0, CE, MA); \ PERM_ELT(A0B, A0A, BB, B8, B4, B1, CD, MB); \ PERM_ELT(A00, A0B, BC, B9, B5, B2, CC, MC); \ PERM_ELT(A01, A00, BD, BA, B6, B3, CB, MD); \ PERM_ELT(A02, A01, BE, BB, B7, B4, CA, ME); \ PERM_ELT(A03, A02, BF, BC, B8, B5, C9, MF); \ } while (0) #define PERM_STEP_1 do { \ PERM_ELT(A04, A03, B0, BD, B9, B6, C8, M0); \ PERM_ELT(A05, A04, B1, BE, BA, B7, C7, M1); \ PERM_ELT(A06, A05, B2, BF, BB, B8, C6, M2); \ PERM_ELT(A07, A06, B3, B0, BC, B9, C5, M3); \ PERM_ELT(A08, A07, B4, B1, BD, BA, C4, M4); \ PERM_ELT(A09, A08, B5, B2, BE, BB, C3, M5); \ PERM_ELT(A0A, A09, B6, B3, BF, BC, C2, M6); \ PERM_ELT(A0B, A0A, B7, B4, B0, BD, C1, M7); \ PERM_ELT(A00, A0B, B8, B5, B1, BE, C0, M8); \ PERM_ELT(A01, A00, B9, B6, B2, BF, CF, M9); \ PERM_ELT(A02, A01, BA, B7, B3, B0, CE, MA); \ PERM_ELT(A03, A02, BB, B8, B4, B1, CD, MB); \ PERM_ELT(A04, A03, BC, B9, B5, B2, CC, MC); \ PERM_ELT(A05, A04, BD, BA, B6, B3, CB, MD); \ PERM_ELT(A06, A05, BE, BB, B7, B4, CA, ME); \ PERM_ELT(A07, A06, BF, BC, B8, B5, C9, MF); \ } while (0) #define PERM_STEP_2 do { \ PERM_ELT(A08, A07, B0, BD, B9, B6, C8, M0); \ PERM_ELT(A09, A08, B1, BE, BA, B7, C7, M1); \ PERM_ELT(A0A, A09, B2, BF, BB, B8, C6, M2); \ PERM_ELT(A0B, A0A, B3, B0, BC, B9, C5, M3); \ PERM_ELT(A00, A0B, B4, B1, BD, BA, C4, M4); \ PERM_ELT(A01, A00, B5, B2, BE, BB, C3, M5); \ PERM_ELT(A02, A01, B6, B3, BF, BC, C2, M6); \ PERM_ELT(A03, A02, B7, B4, B0, BD, C1, M7); \ PERM_ELT(A04, A03, B8, B5, B1, BE, C0, M8); \ PERM_ELT(A05, A04, B9, B6, B2, BF, CF, M9); \ PERM_ELT(A06, A05, BA, B7, 
B3, B0, CE, MA); \ PERM_ELT(A07, A06, BB, B8, B4, B1, CD, MB); \ PERM_ELT(A08, A07, BC, B9, B5, B2, CC, MC); \ PERM_ELT(A09, A08, BD, BA, B6, B3, CB, MD); \ PERM_ELT(A0A, A09, BE, BB, B7, B4, CA, ME); \ PERM_ELT(A0B, A0A, BF, BC, B8, B5, C9, MF); \ } while (0) #define APPLY_P do { \ B0 = T32(B0 << 17) | (B0 >> 15); \ B1 = T32(B1 << 17) | (B1 >> 15); \ B2 = T32(B2 << 17) | (B2 >> 15); \ B3 = T32(B3 << 17) | (B3 >> 15); \ B4 = T32(B4 << 17) | (B4 >> 15); \ B5 = T32(B5 << 17) | (B5 >> 15); \ B6 = T32(B6 << 17) | (B6 >> 15); \ B7 = T32(B7 << 17) | (B7 >> 15); \ B8 = T32(B8 << 17) | (B8 >> 15); \ B9 = T32(B9 << 17) | (B9 >> 15); \ BA = T32(BA << 17) | (BA >> 15); \ BB = T32(BB << 17) | (BB >> 15); \ BC = T32(BC << 17) | (BC >> 15); \ BD = T32(BD << 17) | (BD >> 15); \ BE = T32(BE << 17) | (BE >> 15); \ BF = T32(BF << 17) | (BF >> 15); \ PERM_STEP_0; \ PERM_STEP_1; \ PERM_STEP_2; \ A0B = T32(A0B + C6); \ A0A = T32(A0A + C5); \ A09 = T32(A09 + C4); \ A08 = T32(A08 + C3); \ A07 = T32(A07 + C2); \ A06 = T32(A06 + C1); \ A05 = T32(A05 + C0); \ A04 = T32(A04 + CF); \ A03 = T32(A03 + CE); \ A02 = T32(A02 + CD); \ A01 = T32(A01 + CC); \ A00 = T32(A00 + CB); \ A0B = T32(A0B + CA); \ A0A = T32(A0A + C9); \ A09 = T32(A09 + C8); \ A08 = T32(A08 + C7); \ A07 = T32(A07 + C6); \ A06 = T32(A06 + C5); \ A05 = T32(A05 + C4); \ A04 = T32(A04 + C3); \ A03 = T32(A03 + C2); \ A02 = T32(A02 + C1); \ A01 = T32(A01 + C0); \ A00 = T32(A00 + CF); \ A0B = T32(A0B + CE); \ A0A = T32(A0A + CD); \ A09 = T32(A09 + CC); \ A08 = T32(A08 + CB); \ A07 = T32(A07 + CA); \ A06 = T32(A06 + C9); \ A05 = T32(A05 + C8); \ A04 = T32(A04 + C7); \ A03 = T32(A03 + C6); \ A02 = T32(A02 + C5); \ A01 = T32(A01 + C4); \ A00 = T32(A00 + C3); \ } while (0) #define INCR_W do { \ if ((Wlow = T32(Wlow + 1)) == 0) \ Whigh = T32(Whigh + 1); \ } while (0) __constant__ static const sph_u32 A_init_512[] = { C32(0x20728DFD), C32(0x46C0BD53), C32(0xE782B699), C32(0x55304632), C32(0x71B4EF90), C32(0x0EA9E82C), C32(0xDBB930F1), C32(0xFAD06B8B), C32(0xBE0CAE40), C32(0x8BD14410), C32(0x76D2ADAC), C32(0x28ACAB7F) }; __constant__ static const sph_u32 B_init_512[] = { C32(0xC1099CB7), C32(0x07B385F3), C32(0xE7442C26), C32(0xCC8AD640), C32(0xEB6F56C7), C32(0x1EA81AA9), C32(0x73B9D314), C32(0x1DE85D08), C32(0x48910A5A), C32(0x893B22DB), C32(0xC5A0DF44), C32(0xBBC4324E), C32(0x72D2F240), C32(0x75941D99), C32(0x6D8BDE82), C32(0xA1A7502B) }; __constant__ static const sph_u32 C_init_512[] = { C32(0xD9BF68D1), C32(0x58BAD750), C32(0x56028CB2), C32(0x8134F359), C32(0xB5D469D8), C32(0x941A8CC2), C32(0x418B2A6E), C32(0x04052780), C32(0x7F07D787), C32(0x5194358F), C32(0x3C60D665), C32(0xBE97D79A), C32(0x950C3434), C32(0xAED9A06D), C32(0x2537DC8D), C32(0x7CDB5969) }; __constant__ static uint32_t c_PaddedMessage80[20]; __host__ void x16_shabal512_setBlock_80(void *pdata) { cudaMemcpyToSymbol(c_PaddedMessage80, pdata, sizeof(c_PaddedMessage80), 0, cudaMemcpyHostToDevice); } #define TPB_SHABAL 256 __global__ __launch_bounds__(TPB_SHABAL, 2) void x16_shabal512_gpu_hash_80(uint32_t threads, const uint32_t startNonce, uint32_t *g_hash) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint32_t B[] = { 0xC1099CB7, 0x07B385F3, 0xE7442C26, 0xCC8AD640, 0xEB6F56C7, 0x1EA81AA9, 0x73B9D314, 0x1DE85D08, 0x48910A5A, 0x893B22DB, 0xC5A0DF44, 0xBBC4324E, 0x72D2F240, 0x75941D99, 0x6D8BDE82, 0xA1A7502B }; uint32_t M[16]; if (thread < threads) { // todo: try __ldc *(uint2x4*)&M[0] = *(uint2x4*)&c_PaddedMessage80[0]; *(uint2x4*)&M[8] = *(uint2x4*)&c_PaddedMessage80[8]; sph_u32 
A00 = A_init_512[0], A01 = A_init_512[1], A02 = A_init_512[ 2], A03 = A_init_512[ 3]; sph_u32 A04 = A_init_512[4], A05 = A_init_512[5], A06 = A_init_512[ 6], A07 = A_init_512[ 7]; sph_u32 A08 = A_init_512[8], A09 = A_init_512[9], A0A = A_init_512[10], A0B = A_init_512[11]; sph_u32 B0 = B_init_512[ 0], B1 = B_init_512[ 1], B2 = B_init_512[ 2], B3 = B_init_512 [3]; sph_u32 B4 = B_init_512[ 4], B5 = B_init_512[ 5], B6 = B_init_512[ 6], B7 = B_init_512[ 7]; sph_u32 B8 = B_init_512[ 8], B9 = B_init_512[ 9], BA = B_init_512[10], BB = B_init_512[11]; sph_u32 BC = B_init_512[12], BD = B_init_512[13], BE = B_init_512[14], BF = B_init_512[15]; sph_u32 C0 = C_init_512[ 0], C1 = C_init_512[ 1], C2 = C_init_512[ 2], C3 = C_init_512[ 3]; sph_u32 C4 = C_init_512[ 4], C5 = C_init_512[ 5], C6 = C_init_512[ 6], C7 = C_init_512[ 7]; sph_u32 C8 = C_init_512[ 8], C9 = C_init_512[ 9], CA = C_init_512[10], CB = C_init_512[11]; sph_u32 CC = C_init_512[12], CD = C_init_512[13], CE = C_init_512[14], CF = C_init_512[15]; sph_u32 M0, M1, M2, M3, M4, M5, M6, M7, M8, M9, MA, MB, MC, MD, ME, MF; sph_u32 Wlow = 1, Whigh = 0; M0 = M[ 0]; M1 = M[ 1]; M2 = M[ 2]; M3 = M[ 3]; M4 = M[ 4]; M5 = M[ 5]; M6 = M[ 6]; M7 = M[ 7]; M8 = M[ 8]; M9 = M[ 9]; MA = M[10]; MB = M[11]; MC = M[12]; MD = M[13]; ME = M[14]; MF = M[15]; INPUT_BLOCK_ADD; XOR_W; APPLY_P; INPUT_BLOCK_SUB; SWAP_BC; INCR_W; M0 = c_PaddedMessage80[16]; M1 = c_PaddedMessage80[17]; M2 = c_PaddedMessage80[18]; M3 = cuda_swab32(startNonce + thread); M4 = 0x80; M5 = M6 = M7 = M8 = M9 = MA = MB = MC = MD = ME = MF = 0; INPUT_BLOCK_ADD; XOR_W; APPLY_P; for (unsigned i = 0; i < 3; i++) { SWAP_BC; XOR_W; APPLY_P; } B[ 0] = B0; B[ 1] = B1; B[ 2] = B2; B[ 3] = B3; B[ 4] = B4; B[ 5] = B5; B[ 6] = B6; B[ 7] = B7; B[ 8] = B8; B[ 9] = B9; B[10] = BA; B[11] = BB; B[12] = BC; B[13] = BD; B[14] = BE; B[15] = BF; // output uint64_t hashPosition = thread; uint32_t *Hash = &g_hash[hashPosition << 4]; *(uint2x4*)&Hash[0] = *(uint2x4*)&B[0]; *(uint2x4*)&Hash[8] = *(uint2x4*)&B[8]; } } __host__ void x16_shabal512_cuda_hash_80(int thr_id, const uint32_t threads, const uint32_t startNonce, uint32_t *d_hash) { const uint32_t threadsperblock = TPB_SHABAL; dim3 grid((threads + threadsperblock - 1) / threadsperblock); dim3 block(threadsperblock); x16_shabal512_gpu_hash_80 <<<grid, block >>>(threads, startNonce, d_hash); }
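/* Usage sketch: each thread hashes one 80-byte header that differs only in
 * the nonce (message word M3 of the second 64-byte input block), so scanning
 * N nonces is a single launch of ceil(N / TPB_SHABAL) blocks. The values
 * below are illustrative; d_hash must provide 16 uint32_t of output per
 * thread, since each hash is written at g_hash[thread << 4].
 *
 *   x16_shabal512_setBlock_80(pdata);             // copy the header once
 *   x16_shabal512_cuda_hash_80(thr_id, threads, startNonce, d_hash);
 *   // the hash of nonce (startNonce + t) is at d_hash[t * 16 .. t * 16 + 15]
 */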
#if !defined(ENABLE_GPU) || !defined(ENABLE_CUDNN) #error "nnconv_cudnn.hpp cannot be compiled without GPU and CUDNN support." #endif #include "nnconv_cudnn.hpp" #include "cudnnhelper.hpp" #include "../datacu.hpp" #include <assert.h> #include <algorithm> using namespace vl ; #define CHECK(x) \ { \ cudnnError = x ; \ if (cudnnError != CUDNN_STATUS_SUCCESS) { \ error = context.setError(context.getCudaHelper().catchCudnnError(cudnnError, \ STRINGIZE(__FILE__) ":" STRINGIZE(__LINE__))) ; \ goto done ; \ } } /* ---------------------------------------------------------------- */ /* nnconv_forward_cudnn */ /* ---------------------------------------------------------------- */ namespace vl { namespace impl { template<vl::DataType dataType> vl::ErrorCode vl::impl::nnconv_cudnn<dataType>::forward(Context& context, Tensor output, double outputMult, Tensor data, double dataMult, Tensor filters, Tensor biases, int strideY, int strideX, int padTop, int padBottom, int padLeft, int padRight, int dilateY, int dilateX) { assert(output) ; assert(data) ; assert(filters) ; typedef typename DataTypeTraits<dataType>::type type ; cudnnTensorDescriptor_t outputDesc, biasesDesc, dataDesc ; cudnnFilterDescriptor_t filtersDesc ; cudnnConvolutionDescriptor_t convDesc ; bool outputDescInitialized = false ; bool biasesDescInitialized = false ; bool dataDescInitialized = false ; bool filtersDescInitialized = false ; bool convDescInitialized = false ; void* workSpace = NULL ; int numGroups = data.getDepth() / filters.getDepth() ; int numFiltersPerGroup = filters.getSize() / numGroups ; if (dilateX != 1 || dilateY != 1) return vl::VLE_Unsupported ; if (padLeft != padRight) return vl::VLE_Unsupported ; if (padTop != padBottom) return vl::VLE_Unsupported ; if (filters.getHeight() > data.getHeight()) return vl::VLE_Unsupported ; if (filters.getWidth() > data.getWidth()) return vl::VLE_Unsupported ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descriptors CHECK(cudnnCreateTensorDescriptor(&outputDesc)) ; outputDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(outputDesc, DataTypeToCudnn<dataType>::id , output.getSize(), // sizes numFiltersPerGroup, output.getWidth(), output.getHeight(), output.getHeight()*output.getWidth()*output.getDepth(), //strides output.getHeight()*output.getWidth(), output.getHeight(), 1)) ; CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(dataDesc, DataTypeToCudnn<dataType>::id, data.getSize(), data.getDepth() / numGroups, data.getWidth(), data.getHeight(), data.getHeight()*data.getWidth()*data.getDepth(), //strides data.getHeight()*data.getWidth(), data.getHeight(), 1)) ; CHECK(cudnnCreateFilterDescriptor(&filtersDesc)) ; filtersDescInitialized = true ; CHECK(cudnnSetFilter4dDescriptor(filtersDesc, DataTypeToCudnn<dataType>::id, IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA) numFiltersPerGroup, filters.getDepth(), filters.getWidth(), filters.getHeight())) ; if (biases) { CHECK(cudnnCreateTensorDescriptor(&biasesDesc)) ; biasesDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(biasesDesc, CUDNN_TENSOR_NCHW, DataTypeToCudnn<dataType>::id , 1, biases.getNumElements() / numGroups, 1, 1)) ; } // Get convolution descriptor CHECK(cudnnCreateConvolutionDescriptor(&convDesc)) ; convDescInitialized = true ; CHECK(cudnnSetConvolution2dDescriptor(convDesc, padLeft, padTop, strideX, strideY, 1,1, 
// upscale CUDNN_CROSS_CORRELATION)) ; // Sanity check #if 1 { int n, c, h, w ; cudnnGetConvolution2dForwardOutputDim(convDesc, dataDesc, filtersDesc, &n, &c, &w, &h) ; bool sane = output.getSize() == n && numFiltersPerGroup == c && output.getWidth() == w && output.getHeight() == h ; assert(sane) ; } #endif context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed = 0 ; context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed = 0 ; context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed = 0 ; if (!context.getCudaHelper().cudnnConvolutionFwdSpecificAlgo) { // Determine algorithm automatically CHECK(cudnnGetConvolutionForwardAlgorithm(handle, dataDesc, filtersDesc, convDesc, outputDesc, context.getCudaHelper().cudnnConvolutionFwdPreference, context.getCudaHelper().cudnnConvolutionFwdWorkSpaceLimit, &context.getCudaHelper().cudnnConvolutionFwdAlgo)) ; } // Get workspace size CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle, dataDesc, filtersDesc, convDesc, outputDesc, context.getCudaHelper().cudnnConvolutionFwdAlgo, &context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed)) ; // Get workspace if (context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed > 0) { workSpace = context.getWorkspace(vl::VLDT_GPU, context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed) ; if (workSpace == NULL) { error = context.getLastError() ; goto done ; } } // Perform convolution for each filter group for (int g = 0 ; g < numGroups ; ++g) { ptrdiff_t dataGrpOffset = (data.getHeight() * data.getWidth() * filters.getDepth()) * g ; ptrdiff_t filtersGrpOffset = (filters.getHeight() * filters.getWidth() * filters.getDepth()) * numFiltersPerGroup * g ; ptrdiff_t outputGrpOffset = (output.getHeight() * output.getWidth() * numFiltersPerGroup) * g ; ptrdiff_t biasesGrpOffset = numFiltersPerGroup * g ; type alpha = dataMult ; type beta = outputMult ; CHECK(cudnnConvolutionForward(handle, &alpha, dataDesc, (type const*)data.getMemory() + dataGrpOffset, filtersDesc, (type const*)filters.getMemory() + filtersGrpOffset, convDesc, context.getCudaHelper().cudnnConvolutionFwdAlgo, workSpace, context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed, &beta, outputDesc, (type*)output.getMemory() + outputGrpOffset)) ; if (biases) { type alpha = 1.0f ; type beta = 1.0f ; #if (CUDNN_VERSION < 4000) CHECK(cudnnAddTensor(handle, CUDNN_ADD_SAME_C, &alpha, biasesDesc, (type const*)biases.getMemory() + biasesGrpOffset, &beta, outputDesc, (type*)output.getMemory() + outputGrpOffset)) ; #else CHECK(cudnnAddTensor(handle, &alpha, biasesDesc, (type const*)biases.getMemory() + biasesGrpOffset, &beta, outputDesc, (type*)output.getMemory() + outputGrpOffset)) ; #endif } } /* cleanup */ done: if (convDescInitialized) { cudnnDestroyConvolutionDescriptor(convDesc) ; } if (filtersDescInitialized) { cudnnDestroyFilterDescriptor(filtersDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } if (biasesDescInitialized) { cudnnDestroyTensorDescriptor(biasesDesc) ; } if (outputDescInitialized) { cudnnDestroyTensorDescriptor(outputDesc) ; } return context.passError(error, __func__) ; } /* ---------------------------------------------------------------- */ /* nnconv_backward_cudnn */ /* ---------------------------------------------------------------- */ template<vl::DataType dataType> vl::ErrorCode vl::impl::nnconv_cudnn<dataType>::backward(Context& context, Tensor derData, Tensor derFilters, Tensor derBiases, Tensor data, Tensor filters, Tensor derOutput, int strideY, int strideX, int padTop, int padBottom, int padLeft, 
int padRight, int dilateY, int dilateX) { typedef typename DataTypeTraits<dataType>::type type ; /* no derDataDesc needed as same as dataDesc */ cudnnTensorDescriptor_t dataDesc, derBiasesDesc, derOutputDesc ; cudnnFilterDescriptor_t filtersDesc ; cudnnConvolutionDescriptor_t convDesc ; bool dataDescInitialized = false ; bool derBiasesDescInitialized = false ; bool derOutputDescInitialized = false ; bool filtersDescInitialized = false ; bool convDescInitialized = false ; #if (CUDNN_VERSION >= 3000) void* workSpace = NULL ; size_t workSpaceSize = 0 ; #endif ptrdiff_t numGroups = 1 ; ptrdiff_t numFiltersPerGroup = 0 ; ptrdiff_t filtersVolume = 0 ; if (dilateX != 1 || dilateY != 1) return vl::VLE_Unsupported ; if (padLeft != padRight) return vl::VLE_Unsupported ; if (padTop != padBottom) return vl::VLE_Unsupported ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ; // Get the dimensions of the tensors involved // If derData is specified (hence computed as output), use this // tensor as a basis to compute such dimensions, otherwise use derFilters. if (derData) { assert(filters) ; numGroups = derData.getDepth() / filters.getDepth() ; numFiltersPerGroup = filters.getSize() / numGroups ; filtersVolume = filters.getHeight() * filters.getWidth() * filters.getDepth() ; CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(dataDesc, DataTypeToCudnn<dataType>::id , derData.getSize(), derData.getDepth() / numGroups, derData.getWidth(), derData.getHeight(), derData.getHeight()*derData.getWidth()*derData.getDepth(), //strides derData.getHeight()*derData.getWidth(), derData.getHeight(), 1)) ; CHECK(cudnnCreateFilterDescriptor(&filtersDesc)) ; filtersDescInitialized = true ; CHECK(cudnnSetFilter4dDescriptor(filtersDesc, DataTypeToCudnn<dataType>::id , IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA) numFiltersPerGroup, filters.getDepth(), filters.getWidth(), filters.getHeight())) ; } else if (derFilters) { assert(data) ; numGroups = data.getDepth() / derFilters.getDepth() ; numFiltersPerGroup = derFilters.getSize() / numGroups ; filtersVolume = derFilters.getHeight() * derFilters.getWidth() * derFilters.getDepth() ; CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(dataDesc, DataTypeToCudnn<dataType>::id , data.getSize(), data.getDepth() / numGroups, data.getWidth(), data.getHeight(), data.getHeight()*data.getWidth()*data.getDepth(), //strides data.getHeight()*data.getWidth(), data.getHeight(), 1)) ; CHECK(cudnnCreateFilterDescriptor(&filtersDesc)) ; filtersDescInitialized = true ; CHECK(cudnnSetFilter4dDescriptor(filtersDesc, DataTypeToCudnn<dataType>::id , IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA) numFiltersPerGroup, derFilters.getDepth(), derFilters.getWidth(), derFilters.getHeight())) ; } CHECK(cudnnCreateConvolutionDescriptor(&convDesc)) ; convDescInitialized = true ; CHECK(cudnnSetConvolution2dDescriptor(convDesc, padLeft, padTop, strideX, strideY, 1,1, // upscale CUDNN_CROSS_CORRELATION)) ; // Must have derOutput for all derivatives assert(derOutput) ; CHECK(cudnnCreateTensorDescriptor(&derOutputDesc)) ; derOutputDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(derOutputDesc, DataTypeToCudnn<dataType>::id , derOutput.getSize(), // sizes numFiltersPerGroup, derOutput.getWidth(), derOutput.getHeight(), 
derOutput.getHeight()*derOutput.getWidth()*derOutput.getDepth(), //strides derOutput.getHeight()*derOutput.getWidth(), derOutput.getHeight(), 1)) ; // for derivatives w.r.t. bias if (derBiases) { CHECK(cudnnCreateTensorDescriptor(&derBiasesDesc)) ; derBiasesDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(derBiasesDesc, CUDNN_TENSOR_NCHW, DataTypeToCudnn<dataType>::id , 1, derBiases.getNumElements() / numGroups, 1, 1)) ; } context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed = 0 ; context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed = 0 ; context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed = 0 ; #if (CUDNN_VERSION >= 3000) if (derFilters) { // Get filter derivatives algorithm CHECK(cudnnGetConvolutionBackwardFilterAlgorithm (handle, dataDesc, derOutputDesc, convDesc, filtersDesc, context.getCudaHelper().cudnnConvolutionBwdFilterPreference, context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceLimit, &context.getCudaHelper().cudnnConvolutionBwdFilterAlgo)) ; // Get workspace size CHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize (handle, dataDesc, derOutputDesc, convDesc, filtersDesc, context.getCudaHelper().cudnnConvolutionBwdFilterAlgo, &context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed)) ; workSpaceSize = std::max(workSpaceSize, context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed) ; } if (derData) { // Get data derivatives CHECK(cudnnGetConvolutionBackwardDataAlgorithm (handle, filtersDesc, derOutputDesc, convDesc, dataDesc, context.getCudaHelper().cudnnConvolutionBwdDataPreference, context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceLimit, &context.getCudaHelper().cudnnConvolutionBwdDataAlgo)) ; // Get workspace size CHECK(cudnnGetConvolutionBackwardDataWorkspaceSize (handle, filtersDesc, derOutputDesc, convDesc, dataDesc, context.getCudaHelper().cudnnConvolutionBwdDataAlgo, &context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed)) ; workSpaceSize = std::max(workSpaceSize, context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed) ; } // Get workspace if (workSpaceSize > 0) { workSpace = context.getWorkspace(vl::VLDT_GPU, workSpaceSize) ; if (workSpace == NULL) { error = context.getLastError() ; goto done ; } } #endif // Perform backward convolution for each filter group for (int g = 0 ; g < numGroups ; ++g) { ptrdiff_t filtersGrpOffset = filtersVolume * numFiltersPerGroup * g ; ptrdiff_t derOutputGrpOffset = (derOutput.getHeight() * derOutput.getWidth() * numFiltersPerGroup) * g ; if (derBiases) { ptrdiff_t derBiasesGrpOffset = numFiltersPerGroup * g ; type alpha = 1 ; type beta = 0 ; CHECK(cudnnConvolutionBackwardBias (handle, &alpha, derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset, &beta, derBiasesDesc, (type*)derBiases.getMemory() + derBiasesGrpOffset)) ; } if (derFilters) { ptrdiff_t dataGrpOffset = (data.getHeight() * data.getWidth() * derFilters.getDepth()) * g ; type alpha = 1 ; type beta = 0 ; #if (CUDNN_VERSION >= 3000) CHECK( IF_CUDNN_GE4(cudnnConvolutionBackwardFilter) IF_CUDNN_GE3_LT4(cudnnConvolutionBackwardFilter_v3) (handle, &alpha, dataDesc, (type const*)data.getMemory() + dataGrpOffset, derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset, convDesc, context.getCudaHelper().cudnnConvolutionBwdFilterAlgo, workSpace, workSpaceSize, &beta, filtersDesc, (type*)derFilters.getMemory() + filtersGrpOffset)) ; #else CHECK(cudnnConvolutionBackwardFilter (handle, &alpha, dataDesc, (type const*)data.getMemory() + dataGrpOffset, derOutputDesc, (type 
const*)derOutput.getMemory() + derOutputGrpOffset, convDesc, &beta, filtersDesc, (type*)derFilters.getMemory() + filtersGrpOffset)) ; #endif } if (derData) { ptrdiff_t dataGrpOffset = (derData.getHeight() * derData.getWidth() * filters.getDepth()) * g ; type alpha = 1 ; type beta = 0 ; #if (CUDNN_VERSION >= 3000) CHECK( IF_CUDNN_GE4(cudnnConvolutionBackwardData) IF_CUDNN_GE3_LT4(cudnnConvolutionBackwardData_v3) (handle, &alpha, filtersDesc, (type const*)filters.getMemory() + filtersGrpOffset, derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset, convDesc, context.getCudaHelper().cudnnConvolutionBwdDataAlgo, workSpace, workSpaceSize, &beta, dataDesc, (type*)derData.getMemory() + dataGrpOffset)) ; #else CHECK(cudnnConvolutionBackwardData (handle, &alpha, filtersDesc, filters.getMemory() + filtersGrpOffset, derOutputDesc, derOutput.getMemory() + derOutputGrpOffset, convDesc, &beta, dataDesc, derData.getMemory() + dataGrpOffset)) ; #endif } } done: if (convDescInitialized) { cudnnDestroyConvolutionDescriptor(convDesc) ; } if (filtersDescInitialized) { cudnnDestroyFilterDescriptor(filtersDesc) ; } if (derOutputDescInitialized) { cudnnDestroyTensorDescriptor(derOutputDesc) ; } if (derBiasesDescInitialized) { cudnnDestroyTensorDescriptor(derBiasesDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } return context.passError(error, __func__) ; } } } // Instantiations template struct vl::impl::nnconv_cudnn<vl::VLDT_Float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::nnconv_cudnn<vl::VLDT_Double> ; #endif
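/* The forward path above follows the classic pre-v8 cuDNN negotiation: pick
 * an algorithm under a memory preference, query its workspace requirement,
 * allocate, then run. A condensed sketch of that pattern, with error handling
 * elided and the handle/descriptors assumed to be configured as in
 * nnconv_forward_cudnn:
 *
 *   cudnnConvolutionFwdAlgo_t algo ;
 *   cudnnGetConvolutionForwardAlgorithm(handle, dataDesc, filtersDesc,
 *       convDesc, outputDesc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
 *       0, &algo) ;
 *   size_t ws = 0 ;
 *   cudnnGetConvolutionForwardWorkspaceSize(handle, dataDesc, filtersDesc,
 *       convDesc, outputDesc, algo, &ws) ;
 *   void* buf = NULL ; if (ws > 0) cudaMalloc(&buf, ws) ;
 *   cudnnConvolutionForward(handle, &alpha, dataDesc, x, filtersDesc, w,
 *       convDesc, algo, buf, ws, &beta, outputDesc, y) ;
 */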
#include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/filling.hpp> #include <cudf/null_mask.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <thrust/binary_search.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/gather.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/uniform_int_distribution.h> #include <thrust/random/uniform_real_distribution.h> #include <thrust/scan.h> #include <thrust/tabulate.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <algorithm> #include <cstdint> #include <memory> #include <optional> #include <random> #include <utility> #include <vector> /** * @brief Deterministic linear congruential pseudo-random engine (thrust::minstd_rand). */ auto deterministic_engine(unsigned seed) { return thrust::minstd_rand{seed}; } /** * Computes the mean value for a distribution of given type and value bounds. */ template <typename T> T get_distribution_mean(distribution_params<T> const& dist) { switch (dist.id) { case distribution_id::NORMAL: case distribution_id::UNIFORM: return (dist.lower_bound / 2.) + (dist.upper_bound / 2.); case distribution_id::GEOMETRIC: { auto const range_size = dist.lower_bound < dist.upper_bound ? dist.upper_bound - dist.lower_bound : dist.lower_bound - dist.upper_bound; auto const p = geometric_dist_p(range_size); if (dist.lower_bound < dist.upper_bound) return dist.lower_bound + (1. / p); else return dist.lower_bound - (1. / p); } default: CUDF_FAIL("Unsupported distribution type."); } } /** * @brief Computes the average element size in a column, given the data profile. * * Random distribution parameters like average string length and maximum list nesting level affect * the element size of non-fixed-width columns. For lists and structs, `avg_element_size` is called * recursively to determine the size of nested columns. 
*/ size_t avg_element_size(data_profile const& profile, cudf::data_type dtype); // Utilities to determine the mean size of an element, given the data profile template <typename T, CUDF_ENABLE_IF(cudf::is_fixed_width<T>())> size_t non_fixed_width_size(data_profile const& profile) { CUDF_FAIL("Should not be called, use `size_of` for this type instead"); } template <typename T, CUDF_ENABLE_IF(!cudf::is_fixed_width<T>())> size_t non_fixed_width_size(data_profile const& profile) { CUDF_FAIL("not implemented!"); } template <> size_t non_fixed_width_size<cudf::string_view>(data_profile const& profile) { auto const dist = profile.get_distribution_params<cudf::string_view>().length_params; return get_distribution_mean(dist); } template <> size_t non_fixed_width_size<cudf::list_view>(data_profile const& profile) { auto const dist_params = profile.get_distribution_params<cudf::list_view>(); auto const single_level_mean = get_distribution_mean(dist_params.length_params); auto const element_size = avg_element_size(profile, cudf::data_type{dist_params.element_type}); return element_size * pow(single_level_mean, dist_params.max_depth); } template <> size_t non_fixed_width_size<cudf::struct_view>(data_profile const& profile) { auto const dist_params = profile.get_distribution_params<cudf::struct_view>(); return std::accumulate(dist_params.leaf_types.cbegin(), dist_params.leaf_types.cend(), 0ul, [&](auto& sum, auto type_id) { return sum + avg_element_size(profile, cudf::data_type{type_id}); }); } struct non_fixed_width_size_fn { template <typename T> size_t operator()(data_profile const& profile) { return non_fixed_width_size<T>(profile); } }; size_t avg_element_size(data_profile const& profile, cudf::data_type dtype) { if (cudf::is_fixed_width(dtype)) { return cudf::size_of(dtype); } return cudf::type_dispatcher(dtype, non_fixed_width_size_fn{}, profile); } /** * @brief bool generator with given probability [0.0 - 1.0] of returning true. */ struct bool_generator { thrust::minstd_rand engine; thrust::uniform_real_distribution<float> dist; double probability_true; bool_generator(thrust::minstd_rand engine, double probability_true) : engine(engine), dist{0, 1}, probability_true{probability_true} { } bool_generator(unsigned seed, double probability_true) : engine(seed), dist{0, 1}, probability_true{probability_true} { } __device__ bool operator()(size_t n) { engine.discard(n); return dist(engine) < probability_true; } }; /** * @brief Functor that computes a random column element with the given data profile. * * The implementation is SFINAEd for different type groups. Currently only used for fixed-width * types. 
*/ template <typename T, typename Enable = void> struct random_value_fn; /** * @brief Creates a random timestamp/duration value */ template <typename T> struct random_value_fn<T, std::enable_if_t<cudf::is_chrono<T>()>> { distribution_fn<int64_t> seconds_gen; distribution_fn<int64_t> nanoseconds_gen; random_value_fn(distribution_params<T> params) { using cuda::std::chrono::duration_cast; std::pair<cudf::duration_s, cudf::duration_s> const range_s = { duration_cast<cuda::std::chrono::seconds>(typename T::duration{params.lower_bound}), duration_cast<cuda::std::chrono::seconds>(typename T::duration{params.upper_bound})}; if (range_s.first != range_s.second) { seconds_gen = make_distribution<int64_t>(params.id, range_s.first.count(), range_s.second.count()); nanoseconds_gen = make_distribution<int64_t>(distribution_id::UNIFORM, 0l, 1000000000l); } else { // Don't need a random seconds generator for sub-second intervals seconds_gen = [range_s](thrust::minstd_rand&, size_t size) { rmm::device_uvector<int64_t> result(size, rmm::cuda_stream_default); thrust::fill(thrust::device, result.begin(), result.end(), range_s.second.count()); return result; }; std::pair<cudf::duration_ns, cudf::duration_ns> const range_ns = { duration_cast<cudf::duration_ns>(typename T::duration{params.lower_bound}), duration_cast<cudf::duration_ns>(typename T::duration{params.upper_bound})}; nanoseconds_gen = make_distribution<int64_t>(distribution_id::UNIFORM, std::min(range_ns.first.count(), 0l), std::max(range_ns.second.count(), 0l)); } } rmm::device_uvector<T> operator()(thrust::minstd_rand& engine, unsigned size) { auto const sec = seconds_gen(engine, size); auto const ns = nanoseconds_gen(engine, size); rmm::device_uvector<T> result(size, rmm::cuda_stream_default); thrust::transform( thrust::device, sec.begin(), sec.end(), ns.begin(), result.begin(), [] __device__(int64_t sec_value, int64_t nanoseconds_value) { auto const timestamp_ns = cudf::duration_s{sec_value} + cudf::duration_ns{nanoseconds_value}; // Return value in the type's precision return T(cuda::std::chrono::duration_cast<typename T::duration>(timestamp_ns)); }); return result; } }; /** * @brief Creates a random fixed_point value. */ template <typename T> struct random_value_fn<T, std::enable_if_t<cudf::is_fixed_point<T>()>> { using rep = typename T::rep; rep const lower_bound; rep const upper_bound; distribution_fn<rep> dist; std::optional<numeric::scale_type> scale; random_value_fn(distribution_params<rep> const& desc) : lower_bound{desc.lower_bound}, upper_bound{desc.upper_bound}, dist{make_distribution<rep>(desc.id, desc.lower_bound, desc.upper_bound)} { } rmm::device_uvector<T> operator()(thrust::minstd_rand& engine, unsigned size) { if (not scale.has_value()) { int const max_scale = std::numeric_limits<rep>::digits10; std::uniform_int_distribution<int> scale_dist{-max_scale, max_scale}; std::mt19937 engine_scale(engine()); scale = numeric::scale_type{scale_dist(engine_scale)}; } auto const ints = dist(engine, size); rmm::device_uvector<T> result(size, rmm::cuda_stream_default); // Clamp the generated random value to the specified range thrust::transform(thrust::device, ints.begin(), ints.end(), result.begin(), [scale = *(this->scale), upper_bound = this->upper_bound, lower_bound = this->lower_bound] __device__(auto int_value) { return T{std::clamp(int_value, lower_bound, upper_bound), scale}; }); return result; } }; /** * @brief Creates a random numeric value with the given distribution. */ template <typename T> struct random_value_fn<T, std::enable_if_t<!std::is_same_v<T, bool> && cudf::is_numeric<T>()>> { T const lower_bound; T const upper_bound; distribution_fn<T> dist; random_value_fn(distribution_params<T> const& desc) : lower_bound{desc.lower_bound}, upper_bound{desc.upper_bound}, dist{make_distribution<T>(desc.id, desc.lower_bound, desc.upper_bound)} { } auto operator()(thrust::minstd_rand& engine, unsigned size) { return dist(engine, size); } }; /** * @brief Creates a boolean value with a given probability of returning `true`. */ template <typename T> struct random_value_fn<T, typename std::enable_if_t<std::is_same_v<T, bool>>> { // Bernoulli distribution distribution_fn<bool> dist; random_value_fn(distribution_params<bool> const& desc) : dist{[valid_prob = desc.probability_true](thrust::minstd_rand& engine, size_t size) -> rmm::device_uvector<bool> { rmm::device_uvector<bool> result(size, rmm::cuda_stream_default); thrust::tabulate( thrust::device, result.begin(), result.end(), bool_generator(engine, valid_prob)); return result; }} { } auto operator()(thrust::minstd_rand& engine, unsigned size) { return dist(engine, size); } }; auto create_run_length_dist(cudf::size_type avg_run_len) { // Distribution with low probability of generating 0-1 even with a low `avg_run_len` value static constexpr float alpha = 4.f; return std::gamma_distribution<float>{alpha, avg_run_len / alpha}; } /** * @brief Generate indices within range [0, cardinality) repeating with average run length * `avg_run_len` * * @param avg_run_len Average run length of the generated indices * @param cardinality Number of unique values in the output vector * @param num_rows Number of indices to generate * @param engine Random engine * @return Generated indices of type `cudf::size_type` */ rmm::device_uvector<cudf::size_type> sample_indices_with_run_length(cudf::size_type avg_run_len, cudf::size_type cardinality, cudf::size_type num_rows, thrust::minstd_rand& engine) { auto sample_dist = random_value_fn<cudf::size_type>{ distribution_params<cudf::size_type>{distribution_id::UNIFORM, 0, cardinality - 1}}; if (avg_run_len > 1) { auto avglen_dist = random_value_fn<int>{distribution_params<int>{distribution_id::UNIFORM, 1, 2 * avg_run_len}}; auto const approx_run_len = num_rows / avg_run_len + 1; auto run_lens = avglen_dist(engine, approx_run_len); thrust::inclusive_scan( thrust::device, run_lens.begin(), run_lens.end(), run_lens.begin(), std::plus<int>{}); auto const samples_indices = sample_dist(engine, approx_run_len + 1); // This is gather. auto avg_repeated_sample_indices_iterator = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [rb = run_lens.begin(), re = run_lens.end(), samples_indices = samples_indices.begin()] __device__(cudf::size_type i) { auto sample_idx = thrust::upper_bound(thrust::seq, rb, re, i) - rb; return samples_indices[sample_idx]; }); rmm::device_uvector<cudf::size_type> repeated_sample_indices(num_rows, rmm::cuda_stream_default); thrust::copy(thrust::device, avg_repeated_sample_indices_iterator, avg_repeated_sample_indices_iterator + num_rows, repeated_sample_indices.begin()); return repeated_sample_indices; } else { // generate n samples. return sample_dist(engine, num_rows); } } /** * @brief Creates a column with random content of type @ref T.
* * @param profile Parameters for the random generator * @param engine Pseudo-random engine * @param num_rows Size of the output column * * @tparam T Data type of the output column * @return Column filled with random data */ template <typename T> std::unique_ptr<cudf::column> create_random_column(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { // Bernoulli distribution auto valid_dist = random_value_fn<bool>(distribution_params<bool>{1. - profile.get_null_frequency().value_or(0)}); auto value_dist = random_value_fn<T>{profile.get_distribution_params<T>()}; auto const cardinality = std::min(num_rows, profile.get_cardinality()); rmm::device_uvector<bool> samples_null_mask = valid_dist(engine, cardinality); rmm::device_uvector<T> samples = value_dist(engine, cardinality); // Distribution for picking elements from the array of samples auto const avg_run_len = profile.get_avg_run_length(); rmm::device_uvector<T> data(0, rmm::cuda_stream_default); rmm::device_uvector<bool> null_mask(0, rmm::cuda_stream_default); if (cardinality == 0) { data = value_dist(engine, num_rows); null_mask = valid_dist(engine, num_rows); } else { // generate n samples and gather. auto const sample_indices = sample_indices_with_run_length(avg_run_len, cardinality, num_rows, engine); data = rmm::device_uvector<T>(num_rows, rmm::cuda_stream_default); null_mask = rmm::device_uvector<bool>(num_rows, rmm::cuda_stream_default); thrust::gather( thrust::device, sample_indices.begin(), sample_indices.end(), samples.begin(), data.begin()); thrust::gather(thrust::device, sample_indices.begin(), sample_indices.end(), samples_null_mask.begin(), null_mask.begin()); } auto [result_bitmask, null_count] = cudf::detail::valid_if(null_mask.begin(), null_mask.end(), thrust::identity<bool>{}); return std::make_unique<cudf::column>( cudf::data_type{cudf::type_to_id<T>()}, num_rows, data.release(), profile.get_null_frequency().has_value() ? std::move(result_bitmask) : rmm::device_buffer{}); } struct valid_or_zero { template <typename T> __device__ T operator()(thrust::tuple<T, bool> len_valid) const { return thrust::get<1>(len_valid) ? thrust::get<0>(len_valid) : T{0}; } }; struct string_generator { char* chars; thrust::minstd_rand engine; thrust::uniform_int_distribution<unsigned char> char_dist; string_generator(char* c, thrust::minstd_rand& engine) : chars(c), engine(engine), char_dist(32, 137) // ~90% ASCII, ~10% UTF-8. // ~80% not-space, ~20% space. // range 32-127 is ASCII; 127-136 will be multi-byte UTF-8 { } __device__ void operator()(thrust::tuple<cudf::size_type, cudf::size_type> str_begin_end) { auto begin = thrust::get<0>(str_begin_end); auto end = thrust::get<1>(str_begin_end); engine.discard(begin); for (auto i = begin; i < end; ++i) { auto ch = char_dist(engine); if (i == end - 1 && ch >= '\x7F') ch = ' '; // last element ASCII only. if (ch >= '\x7F') // x7F is at the top edge of ASCII chars[i++] = '\xC4'; // these characters are assigned two bytes chars[i] = static_cast<char>(ch + (ch >= '\x7F')); } } }; /** * @brief Create a UTF-8 string column with the average length. * */ std::unique_ptr<cudf::column> create_random_utf8_string_column(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto len_dist = random_value_fn<uint32_t>{profile.get_distribution_params<cudf::string_view>().length_params}; auto valid_dist = random_value_fn<bool>(distribution_params<bool>{1. 
- profile.get_null_frequency().value_or(0)}); auto lengths = len_dist(engine, num_rows + 1); auto null_mask = valid_dist(engine, num_rows + 1); thrust::transform_if( thrust::device, lengths.begin(), lengths.end(), null_mask.begin(), lengths.begin(), [] __device__(auto) { return 0; }, thrust::logical_not<bool>{}); auto valid_lengths = thrust::make_transform_iterator( thrust::make_zip_iterator(thrust::make_tuple(lengths.begin(), null_mask.begin())), valid_or_zero{}); rmm::device_uvector<cudf::size_type> offsets(num_rows + 1, rmm::cuda_stream_default); thrust::exclusive_scan( thrust::device, valid_lengths, valid_lengths + lengths.size(), offsets.begin()); // offsets are ready. auto chars_length = *thrust::device_pointer_cast(offsets.end() - 1); rmm::device_uvector<char> chars(chars_length, rmm::cuda_stream_default); thrust::for_each_n(thrust::device, thrust::make_zip_iterator(offsets.begin(), offsets.begin() + 1), num_rows, string_generator{chars.data(), engine}); auto [result_bitmask, null_count] = cudf::detail::valid_if(null_mask.begin(), null_mask.end() - 1, thrust::identity<bool>{}); return cudf::make_strings_column( num_rows, std::move(offsets), std::move(chars), profile.get_null_frequency().has_value() ? std::move(result_bitmask) : rmm::device_buffer{}); } /** * @brief Creates a string column with random content. * * @param profile Parameters for the random generator * @param engine Pseudo-random engine * @param num_rows Size of the output column * * @return Column filled with random strings */ template <> std::unique_ptr<cudf::column> create_random_column<cudf::string_view>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto const cardinality = std::min(profile.get_cardinality(), num_rows); auto const avg_run_len = profile.get_avg_run_length(); auto sample_strings = create_random_utf8_string_column(profile, engine, cardinality == 0 ? num_rows : cardinality); if (cardinality == 0) { return sample_strings; } auto sample_indices = sample_indices_with_run_length(avg_run_len, cardinality, num_rows, engine); auto str_table = cudf::detail::gather(cudf::table_view{{sample_strings->view()}}, sample_indices, cudf::out_of_bounds_policy::DONT_CHECK, cudf::detail::negative_index_policy::NOT_ALLOWED); return std::move(str_table->release()[0]); } template <> std::unique_ptr<cudf::column> create_random_column<cudf::dictionary32>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { CUDF_FAIL("not implemented yet"); } /** * @brief Functor to dispatch create_random_column calls. */ struct create_rand_col_fn { public: template <typename T> std::unique_ptr<cudf::column> operator()(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { return create_random_column<T>(profile, engine, num_rows); } }; /** * @brief Calculates the number of direct parents needed to generate a struct column hierarchy with * the lowest maximum number of children in any nested column. * * Used to generate an "evenly distributed" struct column hierarchy with the given number of leaf * columns and nesting levels. The column tree is considered evenly distributed if all columns have * nearly the same number of child columns (difference not larger than one). */ int num_direct_parents(int num_lvls, int num_leaf_columns) { // Estimated average number of children in the hierarchy auto const num_children_avg = std::pow(num_leaf_columns, 1. / num_lvls); // Minimum number of children columns for any column in the hierarchy int const num_children_min = std::floor(num_children_avg); // Maximum number of children columns for any column in the hierarchy int const num_children_max = num_children_min + 1; // Minimum number of columns needed so that their number of children does not exceed the maximum int const min_for_current_nesting = std::ceil((double)num_leaf_columns / num_children_max); // Minimum number of columns needed so that columns at the higher levels have at least the minimum // number of children int const min_for_upper_nesting = std::pow(num_children_min, num_lvls - 1); // Both conditions need to be satisfied return std::max(min_for_current_nesting, min_for_upper_nesting); } template <> std::unique_ptr<cudf::column> create_random_column<cudf::struct_view>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto const dist_params = profile.get_distribution_params<cudf::struct_view>(); // Generate leaf columns std::vector<std::unique_ptr<cudf::column>> children; children.reserve(dist_params.leaf_types.size()); std::transform(dist_params.leaf_types.cbegin(), dist_params.leaf_types.cend(), std::back_inserter(children), [&](auto& type_id) { return cudf::type_dispatcher( cudf::data_type(type_id), create_rand_col_fn{}, profile, engine, num_rows); }); auto valid_dist = random_value_fn<bool>(distribution_params<bool>{1. - profile.get_null_frequency().value_or(0)}); // Generate the column bottom-up for (int lvl = dist_params.max_depth; lvl > 0; --lvl) { // Generating the next level std::vector<std::unique_ptr<cudf::column>> parents; parents.resize(num_direct_parents(lvl, children.size())); auto current_child = children.begin(); for (auto current_parent = parents.begin(); current_parent != parents.end(); ++current_parent) { auto [null_mask, null_count] = [&]() { if (profile.get_null_frequency().has_value()) { auto valids = valid_dist(engine, num_rows); return cudf::detail::valid_if(valids.begin(), valids.end(), thrust::identity<bool>{}); } return std::pair<rmm::device_buffer, cudf::size_type>{}; }(); // Adopt remaining children as evenly as possible auto const num_to_adopt = cudf::util::div_rounding_up_unsafe( std::distance(current_child, children.end()), std::distance(current_parent, parents.end())); CUDF_EXPECTS(num_to_adopt > 0, "No children columns left to adopt"); std::vector<std::unique_ptr<cudf::column>> children_to_adopt; children_to_adopt.insert(children_to_adopt.end(), std::make_move_iterator(current_child), std::make_move_iterator(current_child + num_to_adopt)); current_child += children_to_adopt.size(); *current_parent = cudf::make_structs_column( num_rows, std::move(children_to_adopt), null_count, std::move(null_mask)); } if (lvl == 1) { CUDF_EXPECTS(parents.size() == 1, "There should be one top-level column"); return std::move(parents.front()); } children = std::move(parents); } CUDF_FAIL("Reached unreachable code in struct column creation"); } template <typename T> struct clamp_down : public thrust::unary_function<T, T> { T max; clamp_down(T max) : max(max) {} __host__ __device__ T operator()(T x) const { return min(x, max); } }; /** * @brief Creates a list column with random content. * * The data profile determines the list length distribution, number of nesting levels, and the data type of the bottom level.
* * @param profile Parameters for the random generator * @param engine Pseudo-random engine * @param num_rows Size of the output column * * @return Column filled with random lists */ template <> std::unique_ptr<cudf::column> create_random_column<cudf::list_view>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto const dist_params = profile.get_distribution_params<cudf::list_view>(); auto const single_level_mean = get_distribution_mean(dist_params.length_params); auto const num_elements = num_rows * pow(single_level_mean, dist_params.max_depth); auto leaf_column = cudf::type_dispatcher( cudf::data_type(dist_params.element_type), create_rand_col_fn{}, profile, engine, num_elements); auto len_dist = random_value_fn<uint32_t>{profile.get_distribution_params<cudf::list_view>().length_params}; auto valid_dist = random_value_fn<bool>(distribution_params<bool>{1. - profile.get_null_frequency().value_or(0)}); // Generate the list column bottom-up auto list_column = std::move(leaf_column); for (int lvl = 0; lvl < dist_params.max_depth; ++lvl) { // Generating the next level - offsets point into the current list column auto current_child_column = std::move(list_column); cudf::size_type const num_rows = current_child_column->size() / single_level_mean; auto offsets = len_dist(engine, num_rows + 1); auto valids = valid_dist(engine, num_rows); // to ensure these values <= current_child_column->size() auto output_offsets = thrust::make_transform_output_iterator( offsets.begin(), clamp_down{current_child_column->size()}); thrust::exclusive_scan(thrust::device, offsets.begin(), offsets.end(), output_offsets); thrust::device_pointer_cast(offsets.end())[-1] = current_child_column->size(); // Always include all elements auto offsets_column = std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::INT32}, num_rows + 1, offsets.release()); auto [null_mask, null_count] = cudf::detail::valid_if(valids.begin(), valids.end(), thrust::identity<bool>{}); list_column = cudf::make_lists_column( num_rows, std::move(offsets_column), std::move(current_child_column), profile.get_null_frequency().has_value() ? null_count : 0, // cudf::UNKNOWN_NULL_COUNT, profile.get_null_frequency().has_value() ? std::move(null_mask) : rmm::device_buffer{}); } return list_column; // return the top-level column } using columns_vector = std::vector<std::unique_ptr<cudf::column>>; /** * @brief Creates a vector of columns with random content. * * @param profile Parameters for the random generator * @param dtype_ids vector of data type ids, one for each output column * @param engine Pseudo-random engine * @param num_rows Size of the output columns * * @return Columns filled with random data */ columns_vector create_random_columns(data_profile const& profile, std::vector<cudf::type_id> dtype_ids, thrust::minstd_rand engine, cudf::size_type num_rows) { columns_vector output_columns; std::transform( dtype_ids.begin(), dtype_ids.end(), std::back_inserter(output_columns), [&](auto tid) { engine.discard(num_rows); return cudf::type_dispatcher( cudf::data_type(tid), create_rand_col_fn{}, profile, engine, num_rows); }); return output_columns; } /** * @brief Repeats the input data types cyclically in order to fill a vector of @ref num_cols * elements.
*/ std::vector<cudf::type_id> cycle_dtypes(std::vector<cudf::type_id> const& dtype_ids, cudf::size_type num_cols) { if (dtype_ids.size() == static_cast<std::size_t>(num_cols)) { return dtype_ids; } std::vector<cudf::type_id> out_dtypes; out_dtypes.reserve(num_cols); for (cudf::size_type col = 0; col < num_cols; ++col) out_dtypes.push_back(dtype_ids[col % dtype_ids.size()]); return out_dtypes; } std::unique_ptr<cudf::table> create_random_table(std::vector<cudf::type_id> const& dtype_ids, table_size_bytes table_bytes, data_profile const& profile, unsigned seed) { size_t const avg_row_bytes = std::accumulate(dtype_ids.begin(), dtype_ids.end(), 0ul, [&](size_t sum, auto tid) { return sum + avg_element_size(profile, cudf::data_type(tid)); }); cudf::size_type const num_rows = table_bytes.size / avg_row_bytes; return create_random_table(dtype_ids, row_count{num_rows}, profile, seed); } std::unique_ptr<cudf::table> create_random_table(std::vector<cudf::type_id> const& dtype_ids, row_count num_rows, data_profile const& profile, unsigned seed) { auto seed_engine = deterministic_engine(seed); thrust::uniform_int_distribution<unsigned> seed_dist; columns_vector output_columns; std::transform( dtype_ids.begin(), dtype_ids.end(), std::back_inserter(output_columns), [&](auto tid) mutable { auto engine = deterministic_engine(seed_dist(seed_engine)); return cudf::type_dispatcher( cudf::data_type(tid), create_rand_col_fn{}, profile, engine, num_rows.count); }); return std::make_unique<cudf::table>(std::move(output_columns)); } std::unique_ptr<cudf::table> create_sequence_table(std::vector<cudf::type_id> const& dtype_ids, row_count num_rows, std::optional<double> null_probability, unsigned seed) { auto seed_engine = deterministic_engine(seed); thrust::uniform_int_distribution<unsigned> seed_dist; auto columns = std::vector<std::unique_ptr<cudf::column>>(dtype_ids.size()); std::transform(dtype_ids.begin(), dtype_ids.end(), columns.begin(), [&](auto dtype) mutable { auto init = cudf::make_default_constructed_scalar(cudf::data_type{dtype}); auto col = cudf::sequence(num_rows.count, *init); auto [mask, count] = create_random_null_mask(num_rows.count, null_probability, seed_dist(seed_engine)); col->set_null_mask(std::move(mask), count); return col; }); return std::make_unique<cudf::table>(std::move(columns)); } std::pair<rmm::device_buffer, cudf::size_type> create_random_null_mask( cudf::size_type size, std::optional<double> null_probability, unsigned seed) { if (not null_probability.has_value()) { return {rmm::device_buffer{}, 0}; } CUDF_EXPECTS(*null_probability >= 0.0 and *null_probability <= 1.0, "Null probability must be within the range [0.0, 1.0]"); if (*null_probability == 0.0) { return {cudf::create_null_mask(size, cudf::mask_state::ALL_VALID), 0}; } else if (*null_probability == 1.0) { return {cudf::create_null_mask(size, cudf::mask_state::ALL_NULL), size}; } else { return cudf::detail::valid_if(thrust::make_counting_iterator<cudf::size_type>(0), thrust::make_counting_iterator<cudf::size_type>(size), bool_generator{seed, 1.0 - *null_probability}); } } std::vector<cudf::type_id> get_type_or_group(int32_t id) { // identity transformation when passing a concrete type_id if (id < static_cast<int32_t>(cudf::type_id::NUM_TYPE_IDS)) return {static_cast<cudf::type_id>(id)}; // if the value is larger than type_id::NUM_TYPE_IDS, it's a group id type_group_id const group_id = static_cast<type_group_id>(id); using trait_fn = bool (*)(cudf::data_type); trait_fn is_integral = [](cudf::data_type type) { return
cudf::is_numeric(type) && !cudf::is_floating_point(type); }; trait_fn is_integral_signed = [](cudf::data_type type) { return cudf::is_numeric(type) && !cudf::is_floating_point(type) && !cudf::is_unsigned(type); }; auto fn = [&]() -> trait_fn { switch (group_id) { case type_group_id::FLOATING_POINT: return cudf::is_floating_point; case type_group_id::INTEGRAL: return is_integral; case type_group_id::INTEGRAL_SIGNED: return is_integral_signed; case type_group_id::NUMERIC: return cudf::is_numeric; case type_group_id::TIMESTAMP: return cudf::is_timestamp; case type_group_id::DURATION: return cudf::is_duration; case type_group_id::FIXED_POINT: return cudf::is_fixed_point; case type_group_id::COMPOUND: return cudf::is_compound; case type_group_id::NESTED: return cudf::is_nested; default: CUDF_FAIL("Invalid data type group"); } }(); std::vector<cudf::type_id> types; for (int type_int = 0; type_int < static_cast<int32_t>(cudf::type_id::NUM_TYPE_IDS); ++type_int) { auto const type = static_cast<cudf::type_id>(type_int); if (type != cudf::type_id::EMPTY && fn(cudf::data_type(type))) types.push_back(type); } return types; } std::vector<cudf::type_id> get_type_or_group(std::vector<int32_t> const& ids) { std::vector<cudf::type_id> all_type_ids; for (auto& id : ids) { auto const type_ids = get_type_or_group(id); all_type_ids.insert(std::end(all_type_ids), std::cbegin(type_ids), std::cend(type_ids)); } return all_type_ids; }
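// ---------------------------------------------------------------------------
// Editor's sketch (not from the original file): minimal intended usage of the
// generators above. `data_profile`, `row_count`, and `table_size_bytes` come
// from this benchmark utility's headers; the default-constructed profile, the
// brace-initialization of the size structs, and the concrete type list are
// illustrative assumptions.
#if 0  // illustrative only
std::unique_ptr<cudf::table> example_tables(unsigned seed)
{
  data_profile const profile{};  // default distributions per type

  // Four columns cycling through INT32 and STRING, one million rows.
  auto by_rows = create_random_table(
    cycle_dtypes({cudf::type_id::INT32, cudf::type_id::STRING}, 4),
    row_count{1'000'000}, profile, seed);

  // Same column types, but sized by total bytes; the row count is derived
  // from the per-type averages computed by avg_element_size().
  auto by_bytes = create_random_table(
    cycle_dtypes({cudf::type_id::INT32, cudf::type_id::STRING}, 4),
    table_size_bytes{512ul << 20},  // ~512 MiB
    profile, seed);

  return by_rows;  // both results are ordinary cudf::table objects
}
#endif
// ---------------------------------------------------------------------------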
* Radix sorting problem instance ******************************************************************************/ #pragma once #include "../util/spine.cuh" #include "../util/basic_utils.cuh" #include "../util/kernel_props.cuh" #include "../util/error_utils.cuh" #include "../util/cta_work_distribution.cuh" #include "../util/ns_umbrella.cuh" #include "../radix_sort/sort_utils.cuh" #include "../radix_sort/pass_policy.cuh" #include "../radix_sort/upsweep/kernel_policy.cuh" #include "../radix_sort/upsweep/kernel.cuh" #include "../radix_sort/spine/kernel_policy.cuh" #include "../radix_sort/spine/kernel.cuh" #include "../radix_sort/downsweep/kernel_policy.cuh" #include "../radix_sort/downsweep/kernel.cuh" #include "../radix_sort/downsweep/tex_ref.cuh" B40C_NS_PREFIX namespace b40c { namespace radix_sort { /****************************************************************************** * Problem instance ******************************************************************************/ /** * Problem instance */ template < typename DoubleBuffer, typename _SizeT> struct ProblemInstance { //--------------------------------------------------------------------- // Type definitions //--------------------------------------------------------------------- typedef typename DoubleBuffer::KeyType KeyType; typedef typename DoubleBuffer::ValueType ValueType; typedef _SizeT SizeT; /** * Upsweep kernel properties */ struct UpsweepKernelProps : util::KernelProps { // Upsweep kernel function type typedef void (*KernelFunc)( SizeT*, KeyType*, KeyType*, util::CtaWorkDistribution<SizeT>); // Fields KernelFunc kernel_func; int log_tile_elements; cudaSharedMemConfig sm_bank_config; /** * Initializer */ template < typename KernelPolicy, typename OpaquePolicy> cudaError_t Init(int sm_arch, int sm_count) { // Initialize fields kernel_func = upsweep::Kernel<OpaquePolicy>; log_tile_elements = KernelPolicy::LOG_TILE_ELEMENTS; sm_bank_config = KernelPolicy::SMEM_CONFIG; // Initialize super class return util::KernelProps::Init( kernel_func, KernelPolicy::CTA_THREADS, sm_arch, sm_count); } /** * Initializer */ template <typename KernelPolicy> cudaError_t Init(int sm_arch, int sm_count) { return Init<KernelPolicy, KernelPolicy>(sm_arch, sm_count); } }; /** * Spine kernel properties */ struct SpineKernelProps : util::KernelProps { // Spine kernel function type typedef void (*KernelFunc)(SizeT*, SizeT*, int); // Fields KernelFunc kernel_func; int log_tile_elements; cudaSharedMemConfig sm_bank_config; /** * Initializer */ template < typename KernelPolicy, typename OpaquePolicy> cudaError_t Init(int sm_arch, int sm_count) { // Initialize fields kernel_func = spine::Kernel<OpaquePolicy>; log_tile_elements = KernelPolicy::LOG_TILE_ELEMENTS; sm_bank_config = KernelPolicy::SMEM_CONFIG; // Initialize super class return util::KernelProps::Init( kernel_func, KernelPolicy::CTA_THREADS, sm_arch, sm_count); } /** * Initializer */ template <typename KernelPolicy> cudaError_t Init(int sm_arch, int sm_count) { return Init<KernelPolicy, KernelPolicy>(sm_arch, sm_count); } }; /** * Downsweep kernel props */ struct DownsweepKernelProps : util::KernelProps { // Downsweep kernel function type typedef void (*KernelFunc)( SizeT*, KeyType*, KeyType*, ValueType*, ValueType*, util::CtaWorkDistribution<SizeT>); // Downsweep texture binding function type typedef cudaError_t (*BindTexFunc)(void *, void *, size_t); // Fields KernelFunc kernel_func; BindTexFunc keys_tex_func; BindTexFunc values_tex_func; int log_tile_elements; cudaSharedMemConfig sm_bank_config; /** 
* Initializer */ template < typename KernelPolicy, typename OpaquePolicy> cudaError_t Init(int sm_arch, int sm_count) { // Wrapper of downsweep texture types typedef downsweep::Textures< KeyType, ValueType, (1 << KernelPolicy::LOG_THREAD_ELEMENTS)> DownsweepTextures; // Key texture type typedef typename DownsweepTextures::KeyTexType KeyTexType; // Value texture type typedef typename DownsweepTextures::ValueTexType ValueTexType; // Initialize fields kernel_func = downsweep::Kernel<OpaquePolicy>; keys_tex_func = (KernelPolicy::LOAD_MODIFIER == util::io::ld::tex) ? downsweep::TexKeys<KeyTexType>::BindTexture : NULL; values_tex_func = (KernelPolicy::LOAD_MODIFIER == util::io::ld::tex) ? downsweep::TexValues<ValueTexType>::BindTexture : NULL; log_tile_elements = KernelPolicy::LOG_TILE_ELEMENTS; sm_bank_config = KernelPolicy::SMEM_CONFIG; // Initialize super class return util::KernelProps::Init( kernel_func, KernelPolicy::CTA_THREADS, sm_arch, sm_count); } /** * Initializer */ template <typename KernelPolicy> cudaError_t Init(int sm_arch, int sm_count) { return Init<KernelPolicy, KernelPolicy>(sm_arch, sm_count); } /** * Bind related textures */ cudaError_t BindTexture( KeyType *d_keys0, KeyType *d_keys1, ValueType *d_values0, ValueType *d_values1, SizeT num_elements) const { cudaError_t error = cudaSuccess; do { // Bind key texture if (keys_tex_func) error = keys_tex_func(d_keys0, d_keys1, sizeof(KeyType) * num_elements); if (error) break; // Bind value texture if (values_tex_func) error = values_tex_func(d_values0, d_values1, sizeof(ValueType) * num_elements); if (error) break; } while (0); return error; } }; //--------------------------------------------------------------------- // Fields //--------------------------------------------------------------------- DoubleBuffer &storage; SizeT num_elements; util::Spine &spine; cudaStream_t stream; int max_grid_size; bool debug; //--------------------------------------------------------------------- // Methods //--------------------------------------------------------------------- /** * Constructor */ ProblemInstance( DoubleBuffer &storage, SizeT num_elements, cudaStream_t stream, util::Spine &spine, int max_grid_size, bool debug) : storage(storage), num_elements(num_elements), stream(stream), spine(spine), max_grid_size(max_grid_size), debug(debug) {} /** * DispatchPass */ cudaError_t DispatchPass( int radix_bits, const UpsweepKernelProps &upsweep_props, const SpineKernelProps &spine_props, const DownsweepKernelProps &downsweep_props, bool uniform_grid_size, DynamicSmemConfig dynamic_smem_config, int current_pass = 0) { cudaError_t error = cudaSuccess; do { // Compute sweep grid size int log_schedule_granularity = CUB_MAX( upsweep_props.log_tile_elements, downsweep_props.log_tile_elements); int sweep_grid_size = downsweep_props.OversubscribedGridSize( 1 << log_schedule_granularity, num_elements, max_grid_size); // Compute spine elements (rounded up to nearest tile size) SizeT spine_elements = CUB_ROUND_UP_NEAREST( (sweep_grid_size << radix_bits), // Each CTA produces a partial for every radix digit (1 << spine_props.log_tile_elements)); // Number of partials per tile // Make sure our spine is big enough error = spine.Setup(sizeof(SizeT) * spine_elements); if (error) break; // Bind downsweep textures on first pass if (current_pass == 0) { error = downsweep_props.BindTexture( storage.d_keys[storage.selector], storage.d_keys[storage.selector ^ 1], storage.d_values[storage.selector], storage.d_values[storage.selector ^ 1], num_elements); if (error) break; } // Obtain a CTA work distribution util::CtaWorkDistribution<SizeT> work( num_elements, sweep_grid_size, log_schedule_granularity); // Grid size tuning int grid_size[3] = {sweep_grid_size, 1, sweep_grid_size}; if (uniform_grid_size) { // Make sure that all kernels launch the same number of CTAs grid_size[1] = grid_size[0]; } // Smem allocation tuning int dynamic_smem[3] = {0, 0, 0}; if (dynamic_smem_config == DYNAMIC_SMEM_UNIFORM) { // Pad with dynamic smem so all kernels get the same total smem allocation int max_static_smem = CUB_MAX( upsweep_props.kernel_attrs.sharedSizeBytes, CUB_MAX( spine_props.kernel_attrs.sharedSizeBytes, downsweep_props.kernel_attrs.sharedSizeBytes)); dynamic_smem[0] = max_static_smem - upsweep_props.kernel_attrs.sharedSizeBytes; dynamic_smem[1] = max_static_smem - spine_props.kernel_attrs.sharedSizeBytes; dynamic_smem[2] = max_static_smem - downsweep_props.kernel_attrs.sharedSizeBytes; } else if (dynamic_smem_config == DYNAMIC_SMEM_LCM) { // Pad upsweep/downsweep with dynamic smem so kernel occupancy is a multiple of the lowest occupancy int min_occupancy = CUB_MIN(upsweep_props.max_cta_occupancy, downsweep_props.max_cta_occupancy); dynamic_smem[0] = upsweep_props.SmemPadding(min_occupancy); dynamic_smem[2] = downsweep_props.SmemPadding(min_occupancy); } // Print debug info if (debug) { work.Print(); printf( "Upsweep: tile size(%d), occupancy(%d), grid_size(%d), threads(%d), dynamic smem(%d)\n" "Spine: tile size(%d), occupancy(%d), grid_size(%d), threads(%d), dynamic smem(%d)\n" "Downsweep: tile size(%d), occupancy(%d), grid_size(%d), threads(%d), dynamic smem(%d)\n", (1 << upsweep_props.log_tile_elements), upsweep_props.max_cta_occupancy, grid_size[0], upsweep_props.threads, dynamic_smem[0], (1 << spine_props.log_tile_elements), spine_props.max_cta_occupancy, grid_size[1], spine_props.threads, dynamic_smem[1], (1 << downsweep_props.log_tile_elements), downsweep_props.max_cta_occupancy, grid_size[2], downsweep_props.threads, dynamic_smem[2]); fflush(stdout); } // // Upsweep // // Set shared mem bank mode cudaSharedMemConfig old_sm_config; cudaDeviceGetSharedMemConfig(&old_sm_config); if (old_sm_config != upsweep_props.sm_bank_config) cudaDeviceSetSharedMemConfig(upsweep_props.sm_bank_config); // Upsweep reduction into spine upsweep_props.kernel_func<<<grid_size[0], upsweep_props.threads, dynamic_smem[0], stream>>>( (SizeT*) spine(), storage.d_keys[storage.selector], storage.d_keys[storage.selector ^ 1], work); if (debug) { error = cudaThreadSynchronize(); if (error = util::B40CPerror(error, "Upsweep kernel failed ", __FILE__, __LINE__)) break; } // // Spine // // Set shared mem bank mode if (spine_props.sm_bank_config != upsweep_props.sm_bank_config) cudaDeviceSetSharedMemConfig(spine_props.sm_bank_config); // Spine scan spine_props.kernel_func<<<grid_size[1], spine_props.threads, dynamic_smem[1], stream>>>( (SizeT*) spine(), (SizeT*) spine(), spine_elements); if (debug) { error = cudaThreadSynchronize(); if (error = util::B40CPerror(error, "Spine kernel failed ", __FILE__, __LINE__)) break; } // // Downsweep // // Set shared mem bank mode if (downsweep_props.sm_bank_config != spine_props.sm_bank_config) cudaDeviceSetSharedMemConfig(downsweep_props.sm_bank_config); // Downsweep scan from spine downsweep_props.kernel_func<<<grid_size[2], downsweep_props.threads, dynamic_smem[2], stream>>>( (SizeT *) spine(), storage.d_keys[storage.selector], storage.d_keys[storage.selector ^ 1], storage.d_values[storage.selector], storage.d_values[storage.selector ^ 1], work);
if (debug) { error = cudaThreadSynchronize(); if (error = util::B40CPerror(error, "Downsweep kernel failed ", __FILE__, __LINE__)) break; } // Restore smem bank mode if (old_sm_config != downsweep_props.sm_bank_config) cudaDeviceSetSharedMemConfig(old_sm_config); } while(0); return error; } }; } // namespace radix_sort } // namespace b40c B40C_NS_POSTFIX
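// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original header): the spine sizing rule
// used in DispatchPass, written out as a plain function. With 5 radix bits a
// CTA emits 2^5 = 32 digit partials, so 128 sweep CTAs produce 4096 partials,
// which CUB_ROUND_UP_NEAREST then pads up to a whole number of spine tiles.
static inline int SpineElementsSketch(
	int sweep_grid_size,          // CTAs in the upsweep/downsweep grids
	int radix_bits,               // digit bits resolved by this pass
	int log_spine_tile_elements)  // log2 of the spine kernel's tile size
{
	int partials = sweep_grid_size << radix_bits;   // one partial per digit per CTA
	int tile = 1 << log_spine_tile_elements;        // spine scan tile size
	return ((partials + tile - 1) / tile) * tile;   // round up to a tile multiple
}
// E.g. SpineElementsSketch(128, 5, 10) == 4096, and SpineElementsSketch(100, 5, 10)
// == 4096 as well (3200 partials padded to four 1024-element tiles).
// ---------------------------------------------------------------------------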
#include "gpu_utils.cuh" #include "nonbonded.hpp" #include "vendored/hilbert.h" #include "k_nonbonded.cuh" #include <fstream> #include <streambuf> #include <string> namespace timemachine { template <typename RealType, bool Interpolated> Nonbonded<RealType, Interpolated>::Nonbonded( const std::vector<int> &exclusion_idxs, // [E,2] const std::vector<double> &scales, // [E, 2] const std::vector<int> &lambda_plane_idxs, // [N] const std::vector<int> &lambda_offset_idxs, // [N] const double beta, const double cutoff, const std::string &kernel_src // const std::string &transform_lambda_charge, // const std::string &transform_lambda_sigma, // const std::string &transform_lambda_epsilon, // const std::string &transform_lambda_w ) : N_(lambda_offset_idxs.size()), cutoff_(cutoff), E_(exclusion_idxs.size() / 2), nblist_(lambda_offset_idxs.size()), beta_(beta), d_sort_storage_(nullptr), d_sort_storage_bytes_(0), nblist_padding_(0.1), disable_hilbert_(false), kernel_ptrs_({// enumerate over every possible kernel combination // U: Compute U // X: Compute DU_DL // L: Compute DU_DX // P: Compute DU_DP // U X L P &k_nonbonded_unified<RealType, 0, 0, 0, 0>, &k_nonbonded_unified<RealType, 0, 0, 0, 1>, &k_nonbonded_unified<RealType, 0, 0, 1, 0>, &k_nonbonded_unified<RealType, 0, 0, 1, 1>, &k_nonbonded_unified<RealType, 0, 1, 0, 0>, &k_nonbonded_unified<RealType, 0, 1, 0, 1>, &k_nonbonded_unified<RealType, 0, 1, 1, 0>, &k_nonbonded_unified<RealType, 0, 1, 1, 1>, &k_nonbonded_unified<RealType, 1, 0, 0, 0>, &k_nonbonded_unified<RealType, 1, 0, 0, 1>, &k_nonbonded_unified<RealType, 1, 0, 1, 0>, &k_nonbonded_unified<RealType, 1, 0, 1, 1>, &k_nonbonded_unified<RealType, 1, 1, 0, 0>, &k_nonbonded_unified<RealType, 1, 1, 0, 1>, &k_nonbonded_unified<RealType, 1, 1, 1, 0>, &k_nonbonded_unified<RealType, 1, 1, 1, 1>}), compute_w_coords_instance_(kernel_cache_.program(kernel_src.c_str()).kernel("k_compute_w_coords").instantiate()), compute_permute_interpolated_( kernel_cache_.program(kernel_src.c_str()).kernel("k_permute_interpolated").instantiate()), compute_add_ull_to_real_interpolated_( kernel_cache_.program(kernel_src.c_str()).kernel("k_add_ull_to_real_interpolated").instantiate()) { if (lambda_offset_idxs.size() != N_) { throw std::runtime_error("lambda offset idxs need to have size N"); } if (lambda_offset_idxs.size() != lambda_plane_idxs.size()) { throw std::runtime_error("lambda offset idxs and plane idxs need to be equivalent"); } if (scales.size() / 2 != E_) { throw std::runtime_error("bad scales size!"); } gpuErrchk(cudaMalloc(&d_lambda_plane_idxs_, N_ * sizeof(*d_lambda_plane_idxs_))); gpuErrchk(cudaMemcpy( d_lambda_plane_idxs_, &lambda_plane_idxs[0], N_ * sizeof(*d_lambda_plane_idxs_), cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_lambda_offset_idxs_, N_ * sizeof(*d_lambda_offset_idxs_))); gpuErrchk(cudaMemcpy( d_lambda_offset_idxs_, &lambda_offset_idxs[0], N_ * sizeof(*d_lambda_offset_idxs_), cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_perm_, N_ * sizeof(*d_perm_))); gpuErrchk(cudaMalloc(&d_sorted_x_, N_ * 3 * sizeof(*d_sorted_x_))); gpuErrchk(cudaMalloc(&d_w_, N_ * sizeof(*d_w_))); gpuErrchk(cudaMalloc(&d_dw_dl_, N_ * sizeof(*d_dw_dl_))); gpuErrchk(cudaMalloc(&d_sorted_w_, N_ * sizeof(*d_sorted_w_))); gpuErrchk(cudaMalloc(&d_sorted_dw_dl_, N_ * sizeof(*d_sorted_dw_dl_))); gpuErrchk(cudaMalloc(&d_unsorted_p_, N_ * 3 * sizeof(*d_unsorted_p_))); // interpolated gpuErrchk(cudaMalloc(&d_sorted_p_, N_ * 3 * sizeof(*d_sorted_p_))); // interpolated gpuErrchk(cudaMalloc(&d_unsorted_dp_dl_, N_ * 3 * 
sizeof(*d_unsorted_dp_dl_))); // interpolated gpuErrchk(cudaMalloc(&d_sorted_dp_dl_, N_ * 3 * sizeof(*d_sorted_dp_dl_))); // interpolated gpuErrchk(cudaMalloc(&d_sorted_du_dx_, N_ * 3 * sizeof(*d_sorted_du_dx_))); gpuErrchk(cudaMalloc(&d_sorted_du_dp_, N_ * 3 * sizeof(*d_sorted_du_dp_))); gpuErrchk(cudaMalloc(&d_du_dp_buffer_, N_ * 3 * sizeof(*d_du_dp_buffer_))); gpuErrchk(cudaMalloc(&d_exclusion_idxs_, E_ * 2 * sizeof(*d_exclusion_idxs_))); gpuErrchk( cudaMemcpy(d_exclusion_idxs_, &exclusion_idxs[0], E_ * 2 * sizeof(*d_exclusion_idxs_), cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_scales_, E_ * 2 * sizeof(*d_scales_))); gpuErrchk(cudaMemcpy(d_scales_, &scales[0], E_ * 2 * sizeof(*d_scales_), cudaMemcpyHostToDevice)); gpuErrchk(cudaMallocHost(&p_ixn_count_, 1 * sizeof(*p_ixn_count_))); gpuErrchk(cudaMalloc(&d_nblist_x_, N_ * 3 * sizeof(*d_nblist_x_))); gpuErrchk(cudaMemset(d_nblist_x_, 0, N_ * 3 * sizeof(*d_nblist_x_))); // set non-sensical positions gpuErrchk(cudaMalloc(&d_nblist_box_, 3 * 3 * sizeof(*d_nblist_x_))); gpuErrchk(cudaMemset(d_nblist_box_, 0, 3 * 3 * sizeof(*d_nblist_x_))); gpuErrchk(cudaMalloc(&d_rebuild_nblist_, 1 * sizeof(*d_rebuild_nblist_))); gpuErrchk(cudaMallocHost(&p_rebuild_nblist_, 1 * sizeof(*p_rebuild_nblist_))); gpuErrchk(cudaMalloc(&d_sort_keys_in_, N_ * sizeof(d_sort_keys_in_))); gpuErrchk(cudaMalloc(&d_sort_keys_out_, N_ * sizeof(d_sort_keys_out_))); gpuErrchk(cudaMalloc(&d_sort_vals_in_, N_ * sizeof(d_sort_vals_in_))); // initialize hilbert curve std::vector<unsigned int> bin_to_idx(256 * 256 * 256); for (int i = 0; i < 256; i++) { for (int j = 0; j < 256; j++) { for (int k = 0; k < 256; k++) { bitmask_t hilbert_coords[3]; hilbert_coords[0] = i; hilbert_coords[1] = j; hilbert_coords[2] = k; unsigned int bin = static_cast<unsigned int>(hilbert_c2i(3, 8, hilbert_coords)); bin_to_idx[i * 256 * 256 + j * 256 + k] = bin; } } } gpuErrchk(cudaMalloc(&d_bin_to_idx_, 256 * 256 * 256 * sizeof(*d_bin_to_idx_))); gpuErrchk( cudaMemcpy(d_bin_to_idx_, &bin_to_idx[0], 256 * 256 * 256 * sizeof(*d_bin_to_idx_), cudaMemcpyHostToDevice)); // estimate size needed to do radix sorting, this can use uninitialized data. 
cub::DeviceRadixSort::SortPairs( d_sort_storage_, d_sort_storage_bytes_, d_sort_keys_in_, d_sort_keys_out_, d_sort_vals_in_, d_perm_, N_); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaMalloc(&d_sort_storage_, d_sort_storage_bytes_)); }; template <typename RealType, bool Interpolated> Nonbonded<RealType, Interpolated>::~Nonbonded() { gpuErrchk(cudaFree(d_exclusion_idxs_)); gpuErrchk(cudaFree(d_scales_)); gpuErrchk(cudaFree(d_lambda_plane_idxs_)); gpuErrchk(cudaFree(d_lambda_offset_idxs_)); gpuErrchk(cudaFree(d_du_dp_buffer_)); gpuErrchk(cudaFree(d_perm_)); // nullptr if we never built nblist gpuErrchk(cudaFree(d_bin_to_idx_)); gpuErrchk(cudaFree(d_sorted_x_)); gpuErrchk(cudaFree(d_w_)); gpuErrchk(cudaFree(d_dw_dl_)); gpuErrchk(cudaFree(d_sorted_w_)); gpuErrchk(cudaFree(d_sorted_dw_dl_)); gpuErrchk(cudaFree(d_unsorted_p_)); gpuErrchk(cudaFree(d_sorted_p_)); gpuErrchk(cudaFree(d_unsorted_dp_dl_)); gpuErrchk(cudaFree(d_sorted_dp_dl_)); gpuErrchk(cudaFree(d_sorted_du_dx_)); gpuErrchk(cudaFree(d_sorted_du_dp_)); gpuErrchk(cudaFree(d_sort_keys_in_)); gpuErrchk(cudaFree(d_sort_keys_out_)); gpuErrchk(cudaFree(d_sort_vals_in_)); gpuErrchk(cudaFree(d_sort_storage_)); gpuErrchk(cudaFreeHost(p_ixn_count_)); gpuErrchk(cudaFree(d_nblist_x_)); gpuErrchk(cudaFree(d_nblist_box_)); gpuErrchk(cudaFree(d_rebuild_nblist_)); gpuErrchk(cudaFreeHost(p_rebuild_nblist_)); }; template <typename RealType, bool Interpolated> void Nonbonded<RealType, Interpolated>::set_nblist_padding(double val) { nblist_padding_ = val; } template <typename RealType, bool Interpolated> void Nonbonded<RealType, Interpolated>::disable_hilbert_sort() { disable_hilbert_ = true; } template <typename RealType, bool Interpolated> void Nonbonded<RealType, Interpolated>::hilbert_sort(const double *d_coords, const double *d_box, cudaStream_t stream) { const int tpb = 32; const int B = (N_ + tpb - 1) / tpb; k_coords_to_kv<<<B, tpb, 0, stream>>>(N_, d_coords, d_box, d_bin_to_idx_, d_sort_keys_in_, d_sort_vals_in_); gpuErrchk(cudaPeekAtLastError()); cub::DeviceRadixSort::SortPairs( d_sort_storage_, d_sort_storage_bytes_, d_sort_keys_in_, d_sort_keys_out_, d_sort_vals_in_, d_perm_, N_, 0, // begin bit sizeof(*d_sort_keys_in_) * 8, // end bit stream // cudaStream ); gpuErrchk(cudaPeekAtLastError()); } void __global__ k_arange(int N, unsigned int *arr) { const int atom_idx = blockIdx.x * blockDim.x + threadIdx.x; if (atom_idx >= N) { return; } arr[atom_idx] = atom_idx; } template <typename RealType, bool Interpolated> void Nonbonded<RealType, Interpolated>::execute_device( const int N, const int P, const double *d_x, const double *d_p, // 2 * N * 3 const double *d_box, // 3 * 3 const double lambda, unsigned long long *d_du_dx, double *d_du_dp, unsigned long long *d_du_dl, unsigned long long *d_u, cudaStream_t stream) { // (ytz) the nonbonded algorithm proceeds as follows: // (done in constructor), construct a hilbert curve mapping each of the 256x256x256 cells into an index. // a. decide if we need to rebuild the neighborlist, if so: // - look up which cell each particle belongs to, and its linear index along the hilbert curve. // - use radix pair sort keyed on the hilbert index with values equal to the atomic index // - resulting sorted values is the permutation array. // - permute lambda plane/offsets, coords // b. else: // - permute new coords // c. permute parameters // d. compute the nonbonded interactions using the neighborlist // e. inverse permute the forces, du/dps into the original index. // f. 
u and du/dl are buffered into a per-particle array, and then reduced. // g. note that du/dl is not an exact per-particle du/dl - it is only used for reduction purposes. if (N != N_) { std::cout << N << " " << N_ << std::endl; throw std::runtime_error("Nonbonded::execute_device() N != N_"); } const int M = Interpolated ? 2 : 1; if (P != M * N_ * 3) { std::cout << P << " " << N_ << std::endl; throw std::runtime_error("Nonbonded::execute_device() P != M*N_*3"); } // identify which tiles contain interpolated parameters const int tpb = 32; const int B = (N + tpb - 1) / tpb; dim3 dimGrid(B, 3, 1); // (ytz) see if we need to rebuild the neighborlist. // (ytz + jfass): note that this logic needs to change if we use NPT later on since a resize in the box // can introduce new interactions. k_check_rebuild_coords_and_box<RealType> <<<B, tpb, 0, stream>>>(N, d_x, d_nblist_x_, d_box, d_nblist_box_, nblist_padding_, d_rebuild_nblist_); gpuErrchk(cudaPeekAtLastError()); // we can optimize this away by doing the check on the GPU directly. gpuErrchk(cudaMemcpyAsync( p_rebuild_nblist_, d_rebuild_nblist_, 1 * sizeof(*p_rebuild_nblist_), cudaMemcpyDeviceToHost, stream)); gpuErrchk(cudaStreamSynchronize(stream)); // slow! if (p_rebuild_nblist_[0] > 0) { // (ytz): update the permutation index before building neighborlist, as the neighborlist is tied // to a particular sort order if (!disable_hilbert_) { this->hilbert_sort(d_x, d_box, stream); } else { k_arange<<<B, tpb, 0, stream>>>(N, d_perm_); gpuErrchk(cudaPeekAtLastError()); } // compute new coordinates, new lambda_idxs, new_plane_idxs k_permute<<<dimGrid, tpb, 0, stream>>>(N, d_perm_, d_x, d_sorted_x_); gpuErrchk(cudaPeekAtLastError()); nblist_.build_nblist_device(N, d_sorted_x_, d_box, cutoff_ + nblist_padding_, stream); gpuErrchk(cudaMemcpyAsync( p_ixn_count_, nblist_.get_ixn_count(), 1 * sizeof(*p_ixn_count_), cudaMemcpyDeviceToHost, stream)); std::vector<double> h_box(9); gpuErrchk(cudaMemcpyAsync(&h_box[0], d_box, 3 * 3 * sizeof(*d_box), cudaMemcpyDeviceToHost, stream)); // Verify that the cutoff and box size are valid together. If cutoff is greater than half the box // then a particle can interact with multiple periodic copies.
const double db_cutoff = (cutoff_ + nblist_padding_) * 2; cudaStreamSynchronize(stream); // Verify that box is orthogonal and the width of the box in all dimensions is greater than twice the cutoff for (int i = 0; i < 9; i++) { if (i == 0 || i == 4 || i == 8) { if (h_box[i] < db_cutoff) { throw std::runtime_error( "Cutoff with padding is more than half of the box width, neighborlist is no longer reliable"); } } else if (h_box[i] != 0.0) { throw std::runtime_error("Provided non-ortholinear box, unable to compute nonbonded energy"); } } gpuErrchk(cudaMemsetAsync(d_rebuild_nblist_, 0, sizeof(*d_rebuild_nblist_), stream)); gpuErrchk(cudaMemcpyAsync(d_nblist_x_, d_x, N * 3 * sizeof(*d_x), cudaMemcpyDeviceToDevice, stream)); gpuErrchk(cudaMemcpyAsync(d_nblist_box_, d_box, 3 * 3 * sizeof(*d_box), cudaMemcpyDeviceToDevice, stream)); } else { k_permute<<<dimGrid, tpb, 0, stream>>>(N, d_perm_, d_x, d_sorted_x_); gpuErrchk(cudaPeekAtLastError()); } // do parameter interpolation here if (Interpolated) { CUresult result = compute_permute_interpolated_.configure(dimGrid, tpb, 0, stream) .launch(lambda, N, d_perm_, d_p, d_sorted_p_, d_sorted_dp_dl_); if (result != 0) { throw std::runtime_error("Driver call to k_permute_interpolated failed"); } } else { k_permute<<<dimGrid, tpb, 0, stream>>>(N, d_perm_, d_p, d_sorted_p_); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaMemsetAsync(d_sorted_dp_dl_, 0, N * 3 * sizeof(*d_sorted_dp_dl_), stream)) } // this stream needs to be synchronized so we can be sure that p_ixn_count_ is properly set. // reset buffers and sorted accumulators if (d_du_dx) { gpuErrchk(cudaMemsetAsync(d_sorted_du_dx_, 0, N * 3 * sizeof(*d_sorted_du_dx_), stream)) } if (d_du_dp) { gpuErrchk(cudaMemsetAsync(d_sorted_du_dp_, 0, N * 3 * sizeof(*d_sorted_du_dp_), stream)) } // update new w coordinates // (tbd): cache lambda value for equilibrium calculations CUresult result = compute_w_coords_instance_.configure(B, tpb, 0, stream) .launch(N, lambda, cutoff_, d_lambda_plane_idxs_, d_lambda_offset_idxs_, d_w_, d_dw_dl_); if (result != 0) { throw std::runtime_error("Driver call to k_compute_w_coords"); } gpuErrchk(cudaPeekAtLastError()); k_permute_2x<<<B, tpb, 0, stream>>>(N, d_perm_, d_w_, d_dw_dl_, d_sorted_w_, d_sorted_dw_dl_); gpuErrchk(cudaPeekAtLastError()); // look up which kernel we need for this computation int kernel_idx = 0; kernel_idx |= d_du_dp ? 1 << 0 : 0; kernel_idx |= d_du_dl ? 1 << 1 : 0; kernel_idx |= d_du_dx ? 1 << 2 : 0; kernel_idx |= d_u ? 
1 << 3 : 0; kernel_ptrs_[kernel_idx]<<<p_ixn_count_[0], tpb, 0, stream>>>( N, d_sorted_x_, d_sorted_p_, d_box, d_sorted_dp_dl_, d_sorted_w_, d_sorted_dw_dl_, lambda, beta_, cutoff_, nblist_.get_ixn_tiles(), nblist_.get_ixn_atoms(), d_sorted_du_dx_, d_sorted_du_dp_, d_du_dl, // switch to nullptr if we don't request du_dl d_u // switch to nullptr if we don't request energies ); gpuErrchk(cudaPeekAtLastError()); // coords are N,3 if (d_du_dx) { k_inv_permute_accum<<<dimGrid, tpb, 0, stream>>>(N, d_perm_, d_sorted_du_dx_, d_du_dx); gpuErrchk(cudaPeekAtLastError()); } // params are N,3 // this needs to be an accumulated permute if (d_du_dp) { k_inv_permute_assign<<<dimGrid, tpb, 0, stream>>>(N, d_perm_, d_sorted_du_dp_, d_du_dp_buffer_); gpuErrchk(cudaPeekAtLastError()); } // exclusions use the non-sorted version if (E_ > 0) { dim3 dimGridExclusions((E_ + tpb - 1) / tpb, 1, 1); if (Interpolated) { k_inv_permute_assign_2x<<<dimGrid, tpb, 0, stream>>>( N, d_perm_, d_sorted_p_, d_sorted_dp_dl_, d_unsorted_p_, d_unsorted_dp_dl_); gpuErrchk(cudaPeekAtLastError()); } k_nonbonded_exclusions<RealType><<<dimGridExclusions, tpb, 0, stream>>>( E_, d_x, Interpolated ? d_unsorted_p_ : d_p, d_box, Interpolated ? d_unsorted_dp_dl_ : d_sorted_dp_dl_, d_w_, d_dw_dl_, lambda, d_exclusion_idxs_, d_scales_, beta_, cutoff_, d_du_dx, d_du_dp_buffer_, d_du_dl, d_u); gpuErrchk(cudaPeekAtLastError()); } if (d_du_dp) { if (Interpolated) { CUresult result = compute_add_ull_to_real_interpolated_.configure(dimGrid, tpb, 0, stream) .launch(lambda, N, d_du_dp_buffer_, d_du_dp); if (result != 0) { throw std::runtime_error("Driver call to k_add_ull_to_real_interpolated failed"); } } else { k_add_ull_to_real<<<dimGrid, tpb, 0, stream>>>(N, d_du_dp_buffer_, d_du_dp); } gpuErrchk(cudaPeekAtLastError()); } } template class Nonbonded<double, true>; template class Nonbonded<float, true>; template class Nonbonded<double, false>; template class Nonbonded<float, false>; } // namespace timemachine
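// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the output-selection
// bitmask used in execute_device() above, isolated for clarity. Each
// requested output sets one bit, indexing into the 16-entry kernel_ptrs_
// table built in the constructor, so kernel_ptrs_[idx] is the template
// instantiation with exactly those computations enabled.
static inline int select_nonbonded_kernel(bool du_dp, bool du_dl, bool du_dx, bool u) {
    int idx = 0;
    idx |= du_dp ? 1 << 0 : 0; // P: parameter derivatives
    idx |= du_dl ? 1 << 1 : 0; // L: lambda derivative
    idx |= du_dx ? 1 << 2 : 0; // X: coordinate derivatives (forces)
    idx |= u ? 1 << 3 : 0;     // U: energies
    return idx;                // e.g. forces + energy only -> 0b1100 == 12
}
// ---------------------------------------------------------------------------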
// #ifdef _HFS_CUDA_ON_ #include "../precomp.hpp" #include "../slic/slic.hpp" namespace cv { namespace hfs { namespace slic { namespace engines { __global__ void cvtImgSpaceDevice(const Vector4u* inimg, Vector2i img_size, Vector4f* outimg); __global__ void initClusterCentersDevice(const Vector4f* inimg, Vector2i map_size, Vector2i img_size, int spixel_size, gSpixelInfo* out_spixel); __global__ void findCenterAssociationDevice(const Vector4f* inimg, const gSpixelInfo* in_spixel_map, Vector2i map_size, Vector2i img_size, int spixel_size, float weight, float max_xy_dist, float max_color_dist, int* out_idx_img); __global__ void updateClusterCenterDevice(const Vector4f* inimg, const int* in_idx_img, Vector2i map_size, Vector2i img_size, int spixel_size, int no_blocks_per_line, gSpixelInfo* accum_map); __global__ void finalizeReductionResultDevice(const gSpixelInfo* accum_map, Vector2i map_size, int no_blocks_per_spixel, gSpixelInfo* spixel_list); __global__ void enforceConnectivityDevice(const int* in_idx_img, Vector2i img_size, int* out_idx_img); __global__ void enforceConnectivityDevice1_2(const int* in_idx_img, Vector2i img_size, int* out_idx_img); SegEngineGPU::SegEngineGPU(const slicSettings& in_settings) : SegEngine(in_settings) { source_img = Ptr<UChar4Image>(new UChar4Image(in_settings.img_size)); cvt_img = Ptr<Float4Image>(new Float4Image(in_settings.img_size)); idx_img = Ptr<IntImage>(new IntImage(in_settings.img_size)); tmp_idx_img = Ptr<IntImage>(new IntImage(in_settings.img_size)); spixel_size = in_settings.spixel_size; int spixel_per_col = (int)ceil((float)in_settings.img_size.x / (float)spixel_size); int spixel_per_row = (int)ceil((float)in_settings.img_size.y / (float)spixel_size); map_size = Vector2i(spixel_per_col, spixel_per_row); spixel_map = Ptr<gSpixelMap>(new gSpixelMap(map_size)); no_grid_per_center = (int)ceil(spixel_size*3.0f / HFS_BLOCK_DIM)*((int)ceil(spixel_size*3.0f / HFS_BLOCK_DIM)); Vector2i accum_size(map_size.x*no_grid_per_center, map_size.y); accum_map = Ptr<gSpixelMap>(new gSpixelMap(accum_size)); // normalizing factors max_color_dist = 15.0f / (1.7321f * 128); max_color_dist *= max_color_dist; max_xy_dist = 1.0f / (2 * spixel_size * spixel_size); } SegEngineGPU::~SegEngineGPU() {} void SegEngineGPU::cvtImgSpace(Ptr<UChar4Image> inimg, Ptr<Float4Image> outimg) { Vector4u* inimg_ptr = inimg->getGpuData(); Vector4f* outimg_ptr = outimg->getGpuData(); dim3 blockSize(HFS_BLOCK_DIM, HFS_BLOCK_DIM); dim3 gridSize = getGridSize(img_size, blockSize); cvtImgSpaceDevice << <gridSize, blockSize >> >(inimg_ptr, img_size, outimg_ptr); } void SegEngineGPU::initClusterCenters() { gSpixelInfo* spixel_list = spixel_map->getGpuData(); Vector4f* img_ptr = cvt_img->getGpuData(); dim3 blockSize(HFS_BLOCK_DIM, HFS_BLOCK_DIM); dim3 gridSize = getGridSize(map_size, blockSize); initClusterCentersDevice << <gridSize, blockSize >> > (img_ptr, map_size, img_size, spixel_size, spixel_list); } void SegEngineGPU::findCenterAssociation() { gSpixelInfo* spixel_list = spixel_map->getGpuData(); Vector4f* img_ptr = cvt_img->getGpuData(); int* idx_ptr = idx_img->getGpuData(); dim3 blockSize(HFS_BLOCK_DIM, HFS_BLOCK_DIM); dim3 gridSize = getGridSize(img_size, blockSize); findCenterAssociationDevice << <gridSize, blockSize >> > (img_ptr, spixel_list, map_size, img_size, spixel_size, slic_settings.coh_weight, max_xy_dist, max_color_dist, idx_ptr); } void SegEngineGPU::updateClusterCenter() { gSpixelInfo* accum_map_ptr = accum_map->getGpuData(); gSpixelInfo* spixel_list_ptr = 
spixel_map->getGpuData(); Vector4f* img_ptr = cvt_img->getGpuData(); int* idx_ptr = idx_img->getGpuData(); int no_blocks_per_line = (int)ceil(spixel_size * 3.0f / HFS_BLOCK_DIM); dim3 blockSize(HFS_BLOCK_DIM, HFS_BLOCK_DIM); dim3 gridSize(map_size.x, map_size.y, no_grid_per_center); updateClusterCenterDevice << <gridSize, blockSize >> > (img_ptr, idx_ptr, map_size, img_size, spixel_size, no_blocks_per_line, accum_map_ptr); dim3 gridSize2(map_size.x, map_size.y); finalizeReductionResultDevice << <gridSize2, blockSize >> > (accum_map_ptr, map_size, no_grid_per_center, spixel_list_ptr); } void SegEngineGPU::enforceConnectivity() { int* idx_ptr = idx_img->getGpuData(); int* tmp_idx_ptr = tmp_idx_img->getGpuData(); dim3 blockSize(HFS_BLOCK_DIM, HFS_BLOCK_DIM); dim3 gridSize = getGridSize(img_size, blockSize); enforceConnectivityDevice << <gridSize, blockSize >> > (idx_ptr, img_size, tmp_idx_ptr); enforceConnectivityDevice << <gridSize, blockSize >> > (tmp_idx_ptr, img_size, idx_ptr); enforceConnectivityDevice1_2 << <gridSize, blockSize >> > (idx_ptr, img_size, tmp_idx_ptr); enforceConnectivityDevice1_2 << <gridSize, blockSize >> > (tmp_idx_ptr, img_size, idx_ptr); } __global__ void cvtImgSpaceDevice(const Vector4u* inimg, Vector2i img_size, Vector4f* outimg) { int idx_x = threadIdx.x + blockIdx.x * blockDim.x; int idx_y = threadIdx.y + blockIdx.y * blockDim.y; if (idx_x >= img_size.x || idx_y >= img_size.y) return; int idx = idx_y*img_size.x + idx_x; rgb2CIELab(inimg[idx], outimg[idx]); } __global__ void initClusterCentersDevice(const Vector4f* inimg, Vector2i map_size, Vector2i img_size, int spixel_size, gSpixelInfo* out_spixel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= map_size.x || y >= map_size.y) return; initClusterCentersShared(inimg, map_size, img_size, spixel_size, x, y, out_spixel); } __global__ void findCenterAssociationDevice(const Vector4f* inimg, const gSpixelInfo* in_spixel_map, Vector2i map_size, Vector2i img_size, int spixel_size, float weight, float max_xy_dist, float max_color_dist, int* out_idx_img) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= img_size.x || y >= img_size.y) return; findCenterAssociationShared(inimg, in_spixel_map, map_size, img_size, spixel_size, weight, x, y, max_xy_dist, max_color_dist, out_idx_img); } __global__ void updateClusterCenterDevice(const Vector4f* inimg, const int* in_idx_img, Vector2i map_size, Vector2i img_size, int spixel_size, int no_blocks_per_line, gSpixelInfo* accum_map) { int local_id = threadIdx.y * blockDim.x + threadIdx.x; __shared__ Float4_ color_shared[HFS_BLOCK_DIM*HFS_BLOCK_DIM]; __shared__ Float2_ xy_shared[HFS_BLOCK_DIM*HFS_BLOCK_DIM]; __shared__ volatile int count_shared[HFS_BLOCK_DIM*HFS_BLOCK_DIM]; __shared__ bool should_add; color_shared[local_id] = Float4_(0, 0, 0, 0); xy_shared[local_id] = Float2_(0, 0); count_shared[local_id] = 0; should_add = false; __syncthreads(); int no_blocks_per_spixel = gridDim.z; int spixel_id = blockIdx.y * map_size.x + blockIdx.x; // compute the relative position in the search window int block_x = blockIdx.z % no_blocks_per_line; int block_y = blockIdx.z / no_blocks_per_line; int x_offset = block_x * HFS_BLOCK_DIM + threadIdx.x; int y_offset = block_y * HFS_BLOCK_DIM + threadIdx.y; if (x_offset < spixel_size * 3 && y_offset < spixel_size * 3) { // compute the start of the search window int x_start = blockIdx.x * spixel_size - spixel_size; int y_start = blockIdx.y * 
spixel_size - spixel_size; int x_img = x_start + x_offset; int y_img = y_start + y_offset; if (x_img >= 0 && x_img < img_size.x && y_img >= 0 && y_img < img_size.y) { int img_idx = y_img * img_size.x + x_img; if (in_idx_img[img_idx] == spixel_id) { color_shared[local_id] = Float4_(inimg[img_idx].x, inimg[img_idx].y, inimg[img_idx].z, inimg[img_idx].w); xy_shared[local_id] = Float2_(x_img, y_img); count_shared[local_id] = 1; should_add = true; } } } __syncthreads(); if (should_add) { if (local_id < 128) { color_shared[local_id] += color_shared[local_id + 128]; xy_shared[local_id] += xy_shared[local_id + 128]; count_shared[local_id] += count_shared[local_id + 128]; } __syncthreads(); if (local_id < 64) { color_shared[local_id] += color_shared[local_id + 64]; xy_shared[local_id] += xy_shared[local_id + 64]; count_shared[local_id] += count_shared[local_id + 64]; } __syncthreads(); if (local_id < 32) { color_shared[local_id] += color_shared[local_id + 32]; color_shared[local_id] += color_shared[local_id + 16]; color_shared[local_id] += color_shared[local_id + 8]; color_shared[local_id] += color_shared[local_id + 4]; color_shared[local_id] += color_shared[local_id + 2]; color_shared[local_id] += color_shared[local_id + 1]; xy_shared[local_id] += xy_shared[local_id + 32]; xy_shared[local_id] += xy_shared[local_id + 16]; xy_shared[local_id] += xy_shared[local_id + 8]; xy_shared[local_id] += xy_shared[local_id + 4]; xy_shared[local_id] += xy_shared[local_id + 2]; xy_shared[local_id] += xy_shared[local_id + 1]; count_shared[local_id] += count_shared[local_id + 32]; count_shared[local_id] += count_shared[local_id + 16]; count_shared[local_id] += count_shared[local_id + 8]; count_shared[local_id] += count_shared[local_id + 4]; count_shared[local_id] += count_shared[local_id + 2]; count_shared[local_id] += count_shared[local_id + 1]; } } __syncthreads(); if (local_id == 0) { int accum_map_idx = spixel_id * no_blocks_per_spixel + blockIdx.z; accum_map[accum_map_idx].center = Vector2f(xy_shared[0].x, xy_shared[0].y); accum_map[accum_map_idx].color_info = Vector4f(color_shared[0].x, color_shared[0].y, color_shared[0].z, color_shared[0].w); accum_map[accum_map_idx].num_pixels = count_shared[0]; } } __global__ void finalizeReductionResultDevice(const gSpixelInfo* accum_map, Vector2i map_size, int no_blocks_per_spixel, gSpixelInfo* spixel_list) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= map_size.x || y >= map_size.y) return; finalizeReductionResultShared(accum_map, map_size, no_blocks_per_spixel, x, y, spixel_list); } __global__ void enforceConnectivityDevice(const int* in_idx_img, Vector2i img_size, int* out_idx_img) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= img_size.x || y >= img_size.y) return; supressLocalLable(in_idx_img, img_size, x, y, out_idx_img); } __global__ void enforceConnectivityDevice1_2(const int* in_idx_img, Vector2i img_size, int* out_idx_img) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= img_size.x || y >= img_size.y) return; supressLocalLable2(in_idx_img, img_size, x, y, out_idx_img); } }}}} // #endif
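// ---------------------------------------------------------------------------
// Hedged sketch (not part of the file above): updateClusterCenterDevice uses
// a classic shared-memory tree reduction (halve the active threads, sync,
// repeat), with an unsynchronized tail below 32 lanes that relies on old
// warp-synchronous semantics (hence the volatile count_shared). A minimal,
// modern-safe version of the same pattern, syncing at every step, looks like
// this (k_block_sum is a hypothetical name):
__global__ void k_block_sum(const float *in, float *block_sums, int n) {
    __shared__ float s[256]; // assumes blockDim.x == 256
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    s[tid] = (i < n) ? in[i] : 0.f;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) s[tid] += s[tid + stride];
        __syncthreads(); // unlike the warp-synchronous tail above, always sync
    }
    if (tid == 0) block_sums[blockIdx.x] = s[0]; // one partial sum per block
}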
using namespace std; //============================================================================== // // Error handling helpers // //============================================================================== static void stdDebugOutput(const string &msg) { cout << msg; } static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput; void ncvDebugOutput(const string &msg) { debugOutputHandler(msg); } void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func) { debugOutputHandler = func; } //============================================================================== // // Memory wrappers and helpers // //============================================================================== Ncv32u alignUp(Ncv32u what, Ncv32u alignment) { Ncv32u alignMask = alignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u res = (what + alignMask) & inverseAlignMask; return res; } void NCVMemPtr::clear() { ptr = NULL; memtype = NCVMemoryTypeNone; } void NCVMemSegment::clear() { begin.clear(); size = 0; } NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, cudaStream_t cuStream) { NCVStatus ncvStat; switch (dstType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: switch (srcType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: memcpy(dst, src, sz); ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; case NCVMemoryTypeDevice: switch (srcType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyHostToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } return ncvStat; } NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType, const void *src, Ncv32u srcPitch, NCVMemoryType srcType, Ncv32u widthbytes, Ncv32u height, cudaStream_t cuStream) { NCVStatus ncvStat; switch (dstType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: switch (srcType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: for (Ncv32u i=0; i<height; i++) { memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes); } ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; case NCVMemoryTypeDevice: switch (srcType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: if (cuStream != 0) { 
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } return ncvStat; } //=================================================================== // // NCVMemStackAllocator class members implementation // //=================================================================== NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment) : currentSize(0), _maxSize(0), allocBegin(NULL), begin(NULL), end(NULL), _memType(NCVMemoryTypeNone), _alignment(alignment), bReusesMemory(false) { NcvBool bProperAlignment = (alignment & (alignment-1)) == 0; ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2"); } NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment, void *reusePtr) : currentSize(0), _maxSize(0), allocBegin(NULL), _memType(memT), _alignment(alignment) { NcvBool bProperAlignment = (alignment & (alignment-1)) == 0; ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2"); ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type"); allocBegin = NULL; if (reusePtr == NULL && capacity != 0) { bReusesMemory = false; switch (memT) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(cudaMalloc(&allocBegin, capacity), ); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(cudaMallocHost(&allocBegin, capacity), ); break; case NCVMemoryTypeHostPageable: allocBegin = (Ncv8u *)malloc(capacity); break; default:; } } else { bReusesMemory = true; allocBegin = (Ncv8u *)reusePtr; } if (capacity == 0) { allocBegin = (Ncv8u *)(0x1); } if (!isCounting()) { begin = allocBegin; end = begin + capacity; } } NCVMemStackAllocator::~NCVMemStackAllocator() { if (allocBegin != NULL) { ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction"); if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1))) { switch (_memType) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(cudaFree(allocBegin), ); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(cudaFreeHost(allocBegin), ); break; case NCVMemoryTypeHostPageable: free(allocBegin); break; default:; } } allocBegin = NULL; } } NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size) { seg.clear(); ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); size = alignUp(size, this->_alignment); this->currentSize += size; this->_maxSize = std::max(this->_maxSize, this->currentSize); if (!isCounting()) { size_t availSize = end - begin; ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY); } seg.begin.ptr = begin; seg.begin.memtype = this->_memType; seg.size = size; begin += size; return NCV_SUCCESS; } NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg) { ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); 
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER); currentSize -= seg.size; begin -= seg.size; seg.clear(); ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC); return NCV_SUCCESS; } NcvBool NCVMemStackAllocator::isInitialized(void) const { return ((this->_alignment & (this->_alignment-1)) == 0) && isCounting() || this->allocBegin != NULL; } NcvBool NCVMemStackAllocator::isCounting(void) const { return this->_memType == NCVMemoryTypeNone; } NCVMemoryType NCVMemStackAllocator::memType(void) const { return this->_memType; } Ncv32u NCVMemStackAllocator::alignment(void) const { return this->_alignment; } size_t NCVMemStackAllocator::maxSize(void) const { return this->_maxSize; } //=================================================================== // // NCVMemNativeAllocator class members implementation // //=================================================================== NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment) : currentSize(0), _maxSize(0), _memType(memT), _alignment(alignment) { ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", ); } NCVMemNativeAllocator::~NCVMemNativeAllocator() { ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak"); } NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size) { seg.clear(); ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); switch (this->_memType) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(cudaMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(cudaMallocHost(&seg.begin.ptr, size), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPageable: seg.begin.ptr = (Ncv8u *)malloc(size); break; default:; } this->currentSize += alignUp(size, this->_alignment); this->_maxSize = std::max(this->_maxSize, this->currentSize); seg.begin.memtype = this->_memType; seg.size = size; return NCV_SUCCESS; } NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg) { ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(currentSize >= alignUp(seg.size, this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC); currentSize -= alignUp(seg.size, this->_alignment); switch (this->_memType) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(cudaFree(seg.begin.ptr), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(cudaFreeHost(seg.begin.ptr), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPageable: free(seg.begin.ptr); break; default:; } seg.clear(); return NCV_SUCCESS; } NcvBool NCVMemNativeAllocator::isInitialized(void) const { return (this->_alignment != 0); } NcvBool NCVMemNativeAllocator::isCounting(void) const { return false; } NCVMemoryType NCVMemNativeAllocator::memType(void) const { return this->_memType; } Ncv32u NCVMemNativeAllocator::alignment(void) const { return this->_alignment; } size_t NCVMemNativeAllocator::maxSize(void) const { return this->_maxSize; } //=================================================================== // // Time and timer routines // //=================================================================== typedef struct _NcvTimeMoment 
NcvTimeMoment; #if defined(_WIN32) || defined(_WIN64) #include <Windows.h> typedef struct _NcvTimeMoment { LONGLONG moment, freq; } NcvTimeMoment; static void _ncvQueryMoment(NcvTimeMoment *t) { QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq)); QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment)); } double _ncvMomentToMicroseconds(NcvTimeMoment *t) { return 1000000.0 * t->moment / t->freq; } double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2) { return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq); } double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2) { return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq); } #elif defined(__GNUC__) #include <sys/time.h> typedef struct _NcvTimeMoment { struct timeval tv; struct timezone tz; } NcvTimeMoment; void _ncvQueryMoment(NcvTimeMoment *t) { gettimeofday(& t->tv, & t->tz); } double _ncvMomentToMicroseconds(NcvTimeMoment *t) { return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec; } double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2) { return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec); } double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2) { return ((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000; } #endif //#if defined(_WIN32) || defined(_WIN64) struct _NcvTimer { NcvTimeMoment t1, t2; }; NcvTimer ncvStartTimer(void) { struct _NcvTimer *t; t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer)); _ncvQueryMoment(&t->t1); return t; } double ncvEndQueryTimerUs(NcvTimer t) { double res; _ncvQueryMoment(&t->t2); res = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2); free(t); return res; } double ncvEndQueryTimerMs(NcvTimer t) { double res; _ncvQueryMoment(&t->t2); res = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2); free(t); return res; } //=================================================================== // // Operations with rectangles // //=================================================================== //from OpenCV void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights); NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses, Ncv32u &numHypotheses, Ncv32u minNeighbors, Ncv32f intersectEps, NCVVector<Ncv32u> *hypothesesWeights) { ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable || hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); if (hypothesesWeights != NULL) { ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable || hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); } if (numHypotheses == 0) { return NCV_SUCCESS; } std::vector<NcvRect32u> rects(numHypotheses); memcpy(&rects[0], hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u)); std::vector<Ncv32u> weights; if (hypothesesWeights != NULL) { groupRectangles(rects, minNeighbors, intersectEps, &weights); } else { groupRectangles(rects, minNeighbors, intersectEps, NULL); } numHypotheses = (Ncv32u)rects.size(); if (numHypotheses > 0) { memcpy(hypotheses.ptr(), &rects[0], numHypotheses * sizeof(NcvRect32u)); } if (hypothesesWeights != NULL) { memcpy(hypothesesWeights->ptr(), &weights[0], numHypotheses * sizeof(Ncv32u)); } return NCV_SUCCESS; } template <class T> static NCVStatus drawRectsWrapperHost(T *h_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *h_rects, Ncv32u numRects, T color) { 
ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR); ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID); ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP); ncvAssertReturn(numRects != 0, NCV_SUCCESS); ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID); for (Ncv32u i=0; i<numRects; i++) { NcvRect32u rect = h_rects[i]; if (rect.x < dstWidth) { for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++) { h_dst[i*dstStride+rect.x] = color; } } if (rect.x+rect.width-1 < dstWidth) { for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++) { h_dst[i*dstStride+rect.x+rect.width-1] = color; } } if (rect.y < dstHeight) { for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++) { h_dst[rect.y*dstStride+j] = color; } } if (rect.y + rect.height - 1 < dstHeight) { for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++) { h_dst[(rect.y+rect.height-1)*dstStride+j] = color; } } } return NCV_SUCCESS; } NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *h_rects, Ncv32u numRects, Ncv8u color) { return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color); } NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *h_rects, Ncv32u numRects, Ncv32u color) { return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color); } const Ncv32u NUMTHREADS_DRAWRECTS = 32; const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5; template <class T> __global__ void drawRects(T *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, T color) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; if (blockId >= numRects * 4) /* >=, not >: blockId == numRects * 4 would read d_rects[numRects] out of bounds when the grid is padded */ { return; } NcvRect32u curRect = d_rects[blockId >> 2]; NcvBool bVertical = blockId & 0x1; NcvBool bTopLeft = blockId & 0x2; Ncv32u pt0x, pt0y; if (bVertical) { Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2; pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1; pt0y = curRect.y; if (pt0x < dstWidth) { for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++) { Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x; if (ptY < pt0y + curRect.height && ptY < dstHeight) { d_dst[ptY * dstStride + pt0x] = color; } } } } else { Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2; pt0x = curRect.x; pt0y = bTopLeft ?
curRect.y : curRect.y + curRect.height - 1; if (pt0y < dstHeight) { for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++) { Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x; if (ptX < pt0x + curRect.width && ptX < dstWidth) { d_dst[pt0y * dstStride + ptX] = color; } } } } } template <class T> static NCVStatus drawRectsWrapperDevice(T *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, T color, cudaStream_t cuStream) { ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR); ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID); ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP); ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID); if (numRects == 0) { return NCV_SUCCESS; } dim3 grid(numRects * 4); dim3 block(NUMTHREADS_DRAWRECTS); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } drawRects<T><<<grid, block>>>(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); return NCV_SUCCESS; } NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, Ncv8u color, cudaStream_t cuStream) { return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream); } NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, Ncv32u color, cudaStream_t cuStream) { return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream); }
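//==============================================================================
//
// Hedged usage sketch (not part of the file above): NCVMemStackAllocator is
// built for two-pass sizing. A counting allocator (alignment-only ctor, so
// _memType == NCVMemoryTypeNone) records the peak stack size without touching
// memory; a second allocator of exactly maxSize() then serves the real run.
// runPipeline is a hypothetical stand-in for code that allocs/deallocs
// NCVMemSegments in LIFO order through the INCVMemAllocator interface.
//
//==============================================================================
static NCVStatus runPipeline(INCVMemAllocator &alloc)
{
    NCVMemSegment seg;
    NCVStatus st = alloc.alloc(seg, 1024 * 1024); // hypothetical workload size
    if (st != NCV_SUCCESS) return st;
    // ... kernels would use seg.begin.ptr here (it is NULL in counting mode) ...
    return alloc.dealloc(seg);
}
static NCVStatus sizedRun(void)
{
    // pass 1: count only, no memory is touched
    NCVMemStackAllocator counter(128);
    NCVStatus st = runPipeline(counter);
    if (st != NCV_SUCCESS) return st;
    // pass 2: one device allocation of exactly the measured peak size
    NCVMemStackAllocator device(NCVMemoryTypeDevice, counter.maxSize(), 128, NULL);
    ncvAssertReturn(device.isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
    return runPipeline(device);
}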
#include "gtest/gtest.h" #include "keops_includes.h" using namespace keops; namespace { TEST(tensordot, zeros){ __TYPE__ FA[4] = {4.4, 5.4, 6.2, 6.5}; __TYPE__ FB[4] = {1.4, 1.2, 1.5, 1.22}; auto x = Vi(0,4); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4); // y is the third variable and represents a 3D vector, "j"-indexed. auto f = TensorDot(x,y, Ind(4), Ind(4), Ind(), Ind()); __TYPE__ out_keops[16]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f,0),1, 1, out_keops, FA, FB); auto f_legacy = TensorProd(x,y); __TYPE__ out_legacy[16]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f_legacy,0),1, 1, out_legacy, FA, FB); __TYPE__ s2d = 0; for(int i=0; i<16; i++) { // std::cout << out_keops[i] << " " << out_legacy[i] << std::endl; s2d += abs(out_legacy[i] - out_keops[i]); } EXPECT_LE(s2d,5e-6); } TEST(tensordot, one){ auto x = Vi(0,2*2*2); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,2*2); // y is the third variable and represents a 3D vector, "j"-indexed. auto f = TensorDot(x,y, Ind(2,2,2), Ind(2,2), Ind(2), Ind(0)); auto Sum_f = Sum_Reduction(f,0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FA[8] = {4.4, 5.4, 6.2, 6.5, 7.5, 6.1, 8.7, 1.3}; __TYPE__ FB[4] = {1.4, 1.2, 1.5, 1.22}; __TYPE__ out_keops[8]; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, out_keops, FA, FB); double out_loop[8] = {0, 0, 0, 0, 0, 0, 0, 0}; #if C_CONTIGUOUS size_t q =0 ; for (size_t i = 0; i < 2; i++) for (size_t j = 0; j < 2; j++) { for (size_t k = 0; k < 2; k++, q++) for (size_t l = 0; l < 2; l++) { // size_t kda = 4 * i + 2 * j + l; // size_t kdb = l * 2 + k; // size_t I = 4 * i + 2 * j + k; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; //out_loop[4 * i + 2 * j + k] += FA[4 * i + 2 * j + l] * FB[l * 2 + k]; out_loop[q] += FA[4 * i + 2 * j + l] * FB[l * 2 + k]; } } #else for (size_t i = 0; i < 2; i++) for (size_t j = 0; j < 2; j++) { for (size_t k = 0; k < 2; k++) for (size_t l = 0; l < 2; l++) { // size_t kda = 4 * i + 2 * j + l; // size_t kdb = l * 2 + k; // size_t I = 4 * i + 2 * j + k; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[4 * k + 2 * j + i] += FA[4 * l + 2 * j + i] * FB[k * 2 + l]; } } #endif __TYPE__ s2d = 0; for(int i=0; i<8; i++) { // std::cout << out_keops[i] << " " << out_loop[i] << std::endl; s2d += abs(out_keops[i] - out_loop[i]); } EXPECT_LE(s2d,5e-6); } /* import numpy a = np.array([4.4, 5.4, 6.2, 6.5, 7.5, 6.1, 8.7, 1.3]).reshape(2,2,2) b = np.array([1.4, 1.2, 1.5, 1.22]).reshape(2,2) np.tensordot(a, b, axes=([2],[0])).flatten() # array([14.26 , 11.868, 18.43 , 15.37 , 19.65 , 16.442, 14.13 , 12.026]) import numpy a = np.array([4.4, 5.4, 6.2, 6.5, 7.5, 6.1, 8.7, 1.3]).reshape(2,2,2, order='F') b = np.array([1.4, 1.2, 1.5, 1.22]).reshape(2,2, order='F') np.tensordot(a, b, axes=([2],[0])).flatten(order='F') # array([15.16 , 14.88 , 19.12 , 10.66 , 15.75 , 15.542, 19.914, 11.336]) */ TEST(tensordot, two){ auto x = Vi(0,2*2*2); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,2*2); // y is the third variable and represents a 3D vector, "j"-indexed. 
auto f = TensorDot(x,y, Ind(2,2,2), Ind(2,2), Ind(1,2), Ind(0,1)); auto Sum_f = Sum_Reduction(f,0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FA[8] = {4.4, 5.4, 6.2, 6.5, 7.5, 6.1, 8.7, 1.3}; __TYPE__ FB[4] = {1.4, 1.2, 1.5, 1.22}; __TYPE__ out_keops[2]; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, out_keops, FA, FB); __TYPE__ out_loop[2] = {0, 0}; #if C_CONTIGUOUS size_t q = 0; for (size_t i = 0; i < 2; i++) { size_t qq = 0; for (size_t k = 0; k < 2; k++) for (size_t l = 0; l < 2; l++, q++, qq++) { // out_loop[i] += FA[4 * i + 2 * k + l] * FB[k * 2 + l]; out_loop[i] += FA[q] * FB[qq]; } } #else for (size_t i = 0; i < 2; i++) for (size_t k = 0; k < 2; k++) for (size_t l = 0; l < 2; l++) out_loop[i] += FA[4 * l + 2 * k + i] * FB[l * 2 + k]; #endif __TYPE__ s2d = 0; for(int i=0; i<2; i++) { // std::cout << std::setprecision(20) << out_keops[i] << " " << std::setprecision(20) << out_loop[i] << std::endl; s2d += abs(out_keops[i] - out_loop[i]); } EXPECT_LE(s2d,5e-6); } /* import numpy a = np.array([4.4, 5.4, 6.2, 6.5, 7.5, 6.1, 8.7, 1.3]).reshape(2,2,2) b = np.array([1.4, 1.2, 1.5, 1.22]).reshape(2,2) np.tensordot(a, b, axes=([1,2],[0,1])).flatten() # array([29.87 , 32.456]) import numpy a = np.array([4.4, 5.4, 6.2, 6.5, 7.5, 6.1, 8.7, 1.3]).reshape(2,2,2, order='F') b = np.array([1.4, 1.2, 1.5, 1.22]).reshape(2,2, order='F') np.tensordot(a, b, axes=([1,2],[0,1])).flatten(order='F') # array([35.464, 26.096]) */ TEST(tensordot, three){ auto x = Vi(0,5*4*3); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4*3*2); // y is the third variable and represents a 3D vector, "j"-indexed. auto f = TensorDot(x,y, Ind(5, 4, 3), Ind(4, 3, 2), Ind(1, 2), Ind(0, 1)); auto Sum_f = Sum_Reduction(f,0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FAA[60] = {7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8}; __TYPE__ FBB[24] = {6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8, 5, 4, 3, 2, 3, 8, 5, 7}; __TYPE__ out_keops[10]; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, out_keops, FAA, FBB); __TYPE__ out_loop[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if C_CONTIGUOUS for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 2; j++) for (size_t k = 0; k < 4; k++) for (size_t l = 0; l < 3; l++) { // size_t kda = 12 * i + 3 * k + l; // size_t kdb = 6 * k + 2 * l + j; // size_t I = 2 * i + j; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[2 * i + j] += FAA[12 * i + 3 * k + l] * FBB[6 * k + 2 * l + j]; } #else for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 2; j++) for (size_t k = 0; k < 4; k++) for (size_t l = 0; l < 3; l++) { // size_t kda = 20 * l + 5 * k + i; // size_t kdb = 12 * j + 4 * l + k; // size_t I = 5 * j + i; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[5 * j + i] += FAA[20 * l + 5 * k + i] * FBB[12 * j + 4 * l + k]; } #endif __TYPE__ s2d = 0; for(int i=0; i<10; i++) { // std::cout << out_keops[i] << " " << out_loop[i] << std::endl; s2d += abs(out_keops[i] - out_loop[i]); } EXPECT_LE(s2d,5e-6); } /* import numpy a = np.array([7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8.]).reshape(5, 4, 3) 
b = np.array([6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8, 5, 4, 3, 2, 3, 8, 5, 7.]).reshape(4, 3, 2) np.tensordot(a, b, axes=([1, 2], [0, 1])).flatten() # array([357., 499., 226., 270., 160., 328., 256., 386., 274., 401.]) import numpy a = np.array([7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8.]).reshape(5, 4, 3, order='F') b = np.array([6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8, 5, 4, 3, 2, 3, 8, 5, 7.]).reshape(4, 3, 2, order='F') np.tensordot(a, b, axes=([1, 2], [0, 1])).flatten(order='F') #array([412., 315., 290., 259., 311., 389., 306., 256., 236., 288.]) */ TEST(tensordot, four){ auto x = Vi(0,5*4*3); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,5*4*3); // y is the third variable and represents a 3D vector, "j"-indexed. auto f = TensorDot(x,y, Ind(5, 4, 3), Ind(5, 4, 3), Ind(0, 1, 2), Ind(0, 1, 2)); auto Sum_f = Sum_Reduction(f,0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FAA[60] = {7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8}; __TYPE__ out_keops; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, &out_keops, FAA, FAA); __TYPE__ out_loop = 0; for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 4; j++) for (size_t k = 0; k < 3; k++) out_loop += FAA[12 * i + 3 * j + k] * FAA[12 * i + 3 * j + k]; __TYPE__ s2d = abs(out_keops - out_loop); EXPECT_LE(s2d,5e-6); } /* import numpy a = np.array([7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8.]) (a * a).sum() # 1968 */ TEST(tensordot, five){ auto x = Vi(0,4*5*3); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,3*4*2); // y is the third variable and represents a 3D vector, "j"-indexed. 
auto f = TensorDot(x,y, Ind(4, 5, 3), Ind(3, 4, 2), Ind(0, 2), Ind(1, 0)); auto Sum_f = Sum_Reduction(f,0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FAA[60] = {7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8}; __TYPE__ FBB[24] = {6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8, 5, 4, 3, 2, 3, 8, 5, 7}; __TYPE__ out_keops[10]; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, out_keops, FAA, FBB); __TYPE__ out_loop[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if C_CONTIGUOUS for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 2; j++) for (size_t k = 0; k < 4; k++) for (size_t l = 0; l < 3; l++) { // size_t kda = 15 * k + 3 * i + l; // size_t kdb = 8 * l + 2 * k + j; // size_t I = 2 * i + j; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[2 * i + j] += FAA[15 * k + 3 * i + l] * FBB[8 * l + 2 * k + j]; } #else for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 2; j++) for (size_t k = 0; k < 4; k++) for (size_t l = 0; l < 3; l++) { // size_t kda = 20 * k + 4 * i + k; // size_t kdb = 12 * j + 3 * k + l; // size_t I = 5 * j + i; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[5 * j + i] += FAA[20 * l + 4 * i + k] * FBB[12 * j + 3 * k + l]; } #endif __TYPE__ s2d = 0; for(int i=0; i<10; i++) { // std::cout << out_keops[i] << " " << out_loop[i] << std::endl; s2d += abs(out_keops[i] - out_loop[i]); } EXPECT_LE(s2d,5e-6); } /* import numpy as np a= np.array([[[7, 9, 9], [5, 8, 3], [6, 9, 6], [0, 5, 7]], [[3, 4, 3], [5, 3, 3],[0, 9, 9],[6, 0, 3]],[[3, 7, 0],[8, 6, 0],[6, 1, 3],[1, 4, 7]],[[3, 9, 8],[8, 3, 7],[2, 3, 1],[9, 5, 7]],[[7, 5, 9],[7, 0, 1],[9, 7, 5],[0, 3, 8]]]).flatten().reshape(4, 5, 3) b = np.array([[[6, 4],[2, 9],[9, 5]],[[1, 6],[7, 8],[2, 4]],[[1, 9],[7, 8],[5, 4]],[[3, 2],[3, 8],[5, 7]]]).flatten().reshape(3,4,2) np.tensordot(a,b,axes=([0,2],[1,0])).flatten() # array([318, 405, 267, 392, 222, 389, 269, 391, 174, 277]) import numpy as np a= np.array([[[7, 9, 9], [5, 8, 3], [6, 9, 6], [0, 5, 7]], [[3, 4, 3], [5, 3, 3],[0, 9, 9],[6, 0, 3]],[[3, 7, 0],[8, 6, 0],[6, 1, 3],[1, 4, 7]],[[3, 9, 8],[8, 3, 7],[2, 3, 1],[9, 5, 7]],[[7, 5, 9],[7, 0, 1],[9, 7, 5],[0, 3, 8]]]).flatten().reshape(4, 5, 3, order='F') b = np.array([[[6, 4],[2, 9],[9, 5]],[[1, 6],[7, 8],[2, 4]],[[1, 9],[7, 8],[5, 4]],[[3, 2],[3, 8],[5, 7]]]).flatten().reshape(3,4,2, order='F') np.tensordot(a,b,axes=([0,2],[1,0])).flatten(order='F') # array([335, 354, 289, 252, 337, 348, 331, 293, 239, 327]) */ TEST(tensordot, six){ auto x = Vi(0,4*5*3); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,3*4*2); // y is the third variable and represents a 3D vector, "j"-indexed. 
auto f = TensorDot(x,y, Ind(4, 5, 3), Ind(3, 4, 2), Ind(2, 0), Ind(0, 1)); auto Sum_f = Sum_Reduction(f,0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FAA[60] = {7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8}; __TYPE__ FBB[24] = {6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8, 5, 4, 3, 2, 3, 8, 5, 7}; __TYPE__ out_keops[10]; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, out_keops, FAA, FBB); __TYPE__ out_loop[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if C_CONTIGUOUS for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 2; j++) for (size_t k = 0; k < 3; k++) for (size_t l = 0; l < 4; l++) { // size_t kda = 15 * l + 3 * i + k; // size_t kdb = 8 * k + 2 * l + j; // size_t I = 2 * i + j; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[2 * i + j] += FAA[15 * l + 3 * i + k] * FBB[8 * k + 2 * l + j]; } #else for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 2; j++) for (size_t k = 0; k < 3; k++) for (size_t l = 0; l < 4; l++) { // size_t kda = 20 * k + 4 * i + l; // size_t kdb = 12 * j + 3 * l + k; // size_t I = 5 * j + i; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[5 * j + i] += FAA[20 * k + 4 * i + l] * FBB[12 * j + 3 * l + k]; } #endif __TYPE__ s2d = 0; for(int i=0; i<10; i++) { // std::cout << out_keops[i] << " " << out_loop[i] << std::endl; s2d += abs(out_keops[i] - out_loop[i]); } EXPECT_LE(s2d,5e-6); } /* import numpy as np a = np.array([7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8]).reshape(4, 5, 3) b = np.array([6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8, 5, 4, 3, 2, 3, 8, 5, 7]).reshape(3, 4, 2) np.tensordot(a,b,axes=([2,0],[0,1])).flatten() # array([318, 405, 267, 392, 222, 389, 269, 391, 174, 277]) import numpy as np a = np.array([7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8]).reshape(4, 5, 3, order='F') b = np.array([6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8, 5, 4, 3, 2, 3, 8, 5, 7]).reshape(3, 4, 2, order='F') np.tensordot(a,b,axes=([2,0],[0,1])).flatten(order='F') # array([335, 354, 289, 252, 337, 348, 331, 293, 239, 327]) */ TEST(tensordot, seven){ auto x = Vi(0,4*5*3); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,3*4*2); // y is the third variable and represents a 3D vector, "j"-indexed. 
auto f = TensorDot(x,y, Ind(4, 5, 3), Ind(3, 4, 2), Ind(0), Ind(1)); auto Sum_f = Sum_Reduction(f,0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FAA[60] = {7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8}; __TYPE__ FBB[24] = {6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8, 5, 4, 3, 2, 3, 8, 5, 7}; __TYPE__ out_keops[90]; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, out_keops, FAA, FBB); __TYPE__ out_loop[90] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if C_CONTIGUOUS for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 3; j++) for (size_t k = 0; k < 3; k++) for (size_t l = 0; l < 2; l++) for (size_t m = 0; m < 4; m++) { // size_t kda = 15 * l + 3 * i + k; // size_t kdb = 8 * k + 2 * l + j; // size_t I = 2 * i + j; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[18 * i + 6 * j + 2 * k + l] += FAA[15 * m + 3 * i + j] * FBB[8 * k + 2 * m + l]; } #else for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 3; j++) for (size_t k = 0; k < 3; k++) for (size_t l = 0; l < 2; l++) for (size_t m = 0; m < 4; m++) { // size_t I = 45 * l + 15 * k + 5 * j + i; // size_t kda = 20 * j + 4 * i + m; // size_t kdb = 12 * l + 3 * m + k; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[45 * l + 15 * k + 5 * j + i] += FAA[20 * j + 4 * i + m] * FBB[12 * l + 3 * m + k]; } #endif __TYPE__ s2d = 0; for(int i=0; i<90; i++) { // std::cout << out_keops[i] << " " << out_loop[i] << std::endl; s2d += abs(out_keops[i] - out_loop[i]); } EXPECT_LE(s2d,5e-6); } /* import numpy as np a= np.array([[[7, 9, 9], [5, 8, 3], [6, 9, 6], [0, 5, 7]], [[3, 4, 3], [5, 3, 3],[0, 9, 9],[6, 0, 3]],[[3, 7, 0],[8, 6, 0],[6, 1, 3],[1, 4, 7]], [[3, 9, 8],[8, 3, 7],[2, 3, 1],[9, 5, 7]],[[7, 5, 9],[7, 0, 1],[9, 7, 5],[0, 3, 8]]]).flatten().reshape(4, 5, 3) b = np.array([[[6, 4],[2, 9],[9, 5]],[[1, 6],[7, 8],[2, 4]],[[1, 9],[7, 8],[5, 4]],[[3, 2],[3, 8],[5, 7]]]).flatten().reshape(3,4,2) np.tensordot(a,b,axes=([0],[1])).flatten() # array([115, 157, 128, 202, 113, 149, 74, 98, 105, 133, 82, 85, 94, # 120, 121, 167, 98, 115, 46, 67, 85, 105, 63, 77, 107, 163, # 113, 176, 104, 117, 108, 182, 109, 195, 108, 149, 82, 135, 106, # 155, 92, 109, 135, 81, 72, 153, 72, 108, 115, 97, 63, 140, # 68, 101, 87, 121, 77, 156, 78, 133, 78, 140, 101, 151, 90, # 107, 110, 93, 91, 159, 81, 119, 52, 94, 39, 74, 45, 44, # 66, 103, 64, 107, 62, 73, 35, 65, 78, 97, 58, 76]) import numpy as np aa= np.array([[[7, 9, 9], [5, 8, 3], [6, 9, 6], [0, 5, 7]], [[3, 4, 3], [5, 3, 3],[0, 9, 9],[6, 0, 3]],[[3, 7, 0],[8, 6, 0],[6, 1, 3],[1, 4, 7]], [[3, 9, 8],[8, 3, 7],[2, 3, 1],[9,5, 7]],[[7, 5, 9],[7, 0, 1],[9, 7, 5],[0, 3, 8]]]).flatten().reshape(4, 5, 3, order='F') bb = np.array([[[6, 4],[2, 9],[9, 5]],[[1, 6],[7, 8],[2, 4]],[[1, 9],[7, 8],[5, 4]],[[3, 2],[3, 8],[5, 7]]]).flatten().reshape(3,4,2, order='F') np.tensordot(aa,bb,axes=([0],[1])).flatten(order='F') # array([172, 153, 97, 97, 117, 132, 145, 50, 87, 171, 107, 148, 152, # 74, 97, 173, 113, 68, 76, 57, 96, 91, 62, 59, 157, 93, # 129, 141, 77, 54, 142, 109, 75, 67, 57, 60, 73, 58, 67, # 
139, 67, 110, 130, 96, 63, 146, 122, 77, 84, 99, 81, 123, # 32, 79, 163, 89, 144, 130, 91, 78, 151, 144, 99, 78, 87, # 126, 102, 71, 75, 128, 81, 99, 141, 58, 91, 147, 149, 106, # 81, 96, 108, 105, 67, 86, 137, 76, 107, 145, 80, 100]) */ TEST(tensordot, height){ auto x = Vi(0,4); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4); // y is the third variable and represents a 3D vector, "j"-indexed. auto f = TensorDot(x,y, Ind(4), Ind(4), Ind(), Ind()); auto Sum_f = Sum_Reduction(f,0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FA[4] = {4.4, 5.4, 6.2, 6.5}; __TYPE__ FB[4] = {1.4, 1.2, 1.5, 1.22}; __TYPE__ out_keops[16]; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, out_keops, FA, FB); double out_loop[16]; #if C_CONTIGUOUS size_t q =0 ; for (size_t i = 0; i < 4; i++) { for (size_t j = 0; j < 4; j++, q++) { out_loop[q] = FA[i] * FB[j]; } } #else for (size_t i = 0; i < 4; i++) { for (size_t j = 0; j < 4; j++) { out_loop[4 * j + i] = FA[i] * FB[j]; } } #endif __TYPE__ s2d = 0; for(int i=0; i<16; i++) { // std::cout << out_keops[i] << " " << out_loop[i] << std::endl; s2d += abs(out_keops[i] - out_loop[i]); } EXPECT_LE(s2d,5e-6); } /* import numpy as np a = np.array([4.4, 5.4, 6.2, 6.5]) b = np.array([1.4, 1.2, 1.5, 1.22]) (a[:,None] @ b[None,:]).flatten() import numpy as np a = np.array([4.4, 5.4, 6.2, 6.5]) b = np.array([1.4, 1.2, 1.5, 1.22]) (a[:,None] @ b[None,:]).flatten(order='F') */ TEST(tensordot, nine){ __TYPE__ FA[4] = {4.4, 5.4, 6.2, 6.5}; __TYPE__ FB[4] = {1.4, 1.2, 1.5, 1.22}; __TYPE__ XI[16] = {6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8}; auto x = Vi(0,4); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4); // y is the third variable and represents a 3D vector, "j"-indexed. auto xi = Vj(2,16); // y is the third variable and represents a 3D vector, "j"-indexed. auto f = Grad(TensorDot(x,y, Ind(4), Ind(4), Ind(), Ind()),x,xi); __TYPE__ out_keops[4]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f,0),1, 1, out_keops, FA, FB, XI); auto f_legacy = Grad(TensorProd(x,y),x,xi); __TYPE__ out_legacy[4]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f_legacy,0),1, 1, out_legacy, FA, FB, XI); __TYPE__ s2d = 0; for(int i=0; i<4; i++) { // std::cout << out_keops[i] << " " << out_legacy[i] << std::endl; s2d += abs(out_legacy[i] - out_keops[i]); } EXPECT_LE(s2d,5e-6); } /* import torch a = torch.tensor([4.4, 5.4, 6.2, 6.5], requires_grad=True) b = torch.tensor([1.4, 1.2, 1.5, 1.22], requires_grad=True) c = a[:,None] @ b[None,:] xi = torch.tensor([6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8.]).reshape(4,4) torch.autograd.grad(c, a, xi) # (tensor([27.1800, 27.4200, 27.2800, 32.4600]), # Torch does not support order='F' options. We have to use .transpose() method to mimic its behaviour import torch a = torch.tensor([4.4, 5.4, 6.2, 6.5], requires_grad=True) b = torch.tensor([1.4, 1.2, 1.5, 1.22], requires_grad=True) c = a[:,None] @ b[None,:] xi = torch.tensor([6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8.]).reshape(4,4).transpose(0,1) torch.autograd.grad(c, a, xi) # (tensor([30.9200, 34.5800, 15.5400, 35.5600]),) */ TEST(tensordot, ten){ __TYPE__ FA[4] = {4.4, 5.4, 6.2, 6.5}; __TYPE__ FB[4] = {1.4, 1.2, 1.5, 1.22}; __TYPE__ XI[16] = {6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8}; auto x = Vi(0,4); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4); // y is the third variable and represents a 3D vector, "j"-indexed. 
auto xi = Vi(2,16); // y is the third variable and represents a 3D vector, "j"-indexed. auto f = Grad(TensorDot(x,y, Ind(4), Ind(4), Ind(), Ind()),y,xi); __TYPE__ out_keops[4]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f,0),1, 1, out_keops, FA, FB, XI); auto f_legacy = Grad(TensorProd(x,y),y,xi); __TYPE__ out_legacy[4]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f_legacy,0),1, 1, out_legacy, FA, FB, XI); __TYPE__ s2d = 0; for(int i=0; i<4; i++) { // std::cout << out_keops[i] << " " << out_legacy[i] << std::endl; s2d += abs(out_legacy[i] - out_keops[i]); } EXPECT_LE(s2d,5e-6); } /* import torch a = torch.tensor([4.4, 5.4, 6.2, 6.5], requires_grad=True) b = torch.tensor([1.4, 1.2, 1.5, 1.22], requires_grad=True) c = a[:,None] @ b[None,:] xi = torch.tensor([6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8.]).reshape(4,4) torch.autograd.grad(c, b, xi) #(tensor([124.9000, 152.7000, 72.1000, 148.8000]),) # Torch does not support order='F' options. We have to use .transpose() method to mimic its behaviour import torch a = torch.tensor([4.4, 5.4, 6.2, 6.5], requires_grad=True) b = torch.tensor([1.4, 1.2, 1.5, 1.22], requires_grad=True) c = a[:,None] @ b[None,:] xi = torch.tensor([6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8.]).reshape(4,4).transpose(0,1) torch.autograd.grad(c, b, xi) # (tensor([118.9000, 111.8000, 112.4000, 148.4000]),) */ TEST(tensordot, eleven){ __TYPE__ FA[16] = {6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8}; __TYPE__ FB[4] = {1.4, 1.2, 1.5, 1.22}; auto x = Vi(0,16); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4); // y is the third variable and represents a 3D vector, "j"-indexed. auto f = TensorDot(x,y, Ind(4,4), Ind(4), Ind(1), Ind(0)); __TYPE__ out_keops[4]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f,0),1, 1, out_keops, FA, FB); auto f_legacy = MatVecMult(x,y); __TYPE__ out_legacy[4]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f_legacy,0),1, 1, out_legacy, FA, FB); __TYPE__ s2d = 0; for(int i=0; i<4; i++) { // std::cout << out_keops[i] << " " << out_legacy[i] << std::endl; s2d += abs(out_legacy[i] - out_keops[i]); } EXPECT_LE(s2d,5e-6); } TEST(tensordot, twelve){ __TYPE__ FA[16] = {6, 44, 20, 9, 99, 5, 1, 6, 7, 8, 2, 4, 1.1, 55.9, 7, 8}; __TYPE__ FB[4] = {1.4, 1.2, 1.5, 1.22}; auto x = Vi(0,16); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4); // y is the third variable and represents a 3D vector, "j"-indexed. 
auto f = TensorDot(y,x, Ind(4), Ind(4,4), Ind(0), Ind(0)); __TYPE__ out_keops[4]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f,0),1, 1, out_keops, FA, FB); auto f_legacy = VecMatMult(y,x); __TYPE__ out_legacy[4]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f_legacy,0),1, 1, out_legacy, FA, FB); __TYPE__ s2d = 0; for(int i=0; i<4; i++) { // std::cout << out_keops[i] << " " << out_legacy[i] << std::endl; s2d += abs(out_legacy[i] - out_keops[i]); } EXPECT_LE(s2d,5e-6); } /* import numpy a = np.array([6, 44, 20, 9, 99, 5, 1, 6, 7, 8, 2, 4, 1.1, 55.9, 7, 8]).reshape(4,4) b = np.array([1.4, 1.2, 1.5, 1.22]).reshape(1,4) b @ a # array([[139.042, 147.798, 40.74 , 35.56 ]]) import numpy a = np.array([6, 44, 20, 9, 99, 5, 1, 6, 7, 8, 2, 4, 1.1, 55.9, 7, 8]).reshape(4,4, order='F') b = np.array([1.4, 1.2, 1.5, 1.22]).reshape(1,4, order='F') b @ a # array([[102.18, 153.42, 27.28, 88.88]]) */ TEST(tensordot, thirteen){ __TYPE__ FA[16] = {7.7, 4.5, 2.7, 9.8, 9.3, 5.34, 1.56, 6, 7.43, 8.7, 2.21, 4.98, 1.2, 9.32, 7.76, 8.33}; __TYPE__ FB[4] = {2.4, 1.2, 1.5, 1.22}; __TYPE__ XI[4] = { 4.4, 2.4, 6.65, 5.5}; auto x = Vi(0,16); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4); // y is the third variable and represents a 3D vector, "j"-indexed. auto xi = Vj(2,4); // y is the third variable and represents a 3D vector, "j"-indexed. auto f = Grad(TensorDot(x,y, Ind(4,4), Ind(4), Ind(1), Ind(0)),x,xi); //auto f = TensorDot(x,y, Ind(4,4), Ind(4), Ind(1), Ind(0)); __TYPE__ out_keops[16]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f,0),1, 1, out_keops, FA, FB, XI); auto f_legacy = Grad(MatVecMult(x,y),x,xi); __TYPE__ out_legacy[16]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f_legacy,0),1, 1, out_legacy, FA, FB, XI); __TYPE__ s2d = 0; #if C_CONTIGUOUS for(int i=0; i<16; i++) { // std::cout << out_keops[i] << " " << out_legacy[i] << std::endl; s2d += abs(out_legacy[i] - out_keops[i]); } #else for(int i=0; i<16; i++) { // std::cout << out_keops[i] << " " << out_legacy[i] << std::endl; s2d += abs(out_legacy[i] - out_keops[i]); } #endif EXPECT_LE(s2d,5e-6); } /* import torch a = torch.tensor([7.7, 4.5, 2.7, 9.8, 9.3, 5.34, 1.56, 6, 7.43, 8.7, 2.21, 4.98, 1.2, 9.32, 7.76, 8.33], requires_grad=True).reshape(4,4) b = torch.tensor([2.4, 1.2, 1.5, 1.22], requires_grad=True).reshape(4,1) c = a @ b xi = torch.tensor([ 4.4, 2.4, 6.65, 5.5]).reshape(4,1) torch.autograd.grad(c, a, xi)[0].view(-1) # tensor([10.5600, 5.2800, 6.6000, 5.3680, 5.7600, 2.8800, 3.6000, 2.9280, 15.9600, 7.9800, 9.9750, 8.1130, 13.2000, 6.6000, 8.2500, 6.7100]) # Torch does not support order='F' options. We have to use .transpose() method to mimic its behaviour import torch a = torch.tensor([7.7, 4.5, 2.7, 9.8, 9.3, 5.34, 1.56, 6, 7.43, 8.7, 2.21, 4.98, 1.2, 9.32, 7.76, 8.33], requires_grad=True).reshape(4,4).transpose(0,1) b = torch.tensor([2.4, 1.2, 1.5, 1.22], requires_grad=True).reshape(4,1) c = a @ b xi = torch.tensor([ 4.4, 2.4, 6.65, 5.5]).reshape(1,4) torch.autograd.grad(c.transpose(0,1), a, xi)[0].view(-1) # tensor([116.5350, 97.1100, 85.3000, 116.5500]) */ TEST(tensordot, fourteen){ __TYPE__ FA[16] = {7.7, 4.5, 2.7, 9.8, 9.3, 5.34, 1.56, 6, 7, 8, 2, 4, 1, 9, 7, 8}; __TYPE__ FB[4] = {2.4, 1.2, 1.5, 1.22}; __TYPE__ XI[4] = { 4.4, 2.4, 6.65, 5.5}; auto x = Vi(0,16); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4); // y is the third variable and represents a 3D vector, "j"-indexed. auto xi = Vi(2,4); // y is the third variable and represents a 3D vector, "j"-indexed. 
auto f = Grad(TensorDot(x,y, Ind(4,4), Ind(4), Ind(1), Ind(0)),y,xi); __TYPE__ out_keops[4]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f,0),1, 1, out_keops, FA, FB, XI); auto f_legacy = Grad(MatVecMult(x,y),y,xi); __TYPE__ out_legacy[4]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f_legacy,0),1, 1, out_legacy, FA, FB, XI); __TYPE__ s2d = 0; for(int i=0; i<4; i++) { // std::cout << out_keops[i] << " " << out_legacy[i] << std::endl; s2d += abs(out_legacy[i] - out_keops[i]); } EXPECT_LE(s2d,5e-6); } /* import torch a = torch.tensor([7.7, 4.5, 2.7, 9.8, 9.3, 5.34, 1.56, 6, 7, 8, 2, 4, 1, 9, 7, 8], requires_grad=True).reshape(4,4) b = torch.tensor([2.4, 1.2, 1.5, 1.22], requires_grad=True).reshape(4,1) c = a @ b xi = torch.tensor([ 4.4, 2.4, 6.65, 5.5]).reshape(4,1) torch.autograd.grad(c, b, xi)[0].view(-1) #tensor([108.2500, 135.3160, 67.4240, 128.1200]) # Torch does not support order='F' options. We have to use .transpose() method to mimic its behaviour import torch a = torch.tensor([7.7, 4.5, 2.7, 9.8, 9.3, 5.34, 1.56, 6, 7, 8, 2, 4, 1, 9, 7, 8], requires_grad=True).reshape(4,4).transpose(0,1) b = torch.tensor([2.4, 1.2, 1.5, 1.22], requires_grad=True).reshape(4,1) c = a @ b xi = torch.tensor([ 4.4, 2.4, 6.65, 5.5]).reshape(1,4) torch.autograd.grad(c.transpose(0,1), b, xi)[0].view(-1) # tensor([116.5350, 97.1100, 85.3000, 116.5500]) */ TEST(tensordot, fifteen){ __TYPE__ FA[4] = {4.4, 5.4, 6.2, 6.5}; __TYPE__ FB[4] = {1.4, 1.2, 1.5, 1.22}; auto x = Vi(0,4); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4); // y is the third variable and represents a 3D vector, "j"-indexed. auto f = TensorDot(x,y, Ind(4), Ind(4), Ind(), Ind()); __TYPE__ out_keops[16]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f,0),1, 1, out_keops, FA, FB); auto f_legacy = TensorProd(x,y); __TYPE__ out_legacy[16]; EvalRed<GpuConv1D_FromHost>(Sum_Reduction(f_legacy,0),1, 1, out_legacy, FA, FB); __TYPE__ s2d = 0; for(int i=0; i<16; i++) { // std::cout << out_keops[i] << " " << out_legacy[i] << std::endl; s2d += abs(out_legacy[i] - out_keops[i]); } EXPECT_LE(s2d,5e-6); } TEST(tensordot, sixteen){ auto x = Vi(0,5*4*3); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4*2); // y is the third variable and represents a 3D vector, "j"-indexed. 
auto f = TensorDot(x,y, Ind(5, 4, 3), Ind(4, 2), Ind(1), Ind(0), Ind(0, 2, 1)); auto Sum_f = Sum_Reduction(f,0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FAA[60] = {7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8}; __TYPE__ FBB[8] = {6, 4, 2, 9, 9, 5, 1, 6}; __TYPE__ out_keops[30]; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, out_keops, FAA, FBB); __TYPE__ out_loop[30] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if C_CONTIGUOUS for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 3; j++) for (size_t k = 0; k < 2; k++) for (size_t l = 0; l < 4; l++) { // size_t kda = 12 * i + 3 * l + j; // size_t kdb = 2 * l + k; // size_t I = 6 * i + 3 * k + j; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[6 * i + 3 * k + j] += FAA[12 * i + 3 * l + j] * FBB[ 2 * l + k]; } #else for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 3; j++) for (size_t k = 0; k < 2; k++) for (size_t l = 0; l < 4; l++) { // size_t kda = 20 * l + 5 * k + i; // size_t kdb = 12 * j + 4 * l + k; // size_t I = 5 * j + i; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[10 * j + 5 * k + i] += FAA[20 * j + 5 * l + i] * FBB[4 * k + l]; } #endif __TYPE__ s2d = 0; for(int i=0; i<30; i++) { // std::cout << out_keops[i] << " " << out_loop[i] << std::endl; s2d += abs(out_keops[i] - out_loop[i]); } EXPECT_LE(s2d,5e-6); } /* import numpy as np a = np.array([7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8]).reshape(5, 4, 3) b = np.array([6, 4, 2, 9, 9, 5, 1, 6]).reshape(4, 2) np.tensordot(a,b,axes=([1],[0])).swapaxes(2,1).flatten() # array([106, 156, 121, 103, 183, 135, 34, 111, 108, 93, 88, 102, 89, 67, 34, 120, 111, 57, 61, 92, 78, 148, 108, 142, 137, 96, 109, 136, 73, 118]) import numpy as np a = np.array([7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8]).reshape(5, 4, 3, order='F') b = np.array([6, 4, 2, 9, 9, 5, 1, 6]).reshape(4, 2, order='F') np.tensordot(a,b,axes=([1],[0])).swapaxes(2,1).flatten(order='F') # array([109, 119, 123, 62, 135, 113, 136, 147, 79, 129, 157, 65, 119, 116, 98, 164, 73, 97, 106, 79, 135, 121, 40, 75, 116, 123, 125, 53, 81, 91]) */ /* TEST(tensordot, seventeen){ auto x = Vi(0, 2*3*4 ); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,4*2); // y is the third variable and represents a 3D vector, "j"-indexed. 
// auto xi = Vj(2,3); //auto Sum_f = Sum_Reduction( Grad(TensorDot(x, y, Ind(2,3,4), Ind(4,2), Ind(2,0), Ind(0,1), Ind(0)), x, xi),0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") auto xi = Vi(2,3); auto Sum_f = Sum_Reduction( Grad(TensorDot(x, y, Ind(2,3,4), Ind(4,2), Ind(2,0), Ind(0,1), Ind(0)), y, xi),0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FAA[24] = {6, 4, 2, 9, 9, 5, 1, 6,6, 4, 2, 9, 9, 5, 1, 6,6, 4, 2, 9, 9, 5, 1, 6}; __TYPE__ FBB[8] = {6, 4, 2, 9, 9, 5, 1, 6}; __TYPE__ XI[3] = {6, 4, 2}; __TYPE__ out_keops[8]; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, out_keops, FAA, FBB, XI); __TYPE__ s2d = 0; for(int i=0; i<8; i++) { std::cout << out_keops[i] << std::endl; s2d += abs(out_keops[i] ); } EXPECT_LE(s2d,5e-6); } */ TEST(tensordot, seventeen){ auto x = Vi(0,4*5*3); // x is the second variable and represents a 3D vector, "i"-indexed. auto y = Vj(1,3*4*2); // y is the third variable and represents a 3D vector, "j"-indexed. //auto f = TensorDot(x,y, Ind(4, 5, 3), Ind(3, 4, 2), Ind(0, 2), Ind(1, 0), Ind(0,1)); auto f = TensorDot(x,y, Ind(4, 5, 3), Ind(3, 4, 2), Ind(0, 2), Ind(1, 0), Ind(1,0)); auto Sum_f = Sum_Reduction(f,0); // 0 means output of reduction will be "i"-indexed (0 means"i", 1 means "j") __TYPE__ FAA[60] = {7, 9, 9, 5, 8, 3, 6, 9, 6, 0, 5, 7, 3, 4, 3, 5, 3, 3, 0, 9, 9, 6, 0, 3, 3, 7, 0, 8, 6, 0, 6, 1, 3, 1, 4, 7, 3, 9, 8, 8, 3, 7, 2, 3, 1, 9, 5, 7, 7, 5, 9, 7, 0, 1, 9, 7, 5, 0, 3, 8}; __TYPE__ FBB[24] = {6, 4, 2, 9, 9, 5, 1, 6, 7, 8, 2, 4, 1, 9, 7, 8, 5, 4, 3, 2, 3, 8, 5, 7}; __TYPE__ out_keops[10]; EvalRed<GpuConv1D_FromHost>(Sum_f,1, 1, out_keops, FAA, FBB); __TYPE__ out_loop[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if C_CONTIGUOUS for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 2; j++) for (size_t k = 0; k < 4; k++) for (size_t l = 0; l < 3; l++) { // size_t kda = 15 * k + 3 * i + l; // size_t kdb = 8 * l + 2 * k + j; // size_t I = 2 * i + j; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[5 * j + i] += FAA[15 * k + 3 * i + l] * FBB[8 * l + 2 * k + j]; } #else for (size_t i = 0; i < 5; i++) for (size_t j = 0; j < 2; j++) for (size_t k = 0; k < 4; k++) for (size_t l = 0; l < 3; l++) { // size_t I = 5 * j + i; // size_t kda = 20 * l + 4 * i + k; // size_t kdb = 12 * j + 3 * k + l; // std::cout << "(" << i << "," << j << "," << k << "," << l <<") " << I << " " << kda << " " << kdb << std::endl; out_loop[2 * i + j] += FAA[20 * l + 4 * i + k] * FBB[12 * j + 3 * k + l]; } #endif __TYPE__ s2d = 0; for(int i=0; i<10; i++) { // std::cout << out_keops[i] << " " << out_loop[i] << std::endl; s2d += abs(out_keops[i] - out_loop[i]); } EXPECT_LE(s2d,5e-6); } /* import numpy as np a= np.array([[[7, 9, 9], [5, 8, 3], [6, 9, 6], [0, 5, 7]], [[3, 4, 3], [5, 3, 3],[0, 9, 9],[6, 0, 3]],[[3, 7, 0],[8, 6, 0],[6, 1, 3],[1, 4, 7]], [[3, 9, 8],[8, 3, 7],[2, 3, 1],[9, 5, 7]],[[7, 5, 9],[7, 0, 1],[9, 7, 5],[0, 3, 8]]]).flatten().reshape(4, 5, 3) b = np.array([[[6, 4],[2, 9],[9, 5]],[[1, 6],[7, 8],[2, 4]],[[1, 9],[7, 8],[5, 4]],[[3, 2],[3, 8],[5, 7]]]).flatten().reshape(3,4,2) print(np.tensordot(a,b,axes=([0,2],[1,0])).swapaxes(1,0).flatten()) # [318 267 222 269 174 405 392 389 391 277] import numpy as np aa= np.array([[[7, 9, 9], [5, 8, 3], [6, 9, 6], [0, 5, 7]], [[3, 4, 3], [5, 3, 3],[0, 9, 9],[6, 0, 3]],[[3, 7, 0],[8, 6, 0],[6, 1, 3],[1, 4, 7]], [[3, 9, 8],[8, 3, 7],[2, 3, 1],[9,5, 7]],[[7, 5, 9],[7, 0, 1],[9, 7, 5],[0, 3, 
8]]]).flatten().reshape(4, 5, 3, order='F') bb = np.array([[[6, 4],[2, 9],[9, 5]],[[1, 6],[7, 8],[2, 4]],[[1, 9],[7, 8],[5, 4]],[[3, 2],[3, 8],[5, 7]]]).flatten().reshape(3,4,2, order='F') print(np.tensordot(aa,bb,axes=([0,2],[1,0])).swapaxes(1,0).flatten(order='F')) # [335 348 354 331 289 293 252 239 337 327] */ } // namespace GTEST_API_ int main(int argc, char **argv) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
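// ---------------------------------------------------------------------------
// Illustrative standalone sketch (not from the original test file): the tests
// above validate KeOps TensorDot by comparing against explicit index-arithmetic
// loops. This self-contained program reproduces the C_CONTIGUOUS reference loop
// from TEST(tensordot, sixteen) -- contracting a row-major (5,4,3) tensor with a
// (4,2) matrix over axes 1 and 0, then permuting the output axes to (0,2,1) --
// using placeholder data. Strides follow from the shapes: a(5,4,3) -> (12,3,1),
// b(4,2) -> (2,1), out(5,2,3) -> (6,3,1).
#include <cstdio>

int main() {
  double a[60], b[8], out[30] = {0};
  for (int i = 0; i < 60; ++i) a[i] = i % 10;       // placeholder data
  for (int i = 0; i < 8; ++i)  b[i] = (i * 3) % 7;  // placeholder data
  // out[i,k,j] = sum_l a[i,l,j] * b[l,k]
  for (int i = 0; i < 5; ++i)
    for (int j = 0; j < 3; ++j)
      for (int k = 0; k < 2; ++k)
        for (int l = 0; l < 4; ++l)
          out[6 * i + 3 * k + j] += a[12 * i + 3 * l + j] * b[2 * l + k];
  for (int i = 0; i < 30; ++i) printf("%g ", out[i]);
  printf("\n");
  return 0;
}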
namespace cudatbx { namespace scattering { /* ========================================================================== */ cudatbx::scattering::direct_summation::direct_summation() { // set host and device pointers to NULL h_xyz = NULL; h_solvent = NULL; h_h = NULL; h_rt = NULL; h_weights = NULL; h_scattering_type = NULL; h_a = NULL; h_b = NULL; h_c = NULL; d_xyz = NULL; d_solvent = NULL; d_h = NULL; d_rt = NULL; d_weights = NULL; d_scattering_type = NULL; amplitudes_allocated = false; h_real = NULL; h_imag = NULL; d_real = NULL; d_imag = NULL; workspace_allocated = false; d_workspace = NULL; } cudatbx::scattering::direct_summation::~direct_summation() { clear_arrays(); clear_amplitudes(); clear_workspace(); } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::set_xyz (const scitbx::af::const_ref<scitbx::vec3<double> >& xyz) { // allocate memory if necessary if (n_xyz != xyz.size()) { clear_xyz(); n_xyz = xyz.size(); padded_n_xyz = cudatbx::calculate_padded_size(n_xyz,padding); size_xyz = 3 * padded_n_xyz; h_xyz = new fType[size_xyz]; cudaSafeCall( cudaMalloc((void**)&d_xyz,size_xyz*sizeof(fType)) ); } // convert values for (int i=0; i<n_xyz; i++) { for (int j=0; j<3; j++) { h_xyz[j*padded_n_xyz + i] = fType(xyz[i][j]); } } // transfer to GPU cudaSafeCall( cudaMemcpy(d_xyz, h_xyz, size_xyz*sizeof(fType), cudaMemcpyHostToDevice) ); } void cudatbx::scattering::direct_summation::clear_xyz() { delete[] h_xyz; cudaSafeCall( cudaFree(d_xyz) ); h_xyz = NULL; d_xyz = NULL; } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::set_solvent_weights (const scitbx::af::const_ref<double>& solvent_weights) { // allocate memory if necessary SCITBX_ASSERT (n_xyz == solvent_weights.size()); if (n_solvent != solvent_weights.size()) { clear_solvent_weights(); n_solvent = solvent_weights.size(); h_solvent = new fType[padded_n_xyz]; cudaSafeCall( cudaMalloc((void**)&d_solvent,padded_n_xyz*sizeof(fType)) ); } // convert values for (int i=0; i<n_xyz; i++) { h_solvent[i] = fType(solvent_weights[i]); } // transfer to GPU cudaSafeCall( cudaMemcpy(d_solvent, h_solvent, padded_n_xyz*sizeof(fType), cudaMemcpyHostToDevice) ); } void cudatbx::scattering::direct_summation::clear_solvent_weights() { delete[] h_solvent; cudaSafeCall( cudaFree(d_solvent) ); h_solvent = NULL; d_solvent = NULL; } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::set_hkl (const scitbx::af::const_ref<scitbx::vec3<double> >& h) { // allocate memory if necessary if (n_h != h.size()) { clear_hkl(); n_h = h.size(); padded_n_h = cudatbx::calculate_padded_size(n_h,padding); size_h = 3 * padded_n_h; h_h = new fType[size_h]; cudaSafeCall( cudaMalloc((void**)&d_h,size_h*sizeof(fType)) ); } // convert values for (int i=0; i<n_h; i++) { for (int j=0; j<3; j++) { h_h[j*padded_n_h + i] = fType(h[i][j]); } } // transfer to GPU cudaSafeCall( cudaMemcpy(d_h, h_h, size_h*sizeof(fType), cudaMemcpyHostToDevice) ); } void cudatbx::scattering::direct_summation::clear_hkl() { delete[] h_h; cudaSafeCall( cudaFree(d_h) ); h_h = NULL; d_h = NULL; } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::set_q (const scitbx::af::const_ref<double>& q) { // q data, use h variables if (n_h != q.size()) { clear_q(); n_h = q.size(); padded_n_h = cudatbx::calculate_padded_size(n_h,padding); size_h = 
padded_n_h; h_h = new fType[size_h]; cudaSafeCall( cudaMalloc((void**)&d_h,size_h*sizeof(fType)) ); } // convert values for (int i=0; i<n_h; i++) { h_h[i] = fType(q[i]); } // transfer to GPU cudaSafeCall( cudaMemcpy(d_h, h_h, size_h*sizeof(fType), cudaMemcpyHostToDevice) ); } void cudatbx::scattering::direct_summation::clear_q() { clear_hkl(); } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::set_lattice (const scitbx::af::const_ref<double>& lattice_weights, const scitbx::af::const_ref<double>& lattice) { // lattice points, use rotation/translation if (n_rt != lattice_weights.size()) { clear_lattice(); n_rt = lattice_weights.size(); size_rt = cudatbx::calculate_padded_size(n_rt,padding); h_weights = new fType[size_rt]; h_rt = new fType[3*size_rt]; cudaSafeCall( cudaMalloc((void**)&d_weights,size_rt*sizeof(fType)) ); cudaSafeCall( cudaMalloc((void**)&d_rt,3*size_rt*sizeof(fType)) ); } // convert values for (int i=0; i<n_rt; i++) { h_weights[i] = fType(lattice_weights[i]/n_rt); for (int j=0; j<3; j++) { h_rt[j*size_rt + i] = fType(lattice[j*n_rt + i]); } } // transfer to GPU cudaSafeCall( cudaMemcpy(d_weights, h_weights, size_rt*sizeof(fType), cudaMemcpyHostToDevice) ); cudaSafeCall( cudaMemcpy(d_rt, h_rt, 3*size_rt*sizeof(fType), cudaMemcpyHostToDevice) ); } void cudatbx::scattering::direct_summation::clear_weights() { delete[] h_weights; cudaSafeCall( cudaFree(d_weights) ); h_weights = NULL; d_weights = NULL; } void cudatbx::scattering::direct_summation::clear_lattice() { clear_weights(); clear_rotations_translations(); } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::set_rotations_translations (const scitbx::af::const_ref<double>& rotations, const scitbx::af::const_ref<scitbx::vec3<double> >& translations) { // each rotation/translation pair is combined and padded to take up // 64 bytes so that a coalesced read will read two pairs if (n_rt != translations.size()) { clear_rotations_translations(); n_rt = translations.size(); size_rt = padded_size * n_rt; h_rt = new fType[size_rt]; cudaSafeCall( cudaMalloc((void**)&d_rt,size_rt*sizeof(fType)) ); } // convert values for (int i=0; i<n_rt; i++) { for (int j=0; j<9; j++) { h_rt[padded_size*i + j] = fType(rotations[9*i + j]); } for (int j=0; j<3; j++) { h_rt[padded_size*i + j + 9] = fType(translations[i][j]); } } // transfer to GPU cudaSafeCall( cudaMemcpy(d_rt, h_rt, size_rt*sizeof(fType), cudaMemcpyHostToDevice) ); } void cudatbx::scattering::direct_summation::clear_rotations_translations() { delete[] h_rt; cudaSafeCall( cudaFree(d_rt) ); h_rt = NULL; d_rt = NULL; } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::set_scattering_types (const scitbx::af::const_ref<std::string>& scatterers, const cctbx::xray::scattering_type_registry& registry) { // allocate memory if necessary SCITBX_ASSERT (n_xyz == scatterers.size()); if (n_scatterers != scatterers.size()) { clear_scattering_types(); n_scatterers = scatterers.size(); h_scattering_type = new int[padded_n_xyz]; cudaSafeCall( cudaMalloc((void**)&d_scattering_type, padded_n_xyz*sizeof(int)) ); } // convert values for (int i=0; i<n_xyz; i++) { h_scattering_type[i] = registry.unique_index(scatterers[i]); } // transfer to GPU cudaSafeCall( cudaMemcpy(d_scattering_type,h_scattering_type, padded_n_xyz*sizeof(int),cudaMemcpyHostToDevice) ); } void 
cudatbx::scattering::direct_summation::clear_scattering_types() { delete[] h_scattering_type; cudaSafeCall( cudaFree(d_scattering_type) ); h_scattering_type = NULL; d_scattering_type = NULL; } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::set_scattering_type_registry (const cctbx::xray::scattering_type_registry& registry, const bool& complex_form_factor) { // convert form factors // add ordinary oxygen form factor at end for boundary layer solvent scitbx::af::shared<boost::optional <cctbx::eltbx::xray_scattering::gaussian> > unique_gaussians = registry.unique_gaussians; n_types = unique_gaussians.size() + 1; n_terms = unique_gaussians[0].get().n_terms(); f_size = n_types * n_terms; delete[] h_a; delete[] h_b; delete[] h_c; h_a = new fType[f_size]; h_b = new fType[f_size]; h_c = new fType[n_types]; for (int i=0; i<f_size; i++) { h_a[i] = fType(0.0); h_b[i] = fType(0.0); } for (int i=0; i<n_types-1; i++) { for (int j=0; j<n_terms; j++) { h_a[i*n_terms + j] = fType(unique_gaussians[i].get().array_of_a()[j]); h_b[i*n_terms + j] = fType(unique_gaussians[i].get().array_of_b()[j]); } if (unique_gaussians[i].get().use_c()) { h_c[i] = fType(unique_gaussians[i].get().c()); } else { h_c[i] = fType(0.0); } } // add form factor for boundary layer solvent cctbx::eltbx::xray_scattering::gaussian hoh = cctbx::eltbx::xray_scattering::wk1995("O",true).fetch(); for (int i=0; i<hoh.array_of_a().size(); i++){ h_a[(n_types-1)*n_terms + i] = fType(hoh.array_of_a()[i]); h_b[(n_types-1)*n_terms + i] = fType(hoh.array_of_b()[i]); } if (hoh.use_c()) { h_c[n_types-1] = fType(hoh.c()); } else { h_c[n_types-1] = fType(0.0); } // transfer to GPU cudaSafeCall( cudaMemcpyToSymbol(dc_a, h_a, f_size*sizeof(fType)) ); cudaSafeCall( cudaMemcpyToSymbol(dc_b, h_b, f_size*sizeof(fType)) ); cudaSafeCall( cudaMemcpyToSymbol(dc_c, h_c, n_types*sizeof(fType)) ); cudaSafeCall( cudaMemcpyToSymbol(dc_n_types, &n_types, sizeof(int)) ); cudaSafeCall( cudaMemcpyToSymbol(dc_n_terms, &n_terms, sizeof(int)) ); cudaSafeCall( cudaMemcpyToSymbol(dc_complex_form_factor, &complex_form_factor, sizeof(bool)) ); } void cudatbx::scattering::direct_summation::clear_scattering_type_registry() { delete[] h_a; delete[] h_b; delete[] h_c; h_a = NULL; h_b = NULL; h_c = NULL; } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::clear_arrays() { // clear pointers and set all pointers to NULL clear_xyz(); clear_solvent_weights(); clear_hkl(); clear_rotations_translations(); clear_weights(); clear_scattering_types(); clear_scattering_type_registry(); } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::allocate_amplitudes() { if (amplitudes_allocated) { clear_amplitudes(); } h_real = new fType[n_h]; h_imag = new fType[n_h]; cudaSafeCall( cudaMalloc((void**)&d_real,n_h*sizeof(fType)) ); cudaSafeCall( cudaMalloc((void**)&d_imag,n_h*sizeof(fType)) ); amplitudes_allocated = true; } void cudatbx::scattering::direct_summation::reset_amplitudes() { fType zero = fType(0.0); for (int i=0; i<n_h; i++) { h_real[i] = zero; h_imag[i] = zero; } cudaSafeCall( cudaMemcpy(d_real,h_real,n_h*sizeof(fType), cudaMemcpyHostToDevice) ); cudaSafeCall( cudaMemcpy(d_imag,h_imag,n_h*sizeof(fType), cudaMemcpyHostToDevice) ); } void cudatbx::scattering::direct_summation::clear_amplitudes() { delete[] h_real; delete[] h_imag; cudaSafeCall( cudaFree(d_real) ); cudaSafeCall( 
cudaFree(d_imag) ); h_real = NULL; h_imag = NULL; d_real = NULL; d_imag = NULL; amplitudes_allocated = false; } // -------------------------------------------------------------------------- void cudatbx::scattering::direct_summation::allocate_workspace (const int& length) { if (workspace_allocated) { clear_workspace(); } cudaSafeCall( cudaMalloc((void**)&d_workspace,length*sizeof(fType)) ); workspace_allocated = true; } void cudatbx::scattering::direct_summation::clear_workspace() { cudaSafeCall( cudaFree(d_workspace) ); d_workspace = NULL; workspace_allocated = false; } /* -------------------------------------------------------------------------- reorganizes data and calls cuda padded to multiple of 128 bytes, (32 * sizeof(float or int)) */ void cudatbx::scattering::direct_summation::run_kernel() { int blocks_per_grid = cudatbx::calculate_blocks_per_grid(n_h,threads_per_block); structure_factor_kernel<fType><<<blocks_per_grid,threads_per_block>>> (d_scattering_type, d_xyz, d_solvent, n_xyz, padded_n_xyz, d_h, n_h, padded_n_h, d_rt, n_rt, d_real, d_imag); } void cudatbx::scattering::direct_summation::add (const scitbx::af::const_ref<std::string>& scatterers, const scitbx::af::const_ref<scitbx::vec3<double> >& xyz, const scitbx::af::const_ref<double>& solvent_weights, const scitbx::af::const_ref<scitbx::vec3<double> >& h, const scitbx::af::const_ref<double>& rotations, const scitbx::af::const_ref<scitbx::vec3<double> >& translations, const cctbx::xray::scattering_type_registry& registry, const bool& complex_form_factor) { // reorganize input data, allocates arrays, transfer to GPU, order matters set_xyz(xyz); set_solvent_weights(solvent_weights); set_hkl(h); set_rotations_translations(rotations,translations); set_scattering_types(scatterers,registry); set_scattering_type_registry(registry,complex_form_factor); // allocate arrays for results if necessary if (!amplitudes_allocated) { allocate_amplitudes(); reset_amplitudes(); } // run calculation run_kernel(); // deallocate arrays clear_arrays(); } /* -------------------------------------------------------------------------- reorganizes data and calls cuda padded to multiple of 128 bytes, (32 * sizeof(float or int)) "Rapid and accurate calculation of small-angle scattering profiles using the golden ratio" Watson, MC, Curtis, JE. J. Appl. Cryst. (2013). 
46, 1171-1177 solvent variables are used for weights and code is not optimal possibly subclass or split everything into functions */ void cudatbx::scattering::direct_summation::prepare_saxs (const scitbx::af::const_ref<std::string>& scatterers, const scitbx::af::const_ref<scitbx::vec3<double> >& xyz, const scitbx::af::const_ref<double>& solvent_weights, const scitbx::af::const_ref<double>& q, const scitbx::af::const_ref<double>& lattice_weights, const scitbx::af::const_ref<double>& lattice, const cctbx::xray::scattering_type_registry& registry, const bool& complex_form_factor) { // reorganize input data, allocates arrays, transfer to GPU, order matters set_xyz(xyz); set_solvent_weights(solvent_weights); set_q(q); set_lattice(lattice_weights,lattice); set_scattering_types(scatterers,registry); set_scattering_type_registry(registry,complex_form_factor); // allocate arrays for results if necessary if (!amplitudes_allocated) { allocate_amplitudes(); } } void cudatbx::scattering::direct_summation::run_saxs_kernel() { // allocate working space if necessary if (!workspace_allocated) { workspace_size = int(std::floor(n_h*n_rt/padding + 1.0)) * padding; allocate_workspace(3*workspace_size); } int blocks_per_grid = cudatbx::calculate_blocks_per_grid (n_rt,threads_per_block); expand_q_lattice_kernel<fType><<<blocks_per_grid,threads_per_block>>> (d_h, n_h, d_rt, n_rt, size_rt, d_workspace, workspace_size); blocks_per_grid = cudatbx::calculate_blocks_per_grid (n_h*n_rt,threads_per_block); saxs_kernel<fType><<<blocks_per_grid,threads_per_block>>> (d_scattering_type, d_xyz, d_solvent, n_xyz, padded_n_xyz, n_h, n_rt, d_workspace, workspace_size); } void cudatbx::scattering::direct_summation::run_solvent_saxs_kernel() { // allocate working space if necessary if (!workspace_allocated) { workspace_size = int(std::floor(n_h*n_rt/padding + 1.0)) * padding; allocate_workspace(7*workspace_size); } int blocks_per_grid = cudatbx::calculate_blocks_per_grid (n_rt,threads_per_block); expand_q_lattice_kernel<fType><<<blocks_per_grid,threads_per_block>>> (d_h, n_h, d_rt, n_rt, size_rt, d_workspace, workspace_size); blocks_per_grid = cudatbx::calculate_blocks_per_grid (n_h*n_rt,threads_per_block); solvent_saxs_kernel<fType><<<blocks_per_grid,threads_per_block>>> (d_scattering_type, d_xyz, d_solvent, n_xyz, padded_n_xyz, n_h, n_rt, d_workspace, workspace_size); } void cudatbx::scattering::direct_summation::run_collect_solvent_saxs_kernel (const double& c1, const double& c2) { assert(workspace_allocated); int blocks_per_grid = cudatbx::calculate_blocks_per_grid (n_h*n_rt,threads_per_block); collect_solvent_saxs_kernel<fType><<<blocks_per_grid,threads_per_block>>> (n_h, n_rt,fType(c1),fType(c2),d_workspace, workspace_size); } void cudatbx::scattering::direct_summation::sum_over_lattice() { int blocks_per_grid = cudatbx::calculate_blocks_per_grid (n_rt,threads_per_block); for (int i=0; i<n_h; i++) { cudatbx::math::weighted_sum_kernel<fType> <<<blocks_per_grid,threads_per_block,threads_per_block*sizeof(fType)>>> (&d_workspace[i*n_rt],d_weights,n_rt,&d_real[i]); } } /* -------------------------------------------------------------------------- return total sum */ scitbx::af::shared<std::complex<double> > cudatbx::scattering::direct_summation::get_sum() { scitbx::af::shared<std::complex<double> > sf(n_h); assert(amplitudes_allocated); cudaSafeCall( cudaMemcpy(h_real,d_real,n_h*sizeof(fType), cudaMemcpyDeviceToHost) ); cudaSafeCall( cudaMemcpy(h_imag,d_imag,n_h*sizeof(fType), cudaMemcpyDeviceToHost) ); for (int i=0; i<n_h; i++) 
{ sf[i] = std::complex<double>(double(h_real[i]),double(h_imag[i])); } return sf; } /* ========================================================================== */ } }
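// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original class): each set_* method above
// follows the same buffer-management pattern -- reallocate host and device
// buffers only when the element count changes, transpose interleaved input into
// a padded structure-of-arrays layout for coalesced GPU reads, then copy to the
// device. A minimal self-contained version of that pattern; set_points is a
// hypothetical name, and calculate_padded_size mirrors the helper used above.
#include <cuda_runtime.h>

typedef float fType;

static size_t calculate_padded_size(size_t n, size_t padding) {
  return ((n + padding - 1) / padding) * padding;  // round up to a multiple
}

void set_points(const double* xyz, size_t n,       // n interleaved xyz points
                fType*& h_buf, fType*& d_buf,      // host/device buffer pair
                size_t& n_cur, size_t& padded_n, size_t padding) {
  if (n_cur != n) {                                // reallocate on size change
    delete[] h_buf;
    if (d_buf) cudaFree(d_buf);
    n_cur = n;
    padded_n = calculate_padded_size(n, padding);
    h_buf = new fType[3 * padded_n]();             // zero-init covers padding
    cudaMalloc((void**)&d_buf, 3 * padded_n * sizeof(fType));
  }
  for (size_t i = 0; i < n; ++i)                   // AoS -> padded SoA
    for (int j = 0; j < 3; ++j)
      h_buf[j * padded_n + i] = fType(xyz[3 * i + j]);
  cudaMemcpy(d_buf, h_buf, 3 * padded_n * sizeof(fType),
             cudaMemcpyHostToDevice);
}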
// Torch port: // IMAGINE, Sergey Zagoruyko, Francisco Massa, 2015 #include "THC.h" #include <algorithm> #include <cfloat> #include "common.h" using std::max; using std::min; template <typename Dtype> __global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = (bottom_rois[0] - 1); int roi_start_w = round((bottom_rois[1] - 1) * spatial_scale); int roi_start_h = round((bottom_rois[2] - 1)* spatial_scale); int roi_end_w = round((bottom_rois[3] - 1) * spatial_scale); int roi_end_h = round((bottom_rois[4] - 1) * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } extern "C" void inn_ROIPooling_updateOutput(THCState *state, THCudaTensor *output, THCudaTensor *indices, THCudaTensor *data, THCudaTensor* rois, int W, int H, double spatial_scale) { THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); long num_rois = rois->size[0]; long nInputPlane = data->size[1]; THCudaTensor_resize4d(state, output, num_rois, nInputPlane, H, W); THCudaTensor_resize4d(state, indices, num_rois, nInputPlane, H, W); long count = THCudaTensor_nElement(state, output); ROIPoolForward<float><<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( count, THCudaTensor_data(state, data), spatial_scale, nInputPlane, data->size[2], data->size[3], H, W, THCudaTensor_data(state, rois), THCudaTensor_data(state, output), (int*)THCudaTensor_data(state, indices) ); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in inn_ROIPooling_updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } } template <typename Dtype> __global__ void ROIPoolForwardV2(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = (bottom_rois[0] - 1); int roi_start_w = round((bottom_rois[1] - 1) * spatial_scale); int roi_start_h = round((bottom_rois[2] - 1)* spatial_scale); int roi_end_w = round((bottom_rois[3] - 1) * spatial_scale) - 1; int roi_end_h = round((bottom_rois[4] - 1) * spatial_scale) - 1; // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(round(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(round(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(round(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(round(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } extern "C" void inn_ROIPooling_updateOutputV2(THCState *state, THCudaTensor *output, THCudaTensor *indices, THCudaTensor *data, THCudaTensor* rois, int W, int H, double spatial_scale) { THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); long num_rois = rois->size[0]; long nInputPlane = data->size[1]; THCudaTensor_resize4d(state, output, num_rois, nInputPlane, H, W); THCudaTensor_resize4d(state, indices, num_rois, nInputPlane, H, W); long count = THCudaTensor_nElement(state, output); ROIPoolForwardV2<float><<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( count, THCudaTensor_data(state, data), spatial_scale, nInputPlane, data->size[2], data->size[3], H, W, THCudaTensor_data(state, rois), THCudaTensor_data(state, output), (int*)THCudaTensor_data(state, indices) ); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in inn_ROIPooling_updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } } template <typename Dtype> __global__ void ROIPoolBackwardAtomic(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = (bottom_rois[0] - 1); int bottom_offset = (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + top_offset; Dtype* offset_bottom_diff = bottom_diff + bottom_offset; const int* offset_argmax_data = argmax_data + top_offset; int argmax = offset_argmax_data[ph*pooled_width + pw]; if(argmax != -1) { atomicAdd(offset_bottom_diff + argmax, offset_top_diff[ph * pooled_width + pw]); } } } extern "C" void inn_ROIPooling_updateGradInputAtomic(THCState *state, THCudaTensor *gradInput, THCudaTensor *indices, THCudaTensor *data, THCudaTensor *gradOutput, THCudaTensor* rois, int W, int H, double spatial_scale) { THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); long num_rois = rois->size[0]; long nInputPlane = data->size[1]; THCudaTensor_resizeAs(state, gradInput, data); THCudaTensor_zero(state, gradInput); long count = THCudaTensor_nElement(state, gradOutput); ROIPoolBackwardAtomic<float><<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( count, THCudaTensor_data(state, 
gradOutput), (int*)THCudaTensor_data(state, indices), num_rois, spatial_scale, nInputPlane, data->size[2], data->size[3], H, W, THCudaTensor_data(state, gradInput), THCudaTensor_data(state, rois) ); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in inn_ROIPooling_updateGradInputAtomic: %s\n", cudaGetErrorString(err)); THError("aborting"); } }
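// ---------------------------------------------------------------------------
// Illustrative host-side reference (not from the original file): the heart of
// ROIPoolForward above is the bin arithmetic -- each pooled cell (ph, pw) maps
// to a floor/ceil window inside the ROI, clipped to the image; an empty window
// pools to zero with argmax -1 so nothing is backpropagated. A scalar version
// for one channel plane, with illustrative parameter names:
#include <cfloat>
#include <cmath>
#include <algorithm>

float roi_pool_cell(const float* plane, int height, int width,
                    int roi_start_h, int roi_start_w,
                    int roi_height, int roi_width,
                    int pooled_height, int pooled_width,
                    int ph, int pw, int* argmax) {
  float bin_h = float(roi_height) / pooled_height;
  float bin_w = float(roi_width) / pooled_width;
  int hstart = std::min(std::max(int(std::floor(ph * bin_h)) + roi_start_h, 0), height);
  int hend   = std::min(std::max(int(std::ceil((ph + 1) * bin_h)) + roi_start_h, 0), height);
  int wstart = std::min(std::max(int(std::floor(pw * bin_w)) + roi_start_w, 0), width);
  int wend   = std::min(std::max(int(std::ceil((pw + 1) * bin_w)) + roi_start_w, 0), width);
  bool is_empty = (hend <= hstart) || (wend <= wstart);
  float maxval = is_empty ? 0.f : -FLT_MAX;  // empty bins pool to zero
  *argmax = -1;                              // -1 => no gradient flows back
  for (int h = hstart; h < hend; ++h)
    for (int w = wstart; w < wend; ++w)
      if (plane[h * width + w] > maxval) {
        maxval = plane[h * width + w];
        *argmax = h * width + w;
      }
  return maxval;
}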
#define XTALK_REGION_ONLY /* // xtalk calculation from excess hydrogen by neighbours __global__ void SimpleXTalkNeighbourContributionAndAccumulation_LocalMem(// Here FL stands for flows const unsigned short * RegionMask, //per Region const unsigned short * bfMask, // per Bead const unsigned short * bstateMask, //per Bead float * xTalkContribution, // buffer XTalk contribution to this well NxF float * genericXTalkTracesRegion, // one trace of max compressed frames per thread block or per region (atomicAdd) int * numGenericXTalkTracesRegion, //one int per region to average after accumulation const short* RawTraces, //NxF const float * EmptyTraceRegion, //FxR const float* BeadParamCube, //NxP const float* RegionFrameCube, //FxRxT bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const ConstantParamsRegion * constRegP, // R const PerFlowParamsRegion * perFlowRegP, // R const PerNucParamsRegion * perNucRegP, //RxNuc const size_t * numFramesRegion, // R const bool * TestingGenericXtakSampleMask //ToDo: remove whne testing done ) { extern __shared__ float smBaseimpleXTalkNeighbour[]; ///////////////////////// //coordinates and indices const size_t regionCol = blockIdx.x; const size_t regionRow = (blockIdx.y*blockDim.y)/ImgRegP.getRegH(); //image coordinates const int ix = regionCol * ImgRegP.getRegW() + threadIdx.x; const int iy = (blockIdx.y*blockDim.y) + threadIdx.y; size_t idx = ImgRegP.getWellIdx(ix,iy); //region index to address region specific parameters const size_t regId = ImgRegP.getRegIdFromGrid(regionCol,regionRow); //////////////////////// // region specifics //set offset to first trace for this region float * genericXTalkTraceGlobal = genericXTalkTracesRegion + regId*ConstFrmP.getMaxCompFrames(); int * numGenericXTalkTracesGlobal = numGenericXTalkTracesRegion + regId; //exit if no work for whole region if( LDG_ACCESS(RegionMask,regId) != RegionMaskLive) return; size_t nframes = numFramesRegion[regId]; if (nframes == 0) return; //Shared Memory pointers float * sm_base = smBaseimpleXTalkNeighbour; float * sm_warp_base = sm_base + threadIdx.y*blockDim.x; float * sm = sm_warp_base + threadIdx.x; float * sm_warpTrace_base = sm_base + blockDim.x * blockDim.y; float * sm_warpTrace = sm_warpTrace_base + threadIdx.y * ConstFrmP.getMaxCompFrames(); int t=threadIdx.x; //set shared mem warp trace buffer to 0 while( t < ConstFrmP.getMaxCompFrames() ){ sm_warpTrace[t] = 0.0f; t += blockDim.x; } //stride from one per bead plane to the next const size_t BeadPlaneStride = ImgRegP.getPlaneStride(); //stride from one regions*frames plane to the next const size_t RegionFramesPlaneStride = ConstFrmP.getMaxCompFrames() * ImgRegP.getNumRegions(); //update base pointer to data for this region in region frame cube RegionFrameCube += regId*ConstFrmP.getMaxCompFrames(); //DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const float * emptyTrace = EmptyTraceRegion + regId*ConstFrmP.getUncompFrames(); const float* deltaFrames = RegionFrameCube + RfDeltaFrames*RegionFramesPlaneStride; //update per region pointers constRegP += regId; perFlowRegP += regId; //point to correct nuc perNucRegP += ImgRegP.getNumRegions() * ConstFlowP.getNucId() + regId; ///////////////////////////////// // bead specific pointer updates //bfMask += idx; //bstateMask += idx; //RawTraces += idx; //BeadParamCube += idx; //sliding window int rx = threadIdx.x; int ry = iy%ImgRegP.getRegH(); const int windowWidth = blockDim.x; const int rowsPerStride = blockDim.y; const size_t rowStride = rowsPerStride * ImgRegP.getImgW(); int 
windowOffset = 0; //|regId 0 |regId 1 | //|b0w0-------> |b2w0--> | each warp slides across region //|b0w1----> |b2w1-----> | independent from other warps. //|b1w0 |... | multiple blocks work on one region //|b1w1 | | all the threads within a block work //|_____________|_____________| on wells of the same region //|regId 2 |regId 3 | //| | | ////////////////////////////////// // local memory and variables //can probably be partially removed and reused instead float incorp_rise[MAX_COMPRESSED_FRAMES_GPU]; float lost_hydrogen[MAX_COMPRESSED_FRAMES_GPU]; float bulk_signal[MAX_COMPRESSED_FRAMES_GPU]; float xtalk[MAX_COMPRESSED_FRAMES_GPU]; //volatile float * xtalk = xtalkBuff; int numGenericXTalkTraces = 0; if(ry < ImgRegP.getRegH(regId)) { // warp divergent code // DO NOT call syncthread within this branch!!! while(windowOffset < ImgRegP.getRegW(regId)){ //update coordinates and offsets for well we are accumulating for const int lrx = rx + windowOffset; const int lix = ix + windowOffset; const int lidx = idx + windowOffset; float * lxTalkContribution = xTalkContribution + lidx; //const unsigned short * lBfMask = bfMask + lidx; bool useForGenericXTalk = false; // zeroing has to be done before next if statement for later warp level accumulation for (int f=0; f<nframes; ++f) { xtalk[f] = 0; } //ony threads that work on well within the reagion actually do this work here: if(lrx < ImgRegP.getRegW(regId)){ useForGenericXTalk = TestingGenericXtakSampleMask[lidx]; //ToDo: remove after testing //useForGenericXTalk = useForEmpty(lBfMask); //useForGenericXTalk = Match(lBfMask, MaskLive); //useForGenericXTalk = (Match(lBfMask, MaskLive) || useForEmpty(lBfMask)); for (int nid=0; nid<ConstTraceXTalkP.getNumNeighbours(); ++nid){ //neighbor global coordinates int nix; int niy; //get coordinates for neighbor we are workign on ConstTraceXTalkP.getBlockCoord(nix,niy,nid,lix,iy); if( ImgRegP.getRegBaseX(regId) <= nix && nix < ImgRegP.getRegUpperX(regId)) { if( ImgRegP.getRegBaseY(regId) <= niy && niy < ImgRegP.getRegUpperY(regId)) { //update local mask offsets for current neighbor for filtering size_t nIdx = ImgRegP.getWellIdx(nix,niy); const unsigned short * nBfMask = bfMask +nIdx; const unsigned short * nBstateMask = bstateMask + nIdx; //filter non-live, pinned or corrupt neighbors if( Match(nBfMask, MaskLive) && !( Match(nBstateMask,BkgMaskPinned) || Match(nBstateMask,BkgMaskCorrupt)) ){ //update local buffer offsets for current neighbor const short* nRawTraces = RawTraces + nIdx; const float* nBeadParamCube = BeadParamCube + nIdx; const float copies = *(nBeadParamCube + BpCopies*BeadPlaneStride); const float R = *(nBeadParamCube + BpR*BeadPlaneStride); //float Rval, tau; const float etbR = ComputeETBR(perNucRegP, perFlowRegP->getRatioDrift(), R, copies); const float tauB = ComputeTauB(constRegP, etbR); //const float SP = ComputeSP(perFlowRegP->getCopyDrift(), copies); // Calculate approximate incorporation signal float one_over_two_taub = 1.0f / (2.0f*tauB); int f = 0; float xt = deltaFrames[f]*one_over_two_taub; incorp_rise[f] = (1.0f+xt)*nRawTraces[f*BeadPlaneStride] - (etbR+xt)*emptyTrace[f]; f++; for (;f<nframes; ++f) { xt = deltaFrames[f]*one_over_two_taub; incorp_rise[f] = (1.0+xt)*nRawTraces[f*BeadPlaneStride] - (1.0f-xt)*nRawTraces[(f-1)*BeadPlaneStride] - ((etbR+xt)*emptyTrace[f]-(etbR-xt)*emptyTrace[f-1]) + incorp_rise[(f-1)]; } // Calculate lost hydrogen f = perFlowRegP->getStart(); xt = 1.0f/(1.0f + (deltaFrames[f]*one_over_two_taub)); lost_hydrogen[f] = incorp_rise[f]*xt; f++; for (;f<nframes; 
++f) { xt = 1.0f/(1.0f + (deltaFrames[f]*one_over_two_taub)); lost_hydrogen[f] = (incorp_rise[f] - incorp_rise[(f-1)] + (1.0f-(deltaFrames[f]*one_over_two_taub))*lost_hydrogen[(f-1)])*xt; } for (f = perFlowRegP->getStart();f<nframes; ++f) { lost_hydrogen[f] = incorp_rise[f] - lost_hydrogen[f]; } // Calculate ions from bulk float taue = etbR * tauB; f = perFlowRegP->getStart(); one_over_two_taub = 1.0f / (2.0f*taue); xt = 1.0f/(1.0f + (deltaFrames[f]*one_over_two_taub)); bulk_signal[f] = lost_hydrogen[f]*xt; f++; for (;f<nframes; ++f) { xt = 1.0f/(1.0f + (deltaFrames[f]*one_over_two_taub)); bulk_signal[f] = (lost_hydrogen[f] - lost_hydrogen[(f-1)] + (1.0f-(deltaFrames[f]*one_over_two_taub))*bulk_signal[(f-1)])*xt; } // Scale down the ion by neighbor multiplier for (f=perFlowRegP->getStart(); f<nframes; ++f){ xtalk[f] += bulk_signal[f] * ConstTraceXTalkP.getMultiplier(nid); } } } } } } //if rx < regiond width //now this is where the warp level magic gets thrown in int genericXTalkTracesThisWarp = 0; // how many traces are we actually working on in this window int * ismWB = (int*)sm_warp_base; int * ism = ismWB + threadIdx.x; *ism = (useForGenericXTalk)?(1):(0); WarpSumNoSync(ism); genericXTalkTracesThisWarp = *ismWB; numGenericXTalkTraces += genericXTalkTracesThisWarp; //count how many XTalks traces for empties got already handled by this warp for (int f=0; f<nframes; ++f){ float thisFrame = xtalk[f] ; if(lrx < ImgRegP.getRegW(regId)) // let only threads write to global that actually have data from within the region (all others have 0) *lxTalkContribution = thisFrame ; //store xtalk for this single well frame by frame //more warp level magic: //accumulate generic Xtalk for this window and add to xtalk already accumulated by this warp //WarpTraceAccumSingleFrame(sm_warpTrace,f,sm_warp_base,thisFrame,useForGenericXTalk);//thisFrame,useForGenericXTalk); *sm = (useForGenericXTalk)?(thisFrame):(0.0f); WarpSumNoSync(sm); float genXtalkFrame = *sm_warp_base; //WarpTraceAccumSingleFrame(sm_warpTrace,f,sm_warp_base,thisFrame,useForGenericXTalk);//thisFrame,useForGenericXTalk); if(threadIdx.x == 0) sm_warpTrace[f] += genXtalkFrame; lxTalkContribution+=BeadPlaneStride; // one frame per plane per bead } windowOffset += windowWidth; } // while windowOffset < region Width. }// if ry < region Height // END of warp divergent code from here on we can syncthread again!! 
__syncthreads(); //block level reduction and global accumulation with atomics BlockTraceAccumfromWarpsInplaceToGlobal(genericXTalkTraceGlobal,1,sm_warpTrace_base,nframes, ConstFrmP.getMaxCompFrames(), true); __syncthreads(); int * ism = (int*)sm; *ism = numGenericXTalkTraces; BlockAccumValuePerWarpToGlobal(numGenericXTalkTracesGlobal,(int*)sm_base,true); } */ //the next two kernels calculate the same as above but use 1/neighbours the calculations // xtalk calculation from excess hydrogen by neighbours __global__ void SimpleXTalkNeighbourContribution(// Here FL stands for flows const unsigned short * RegionMask, //per Region const unsigned short * bfMask, // per Bead const unsigned short * bstateMask, //per Bead float * myBaseXTalkContribution, // buffer XTalk contribution of this well NxF const short* RawTraces, //NxF const float* BeadParamCube, //NxP const float* RegionFrameCube, //FxRxT bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const ConstantParamsRegion * constRegP, // R const PerFlowParamsRegion * perFlowRegP, // R const PerNucParamsRegion * perNucRegP, //RxNuc const size_t * numFramesRegion // R ) { ///////////////////////// //coordinates and indices const size_t regionCol = blockIdx.x; const size_t regionRow = (blockIdx.y*blockDim.y)/ImgRegP.getRegH(); //image coordinates const int ix = regionCol * ImgRegP.getRegW() + threadIdx.x; const int iy = (blockIdx.y*blockDim.y) + threadIdx.y; size_t idx = ImgRegP.getWellIdx(ix,iy); //region index to address region specific parameters const size_t regId = ImgRegP.getRegIdFromGrid(regionCol,regionRow); //////////////////////// // region specifics //exit if no work for whole region if( LDG_ACCESS(RegionMask,regId) != RegionMaskLive) return; size_t nframes = numFramesRegion[regId]; if (nframes == 0) return; //stride from one per bead plane to the next const size_t BeadPlaneStride = ImgRegP.getPlaneStride(); //stride from one regions*frames plane to the next const size_t RegionFramesPlaneStride = ConstFrmP.getMaxCompFrames() * ImgRegP.getNumRegions(); //update base pointer to data for this region in region frame cube RegionFrameCube += regId*ConstFrmP.getMaxCompFrames(); //DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const float * emptyTrace = ConstHistCol.getLatestEmptyTraces() + regId*ConstFrmP.getUncompFrames(); //EmptyTraceRegion + regId*ConstFrmP.getUncompFrames(); const float* deltaFrames = RegionFrameCube + RfDeltaFrames*RegionFramesPlaneStride; //update per region pointers constRegP += regId; perFlowRegP += regId; //point to correct nuc perNucRegP += ImgRegP.getNumRegions() * ConstFlowP.getNucId() + regId; ////////////////////////// // sliding window base region coordinates //|regId 0 |regId 1 | //|b0w0-------> |b2w0--> | each warp slides across region //|b0w1----> |b2w1-----> | independent from other warps. //|b1w0 |... 
| multiple blocks work on one region //|b1w1 | | all the threads within a block work //|_____________|_____________| on wells of the same region //|regId 2 |regId 3 | //| | | int rx = threadIdx.x; int ry = iy%ImgRegP.getRegH(); const int windowWidth = blockDim.x; int windowOffset = 0; ////////////////////////////////// // local memory and variables float incorp_rise[MAX_COMPRESSED_FRAMES_GPU]; float lost_hydrogen[MAX_COMPRESSED_FRAMES_GPU]; float bulk_signal[MAX_COMPRESSED_FRAMES_GPU]; if(ry < ImgRegP.getRegH(regId)) { // warp divergent code // DO NOT call syncthread within this branch it can lead to undefined state while(windowOffset < ImgRegP.getRegW(regId)){ //update coordinates and offsets for well we are accumulating for const int lrx = rx + windowOffset; //const int lix = ix + windowOffset; const int lidx = idx + windowOffset; float * lmyBaseXTalkContribution = myBaseXTalkContribution + lidx; //ony threads that work on well within the reagion actually do this work here: if(lrx < ImgRegP.getRegW(regId)){ //Acquire well specific pointers: const unsigned short * lBfMask = bfMask +lidx; const unsigned short * lBstateMask = bstateMask + lidx; //filter non-live, pinned or corrupt neighbors const short* lRawTraces = RawTraces + lidx; const float* lBeadParamCube = BeadParamCube + lidx; const float copies = *(lBeadParamCube + BpCopies*BeadPlaneStride); const float R = *(lBeadParamCube + BpR*BeadPlaneStride); //am I a worthy contributor to xtalk? only then perform calculation. if not store 0 at the end bool contributor = Match(lBfMask, MaskLive) && !( Match(lBstateMask,BkgMaskPinned) || Match(lBstateMask,BkgMaskCorrupt)); if(contributor){ //float Rval, tau; const float etbR = ComputeETBR(perNucRegP, perFlowRegP->getRatioDrift(), R, copies); const float tauB = ComputeTauB(constRegP, etbR); //const float SP = ComputeSP(perFlowRegP->getCopyDrift(), copies); // Calculate approximate incorporation signal float one_over_two_taub = 1.0f / (2.0f*tauB); int f = 0; float xt = deltaFrames[f]*one_over_two_taub; incorp_rise[f] = (1.0f+xt)*lRawTraces[f*BeadPlaneStride] - (etbR+xt)*emptyTrace[f]; f++; for (;f<nframes; ++f) { xt = deltaFrames[f]*one_over_two_taub; incorp_rise[f] = (1.0+xt)*lRawTraces[f*BeadPlaneStride] - (1.0f-xt)*lRawTraces[(f-1)*BeadPlaneStride] - ((etbR+xt)*emptyTrace[f]-(etbR-xt)*emptyTrace[f-1]) + incorp_rise[(f-1)]; } // Calculate lost hydrogen f = perFlowRegP->getFineStart(); xt = 1.0f/(1.0f + (deltaFrames[f]*one_over_two_taub)); lost_hydrogen[f] = incorp_rise[f]*xt; f++; for (;f<nframes; ++f) { xt = 1.0f/(1.0f + (deltaFrames[f]*one_over_two_taub)); lost_hydrogen[f] = (incorp_rise[f] - incorp_rise[(f-1)] + (1.0f-(deltaFrames[f]*one_over_two_taub))*lost_hydrogen[(f-1)])*xt; } for (f = perFlowRegP->getFineStart();f<nframes; ++f) { lost_hydrogen[f] = incorp_rise[f] - lost_hydrogen[f]; } // Calculate ions from bulk float taue = etbR * tauB; f = perFlowRegP->getFineStart(); one_over_two_taub = 1.0f / (2.0f*taue); xt = 1.0f/(1.0f + (deltaFrames[f]*one_over_two_taub)); bulk_signal[f] = lost_hydrogen[f]*xt; f++; for (;f<nframes; ++f) { xt = 1.0f/(1.0f + (deltaFrames[f]*one_over_two_taub)); bulk_signal[f] = (lost_hydrogen[f] - lost_hydrogen[(f-1)] + (1.0f-(deltaFrames[f]*one_over_two_taub))*bulk_signal[(f-1)])*xt; } }// if contributor //contributors store bulk_signal to global all other store 0 values for(int f=perFlowRegP->getFineStart(); f<nframes; f++) lmyBaseXTalkContribution[f*BeadPlaneStride] = (contributor)?(bulk_signal[f]):(0); } //if lrx < regiond width windowOffset += windowWidth; } 
// while windowOffset < regW } //if ry < regH } // xtalk calculation from excess hydrogen by neighbours __global__ void GenericXTalkAndNeighbourAccumulation(// Here FL stands for flows const unsigned short * RegionMask, //per Region const unsigned short * bfMask, // per Bead const unsigned short * bstateMask, //per Bead float * BaseXTalkContribution, // XTalk of each single well float * xTalkContribution, // buffer XTalk to store accumulated xtalk at each well float * genericXTalkTracesperBlock, // one trace of max compressed frames per thread block or per region (atomicAdd) int * numGenericXTalkTracesRegion, //one int per region to average after accumulation const PerFlowParamsRegion * perFlowRegP, // R const size_t * numFramesRegion, // R const bool * TestingGenericXtakSampleMask //ToDo: remove whne testing done ) { extern __shared__ float smBaseimpleXTalkNeighbour[]; ///////////////////////// //coordinates and indices const size_t regionCol = blockIdx.x; const size_t regionRow = (blockIdx.y*blockDim.y)/ImgRegP.getRegH(); //image coordinates const int ix = regionCol * ImgRegP.getRegW() + threadIdx.x; const int iy = (blockIdx.y*blockDim.y) + threadIdx.y; size_t idx = ImgRegP.getWellIdx(ix,iy); //region index to address region specific parameters const size_t regId = ImgRegP.getRegIdFromGrid(regionCol,regionRow); //////////////////////// // region specifics perFlowRegP += regId; //set offset to first trace for this region int blocksPerRegion = (ImgRegP.getRegH() + blockDim.y -1)/blockDim.y; float * genericXTalkRegionBase = genericXTalkTracesperBlock + regId * ConstFrmP.getMaxCompFrames() * blocksPerRegion; int * numGenericXTalkTracesGlobal = numGenericXTalkTracesRegion + regId; //exit if no work for whole region if( LDG_ACCESS(RegionMask,regId) != RegionMaskLive) return; size_t nframes = numFramesRegion[regId]; if (nframes == 0) return; //////////////////////// //Shared Memory pointers float * sm_base = smBaseimpleXTalkNeighbour; float * sm_warp_base = sm_base + threadIdx.y*blockDim.x; float * sm = sm_warp_base + threadIdx.x; float * sm_warpTrace_base = sm_base + blockDim.x * blockDim.y; float * sm_warpTrace = sm_warpTrace_base + threadIdx.y * ConstFrmP.getMaxCompFrames(); int t=threadIdx.x; //set shared mem warp trace buffer to 0 while( t < ConstFrmP.getMaxCompFrames() ){ sm_warpTrace[t] = 0.0f; t += blockDim.x; } ////////////////////////// // sliding window base region coordinates int rx = threadIdx.x; int ry = iy%ImgRegP.getRegH(); //find offset for generic xtalk trace in global for this threadBlock int blockInRegion = ry/blockDim.y; float * genericXTalkTraceGlobal = genericXTalkRegionBase + blockInRegion*ConstFrmP.getMaxCompFrames(); const int windowWidth = blockDim.x; const int rowsPerStride = blockDim.y; const size_t rowStride = rowsPerStride * ImgRegP.getImgW(); //stride from one per bead plane to the next: const size_t BeadPlaneStride = ImgRegP.getPlaneStride(); int windowOffset = 0; int numGenericXTalkTraces = 0; // local memory and variables float xtalk[MAX_COMPRESSED_FRAMES_GPU]; if(ry < ImgRegP.getRegH(regId)) { // warp divergent code // DO NOT call syncthread within this branch!!! 
while(windowOffset < ImgRegP.getRegW(regId)){ //update coordinates and offsets for well we are accumulating for const int lrx = rx + windowOffset; const int lix = ix + windowOffset; const int lidx = idx + windowOffset; float * lxTalkContribution = xTalkContribution + lidx; const unsigned short * lBfMask = bfMask + lidx; bool useForGenericXTalk = false; // zeroing has to be done before next if statement for later warp level accumulation for (int f=0; f<nframes; ++f) { xtalk[f] = 0; } //only threads that work on well within the region actually do this work here: if(lrx < ImgRegP.getRegW(regId)){ //useForGenericXTalk = TestingGenericXtakSampleMask[lidx]; //ToDo: remove after testing useForGenericXTalk = useForEmpty(lBfMask); //useForGenericXTalk = Match(lBfMask, MaskLive); //useForGenericXTalk = (Match(lBfMask, MaskLive) || useForEmpty(lBfMask)); for (int nid=0; nid<ConstTraceXTalkP.getNumNeighbours(); ++nid){ //neighbor global coordinates int nix; int niy; //get coordinates for neighbor we are working on ConstTraceXTalkP.getBlockCoord(nix,niy,nid,lix,iy); #ifdef XTALK_REGION_ONLY if( ImgRegP.getRegBaseX(regId) <= nix && nix < ImgRegP.getRegUpperX(regId)) { if( ImgRegP.getRegBaseY(regId) <= niy && niy < ImgRegP.getRegUpperY(regId)) { #else if( 0 <= nix && nix < ImgRegP.getImgW()) { if( 0 <= niy && niy < ImgRegP.getImgH()) { #endif //update local mask offsets for current neighbor for filtering size_t nIdx = ImgRegP.getWellIdx(nix,niy); const unsigned short * nBfMask = bfMask +nIdx; const unsigned short * nBstateMask = bstateMask + nIdx; const float * nBaseXTalkContribution = BaseXTalkContribution + nIdx; //filter non-live, pinned or corrupt neighbors //if(lrx == 41 && ry == 55) printf("%lu,%d,%d,",nIdx, nix, niy); if( Match(nBfMask, MaskLive) && !( Match(nBstateMask,BkgMaskPinned) || Match(nBstateMask,BkgMaskCorrupt)) ){ //update local buffer offsets for current neighbor //for (int f=perFlowRegP->getStart(); f<nframes; ++f){ for (int f=0; f<nframes; ++f){ xtalk[f] += nBaseXTalkContribution[f*BeadPlaneStride] * ConstTraceXTalkP.getMultiplier(nid); // if(lrx == 41 && ry == 55) printf("%f,",nBaseXTalkContribution[f*BeadPlaneStride] * ConstTraceXTalkP.getMultiplier(nid)); } } //if(lrx == 41 && ry == 55) printf("\n"); } } }// neighbour loop } //if rx < region width //now this is where the warp level magic gets thrown in int genericXTalkTracesThisWarp = 0; // how many traces are we actually working on in this window int * ismWB = (int*)sm_warp_base; int * ism = ismWB + threadIdx.x; *ism = (useForGenericXTalk)?(1):(0); WarpSumNoSync(ism); genericXTalkTracesThisWarp = *ismWB; numGenericXTalkTraces += genericXTalkTracesThisWarp; //count how many XTalks traces for empties got already handled by this warp for (int f=0; f<nframes; ++f){ float thisFrame = xtalk[f] ; if(lrx < ImgRegP.getRegW(regId)) // let only threads write to global that actually have data from within the region (all others have 0) *lxTalkContribution = thisFrame ; //store xtalk for this single well frame by frame //more warp level magic: //accumulate generic Xtalk for this window and add to xtalk already accumulated by this warp *sm = (useForGenericXTalk)?(thisFrame):(0.0f); WarpSumNoSync(sm); float genXtalkFrame = *sm_warp_base + sm_warpTrace[f] ; //WarpTraceAccumSingleFrame(sm_warpTrace,f,sm_warp_base,thisFrame,useForGenericXTalk);//thisFrame,useForGenericXTalk); if(threadIdx.x == 0) sm_warpTrace[f] = genXtalkFrame; lxTalkContribution+=BeadPlaneStride; // one frame per plane per bead } windowOffset += windowWidth; } // while 
windowOffset < region Width. }// if ry < region Height // END of warp divergent code from here on we can syncthread again!! //block level reduction and global accumulation with atomics BlockTraceAccumfromWarpsInplaceToGlobal(genericXTalkTraceGlobal,1,sm_warpTrace_base,nframes, ConstFrmP.getMaxCompFrames(), false); __syncthreads(); int * ism = (int*)sm; *ism = numGenericXTalkTraces; BlockAccumValuePerWarpToGlobal(numGenericXTalkTracesGlobal,(int*)sm_base,true); } //one 1D block per region __global__ void GenericXTalkAccumulation(// Here FL stands for flows float * genericXTalkTracesRegion, // one trace of max compressed frames per thread block or per region (atomicAdd) const float * genericXTalkTracesPerBlock, const int * numGenericXTalkTracesRegion, //one int per region to average after accumulation const size_t * numFrames, const int blocksPerRegion ) { //one block per region const size_t regId = blockIdx.x + blockIdx.y * gridDim.x; //per region trace output genericXTalkTracesRegion += regId*ConstFrmP.getMaxCompFrames(); //pointer to first partial trace for accumulation const float * genericXTalkRegionBase = genericXTalkTracesPerBlock + regId * ConstFrmP.getMaxCompFrames() * blocksPerRegion; const int numtraces=numGenericXTalkTracesRegion[regId]; const int tIdx = threadIdx.x + threadIdx.y*blockDim.x; const int windowSize = blockDim.x*blockDim.y; const size_t numf = numFrames[regId]; //if more frames than threads in block (schould never happen) for(size_t f=tIdx; f<numf;f+=windowSize){ float myframe=0; //pointer to frame f in first partial trace const float * PerBlockThisFrame = genericXTalkRegionBase + tIdx; for(int block=0; block<blocksPerRegion;block++){ if(f<numf){ myframe += *PerBlockThisFrame; } //move to frame f for next block in partial traces PerBlockThisFrame += ConstFrmP.getMaxCompFrames(); // move to next trace; } genericXTalkTracesRegion[f] = (numtraces>0)?(myframe/numtraces):(0.0f); } }
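// ---------------------------------------------------------------------------
// Illustrative scalar sketch (not part of the original kernels): the per-well
// contribution in SimpleXTalkNeighbourContribution is three chained first-order
// recurrences -- (1) an approximate incorporation signal relative to the empty
// trace, (2) the hydrogen retained in the well with time constant tauB (lost
// hydrogen is the remainder), and (3) the same filter with time constant
// taue = etbR * tauB giving the ion flux into the bulk, which is what the
// neighbour-accumulation kernel sums. Plain-array version, assuming
// nframes <= MAXF; the kernel instead walks strided global-memory planes.
void xtalk_bulk_signal(const float* trace, const float* empty,
                       const float* deltaFrames, int nframes, int start,
                       float etbR, float tauB, float* bulk) {
  const int MAXF = 64;                 // assumed cap, cf. MAX_COMPRESSED_FRAMES_GPU
  float incorp[MAXF], lost[MAXF];
  float ot = 1.0f / (2.0f * tauB);
  // (1) incorporation rise
  float xt = deltaFrames[0] * ot;
  incorp[0] = (1.0f + xt) * trace[0] - (etbR + xt) * empty[0];
  for (int f = 1; f < nframes; ++f) {
    xt = deltaFrames[f] * ot;
    incorp[f] = (1.0f + xt) * trace[f] - (1.0f - xt) * trace[f - 1]
              - ((etbR + xt) * empty[f] - (etbR - xt) * empty[f - 1])
              + incorp[f - 1];
  }
  // (2) hydrogen retained in the well; lost = incorp - retained
  xt = 1.0f / (1.0f + deltaFrames[start] * ot);
  lost[start] = incorp[start] * xt;
  for (int f = start + 1; f < nframes; ++f) {
    xt = 1.0f / (1.0f + deltaFrames[f] * ot);
    lost[f] = (incorp[f] - incorp[f - 1]
             + (1.0f - deltaFrames[f] * ot) * lost[f - 1]) * xt;
  }
  for (int f = start; f < nframes; ++f) lost[f] = incorp[f] - lost[f];
  // (3) ion flux into the bulk with time constant taue = etbR * tauB
  float ote = 1.0f / (2.0f * etbR * tauB);
  xt = 1.0f / (1.0f + deltaFrames[start] * ote);
  bulk[start] = lost[start] * xt;
  for (int f = start + 1; f < nframes; ++f) {
    xt = 1.0f / (1.0f + deltaFrames[f] * ote);
    bulk[f] = (lost[f] - lost[f - 1]
             + (1.0f - deltaFrames[f] * ote) * bulk[f - 1]) * xt;
  }
}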
#include <cusp/linear_operator.h>
#include <cusp/gallery/poisson.h>
#include <cusp/gallery/random.h>
#include <cusp/array2d.h>
#include <cusp/coo_matrix.h>
#include <cusp/csr_matrix.h>
#include <cusp/dia_matrix.h>
#include <cusp/ell_matrix.h>
#include <cusp/hyb_matrix.h>
#include <cusp/permutation_matrix.h>
#include <cusp/multiply.h>

/////////////////////////////////////////
// Sparse Matrix-Matrix Multiplication //
/////////////////////////////////////////

template <typename SparseMatrixType, typename DenseMatrixType>
void CompareSparseMatrixMatrixMultiply(DenseMatrixType A, DenseMatrixType B)
{
    DenseMatrixType C;
    cusp::multiply(A, B, C);

    SparseMatrixType _A(A), _B(B), _C;
    cusp::multiply(_A, _B, _C);

    ASSERT_EQUAL(C == DenseMatrixType(_C), true);

    typename SparseMatrixType::view _Aview(_A), _Bview(_B), _Cview(_C);
    cusp::multiply(_Aview, _Bview, _Cview);

    ASSERT_EQUAL(C == DenseMatrixType(_Cview), true);
}

template <typename TestMatrix>
void TestSparseMatrixMatrixMultiply(void)
{
    typedef typename TestMatrix::value_type ValueType;

    cusp::array2d<ValueType,cusp::host_memory> A(3,2);
    A(0,0) = 1.0; A(0,1) = 2.0;
    A(1,0) = 3.0; A(1,1) = 0.0;
    A(2,0) = 5.0; A(2,1) = 6.0;

    cusp::array2d<ValueType,cusp::host_memory> B(2,4);
    B(0,0) = 0.0; B(0,1) = 2.0; B(0,2) = 3.0; B(0,3) = 4.0;
    B(1,0) = 5.0; B(1,1) = 0.0; B(1,2) = 0.0; B(1,3) = 8.0;

    cusp::array2d<ValueType,cusp::host_memory> C(2,2);
    C(0,0) = 0.0; C(0,1) = 0.0;
    C(1,0) = 3.0; C(1,1) = 5.0;

    cusp::array2d<ValueType,cusp::host_memory> D(2,1);
    D(0,0) = 2.0;
    D(1,0) = 3.0;

    cusp::array2d<ValueType,cusp::host_memory> E(2,2);
    E(0,0) = 0.0; E(0,1) = 0.0;
    E(1,0) = 0.0; E(1,1) = 0.0;

    cusp::array2d<ValueType,cusp::host_memory> F(2,3);
    F(0,0) = 0.0; F(0,1) = 1.5; F(0,2) = 3.0;
    F(1,0) = 0.5; F(1,1) = 0.0; F(1,2) = 0.0;

    cusp::array2d<ValueType,cusp::host_memory> G;
    cusp::gallery::poisson5pt(G, 4, 6);

    cusp::array2d<ValueType,cusp::host_memory> H;
    cusp::gallery::poisson5pt(H, 8, 3);

    cusp::array2d<ValueType,cusp::host_memory> I;
    cusp::gallery::random(I, 24, 24, 150);

    cusp::array2d<ValueType,cusp::host_memory> J;
    cusp::gallery::random(J, 24, 24, 50);

    cusp::array2d<ValueType,cusp::host_memory> K;
    cusp::gallery::random(K, 24, 12, 20);

    //thrust::host_vector< cusp::array2d<float,cusp::host_memory> > matrices;
    std::vector< cusp::array2d<ValueType,cusp::host_memory> > matrices;
    matrices.push_back(A); matrices.push_back(B); matrices.push_back(C);
    matrices.push_back(D); matrices.push_back(E); matrices.push_back(F);
    matrices.push_back(G); matrices.push_back(H); matrices.push_back(I);
    matrices.push_back(J); matrices.push_back(K);

    // test matrix multiply for every pair of compatible matrices
    for(size_t i = 0; i < matrices.size(); i++)
    {
        const cusp::array2d<ValueType,cusp::host_memory>& left = matrices[i];
        for(size_t j = 0; j < matrices.size(); j++)
        {
            const cusp::array2d<ValueType,cusp::host_memory>& right = matrices[j];
            if (left.num_cols == right.num_rows)
                CompareSparseMatrixMatrixMultiply<TestMatrix>(left, right);
        }
    }
}
DECLARE_SPARSE_MATRIX_UNITTEST(TestSparseMatrixMatrixMultiply);

template <typename SparseMatrixType, typename DenseMatrixType>
void CompareScaledSparseMatrixMatrixMultiply(DenseMatrixType A, DenseMatrixType B)
{
    typedef typename SparseMatrixType::value_type ValueType;

    thrust::identity<ValueType>    initialize;
    thrust::multiplies<ValueType>  combine;
    thrust::plus<ValueType>        reduce;

    DenseMatrixType C(A);
    cusp::multiply(A, B, C, initialize, combine, reduce);

    SparseMatrixType _A(A), _B(B), _C(A);
    cusp::multiply(_A, _B, _C, initialize, combine, reduce);

    ASSERT_EQUAL(C == DenseMatrixType(_C), true);

    typename SparseMatrixType::view _Aview(_A), _Bview(_B), _Cview(_C);
    cusp::multiply(_Aview, _Bview, _Cview, initialize, combine, reduce);

    ASSERT_EQUAL(C == DenseMatrixType(_Cview), true);
}

template <typename TestMatrix>
void TestScaledSparseMatrixMatrixMultiply(void)
{
    typedef typename TestMatrix::value_type ValueType;

    cusp::array2d<ValueType,cusp::host_memory> A(3,2);
    A(0,0) = 1.0; A(0,1) = 2.0;
    A(1,0) = 3.0; A(1,1) = 0.0;
    A(2,0) = 5.0; A(2,1) = 6.0;

    cusp::array2d<ValueType,cusp::host_memory> B(2,4);
    B(0,0) = 0.0; B(0,1) = 2.0; B(0,2) = 3.0; B(0,3) = 4.0;
    B(1,0) = 5.0; B(1,1) = 0.0; B(1,2) = 0.0; B(1,3) = 8.0;

    cusp::array2d<ValueType,cusp::host_memory> C(2,2);
    C(0,0) = 0.0; C(0,1) = 0.0;
    C(1,0) = 3.0; C(1,1) = 5.0;

    cusp::array2d<ValueType,cusp::host_memory> D(2,1);
    D(0,0) = 2.0;
    D(1,0) = 3.0;

    cusp::array2d<ValueType,cusp::host_memory> E(2,2);
    E(0,0) = 0.0; E(0,1) = 0.0;
    E(1,0) = 0.0; E(1,1) = 0.0;

    cusp::array2d<ValueType,cusp::host_memory> F(2,3);
    F(0,0) = 0.0; F(0,1) = 1.5; F(0,2) = 3.0;
    F(1,0) = 0.5; F(1,1) = 0.0; F(1,2) = 0.0;

    cusp::array2d<ValueType,cusp::host_memory> G;
    cusp::gallery::poisson5pt(G, 4, 6);

    cusp::array2d<ValueType,cusp::host_memory> H;
    cusp::gallery::poisson5pt(H, 8, 3);

    cusp::array2d<ValueType,cusp::host_memory> I;
    cusp::gallery::random(I, 24, 24, 150);

    cusp::array2d<ValueType,cusp::host_memory> J;
    cusp::gallery::random(J, 24, 24, 50);

    cusp::array2d<ValueType,cusp::host_memory> K;
    cusp::gallery::random(K, 24, 12, 20);

    //thrust::host_vector< cusp::array2d<float,cusp::host_memory> > matrices;
    std::vector< cusp::array2d<ValueType,cusp::host_memory> > matrices;
    matrices.push_back(A); matrices.push_back(B); matrices.push_back(C);
    matrices.push_back(D); matrices.push_back(E); matrices.push_back(F);
    matrices.push_back(G); matrices.push_back(H); matrices.push_back(I);
    matrices.push_back(J); matrices.push_back(K);

    // test matrix multiply for every pair of compatible matrices
    for(size_t i = 0; i < matrices.size(); i++)
    {
        const cusp::array2d<ValueType,cusp::host_memory>& left = matrices[i];
        for(size_t j = 0; j < matrices.size(); j++)
        {
            const cusp::array2d<ValueType,cusp::host_memory>& right = matrices[j];
            if (left.num_cols == right.num_rows)
                CompareScaledSparseMatrixMatrixMultiply<TestMatrix>(left, right);
        }
    }
}
/* DECLARE_SPARSE_MATRIX_UNITTEST(TestScaledSparseMatrixMatrixMultiply); */

///////////////////////////////////////////////
// Sparse Matrix-Dense Matrix Multiplication //
///////////////////////////////////////////////

template <typename SparseMatrixType, typename DenseMatrixType>
void CompareSparseMatrixDenseMatrixMultiply(DenseMatrixType A, DenseMatrixType B)
{
    typedef typename SparseMatrixType::value_type   ValueType;
    typedef typename SparseMatrixType::memory_space MemorySpace;
    typedef cusp::array2d<ValueType,MemorySpace,cusp::column_major> DenseSpaceMatrixType;

    DenseMatrixType C(A.num_rows, B.num_cols);
    cusp::multiply(A, B, C);

    SparseMatrixType _A(A);

    // Copy B into the memory space
    DenseSpaceMatrixType B_space(B);

    // Allocate _B and ensure each column is properly aligned
    DenseSpaceMatrixType _B(B.num_rows, B.num_cols, ValueType(0),
                            cusp::detail::round_up(B.num_rows, size_t(128)));

    // Copy columns of B into _B
    for(size_t i = 0; i < B.num_cols; i++)
        cusp::blas::copy(B_space.column(i), _B.column(i));

    // test container
    {
        DenseSpaceMatrixType _C(C.num_rows, C.num_cols);
        cusp::multiply(_A, _B, _C);
        ASSERT_EQUAL(C == DenseMatrixType(_C), true);
    }

    {
        // test view
        DenseSpaceMatrixType _C(C.num_rows, C.num_cols);
        typename SparseMatrixType::view _Aview(_A);
        typename DenseSpaceMatrixType::view _Bview(_B), _Cview(_C);
        cusp::multiply(_Aview, _Bview, _Cview);
        ASSERT_EQUAL(C == DenseMatrixType(_C), true);
    }
}

template <typename TestMatrix>
void TestSparseMatrixDenseMatrixMultiply(void)
{
    cusp::array2d<float,cusp::host_memory> A(3,2);
    A(0,0) = 1.0; A(0,1) = 2.0;
    A(1,0) = 3.0; A(1,1) = 0.0;
    A(2,0) = 5.0; A(2,1) = 6.0;

    cusp::array2d<float,cusp::host_memory> B(2,4);
    B(0,0) = 0.0; B(0,1) = 2.0; B(0,2) = 3.0; B(0,3) = 4.0;
    B(1,0) = 5.0; B(1,1) = 0.0; B(1,2) = 0.0; B(1,3) = 8.0;

    cusp::array2d<float,cusp::host_memory> C(2,2);
    C(0,0) = 0.0; C(0,1) = 0.0;
    C(1,0) = 3.0; C(1,1) = 5.0;

    cusp::array2d<float,cusp::host_memory> D(2,1);
    D(0,0) = 2.0;
    D(1,0) = 3.0;

    cusp::array2d<float,cusp::host_memory> E(2,2);
    E(0,0) = 0.0; E(0,1) = 0.0;
    E(1,0) = 0.0; E(1,1) = 0.0;

    cusp::array2d<float,cusp::host_memory> F(2,3);
    F(0,0) = 0.0; F(0,1) = 1.5; F(0,2) = 3.0;
    F(1,0) = 0.5; F(1,1) = 0.0; F(1,2) = 0.0;

    cusp::array2d<float,cusp::host_memory> G;
    cusp::gallery::poisson5pt(G, 4, 6);

    cusp::array2d<float,cusp::host_memory> H;
    cusp::gallery::poisson5pt(H, 8, 3);

    cusp::array2d<float,cusp::host_memory> I;
    cusp::gallery::random(I, 24, 24, 150);

    cusp::array2d<float,cusp::host_memory> J;
    cusp::gallery::random(J, 24, 24, 50);

    cusp::array2d<float,cusp::host_memory> K;
    cusp::gallery::random(K, 24, 12, 20);

    //thrust::host_vector< cusp::array2d<float,cusp::host_memory,cusp::column_major> > matrices;
    std::vector< cusp::array2d<float,cusp::host_memory,cusp::column_major> > matrices;
    matrices.push_back(A); matrices.push_back(B); matrices.push_back(C);
    matrices.push_back(D); matrices.push_back(E); matrices.push_back(F);
    matrices.push_back(G); matrices.push_back(H); matrices.push_back(I);
    matrices.push_back(J); matrices.push_back(K);

    // test matrix multiply for every pair of compatible matrices
    for(size_t i = 0; i < matrices.size(); i++)
    {
        const cusp::array2d<float,cusp::host_memory,cusp::column_major>& left = matrices[i];
        for(size_t j = 0; j < matrices.size(); j++)
        {
            const cusp::array2d<float,cusp::host_memory,cusp::column_major>& right = matrices[j];
            if (left.num_cols == right.num_rows)
                CompareSparseMatrixDenseMatrixMultiply<TestMatrix>(left, right);
        }
    }
}
/* DECLARE_SPARSE_MATRIX_UNITTEST(TestSparseMatrixDenseMatrixMultiply); */

/////////////////////////////////////////
// Sparse Matrix-Vector Multiplication //
/////////////////////////////////////////

template <typename SparseMatrixType, typename DenseMatrixType>
void CompareSparseMatrixVectorMultiply(DenseMatrixType A)
{
    typedef typename SparseMatrixType::value_type   ValueType;
    typedef typename SparseMatrixType::memory_space MemorySpace;

    // setup reference input
    cusp::array1d<ValueType, cusp::host_memory> x(A.num_cols);
    cusp::array1d<ValueType, cusp::host_memory> y(A.num_rows, 10);
    for(size_t i = 0; i < x.size(); i++)
        x[i] = i % 10;

    // compute reference output
    cusp::multiply(A, x, y);

    // test container
    {
        SparseMatrixType _A(A);
        cusp::array1d<ValueType, MemorySpace> _x(x);
        cusp::array1d<ValueType, MemorySpace> _y(A.num_rows, 10);
        cusp::multiply(_A, _x, _y);
        ASSERT_EQUAL(_y, y);
    }

    // test matrix view
    {
        SparseMatrixType _A(A);
        cusp::array1d<ValueType, MemorySpace> _x(x);
        cusp::array1d<ValueType, MemorySpace> _y(A.num_rows, 10);
        typename SparseMatrixType::view _V(_A);
        cusp::multiply(_V, _x, _y);
        ASSERT_EQUAL(_y, y);
    }

    // test array view
    {
        SparseMatrixType _A(A);
        cusp::array1d<ValueType, MemorySpace> _x(x);
        cusp::array1d<ValueType, MemorySpace> _y(A.num_rows, 10);
        typename cusp::array1d<ValueType, MemorySpace>::view _Vx(_x), _Vy(_y);
        cusp::multiply(_A, _Vx, _Vy);
        ASSERT_EQUAL(_Vy, y);
    }
}

// TODO use COO reference format and test larger problem sizes
template <class TestMatrix>
void TestSparseMatrixVectorMultiply()
{
    typedef typename TestMatrix::value_type ValueType;

    cusp::array2d<ValueType, cusp::host_memory> A(5,4);
    A(0,0) = 13; A(0,1) = 80; A(0,2) =  0; A(0,3) =  0;
    A(1,0) =  0; A(1,1) = 27; A(1,2) =  0; A(1,3) =  0;
    A(2,0) = 55; A(2,1) =  0; A(2,2) = 24; A(2,3) = 42;
    A(3,0) =  0; A(3,1) = 69; A(3,2) =  0; A(3,3) = 83;
    A(4,0) =  0; A(4,1) =  0; A(4,2) = 27; A(4,3) =  0;

    cusp::array2d<ValueType,cusp::host_memory> B(2,4);
    B(0,0) = 0.0; B(0,1) = 2.0; B(0,2) = 3.0; B(0,3) = 4.0;
    B(1,0) = 5.0; B(1,1) = 0.0; B(1,2) = 0.0; B(1,3) = 8.0;

    cusp::array2d<ValueType,cusp::host_memory> C(2,2);
    C(0,0) = 0.0; C(0,1) = 0.0;
    C(1,0) = 3.0; C(1,1) = 5.0;

    cusp::array2d<ValueType,cusp::host_memory> D(2,1);
    D(0,0) = 2.0;
    D(1,0) = 3.0;

    cusp::array2d<ValueType,cusp::host_memory> E(2,2);
    E(0,0) = 0.0; E(0,1) = 0.0;
    E(1,0) = 0.0; E(1,1) = 0.0;

    cusp::array2d<ValueType,cusp::host_memory> F(2,3);
    F(0,0) = 0.0; F(0,1) = 1.5; F(0,2) = 3.0;
    F(1,0) = 0.5; F(1,1) = 0.0; F(1,2) = 0.0;

    cusp::array2d<ValueType,cusp::host_memory> G;
    cusp::gallery::poisson5pt(G, 4, 6);

    cusp::array2d<ValueType,cusp::host_memory> H;
    cusp::gallery::poisson5pt(H, 8, 3);

    CompareSparseMatrixVectorMultiply<TestMatrix>(A);
    CompareSparseMatrixVectorMultiply<TestMatrix>(B);
    CompareSparseMatrixVectorMultiply<TestMatrix>(C);
    CompareSparseMatrixVectorMultiply<TestMatrix>(D);
    CompareSparseMatrixVectorMultiply<TestMatrix>(E);
    CompareSparseMatrixVectorMultiply<TestMatrix>(F);
    CompareSparseMatrixVectorMultiply<TestMatrix>(G);
    CompareSparseMatrixVectorMultiply<TestMatrix>(H);
}
DECLARE_SPARSE_MATRIX_UNITTEST(TestSparseMatrixVectorMultiply);

template <typename SparseMatrixType, typename DenseMatrixType>
void CompareScaledSparseMatrixVectorMultiply(DenseMatrixType A)
{
    typedef typename SparseMatrixType::value_type   ValueType;
    typedef typename SparseMatrixType::memory_space MemorySpace;

    // setup reference input
    cusp::array1d<ValueType, cusp::host_memory> x(A.num_cols);
    cusp::array1d<ValueType, cusp::host_memory> y(A.num_rows, 10);
    for(size_t i = 0; i < x.size(); i++)
        x[i] = i % 10;

    thrust::identity<ValueType>    initialize;
    thrust::multiplies<ValueType>  combine;
    thrust::plus<ValueType>        reduce;

    // compute reference output
    cusp::multiply(A, x, y, initialize, combine, reduce);

    // test container
    {
        SparseMatrixType _A(A);
        cusp::array1d<ValueType, MemorySpace> _x(x);
        cusp::array1d<ValueType, MemorySpace> _y(A.num_rows, 10);
        cusp::multiply(_A, _x, _y, initialize, combine, reduce);
        ASSERT_EQUAL(_y, y);
    }

    // test matrix view
    {
        SparseMatrixType _A(A);
        cusp::array1d<ValueType, MemorySpace> _x(x);
        cusp::array1d<ValueType, MemorySpace> _y(A.num_rows, 10);
        typename SparseMatrixType::view _V(_A);
        cusp::multiply(_V, _x, _y, initialize, combine, reduce);
        ASSERT_EQUAL(_y, y);
    }

    // test array view
    {
        SparseMatrixType _A(A);
        cusp::array1d<ValueType, MemorySpace> _x(x);
        cusp::array1d<ValueType, MemorySpace> _y(A.num_rows, 10);
        typename cusp::array1d<ValueType, MemorySpace>::view _Vx(_x), _Vy(_y);
        cusp::multiply(_A, _Vx, _Vy, initialize, combine, reduce);
        ASSERT_EQUAL(_Vy, y);
    }
}

template <class TestMatrix>
void TestScaledSparseMatrixVectorMultiply()
{
    typedef typename TestMatrix::value_type ValueType;

    cusp::array2d<ValueType, cusp::host_memory> A(5,4);
    A(0,0) = 13; A(0,1) = 80; A(0,2) =  0; A(0,3) =  0;
    A(1,0) =  0; A(1,1) = 27; A(1,2) =  0; A(1,3) =  0;
    A(2,0) = 55; A(2,1) =  0; A(2,2) = 24; A(2,3) = 42;
    A(3,0) =  0; A(3,1) = 69; A(3,2) =  0; A(3,3) = 83;
    A(4,0) =  0; A(4,1) =  0; A(4,2) = 27; A(4,3) =  0;

    cusp::array2d<ValueType,cusp::host_memory> B(2,4);
    B(0,0) = 0.0; B(0,1) = 2.0; B(0,2) = 3.0; B(0,3) = 4.0;
    B(1,0) = 5.0; B(1,1) = 0.0; B(1,2) = 0.0; B(1,3) = 8.0;

    cusp::array2d<ValueType,cusp::host_memory> C(2,2);
    C(0,0) = 0.0; C(0,1) = 0.0;
    C(1,0) = 3.0; C(1,1) = 5.0;

    cusp::array2d<ValueType,cusp::host_memory> D(2,1);
    D(0,0) = 2.0;
    D(1,0) = 3.0;

    cusp::array2d<ValueType,cusp::host_memory> E(2,2);
    E(0,0) = 0.0; E(0,1) = 0.0;
    E(1,0) = 0.0; E(1,1) = 0.0;

    cusp::array2d<ValueType,cusp::host_memory> F(2,3);
    F(0,0) = 0.0; F(0,1) = 1.5; F(0,2) = 3.0;
    F(1,0) = 0.5; F(1,1) = 0.0; F(1,2) = 0.0;

    cusp::array2d<ValueType,cusp::host_memory> G;
    cusp::gallery::poisson5pt(G, 4, 6);

    cusp::array2d<ValueType,cusp::host_memory> H;
    cusp::gallery::poisson5pt(H, 8, 3);

    CompareScaledSparseMatrixVectorMultiply<TestMatrix>(A);
    CompareScaledSparseMatrixVectorMultiply<TestMatrix>(B);
    CompareScaledSparseMatrixVectorMultiply<TestMatrix>(C);
    CompareScaledSparseMatrixVectorMultiply<TestMatrix>(D);
    CompareScaledSparseMatrixVectorMultiply<TestMatrix>(E);
    CompareScaledSparseMatrixVectorMultiply<TestMatrix>(F);
    CompareScaledSparseMatrixVectorMultiply<TestMatrix>(G);
    CompareScaledSparseMatrixVectorMultiply<TestMatrix>(H);
}
DECLARE_SPARSE_MATRIX_UNITTEST(TestScaledSparseMatrixVectorMultiply);

//////////////////////////////
// General Linear Operators //
//////////////////////////////

template <class MemorySpace>
void TestMultiplyIdentityOperator(void)
{
    cusp::array1d<float, MemorySpace> x(4);
    cusp::array1d<float, MemorySpace> y(4);

    x[0] =  7.0f;   y[0] =  0.0f;
    x[1] =  5.0f;   y[1] = -2.0f;
    x[2] =  4.0f;   y[2] =  0.0f;
    x[3] = -3.0f;   y[3] =  5.0f;

    cusp::identity_operator<float, MemorySpace> A(4,4);

    cusp::multiply(A, x, y);

    ASSERT_EQUAL(y[0],  7.0f);
    ASSERT_EQUAL(y[1],  5.0f);
    ASSERT_EQUAL(y[2],  4.0f);
    ASSERT_EQUAL(y[3], -3.0f);
}
DECLARE_HOST_DEVICE_UNITTEST(TestMultiplyIdentityOperator);

///////////////////////////
// Permutation Operators //
///////////////////////////

template <class MemorySpace>
void TestMultiplyPermutationOperator(void)
{
    cusp::array1d<float, MemorySpace> x(4);
    cusp::array1d<float, MemorySpace> y(4);

    x[0] =  7.0f;
    x[1] =  5.0f;
    x[2] =  4.0f;
    x[3] = -3.0f;

    cusp::permutation_matrix<int, MemorySpace> P(4);
    P.permutation[0] = 3;
    P.permutation[1] = 2;
    P.permutation[2] = 1;
    P.permutation[3] = 0;

    cusp::multiply(P, x, y);

    ASSERT_EQUAL(y[0], -3.0f);
    ASSERT_EQUAL(y[1],  4.0f);
    ASSERT_EQUAL(y[2],  5.0f);
    ASSERT_EQUAL(y[3],  7.0f);
}
DECLARE_HOST_DEVICE_UNITTEST(TestMultiplyPermutationOperator);

template<typename TestMatrix>
void TestPermutationMatrixMultiply(void)
{
    typedef typename TestMatrix::index_type   IndexType;
    typedef typename TestMatrix::value_type   ValueType;
    typedef typename TestMatrix::memory_space MemorySpace;

    cusp::coo_matrix<IndexType,ValueType,cusp::host_memory> A(3,3,7);
    A.row_indices[0] = 0; A.column_indices[0] = 0; A.values[0] = 10;
    A.row_indices[1] = 0; A.column_indices[1] = 1; A.values[1] = 20;
    A.row_indices[2] = 0; A.column_indices[2] = 2; A.values[2] = 30;
    A.row_indices[3] = 1; A.column_indices[3] = 0; A.values[3] = 40;
    A.row_indices[4] = 1; A.column_indices[4] = 1; A.values[4] = 50;
    A.row_indices[5] = 2; A.column_indices[5] = 0; A.values[5] = 60;
    A.row_indices[6] = 2; A.column_indices[6] = 2; A.values[6] = 70;

    cusp::array1d<IndexType,MemorySpace> permutation(3);
    permutation[0] = 2;
    permutation[1] = 1;
    permutation[2] = 0;
    cusp::permutation_matrix<IndexType,MemorySpace> P(3, permutation);

    // Test row permutations
    {
        TestMatrix PA;
        TestMatrix A_(A);
        cusp::multiply(P, A_, PA);

        cusp::array2d<ValueType,cusp::host_memory> host_matrix(PA);

        ASSERT_EQUAL(PA.num_rows,    A.num_rows);
        ASSERT_EQUAL(PA.num_cols,    A.num_cols);
        ASSERT_EQUAL(PA.num_entries, A.num_entries);

        ASSERT_EQUAL(host_matrix(0,0), ValueType(60));
        ASSERT_EQUAL(host_matrix(0,1), ValueType( 0));
        ASSERT_EQUAL(host_matrix(0,2), ValueType(70));
        ASSERT_EQUAL(host_matrix(1,0), ValueType(40));
        ASSERT_EQUAL(host_matrix(1,1), ValueType(50));
        ASSERT_EQUAL(host_matrix(1,2), ValueType( 0));
        ASSERT_EQUAL(host_matrix(2,0), ValueType(10));
        ASSERT_EQUAL(host_matrix(2,1), ValueType(20));
        ASSERT_EQUAL(host_matrix(2,2), ValueType(30));
    }

    // Test column permutations
    {
        TestMatrix AP;
        TestMatrix A_(A);
        cusp::multiply(A_, P, AP);

        cusp::array2d<ValueType,cusp::host_memory> host_matrix(AP);

        ASSERT_EQUAL(AP.num_rows,    A.num_rows);
        ASSERT_EQUAL(AP.num_cols,    A.num_cols);
        ASSERT_EQUAL(AP.num_entries, A.num_entries);

        ASSERT_EQUAL(host_matrix(0,0), ValueType(30));
        ASSERT_EQUAL(host_matrix(0,1), ValueType(20));
        ASSERT_EQUAL(host_matrix(0,2), ValueType(10));
        ASSERT_EQUAL(host_matrix(1,0), ValueType( 0));
        ASSERT_EQUAL(host_matrix(1,1), ValueType(50));
        ASSERT_EQUAL(host_matrix(1,2), ValueType(40));
        ASSERT_EQUAL(host_matrix(2,0), ValueType(70));
        ASSERT_EQUAL(host_matrix(2,1), ValueType( 0));
        ASSERT_EQUAL(host_matrix(2,2), ValueType(60));
    }
}
DECLARE_SPARSE_MATRIX_UNITTEST(TestPermutationMatrixMultiply);

template <typename MatrixType1, typename MatrixType2, typename MatrixType3>
void multiply(my_system& system, const MatrixType1& A, const MatrixType2& B, MatrixType3& C)
{
    system.validate_dispatch();
    return;
}

void TestMatrixMatrixMultiplyDispatch()
{
    // initialize testing variables
    cusp::csr_matrix<int, float, cusp::device_memory> A, B, C;

    my_system sys(0);

    // call with explicit dispatching
    cusp::multiply(sys, A, B, C);

    // check if dispatch policy was used
    ASSERT_EQUAL(true, sys.is_valid());
}
DECLARE_UNITTEST(TestMatrixMatrixMultiplyDispatch);

template <typename LinearOperator,
          typename MatrixOrVector1,
          typename MatrixOrVector2,
          typename UnaryFunction,
          typename BinaryFunction1,
          typename BinaryFunction2>
void multiply(my_system& system,
              const LinearOperator&  A,
              const MatrixOrVector1& B,
              MatrixOrVector2& C,
              UnaryFunction   initialize,
              BinaryFunction1 combine,
              BinaryFunction2 reduce)
{
    system.validate_dispatch();
    return;
}

void TestMatrixVectorMultiplyDispatch()
{
    // initialize testing variables
    cusp::csr_matrix<int, float, cusp::device_memory> A, B, C;
    cusp::array1d<float, cusp::device_memory> x;

    {
        my_system sys(0);

        // call with explicit dispatching
        cusp::multiply(sys, A, x, x);

        // check if dispatch policy was used
        ASSERT_EQUAL(true, sys.is_valid());
    }

    {
        my_system sys(0);

        // call with explicit dispatching
        cusp::multiply(sys, A, x, x,
                       cusp::constant_functor<float>(),
                       thrust::multiplies<float>(),
                       thrust::plus<float>());

        // check if dispatch policy was used
        ASSERT_EQUAL(true, sys.is_valid());
    }
}
DECLARE_UNITTEST(TestMatrixVectorMultiplyDispatch);
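// The tests above exercise cusp::multiply across containers and views.
// For orientation, a minimal standalone sketch of the same entry points
// (poisson5pt gallery + matrix-vector multiply), independent of the test
// harness above; the 4x4 grid size is an arbitrary illustrative choice.
#include <cusp/csr_matrix.h>
#include <cusp/array1d.h>
#include <cusp/gallery/poisson.h>
#include <cusp/multiply.h>
#include <cusp/print.h>

int main(void)
{
    // A = 5-point Poisson operator on a 4x4 grid (a 16x16 sparse matrix)
    cusp::csr_matrix<int, float, cusp::device_memory> A;
    cusp::gallery::poisson5pt(A, 4, 4);

    // y = A * x with x = all ones
    cusp::array1d<float, cusp::device_memory> x(A.num_cols, 1.0f);
    cusp::array1d<float, cusp::device_memory> y(A.num_rows, 0.0f);
    cusp::multiply(A, x, y);

    cusp::print(y);
    return 0;
}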
void blas_sgemm(cublasHandle_t handle, const bool TransA, const bool TransB,
                const int M, const int N, const int K,
                const float alpha, float *A, const int lda, float *B, const int ldb,
                const float beta, float *C, const int ldc) {
    checkCudaErrors(cublasSgemm(handle,
                                TransA ? CUBLAS_OP_T : CUBLAS_OP_N,
                                TransB ? CUBLAS_OP_T : CUBLAS_OP_N,
                                M, N, K,
                                &alpha, A, lda, B, ldb,
                                &beta, C, ldc));
}

// Broadcasts `in` (of length `warpsize`) across `out` (of length `max`),
// optionally scaling by `mul`. The default of mul = 1.0f is an assumption
// that reconciles the four- and five-argument launches below.
__global__ void MemoryCpyLinear(float* out, float* in, size_t max, size_t warpsize,
                                float mul = 1.0f) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < max; i += gridDim.x * blockDim.x)
        out[i] = in[i % warpsize] * mul;
    __syncthreads();
}

op_BatchedLinear::op_BatchedLinear(std::string key_query_kernel,
                                   std::string key_query_bias,
                                   std::string key_key_kernel,
                                   std::string key_key_bias,
                                   std::string key_val_kernel,
                                   std::string key_val_bias,
                                   global_handle* handle)
    : op_kernel(handle) {
    std::vector<tagged_tensor *> tts = handle->tts;

    std::vector<std::string> keys = {key_query_kernel};
    tagged_tensor* tt = look_up_tts(tts, keys);
    query_kernel = tt->gpu_mem;

    keys = {key_query_bias};
    tt = look_up_tts(tts, keys);
    query_bias = tt->gpu_mem;

    keys = {key_key_kernel};
    tt = look_up_tts(tts, keys);
    key_kernel = tt->gpu_mem;

    keys = {key_key_bias};
    tt = look_up_tts(tts, keys);
    key_bias = tt->gpu_mem;

    keys = {key_val_kernel};
    tt = look_up_tts(tts, keys);
    val_kernel = tt->gpu_mem;

    keys = {key_val_bias};
    tt = look_up_tts(tts, keys);
    val_bias = tt->gpu_mem;

    size_t hidden_size = handle->hidden_size;

    // Pack the query/key/value weights into one contiguous allocation so the
    // three projections can be computed with a single batched matmul.
    float *key, *value;
    checkCudaErrors(cudaMalloc((void**)&batch_attentin_weights,
                               sizeof(float) * hidden_size * hidden_size * 3));
    key   = batch_attentin_weights + 1 * hidden_size * hidden_size;
    value = batch_attentin_weights + 2 * hidden_size * hidden_size;

    dim3 threads(512, 1, 1);
    dim3 blocks(hidden_size * hidden_size / 512 + 1, 1, 1);
    MemoryCpyLinear<<<blocks, threads>>>(batch_attentin_weights, query_kernel,
                                         hidden_size * hidden_size, hidden_size * hidden_size);
    MemoryCpyLinear<<<blocks, threads>>>(key, key_kernel,
                                         hidden_size * hidden_size, hidden_size * hidden_size);
    MemoryCpyLinear<<<blocks, threads>>>(value, val_kernel,
                                         hidden_size * hidden_size, hidden_size * hidden_size);

    checkCudaErrors(cudaFree(query_kernel));
    checkCudaErrors(cudaFree(key_kernel));
    checkCudaErrors(cudaFree(val_kernel));

    query_kernel = batch_attentin_weights;
    key_kernel   = key;
    val_kernel   = value;
}

template <typename T>
void op_Linear::forward(T* &output, T* input,
                        size_t n, size_t k, size_t m,
                        bool is_prepare, bool debug) {
    stored_input = input;
    output = handle->global_malloc_manage_float.get_new_head_point(n * m);

    if (debug) {
        debug_tensor_gpu<T>(std::string("weights"), kernel, 10, m, min((int)n, 10));
        debug_tensor_gpu<T>(std::string("bias"), bias, 10, m, 1);
        debug_tensor_gpu<T>(std::string("input_Linear"), input, 10, k, min((int)n, 10));
        std::cout << "n : " << n << std::endl;
        std::cout << "k : " << k << std::endl;
        std::cout << "m : " << m << std::endl;
    }

    // Pre-fill the output with the (row-broadcast) bias on the copy stream,
    // then let the GEMM accumulate into it (beta = 1).
    if (!is_prepare) {
        dim3 threads(1024, 1, 1);
        dim3 blocks(min((long)65535, n * m / 1024) + 1, 1, 1);
        MemoryCpyLinear<<<blocks, threads, 0, handle->copy_stream>>>(output, bias, n * m, m);
    }
    else {
        checkCudaErrors(cudaMemcpyAsync(output, bias, n * m * sizeof(float),
                                        cudaMemcpyDeviceToDevice, handle->copy_stream));
    }

    cudaEventRecord(handle->copy_event, handle->copy_stream);
    cudaStreamWaitEvent(handle->cal_stream, handle->copy_event, 0);

    if (debug)
        debug_tensor_gpu<T>(std::string("After Linear copy"), output, 10, m, min((int)n, 10));

    std::vector<size_t> a_shape = {n, k};
    std::vector<size_t> b_shape = {k, m};
    std::vector<size_t> c_shape = {n, m};

    blas_sgemm(handle->handle, false, false, m, n, k, 1.0f, kernel, m, input, k, 1.0f, output, m);

    if (debug)
        debug_tensor_gpu<T>(std::string("Linear out"), output, 10, m, min((int)n, 10));
}

template void op_Linear::forward<float>(float* &output, float* input,
                                        size_t n, size_t k, size_t m,
                                        bool is_prepare, bool debug);

template <typename T>
__global__ void MemoryCpyLinearTranpose(T *out, T *in, int n, int m, int max) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < max; i += gridDim.x * blockDim.x) {
        out[(i % m) * n + i / m] = in[i];
    }
    __syncthreads();
}

template <typename T>
void op_Linear::backward(T *dout, size_t n, size_t k, size_t m) {
    T *kernel_tranpose;
    kernel_tranpose = handle->global_malloc_manage_float.get_new_head_point(k * m);
    dim3 threads(1024, 1, 1);
    dim3 blocks(min((long)65535, (k * m + 1023) / 1024), 1, 1);
    MemoryCpyLinearTranpose<T><<<blocks, threads, 0, handle->copy_stream>>>(
        kernel_tranpose, kernel, k, m, k * m);
    cudaEventRecord(handle->copy_event, handle->copy_stream);
    cudaStreamWaitEvent(handle->cal_stream, handle->copy_event, 0);
    // debug_tensor_gpu<float>(std::string("kernel_tranpose"), kernel_tranpose, k, k, m);

    // grad_input = dout * kernel^T
    std::vector<size_t> a_shape = {n, m};
    std::vector<size_t> b_shape = {m, k};
    std::vector<size_t> c_shape = {n, k};

    grad_input = handle->global_malloc_manage_float.get_new_head_point(n * k);
    matmul(handle->handle, dout, a_shape, kernel_tranpose, b_shape,
           grad_input, c_shape, false, false, 1.0f, 0.0f);

    T *input_tranpose;
    input_tranpose = handle->global_malloc_manage_float.get_new_head_point(n * k);
    dim3 threads1(1024, 1, 1);
    dim3 blocks1(min((long)65535, (k * n + 1023) / 1024), 1, 1);
    MemoryCpyLinearTranpose<T><<<blocks1, threads1, 0, handle->copy_stream>>>(
        input_tranpose, stored_input, n, k, n * k);
    cudaEventRecord(handle->copy_event, handle->copy_stream);
    cudaStreamWaitEvent(handle->cal_stream, handle->copy_event, 0);

    // grad_kernel = input^T * dout
    a_shape = {k, n};
    b_shape = {n, m};
    c_shape = {k, m};

    grad_kernel = handle->global_malloc_manage_float.get_new_head_point(k * m);
    matmul(handle->handle, input_tranpose, a_shape, dout, b_shape,
           grad_kernel, c_shape, false, false, 1.0f, 0.0f);

    grad_bias = handle->global_malloc_manage_float.get_new_head_point(n * m);
    dim3 threads2(1024, 1, 1);
    dim3 blocks2(min((long)65535, (n * m + 1023) / 1024), 1, 1);
    MemoryCpyLinear<<<blocks2, threads2, 0, handle->copy_stream>>>(grad_bias, dout, n * m, n * m, n);
}

template void op_Linear::backward<float>(float *dout, size_t n, size_t k, size_t m);

template <typename T>
void op_BatchedLinear::forward(T* &output, T* input,
                               size_t n, size_t k, size_t m,
                               bool is_prepare, bool debug) {
    output = handle->global_malloc_manage_float.get_new_head_point(3 * n * m);

    //dim3 threads(512, 1, 1);
    //dim3 blocks(max(3*n*m, 3*k*m)/512 + 1, 1, 1);
    //BatchMemoryCpyLinear<T><<<blocks, threads>>>(weights, output, weights_0, beta_0, weights_1,
    //                                             beta_1, weights_2, beta_2, n, k, m);

    if (!is_prepare) {
        dim3 threads(1024, 1, 1);
        dim3 blocks(min((long)65535, n * m / 1024) + 1, 1, 1);
        MemoryCpyLinear<<<blocks, threads, 0, handle->copy_stream>>>(
            output, query_bias, n * m, m);
        MemoryCpyLinear<<<blocks, threads, 0, handle->copy_stream>>>(
            output + n * m, key_bias, n * m, m);
        MemoryCpyLinear<<<blocks, threads, 0, handle->copy_stream>>>(
            output + 2 * n * m, val_bias, n * m, m);
    }
    else {
        checkCudaErrors(cudaMemcpyAsync(output, query_bias, n * m * sizeof(float),
                                        cudaMemcpyDeviceToDevice, handle->copy_stream));
        checkCudaErrors(cudaMemcpyAsync(output + n * m, key_bias, n * m * sizeof(float),
                                        cudaMemcpyDeviceToDevice, handle->copy_stream));
        checkCudaErrors(cudaMemcpyAsync(output + 2 * n * m, val_bias, n * m * sizeof(float),
                                        cudaMemcpyDeviceToDevice, handle->copy_stream));
    }

    //dim3 threads2(512, 1, 1);
    //dim3 blocks2(k*m/512 + 1, 1, 1);
    //MemoryCpyLinear<T><<<blocks2, threads2>>>(weights, weights_0, k*m, k*m);
    //MemoryCpyLinear<T><<<blocks2, threads2>>>(weights + k*m, weights_1, k*m, k*m);
    //MemoryCpyLinear<T><<<blocks2, threads2>>>(weights + 2*k*m, weights_2, k*m, k*m);

    cudaEventRecord(handle->copy_event, handle->copy_stream);
    cudaStreamWaitEvent(handle->cal_stream, handle->copy_event, 0);

    if (debug) {
        //debug_tensor_gpu<T>(std::string("inputs"), input, 10, 768, 11);
        //debug_tensor_gpu<T>(std::string("key"), weights_0, 10, 768, 11);
        //debug_tensor_gpu<T>(std::string("query"), weights_0+k*m, 10, 768, 11);
        //debug_tensor_gpu<T>(std::string("value"), weights_0+2*k*m, 10, 768, 11);
        debug_tensor_gpu<T>(std::string("before matmul"), output, 5,
                            handle->hidden_size * handle->seq_length, handle->batchsize * 3);
        //debug_tensor_gpu<T>(std::string("bias"), beta_0, 10, handle->hidden_size, 11);
    }

    std::vector<size_t> a_shape = {3, n, k};
    std::vector<size_t> b_shape = {3, k, m};
    std::vector<size_t> c_shape = {3, n, m};

    matmul(handle->handle, input, a_shape, batch_attentin_weights, b_shape,
           output, c_shape, false, false, 1.0f, 1.0f, 0);

    //if(debug)
    //    debug_tensor_gpu<T>(std::string("Linear out"), output, 11, 768, 11*3);
}

template void op_BatchedLinear::forward<float>(float* &output, float* input,
                                               size_t n, size_t k, size_t m,
                                               bool is_prepare, bool debug);
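// Standalone check of the argument order used by blas_sgemm in
// op_Linear::forward above: cuBLAS is column-major, so the row-major product
// C(n x m) = A(n x k) * B(k x m) is obtained as the column-major product
// C^T = B^T * A^T, i.e. by passing B first with dimensions (m, n, k).
// The sizes and values below are arbitrary illustrations, not model data.
#include <cstdio>
#include <cublas_v2.h>
#include <cuda_runtime.h>

int main(void)
{
    const int n = 2, k = 3, m = 2;
    // Row-major A (n x k) and B (k x m)
    const float A[n * k] = {1, 2, 3,
                            4, 5, 6};
    const float B[k * m] = {1, 0,
                            0, 1,
                            1, 1};
    float C[n * m] = {0};

    float *dA, *dB, *dC;
    cudaMalloc(&dA, sizeof(A)); cudaMalloc(&dB, sizeof(B)); cudaMalloc(&dC, sizeof(C));
    cudaMemcpy(dA, A, sizeof(A), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, sizeof(B), cudaMemcpyHostToDevice);
    cudaMemcpy(dC, C, sizeof(C), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    // Same shape as blas_sgemm(handle, false, false, m, n, k, ...):
    // B is the first operand with leading dimension m.
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                m, n, k, &alpha, dB, m, dA, k, &beta, dC, m);

    cudaMemcpy(C, dC, sizeof(C), cudaMemcpyDeviceToHost);
    // Expect row-major C = A*B = {{4, 5}, {10, 11}}
    printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]);

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}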
#include "cuda_helper.h" #undef SPH_ROTL32 #define SPH_ROTL32 ROTL32 static uint32_t *d_gnounce[MAX_GPUS]; static uint32_t *d_GNonce[MAX_GPUS]; __constant__ uint64_t pTarget[4]; #define shl(x, n) ((x) << (n)) #define shr(x, n) ((x) >> (n)) #define ss0(x) (shr((x), 1) ^ shl((x), 3) ^ SPH_ROTL32((x), 4) ^ SPH_ROTL32((x), 19)) #define ss1(x) (shr((x), 1) ^ shl((x), 2) ^ SPH_ROTL32((x), 8) ^ SPH_ROTL32((x), 23)) #define ss2(x) (shr((x), 2) ^ shl((x), 1) ^ SPH_ROTL32((x), 12) ^ SPH_ROTL32((x), 25)) #define ss3(x) (shr((x), 2) ^ shl((x), 2) ^ SPH_ROTL32((x), 15) ^ SPH_ROTL32((x), 29)) #define ss4(x) (shr((x), 1) ^ (x)) #define ss5(x) (shr((x), 2) ^ (x)) #define rs1(x) SPH_ROTL32((x), 3) #define rs2(x) SPH_ROTL32((x), 7) #define rs3(x) SPH_ROTL32((x), 13) #define rs4(x) SPH_ROTL32((x), 16) #define rs5(x) SPH_ROTL32((x), 19) #define rs6(x) SPH_ROTL32((x), 23) #define rs7(x) SPH_ROTL32((x), 27) /* Message expansion function 1 */ __forceinline__ __device__ uint32_t expand32_1(int i, uint32_t *M32, const uint32_t *H, uint32_t *Q) { return (ss1(Q[i - 16]) + ss2(Q[i - 15]) + ss3(Q[i - 14]) + ss0(Q[i - 13]) + ss1(Q[i - 12]) + ss2(Q[i - 11]) + ss3(Q[i - 10]) + ss0(Q[i - 9]) + ss1(Q[i - 8]) + ss2(Q[i - 7]) + ss3(Q[i - 6]) + ss0(Q[i - 5]) + ss1(Q[i - 4]) + ss2(Q[i - 3]) + ss3(Q[i - 2]) + ss0(Q[i - 1]) + ((i*(0x05555555ul) + SPH_ROTL32(M32[(i - 16) % 16], ((i - 16) % 16) + 1) + SPH_ROTL32(M32[(i - 13) % 16], ((i - 13) % 16) + 1) - SPH_ROTL32(M32[(i - 6) % 16], ((i - 6) % 16) + 1)) ^ H[(i - 16 + 7) % 16])); } /* Message expansion function 2 */ __forceinline__ __device__ uint32_t expand32_2(int i, uint32_t *M32, const uint32_t *H, uint32_t *Q) { return (Q[i - 16] + rs1(Q[i - 15]) + Q[i - 14] + rs2(Q[i - 13]) + Q[i - 12] + rs3(Q[i - 11]) + Q[i - 10] + rs4(Q[i - 9]) + Q[i - 8] + rs5(Q[i - 7]) + Q[i - 6] + rs6(Q[i - 5]) + Q[i - 4] + rs7(Q[i - 3]) + ss4(Q[i - 2]) + ss5(Q[i - 1]) + ((i*(0x05555555ul) + SPH_ROTL32(M32[(i - 16) % 16], ((i - 16) % 16) + 1) + SPH_ROTL32(M32[(i - 13) % 16], ((i - 13) % 16) + 1) - SPH_ROTL32(M32[(i - 6) % 16], ((i - 6) % 16) + 1)) ^ H[(i - 16 + 7) % 16])); } __forceinline__ __device__ void Compression256(uint32_t * M32) { uint32_t Q[32], XL32, XH32; const uint32_t H[16] = { 0x40414243, 0x44454647, 0x48494A4B, 0x4C4D4E4F, 0x50515253, 0x54555657, 0x58595A5B, 0x5C5D5E5F, 0x60616263, 0x64656667, 0x68696A6B, 0x6C6D6E6F, 0x70717273, 0x74757677, 0x78797A7B, 0x7C7D7E7F }; Q[0] = (M32[5] ^ H[5]) - (M32[7] ^ H[7]) + (M32[10] ^ H[10]) + (M32[13] ^ H[13]) + (M32[14] ^ H[14]); Q[1] = (M32[6] ^ H[6]) - (M32[8] ^ H[8]) + (M32[11] ^ H[11]) + (M32[14] ^ H[14]) - (M32[15] ^ H[15]); Q[2] = (M32[0] ^ H[0]) + (M32[7] ^ H[7]) + (M32[9] ^ H[9]) - (M32[12] ^ H[12]) + (M32[15] ^ H[15]); Q[3] = (M32[0] ^ H[0]) - (M32[1] ^ H[1]) + (M32[8] ^ H[8]) - (M32[10] ^ H[10]) + (M32[13] ^ H[13]); Q[4] = (M32[1] ^ H[1]) + (M32[2] ^ H[2]) + (M32[9] ^ H[9]) - (M32[11] ^ H[11]) - (M32[14] ^ H[14]); Q[5] = (M32[3] ^ H[3]) - (M32[2] ^ H[2]) + (M32[10] ^ H[10]) - (M32[12] ^ H[12]) + (M32[15] ^ H[15]); Q[6] = (M32[4] ^ H[4]) - (M32[0] ^ H[0]) - (M32[3] ^ H[3]) - (M32[11] ^ H[11]) + (M32[13] ^ H[13]); Q[7] = (M32[1] ^ H[1]) - (M32[4] ^ H[4]) - (M32[5] ^ H[5]) - (M32[12] ^ H[12]) - (M32[14] ^ H[14]); Q[8] = (M32[2] ^ H[2]) - (M32[5] ^ H[5]) - (M32[6] ^ H[6]) + (M32[13] ^ H[13]) - (M32[15] ^ H[15]); Q[9] = (M32[0] ^ H[0]) - (M32[3] ^ H[3]) + (M32[6] ^ H[6]) - (M32[7] ^ H[7]) + (M32[14] ^ H[14]); Q[10] = (M32[8] ^ H[8]) - (M32[1] ^ H[1]) - (M32[4] ^ H[4]) - (M32[7] ^ H[7]) + (M32[15] ^ H[15]); Q[11] = (M32[8] ^ H[8]) - (M32[0] ^ H[0]) 
- (M32[2] ^ H[2]) - (M32[5] ^ H[5]) + (M32[9] ^ H[9]); Q[12] = (M32[1] ^ H[1]) + (M32[3] ^ H[3]) - (M32[6] ^ H[6]) - (M32[9] ^ H[9]) + (M32[10] ^ H[10]); Q[13] = (M32[2] ^ H[2]) + (M32[4] ^ H[4]) + (M32[7] ^ H[7]) + (M32[10] ^ H[10]) + (M32[11] ^ H[11]); Q[14] = (M32[3] ^ H[3]) - (M32[5] ^ H[5]) + (M32[8] ^ H[8]) - (M32[11] ^ H[11]) - (M32[12] ^ H[12]); Q[15] = (M32[12] ^ H[12]) - (M32[4] ^ H[4]) - (M32[6] ^ H[6]) - (M32[9] ^ H[9]) + (M32[13] ^ H[13]); /* Diffuse the differences in every word in a bijective manner with ssi, and then add the values of the previous double pipe. */ Q[0] = ss0(Q[0]) + H[1]; Q[1] = ss1(Q[1]) + H[2]; Q[2] = ss2(Q[2]) + H[3]; Q[3] = ss3(Q[3]) + H[4]; Q[4] = ss4(Q[4]) + H[5]; Q[5] = ss0(Q[5]) + H[6]; Q[6] = ss1(Q[6]) + H[7]; Q[7] = ss2(Q[7]) + H[8]; Q[8] = ss3(Q[8]) + H[9]; Q[9] = ss4(Q[9]) + H[10]; Q[10] = ss0(Q[10]) + H[11]; Q[11] = ss1(Q[11]) + H[12]; Q[12] = ss2(Q[12]) + H[13]; Q[13] = ss3(Q[13]) + H[14]; Q[14] = ss4(Q[14]) + H[15]; Q[15] = ss0(Q[15]) + H[0]; /* This is the Message expansion or f_1 in the documentation. */ /* It has 16 rounds. */ /* Blue Midnight Wish has two tunable security parameters. */ /* The parameters are named EXPAND_1_ROUNDS and EXPAND_2_ROUNDS. */ /* The following relation for these parameters should is satisfied: */ /* EXPAND_1_ROUNDS + EXPAND_2_ROUNDS = 16 */ #pragma unroll for (int i=16; i<18; i++) Q[i] = expand32_1(i, M32, H, Q); #pragma nounroll for (int i=18; i<32; i++) Q[i] = expand32_2(i, M32, H, Q); /* Blue Midnight Wish has two temporary cummulative variables that accumulate via XORing */ /* 16 new variables that are prooduced in the Message Expansion part. */ XL32 = Q[16] ^ Q[17] ^ Q[18] ^ Q[19] ^ Q[20] ^ Q[21] ^ Q[22] ^ Q[23]; XH32 = XL32^Q[24] ^ Q[25] ^ Q[26] ^ Q[27] ^ Q[28] ^ Q[29] ^ Q[30] ^ Q[31]; /* This part is the function f_2 - in the documentation */ /* Compute the double chaining pipe for the next message block. 
*/ M32[0] = (shl(XH32, 5) ^ shr(Q[16], 5) ^ M32[0]) + (XL32 ^ Q[24] ^ Q[0]); M32[1] = (shr(XH32, 7) ^ shl(Q[17], 8) ^ M32[1]) + (XL32 ^ Q[25] ^ Q[1]); M32[2] = (shr(XH32, 5) ^ shl(Q[18], 5) ^ M32[2]) + (XL32 ^ Q[26] ^ Q[2]); M32[3] = (shr(XH32, 1) ^ shl(Q[19], 5) ^ M32[3]) + (XL32 ^ Q[27] ^ Q[3]); M32[4] = (shr(XH32, 3) ^ Q[20] ^ M32[4]) + (XL32 ^ Q[28] ^ Q[4]); M32[5] = (shl(XH32, 6) ^ shr(Q[21], 6) ^ M32[5]) + (XL32 ^ Q[29] ^ Q[5]); M32[6] = (shr(XH32, 4) ^ shl(Q[22], 6) ^ M32[6]) + (XL32 ^ Q[30] ^ Q[6]); M32[7] = (shr(XH32, 11) ^ shl(Q[23], 2) ^ M32[7]) + (XL32 ^ Q[31] ^ Q[7]); M32[8] = SPH_ROTL32(M32[4], 9) + (XH32 ^ Q[24] ^ M32[8]) + (shl(XL32, 8) ^ Q[23] ^ Q[8]); M32[9] = SPH_ROTL32(M32[5], 10) + (XH32 ^ Q[25] ^ M32[9]) + (shr(XL32, 6) ^ Q[16] ^ Q[9]); M32[10] = SPH_ROTL32(M32[6], 11) + (XH32 ^ Q[26] ^ M32[10]) + (shl(XL32, 6) ^ Q[17] ^ Q[10]); M32[11] = SPH_ROTL32(M32[7], 12) + (XH32 ^ Q[27] ^ M32[11]) + (shl(XL32, 4) ^ Q[18] ^ Q[11]); M32[12] = SPH_ROTL32(M32[0], 13) + (XH32 ^ Q[28] ^ M32[12]) + (shr(XL32, 3) ^ Q[19] ^ Q[12]); M32[13] = SPH_ROTL32(M32[1], 14) + (XH32 ^ Q[29] ^ M32[13]) + (shr(XL32, 4) ^ Q[20] ^ Q[13]); M32[14] = SPH_ROTL32(M32[2], 15) + (XH32 ^ Q[30] ^ M32[14]) + (shr(XL32, 7) ^ Q[21] ^ Q[14]); M32[15] = SPH_ROTL32(M32[3], 16) + (XH32 ^ Q[31] ^ M32[15]) + (shr(XL32, 2) ^ Q[22] ^ Q[15]); } __forceinline__ __device__ void Compression256_2(uint32_t * M32) { uint32_t XL32, XH32, Q[32]; const uint32_t H[16] = { 0xaaaaaaa0, 0xaaaaaaa1, 0xaaaaaaa2, 0xaaaaaaa3, 0xaaaaaaa4, 0xaaaaaaa5, 0xaaaaaaa6, 0xaaaaaaa7, 0xaaaaaaa8, 0xaaaaaaa9, 0xaaaaaaaa, 0xaaaaaaab, 0xaaaaaaac, 0xaaaaaaad, 0xaaaaaaae, 0xaaaaaaaf }; Q[0] = (M32[5] ^ H[5]) - (M32[7] ^ H[7]) + (M32[10] ^ H[10]) + (M32[13] ^ H[13]) + (M32[14] ^ H[14]); Q[1] = (M32[6] ^ H[6]) - (M32[8] ^ H[8]) + (M32[11] ^ H[11]) + (M32[14] ^ H[14]) - (M32[15] ^ H[15]); Q[2] = (M32[0] ^ H[0]) + (M32[7] ^ H[7]) + (M32[9] ^ H[9]) - (M32[12] ^ H[12]) + (M32[15] ^ H[15]); Q[3] = (M32[0] ^ H[0]) - (M32[1] ^ H[1]) + (M32[8] ^ H[8]) - (M32[10] ^ H[10]) + (M32[13] ^ H[13]); Q[4] = (M32[1] ^ H[1]) + (M32[2] ^ H[2]) + (M32[9] ^ H[9]) - (M32[11] ^ H[11]) - (M32[14] ^ H[14]); Q[5] = (M32[3] ^ H[3]) - (M32[2] ^ H[2]) + (M32[10] ^ H[10]) - (M32[12] ^ H[12]) + (M32[15] ^ H[15]); Q[6] = (M32[4] ^ H[4]) - (M32[0] ^ H[0]) - (M32[3] ^ H[3]) - (M32[11] ^ H[11]) + (M32[13] ^ H[13]); Q[7] = (M32[1] ^ H[1]) - (M32[4] ^ H[4]) - (M32[5] ^ H[5]) - (M32[12] ^ H[12]) - (M32[14] ^ H[14]); Q[8] = (M32[2] ^ H[2]) - (M32[5] ^ H[5]) - (M32[6] ^ H[6]) + (M32[13] ^ H[13]) - (M32[15] ^ H[15]); Q[9] = (M32[0] ^ H[0]) - (M32[3] ^ H[3]) + (M32[6] ^ H[6]) - (M32[7] ^ H[7]) + (M32[14] ^ H[14]); Q[10] = (M32[8] ^ H[8]) - (M32[1] ^ H[1]) - (M32[4] ^ H[4]) - (M32[7] ^ H[7]) + (M32[15] ^ H[15]); Q[11] = (M32[8] ^ H[8]) - (M32[0] ^ H[0]) - (M32[2] ^ H[2]) - (M32[5] ^ H[5]) + (M32[9] ^ H[9]); Q[12] = (M32[1] ^ H[1]) + (M32[3] ^ H[3]) - (M32[6] ^ H[6]) - (M32[9] ^ H[9]) + (M32[10] ^ H[10]); Q[13] = (M32[2] ^ H[2]) + (M32[4] ^ H[4]) + (M32[7] ^ H[7]) + (M32[10] ^ H[10]) + (M32[11] ^ H[11]); Q[14] = (M32[3] ^ H[3]) - (M32[5] ^ H[5]) + (M32[8] ^ H[8]) - (M32[11] ^ H[11]) - (M32[12] ^ H[12]); Q[15] = (M32[12] ^ H[12]) - (M32[4] ^ H[4]) - (M32[6] ^ H[6]) - (M32[9] ^ H[9]) + (M32[13] ^ H[13]); /* Diffuse the differences in every word in a bijective manner with ssi, and then add the values of the previous double pipe.*/ Q[0] = ss0(Q[0]) + H[1]; Q[1] = ss1(Q[1]) + H[2]; Q[2] = ss2(Q[2]) + H[3]; Q[3] = ss3(Q[3]) + H[4]; Q[4] = ss4(Q[4]) + H[5]; Q[5] = ss0(Q[5]) + H[6]; Q[6] = ss1(Q[6]) + H[7]; 
Q[7] = ss2(Q[7]) + H[8]; Q[8] = ss3(Q[8]) + H[9]; Q[9] = ss4(Q[9]) + H[10]; Q[10] = ss0(Q[10]) + H[11]; Q[11] = ss1(Q[11]) + H[12]; Q[12] = ss2(Q[12]) + H[13]; Q[13] = ss3(Q[13]) + H[14]; Q[14] = ss4(Q[14]) + H[15]; Q[15] = ss0(Q[15]) + H[0]; /* This is the Message expansion or f_1 in the documentation. */ /* It has 16 rounds. */ /* Blue Midnight Wish has two tunable security parameters. */ /* The parameters are named EXPAND_1_ROUNDS and EXPAND_2_ROUNDS. */ /* The following relation for these parameters should is satisfied: */ /* EXPAND_1_ROUNDS + EXPAND_2_ROUNDS = 16 */ #pragma unroll for (int i = 16; i<18; i++) Q[i] = expand32_1(i, M32, H, Q); #pragma nounroll for (int i = 18; i<32; i++) Q[i] = expand32_2(i, M32, H, Q); /* Blue Midnight Wish has two temporary cummulative variables that accumulate via XORing */ /* 16 new variables that are prooduced in the Message Expansion part. */ XL32 = Q[16] ^ Q[17] ^ Q[18] ^ Q[19] ^ Q[20] ^ Q[21] ^ Q[22] ^ Q[23]; XH32 = XL32 ^ Q[24] ^ Q[25] ^ Q[26] ^ Q[27] ^ Q[28] ^ Q[29] ^ Q[30] ^ Q[31]; M32[2] = (shr(XH32, 5) ^ shl(Q[18], 5) ^ M32[2]) + (XL32 ^ Q[26] ^ Q[2]); M32[3] = (shr(XH32, 1) ^ shl(Q[19], 5) ^ M32[3]) + (XL32 ^ Q[27] ^ Q[3]); M32[14] = SPH_ROTL32(M32[2], 15) + (XH32 ^ Q[30] ^ M32[14]) + (shr(XL32, 7) ^ Q[21] ^ Q[14]); M32[15] = SPH_ROTL32(M32[3], 16) + (XH32 ^ Q[31] ^ M32[15]) + (shr(XL32, 2) ^ Q[22] ^ Q[15]); } #define TPB 512 __global__ __launch_bounds__(TPB, 2) void bmw256_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint64_t *g_hash, uint32_t *const __restrict__ nonceVector) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { uint32_t message[16] = { 0 }; LOHI(message[0], message[1], __ldg(&g_hash[thread])); LOHI(message[2], message[3], __ldg(&g_hash[thread + 1 * threads])); LOHI(message[4], message[5], __ldg(&g_hash[thread + 2 * threads])); LOHI(message[6], message[7], __ldg(&g_hash[thread + 3 * threads])); message[8]=0x80; message[14]=0x100; Compression256(message); Compression256_2(message); if (((uint64_t*)message)[7] <= pTarget[3]) { uint32_t tmp = atomicExch(&nonceVector[0], startNounce + thread); if (tmp != 0) nonceVector[1] = tmp; } } } __host__ void bmw256_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uint64_t *g_hash, uint32_t *resultnonces) { const uint32_t threadsperblock = TPB; dim3 grid((threads + threadsperblock - 1) / threadsperblock); dim3 block(threadsperblock); cudaMemset(d_GNonce[thr_id], 0, 2 * sizeof(uint32_t)); bmw256_gpu_hash_32 << <grid, block >> >(threads, startNounce, g_hash, d_GNonce[thr_id]); cudaMemcpy(d_gnounce[thr_id], d_GNonce[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost); resultnonces[0] = *(d_gnounce[thr_id]); resultnonces[1] = *(d_gnounce[thr_id] + 1); } __host__ void bmw256_cpu_init(int thr_id, uint32_t threads) { cudaMalloc(&d_GNonce[thr_id], 2 * sizeof(uint32_t)); cudaMallocHost(&d_gnounce[thr_id], 2 * sizeof(uint32_t)); } __host__ void bmw256_cpu_free(int thr_id) { cudaFree(d_GNonce[thr_id]); cudaFreeHost(d_gnounce[thr_id]); } __host__ void bmw256_setTarget(const void *pTargetIn) { cudaMemcpyToSymbol(pTarget, pTargetIn, 32, 0, cudaMemcpyHostToDevice); }
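// Isolated illustration of the nonce-capture pattern used by
// bmw256_gpu_hash_32 above: each thread whose hash meets the target publishes
// its nonce with atomicExch into slot 0; if another hit was already stored
// there, the displaced value is kept in slot 1, so up to two candidates
// survive a launch. The "every 1000th thread hits" predicate below is an
// arbitrary stand-in for the real hash-vs-target comparison.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void capture_hits(uint32_t threads, uint32_t startNounce, uint32_t *nonceVector)
{
	const uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
	if (thread < threads && thread != 0 && (thread % 1000) == 0) // fake "hash <= target"
	{
		uint32_t tmp = atomicExch(&nonceVector[0], startNounce + thread);
		if (tmp != 0)
			nonceVector[1] = tmp;
	}
}

int main(void)
{
	uint32_t *d_nonces, h_nonces[2];
	cudaMalloc(&d_nonces, 2 * sizeof(uint32_t));
	cudaMemset(d_nonces, 0, 2 * sizeof(uint32_t));

	capture_hits<<<(4096 + 511) / 512, 512>>>(4096, 0x1000, d_nonces);
	cudaMemcpy(h_nonces, d_nonces, 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost);

	printf("nonce slots: 0x%08x 0x%08x\n", h_nonces[0], h_nonces[1]);
	cudaFree(d_nonces);
	return 0;
}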
/*! \brief Unit tests for thread-level GEMM */

#include "cutlass/arch/wmma.h"

#ifdef CUTLASS_ARCH_WMMA_SM70_ENABLED
#include "mma_pipelined_testbed.h"
#include "cutlass/gemm/threadblock/default_mma_core_wmma.h"

/// All tests use single staged (kStages=1) mma pipeline for the gemm mainloop
/// Test name format: SM[arch]_gemm_threadblock_singlestage_wmma_[alayout]_[blayout]_[clayout]_[dtype].[threadblock_shape]_[warp_shape]

///////////////////////////////////////////////////////////////////////////////////////////////////
/// WMMA Floating point (f16 accumulation) - Single stage - Threadblock level tests            ////
///////////////////////////////////////////////////////////////////////////////////////////////////

TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, 64x64x32_64x64x32_16x16x16) {

  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = cutlass::half_t;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(64, 64, 32);

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>;

  dim3 grid(1, 1);
  dim3 block(32, 1, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}

TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, 128x128x32_64x64x32_16x16x16) {

  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = cutlass::half_t;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(128, 128, 64);

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>;

  dim3 grid(1, 1);
  dim3 block(32, 4, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}

TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, multicta_256x256x96_128x128x32_64x64x32_16x16x16) {

  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = cutlass::half_t;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(256, 256, 96);

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>;

  dim3 grid(2, 2);
  dim3 block(32, 4, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}

///////////////////////////////////////////////////////////////////////////////
/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype
/// wmma.mma.sync.aligned.row.col.m32n8k16.f16.f16 (wmma native size 32x8x16)
///////////////////////////////////////////////////////////////////////////////

TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, 64x64x32_64x64x32_32x8x16) {

  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = cutlass::half_t;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(64, 64, 128);

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>;

  dim3 grid(1, 1);
  dim3 block(32, 1, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}

//////////////////////////////////////////////////////////////////////////////
/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype
/// wmma.mma.sync.aligned.row.col.m8n32k16.f16.f16 (wmma native size 8x32x16)
//////////////////////////////////////////////////////////////////////////////

TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f16, 64x64x32_64x64x32_8x32x16) {

  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = cutlass::half_t;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(64, 64, 128);

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>;

  dim3 grid(1, 1);
  dim3 block(32, 1, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}

///////////////////////////////////////////////////////////////////////////////////////////////////
/// WMMA Floating point (f32 accumulation) - Single stage - Threadblock level tests            ////
///////////////////////////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////////////
/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype
/// wmma.mma.sync.aligned.row.col.m16n16k16.f32.f32 (wmma native size 16x16x16)
//////////////////////////////////////////////////////////////////////////////////

TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f32, 64x64x32_64x64x32_16x16x16) {

  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = float;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(64, 64, 128);

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>;

  dim3 grid(1, 1);
  dim3 block(32, 1, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}

TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f32, 128x128x32_64x64x32_16x16x16) {

  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = float;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(128, 128, 128);

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>;

  dim3 grid(1, 1);
  dim3 block(32, 4, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}

TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f32, multicta_256x256x96_128x128x32_64x64x32_16x16x16) {

  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = float;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(256, 256, 96);

  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<16, 16, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>;

  dim3 grid(2, 2);
  dim3 block(32, 4, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}

///////////////////////////////////////////////////////////
/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype
/// wmma.mma.sync.aligned.row.col.m32n8k16.f32.f32 (wmma native size 32x8x16)
////////////////////////////////////////////////////////////

TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f32, 64x64x32_64x64x32_32x8x16) {

  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = float;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(64, 64, 128);

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<32, 8, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>;

  dim3 grid(1, 1);
  dim3 block(32, 1, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}

/////////////////////////////////////////////////////////////////////////////////
/// wmma.mma.sync.aligned.alayout.blayout.shape.dtype.ctype
/// wmma.mma.sync.aligned.row.col.m8n32k16.f32.f32 (wmma native size 8x32x16)
/////////////////////////////////////////////////////////////////////////////////

TEST(SM70_gemm_threadblock_singlestage_wmma_tensor_op_row_col_row_f32, 64x64x32_64x64x32_8x32x16) {

  using ElementA = cutlass::half_t;
  using LayoutA = cutlass::layout::RowMajor;
  using ElementB = cutlass::half_t;
  using LayoutB = cutlass::layout::ColumnMajor;
  using ElementC = float;
  using LayoutC = cutlass::layout::RowMajor;
  static const int kStages = 1;

  cutlass::gemm::GemmCoord problem_size(64, 64, 128);

  using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
  using InstructionShape = cutlass::gemm::GemmShape<8, 32, 16>;

  float alpha = 1.f;
  float beta = 0.0f;

  // Define the MmaCore components
  using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
      ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
      ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassWmmaTensorOp, kStages>;

  dim3 grid(1, 1);
  dim3 block(32, 1, 1);

  test::gemm::threadblock::Testbed<MmaCore, kStages>(problem_size.m(), problem_size.n(),
                                                     problem_size.k(), alpha, beta)
      .run(grid, block);
}

#endif //CUTLASS_ARCH_WMMA_SM70_ENABLED
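// Hedged side note (not part of CUTLASS): the dim3 block(...) values in the
// tests above follow directly from the shapes. A threadblock tile is covered
// by (TB_M / Warp_M) * (TB_N / Warp_N) warps of 32 threads each, which this
// small standalone program checks for the two configurations used above.
#include <cstdio>

constexpr int warps_per_block(int tb_m, int tb_n, int warp_m, int warp_n)
{
  return (tb_m / warp_m) * (tb_n / warp_n);
}

int main(void)
{
  // 64x64 threadblock, 64x64 warp   -> 1 warp  -> dim3 block(32, 1, 1)
  printf("64x64 / 64x64  : %d warp(s)\n", warps_per_block(64, 64, 64, 64));
  // 128x128 threadblock, 64x64 warp -> 4 warps -> dim3 block(32, 4, 1)
  printf("128x128 / 64x64: %d warp(s)\n", warps_per_block(128, 128, 64, 64));
  return 0;
}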
#include <cugraph/graph_view.hpp>
#include <cugraph/matrix_partition_device_view.cuh>
#include <cugraph/partition_manager.hpp>
#include <cugraph/prims/row_col_properties.cuh>
#include <cugraph/utilities/dataframe_buffer.cuh>
#include <cugraph/utilities/device_comm.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/host_barrier.hpp>
#include <cugraph/utilities/host_scalar_comm.cuh>
#include <cugraph/utilities/thrust_tuple_utils.cuh>
#include <cugraph/vertex_partition_device_view.cuh>

#include <raft/handle.hpp>
#include <rmm/exec_policy.hpp>

#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/iterator/permutation_iterator.h>

#include <algorithm>
#include <numeric>
#include <type_traits>
#include <utility>

namespace cugraph {
namespace detail {

template <typename GraphViewType,
          typename VertexValueInputIterator,
          typename MatrixMajorValueOutputWrapper>
void copy_to_matrix_major(raft::handle_t const& handle,
                          GraphViewType const& graph_view,
                          VertexValueInputIterator vertex_value_input_first,
                          MatrixMajorValueOutputWrapper& matrix_major_value_output)
{
  if constexpr (GraphViewType::is_multi_gpu) {
    using vertex_t = typename GraphViewType::vertex_type;

    auto& comm               = handle.get_comms();
    auto const comm_rank     = comm.get_rank();
    auto& row_comm           = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
    auto const row_comm_rank = row_comm.get_rank();
    auto const row_comm_size = row_comm.get_size();
    auto& col_comm           = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
    auto const col_comm_rank = col_comm.get_rank();
    auto const col_comm_size = col_comm.get_size();

    // barrier is necessary here to avoid potential overlap (which can lead to deadlock) between
    // two different communicators (beginning of col_comm)
#if 1
    // FIXME: temporary hack till UCC is integrated into RAFT (so we can use UCC barrier with DASK
    // and MPI barrier with MPI)
    host_barrier(comm, handle.get_stream_view());
#else
    handle.get_stream_view().synchronize();
    comm.barrier();  // currently, this is ncclAllReduce
#endif

    if (matrix_major_value_output.key_first()) {
      auto key_offsets = GraphViewType::is_adj_matrix_transposed
                           ? *(graph_view.get_local_sorted_unique_edge_col_offsets())
                           : *(graph_view.get_local_sorted_unique_edge_row_offsets());

      vertex_t max_rx_size{0};
      for (int i = 0; i < col_comm_size; ++i) {
        max_rx_size = std::max(
          max_rx_size, graph_view.get_vertex_partition_size(i * row_comm_size + row_comm_rank));
      }
      auto rx_value_buffer = allocate_dataframe_buffer<
        typename std::iterator_traits<VertexValueInputIterator>::value_type>(max_rx_size,
                                                                             handle.get_stream());
      auto rx_value_first = get_dataframe_buffer_begin(rx_value_buffer);
      for (int i = 0; i < col_comm_size; ++i) {
        device_bcast(col_comm,
                     vertex_value_input_first,
                     rx_value_first,
                     graph_view.get_vertex_partition_size(i * row_comm_size + row_comm_rank),
                     i,
                     handle.get_stream());

        auto v_offset_first = thrust::make_transform_iterator(
          *(matrix_major_value_output.key_first()) + key_offsets[i],
          [v_first = graph_view.get_vertex_partition_first(
             i * row_comm_size + row_comm_rank)] __device__(auto v) { return v - v_first; });
        thrust::gather(handle.get_thrust_policy(),
                       v_offset_first,
                       v_offset_first + (key_offsets[i + 1] - key_offsets[i]),
                       rx_value_first,
                       matrix_major_value_output.value_data() + key_offsets[i]);
      }
    } else {
      std::vector<size_t> rx_counts(col_comm_size, size_t{0});
      std::vector<size_t> displacements(col_comm_size, size_t{0});
      for (int i = 0; i < col_comm_size; ++i) {
        rx_counts[i]     = graph_view.get_vertex_partition_size(i * row_comm_size + row_comm_rank);
        displacements[i] = (i == 0) ? 0 : displacements[i - 1] + rx_counts[i - 1];
      }
      device_allgatherv(col_comm,
                        vertex_value_input_first,
                        matrix_major_value_output.value_data(),
                        rx_counts,
                        displacements,
                        handle.get_stream());
    }

    // barrier is necessary here to avoid potential overlap (which can lead to deadlock) between
    // two different communicators (end of col_comm)
#if 1
    // FIXME: temporary hack till UCC is integrated into RAFT (so we can use UCC barrier with DASK
    // and MPI barrier with MPI)
    host_barrier(comm, handle.get_stream_view());
#else
    handle.get_stream_view().synchronize();
    comm.barrier();  // currently, this is ncclAllReduce
#endif
  } else {
    assert(!(matrix_major_value_output.key_first()));
    // parenthesized so the comparison is against the selected partition size
    // (?: binds looser than ==)
    assert(graph_view.get_number_of_local_vertices() ==
           (GraphViewType::is_adj_matrix_transposed
              ? graph_view.get_number_of_local_adj_matrix_partition_cols()
              : graph_view.get_number_of_local_adj_matrix_partition_rows()));
    thrust::copy(handle.get_thrust_policy(),
                 vertex_value_input_first,
                 vertex_value_input_first + graph_view.get_number_of_local_vertices(),
                 matrix_major_value_output.value_data());
  }
}

template <typename GraphViewType,
          typename VertexIterator,
          typename VertexValueInputIterator,
          typename MatrixMajorValueOutputWrapper>
void copy_to_matrix_major(raft::handle_t const& handle,
                          GraphViewType const& graph_view,
                          VertexIterator vertex_first,
                          VertexIterator vertex_last,
                          VertexValueInputIterator vertex_value_input_first,
                          MatrixMajorValueOutputWrapper& matrix_major_value_output)
{
  using vertex_t = typename GraphViewType::vertex_type;
  using edge_t   = typename GraphViewType::edge_type;
  using weight_t = typename GraphViewType::weight_type;

  if constexpr (GraphViewType::is_multi_gpu) {
    auto& comm               = handle.get_comms();
    auto const comm_rank     = comm.get_rank();
    auto& row_comm           = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
    auto const row_comm_rank = row_comm.get_rank();
    auto const row_comm_size = row_comm.get_size();
    auto& col_comm           = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
    auto const col_comm_rank = col_comm.get_rank();
    auto const col_comm_size = col_comm.get_size();

    // barrier is necessary here to avoid potential overlap (which can lead to deadlock) between
    // two different communicators (beginning of col_comm)
#if 1
    // FIXME: temporary hack till UCC is integrated into RAFT (so we can use UCC barrier with DASK
    // and MPI barrier with MPI)
    host_barrier(comm, handle.get_stream_view());
#else
    handle.get_stream_view().synchronize();
    comm.barrier();  // currently, this is ncclAllReduce
#endif

    auto rx_counts =
      host_scalar_allgather(col_comm,
                            static_cast<size_t>(thrust::distance(vertex_first, vertex_last)),
                            handle.get_stream());
    auto max_rx_size = std::reduce(
      rx_counts.begin(), rx_counts.end(), size_t{0}, [](auto lhs, auto rhs) {
        return std::max(lhs, rhs);
      });
    rmm::device_uvector<vertex_t> rx_vertices(max_rx_size, handle.get_stream());
    auto rx_tmp_buffer = allocate_dataframe_buffer<
      typename std::iterator_traits<VertexValueInputIterator>::value_type>(max_rx_size,
                                                                           handle.get_stream());
    auto rx_value_first = get_dataframe_buffer_begin(rx_tmp_buffer);

    auto key_offsets = GraphViewType::is_adj_matrix_transposed
                         ? graph_view.get_local_sorted_unique_edge_col_offsets()
                         : graph_view.get_local_sorted_unique_edge_row_offsets();

    for (int i = 0; i < col_comm_size; ++i) {
      auto matrix_partition =
        matrix_partition_device_view_t<vertex_t, edge_t, weight_t, GraphViewType::is_multi_gpu>(
          graph_view.get_matrix_partition_view(i));

      if (col_comm_rank == i) {
        auto vertex_partition =
          vertex_partition_device_view_t<vertex_t, GraphViewType::is_multi_gpu>(
            graph_view.get_vertex_partition_view());
        auto map_first =
          thrust::make_transform_iterator(vertex_first, [vertex_partition] __device__(auto v) {
            return vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v);
          });
        // FIXME: this gather (and temporary buffer) is unnecessary if NCCL directly takes a
        // permutation iterator (and directly gathers to the internal buffer)
        thrust::gather(handle.get_thrust_policy(),
                       map_first,
                       map_first + thrust::distance(vertex_first, vertex_last),
                       vertex_value_input_first,
                       rx_value_first);
      }

      // FIXME: these broadcast operations can be placed between ncclGroupStart() and
      // ncclGroupEnd()
      device_bcast(
        col_comm, vertex_first, rx_vertices.begin(), rx_counts[i], i, handle.get_stream());
      device_bcast(col_comm, rx_value_first, rx_value_first, rx_counts[i], i, handle.get_stream());

      if (matrix_major_value_output.key_first()) {
        thrust::for_each(
          handle.get_thrust_policy(),
          thrust::make_counting_iterator(vertex_t{0}),
          thrust::make_counting_iterator((*key_offsets)[i + 1] - (*key_offsets)[i]),
          [rx_vertex_first = rx_vertices.begin(),
           rx_vertex_last  = rx_vertices.end(),
           rx_value_first,
           output_key_first = *(matrix_major_value_output.key_first()) + (*key_offsets)[i],
           output_value_first =
             matrix_major_value_output.value_data() + (*key_offsets)[i]] __device__(auto i) {
            auto major = *(output_key_first + i);
            auto it    = thrust::lower_bound(thrust::seq, rx_vertex_first, rx_vertex_last, major);
            if ((it != rx_vertex_last) && (*it == major)) {
              auto rx_value = *(rx_value_first + thrust::distance(rx_vertex_first, it));
              *(output_value_first + i) = rx_value;
            }
          });
      } else {
        auto map_first = thrust::make_transform_iterator(
          rx_vertices.begin(), [matrix_partition] __device__(auto v) {
            return matrix_partition.get_major_offset_from_major_nocheck(v);
          });
        // FIXME: this scatter is unnecessary if NCCL directly takes a permutation iterator (and
        // directly scatters from the internal buffer)
        thrust::scatter(handle.get_thrust_policy(),
                        rx_value_first,
                        rx_value_first + rx_counts[i],
                        map_first,
                        matrix_major_value_output.value_data() +
                          matrix_partition.get_major_value_start_offset());
      }
    }

    // barrier is necessary here to avoid potential overlap (which can lead to deadlock) between
    // two different communicators (end of col_comm)
#if 1
    // FIXME: temporary hack till UCC is integrated into RAFT (so we can use UCC barrier with DASK
    // and MPI barrier with MPI)
    host_barrier(comm, handle.get_stream_view());
#else
    handle.get_stream_view().synchronize();
    comm.barrier();  // currently, this is ncclAllReduce
#endif
  } else {
    assert(!(matrix_major_value_output.key_first()));
    assert(graph_view.get_number_of_local_vertices() ==
           (GraphViewType::is_adj_matrix_transposed
              ? graph_view.get_number_of_local_adj_matrix_partition_cols()
              : graph_view.get_number_of_local_adj_matrix_partition_rows()));
    auto val_first = thrust::make_permutation_iterator(vertex_value_input_first, vertex_first);
    thrust::scatter(handle.get_thrust_policy(),
                    val_first,
                    val_first + thrust::distance(vertex_first, vertex_last),
                    vertex_first,
                    matrix_major_value_output.value_data());
  }
}

template <typename GraphViewType,
          typename VertexValueInputIterator,
          typename MatrixMinorValueOutputWrapper>
void copy_to_matrix_minor(raft::handle_t const& handle,
                          GraphViewType const& graph_view,
                          VertexValueInputIterator vertex_value_input_first,
                          MatrixMinorValueOutputWrapper& matrix_minor_value_output)
{
  if constexpr (GraphViewType::is_multi_gpu) {
    using vertex_t = typename GraphViewType::vertex_type;

    auto& comm               = handle.get_comms();
    auto const comm_rank     = comm.get_rank();
    auto& row_comm           = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name());
    auto const row_comm_rank = row_comm.get_rank();
    auto const row_comm_size = row_comm.get_size();
    auto& col_comm           = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name());
    auto const col_comm_rank = col_comm.get_rank();
    auto const col_comm_size = col_comm.get_size();

    // barrier is necessary here to avoid potential overlap (which can lead to deadlock) between
    // two different communicators (beginning of row_comm)
#if 1
    // FIXME: temporary hack till UCC is integrated into RAFT (so we can use UCC barrier with DASK
    // and MPI barrier with MPI)
    host_barrier(comm, handle.get_stream_view());
#else
    handle.get_stream_view().synchronize();
    comm.barrier();  // currently, this is ncclAllReduce
#endif

    if (matrix_minor_value_output.key_first()) {
      auto key_offsets = GraphViewType::is_adj_matrix_transposed
                           ? *(graph_view.get_local_sorted_unique_edge_row_offsets())
                           : *(graph_view.get_local_sorted_unique_edge_col_offsets());

      vertex_t max_rx_size{0};
      for (int i = 0; i < row_comm_size; ++i) {
        max_rx_size = std::max(
          max_rx_size, graph_view.get_vertex_partition_size(col_comm_rank * row_comm_size + i));
      }
      auto rx_value_buffer = allocate_dataframe_buffer<
        typename std::iterator_traits<VertexValueInputIterator>::value_type>(max_rx_size,
                                                                             handle.get_stream());
      auto rx_value_first = get_dataframe_buffer_begin(rx_value_buffer);
      for (int i = 0; i < row_comm_size; ++i) {
        device_bcast(row_comm,
                     vertex_value_input_first,
                     rx_value_first,
                     graph_view.get_vertex_partition_size(col_comm_rank * row_comm_size + i),
                     i,
                     handle.get_stream());

        auto v_offset_first = thrust::make_transform_iterator(
          *(matrix_minor_value_output.key_first()) + key_offsets[i],
          [v_first = graph_view.get_vertex_partition_first(
             col_comm_rank * row_comm_size + i)] __device__(auto v) { return v - v_first; });
        thrust::gather(handle.get_thrust_policy(),
                       v_offset_first,
                       v_offset_first + (key_offsets[i + 1] - key_offsets[i]),
                       rx_value_first,
                       matrix_minor_value_output.value_data() + key_offsets[i]);
      }
    } else {
      std::vector<size_t> rx_counts(row_comm_size, size_t{0});
      std::vector<size_t> displacements(row_comm_size, size_t{0});
      for (int i = 0; i < row_comm_size; ++i) {
        rx_counts[i]     = graph_view.get_vertex_partition_size(col_comm_rank * row_comm_size + i);
        displacements[i] = (i == 0) ? 0 : displacements[i - 1] + rx_counts[i - 1];
      }
0 : displacements[i - 1] + rx_counts[i - 1]; } device_allgatherv(row_comm, vertex_value_input_first, matrix_minor_value_output.value_data(), rx_counts, displacements, handle.get_stream()); } // barrier is necessary here to avoid potential overlap (which can leads to deadlock) between // two different communicators (end of row_comm) #if 1 // FIXME: temporary hack till UCC is integrated into RAFT (so we can use UCC barrier with DASK // and MPI barrier with MPI) host_barrier(comm, handle.get_stream_view()); #else handle.get_stream_view().synchronize(); comm.barrier(); // currently, this is ncclAllReduce #endif } else { assert(!(matrix_minor_value_output.key_first())); assert(graph_view.get_number_of_local_vertices() == GraphViewType::is_adj_matrix_transposed ? graph_view.get_number_of_local_adj_matrix_partition_rows() : graph_view.get_number_of_local_adj_matrix_partition_cols()); thrust::copy(handle.get_thrust_policy(), vertex_value_input_first, vertex_value_input_first + graph_view.get_number_of_local_vertices(), matrix_minor_value_output.value_data()); } } template <typename GraphViewType, typename VertexIterator, typename VertexValueInputIterator, typename MatrixMinorValueOutputWrapper> void copy_to_matrix_minor(raft::handle_t const& handle, GraphViewType const& graph_view, VertexIterator vertex_first, VertexIterator vertex_last, VertexValueInputIterator vertex_value_input_first, MatrixMinorValueOutputWrapper& matrix_minor_value_output) { using vertex_t = typename GraphViewType::vertex_type; using edge_t = typename GraphViewType::edge_type; using weight_t = typename GraphViewType::weight_type; if constexpr (GraphViewType::is_multi_gpu) { auto& comm = handle.get_comms(); auto const comm_rank = comm.get_rank(); auto& row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name()); auto const row_comm_rank = row_comm.get_rank(); auto const row_comm_size = row_comm.get_size(); auto& col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name()); auto const col_comm_rank = col_comm.get_rank(); auto const col_comm_size = col_comm.get_size(); // barrier is necessary here to avoid potential overlap (which can leads to deadlock) between // two different communicators (beginning of row_comm) #if 1 // FIXME: temporary hack till UCC is integrated into RAFT (so we can use UCC barrier with DASK // and MPI barrier with MPI) host_barrier(comm, handle.get_stream_view()); #else handle.get_stream_view().synchronize(); comm.barrier(); // currently, this is ncclAllReduce #endif auto rx_counts = host_scalar_allgather(row_comm, static_cast<size_t>(thrust::distance(vertex_first, vertex_last)), handle.get_stream()); auto max_rx_size = std::reduce(rx_counts.begin(), rx_counts.end(), size_t{0}, [](auto lhs, auto rhs) { return std::max(lhs, rhs); }); rmm::device_uvector<vertex_t> rx_vertices(max_rx_size, handle.get_stream()); auto rx_tmp_buffer = allocate_dataframe_buffer< typename std::iterator_traits<VertexValueInputIterator>::value_type>(max_rx_size, handle.get_stream()); auto rx_value_first = get_dataframe_buffer_begin(rx_tmp_buffer); auto key_offsets = GraphViewType::is_adj_matrix_transposed ? 
graph_view.get_local_sorted_unique_edge_row_offsets() : graph_view.get_local_sorted_unique_edge_col_offsets(); auto matrix_partition = matrix_partition_device_view_t<vertex_t, edge_t, weight_t, GraphViewType::is_multi_gpu>( graph_view.get_matrix_partition_view(size_t{0})); for (int i = 0; i < row_comm_size; ++i) { if (row_comm_rank == i) { auto vertex_partition = vertex_partition_device_view_t<vertex_t, GraphViewType::is_multi_gpu>( graph_view.get_vertex_partition_view()); auto map_first = thrust::make_transform_iterator(vertex_first, [vertex_partition] __device__(auto v) { return vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v); }); // FIXME: this gather (and temporary buffer) is unnecessary if NCCL directly takes a // permutation iterator (and directly gathers to the internal buffer) thrust::gather(handle.get_thrust_policy(), map_first, map_first + thrust::distance(vertex_first, vertex_last), vertex_value_input_first, rx_value_first); } // FIXME: these broadcast operations can be placed between ncclGroupStart() and // ncclGroupEnd() device_bcast( row_comm, vertex_first, rx_vertices.begin(), rx_counts[i], i, handle.get_stream()); device_bcast(row_comm, rx_value_first, rx_value_first, rx_counts[i], i, handle.get_stream()); if (matrix_minor_value_output.key_first()) { thrust::for_each( handle.get_thrust_policy(), thrust::make_counting_iterator(vertex_t{0}), thrust::make_counting_iterator((*key_offsets)[i + 1] - (*key_offsets)[i]), [rx_vertex_first = rx_vertices.begin(), rx_vertex_last = rx_vertices.end(), rx_value_first, output_key_first = *(matrix_minor_value_output.key_first()) + (*key_offsets)[i], output_value_first = matrix_minor_value_output.value_data() + (*key_offsets)[i]] __device__(auto i) { auto minor = *(output_key_first + i); auto it = thrust::lower_bound(thrust::seq, rx_vertex_first, rx_vertex_last, minor); if ((it != rx_vertex_last) && (*it == minor)) { auto rx_value = *(rx_value_first + thrust::distance(rx_vertex_first, it)); *(output_value_first + i) = rx_value; } }); } else { auto map_first = thrust::make_transform_iterator( rx_vertices.begin(), [matrix_partition] __device__(auto v) { return matrix_partition.get_minor_offset_from_minor_nocheck(v); }); // FIXME: this scatter is unnecessary if NCCL directly takes a permutation iterator (and // directly scatters from the internal buffer) thrust::scatter(handle.get_thrust_policy(), rx_value_first, rx_value_first + rx_counts[i], map_first, matrix_minor_value_output.value_data()); } } // barrier is necessary here to avoid potential overlap (which can leads to deadlock) between // two different communicators (end of row_comm) #if 1 // FIXME: temporary hack till UCC is integrated into RAFT (so we can use UCC barrier with DASK // and MPI barrier with MPI) host_barrier(comm, handle.get_stream_view()); #else handle.get_stream_view().synchronize(); comm.barrier(); // currently, this is ncclAllReduce #endif } else { assert(!(matrix_minor_value_output.key_first())); assert(graph_view.get_number_of_local_vertices() == graph_view.get_number_of_local_adj_matrix_partition_rows()); auto val_first = thrust::make_permutation_iterator(vertex_value_input_first, vertex_first); thrust::scatter(handle.get_thrust_policy(), val_first, val_first + thrust::distance(vertex_first, vertex_last), vertex_first, matrix_minor_value_output.value_data()); } } } // namespace detail /** * @brief Copy vertex property values to the corresponding graph adjacency matrix row property * variables. 
* * This version fills the entire set of graph adjacency matrix row property values. * * @tparam GraphViewType Type of the passed non-owning graph object. * @tparam VertexValueInputIterator Type of the iterator for vertex properties. * @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and * handles to various CUDA libraries) to run graph algorithms. * @param graph_view Non-owning graph object. * @param vertex_value_input_first Iterator pointing to the vertex properties for the first * (inclusive) vertex (assigned to this process in multi-GPU). `vertex_value_input_last` (exclusive) * is deduced as @p vertex_value_input_first + @p graph_view.get_number_of_local_vertices(). * @param adj_matrix_row_value_output Wrapper used to access data storage to copy row properties * (for the rows assigned to this process in multi-GPU). */ template <typename GraphViewType, typename VertexValueInputIterator> void copy_to_adj_matrix_row( raft::handle_t const& handle, GraphViewType const& graph_view, VertexValueInputIterator vertex_value_input_first, row_properties_t<GraphViewType, typename std::iterator_traits<VertexValueInputIterator>::value_type>& adj_matrix_row_value_output) { if constexpr (GraphViewType::is_adj_matrix_transposed) { copy_to_matrix_minor(handle, graph_view, vertex_value_input_first, adj_matrix_row_value_output); } else { copy_to_matrix_major(handle, graph_view, vertex_value_input_first, adj_matrix_row_value_output); } } /** * @brief Copy vertex property values to the corresponding graph adjacency matrix row property * variables. * * This version fills only a subset of graph adjacency matrix row property values. [@p vertex_first, * @p vertex_last) specifies the vertices with new values to be copied to graph adjacency matrix row * property variables. * * @tparam GraphViewType Type of the passed non-owning graph object. * @tparam VertexIterator Type of the iterator for vertex identifiers. * @tparam VertexValueInputIterator Type of the iterator for vertex properties. * @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and * handles to various CUDA libraries) to run graph algorithms. * @param graph_view Non-owning graph object. * @param vertex_first Iterator pointing to the first (inclusive) vertex with new values to be * copied. v in [vertex_first, vertex_last) should be distinct (and should belong to this process in * multi-GPU), otherwise undefined behavior * @param vertex_last Iterator pointing to the last (exclusive) vertex with new values to be copied. * @param vertex_value_input_first Iterator pointing to the vertex properties for the first * (inclusive) vertex (assigned to this process in multi-GPU). `vertex_value_input_last` (exclusive) * is deduced as @p vertex_value_input_first + @p graph_view.get_number_of_local_vertices(). * @param adj_matrix_row_value_output Wrapper used to access data storage to copy row properties * (for the rows assigned to this process in multi-GPU). 
*/ template <typename GraphViewType, typename VertexIterator, typename VertexValueInputIterator> void copy_to_adj_matrix_row( raft::handle_t const& handle, GraphViewType const& graph_view, VertexIterator vertex_first, VertexIterator vertex_last, VertexValueInputIterator vertex_value_input_first, row_properties_t<GraphViewType, typename std::iterator_traits<VertexValueInputIterator>::value_type>& adj_matrix_row_value_output) { if constexpr (GraphViewType::is_adj_matrix_transposed) { copy_to_matrix_minor(handle, graph_view, vertex_first, vertex_last, vertex_value_input_first, adj_matrix_row_value_output); } else { copy_to_matrix_major(handle, graph_view, vertex_first, vertex_last, vertex_value_input_first, adj_matrix_row_value_output); } } /** * @brief Copy vertex property values to the corresponding graph adjacency matrix column property * variables. * * This version fills the entire set of graph adjacency matrix column property values. * * @tparam GraphViewType Type of the passed non-owning graph object. * @tparam VertexValueInputIterator Type of the iterator for vertex properties. * @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and * handles to various CUDA libraries) to run graph algorithms. * @param graph_view Non-owning graph object. * @param vertex_value_input_first Iterator pointing to the vertex properties for the first * (inclusive) vertex (assigned to this process in multi-GPU). `vertex_value_input_last` (exclusive) * is deduced as @p vertex_value_input_first + @p graph_view.get_number_of_local_vertices(). * @param adj_matrix_col_value_output Wrapper used to access data storage to copy column properties * (for the columns assigned to this process in multi-GPU). */ template <typename GraphViewType, typename VertexValueInputIterator> void copy_to_adj_matrix_col( raft::handle_t const& handle, GraphViewType const& graph_view, VertexValueInputIterator vertex_value_input_first, col_properties_t<GraphViewType, typename std::iterator_traits<VertexValueInputIterator>::value_type>& adj_matrix_col_value_output) { if constexpr (GraphViewType::is_adj_matrix_transposed) { copy_to_matrix_major(handle, graph_view, vertex_value_input_first, adj_matrix_col_value_output); } else { copy_to_matrix_minor(handle, graph_view, vertex_value_input_first, adj_matrix_col_value_output); } } /** * @brief Copy vertex property values to the corresponding graph adjacency matrix column property * variables. * * This version fills only a subset of graph adjacency matrix column property values. [@p * vertex_first, @p vertex_last) specifies the vertices with new values to be copied to graph * adjacency matrix column property variables. * * @tparam GraphViewType Type of the passed non-owning graph object. * @tparam VertexIterator Type of the iterator for vertex identifiers. * @tparam VertexValueInputIterator Type of the iterator for vertex properties. * @param handle RAFT handle object to encapsulate resources (e.g. CUDA stream, communicator, and * handles to various CUDA libraries) to run graph algorithms. * @param graph_view Non-owning graph object. * @param vertex_first Iterator pointing to the first (inclusive) vertex with new values to be * copied. v in [vertex_first, vertex_last) should be distinct (and should belong to this process in * multi-GPU), otherwise undefined behavior * @param vertex_last Iterator pointing to the last (exclusive) vertex with new values to be copied. 
* @param vertex_value_input_first Iterator pointing to the vertex properties for the first * (inclusive) vertex (assigned to this process in multi-GPU). `vertex_value_input_last` (exclusive) * is deduced as @p vertex_value_input_first + @p graph_view.get_number_of_local_vertices(). * @param adj_matrix_col_value_output Wrapper used to access data storage to copy column properties * (for the columns assigned to this process in multi-GPU). */ template <typename GraphViewType, typename VertexIterator, typename VertexValueInputIterator> void copy_to_adj_matrix_col( raft::handle_t const& handle, GraphViewType const& graph_view, VertexIterator vertex_first, VertexIterator vertex_last, VertexValueInputIterator vertex_value_input_first, col_properties_t<GraphViewType, typename std::iterator_traits<VertexValueInputIterator>::value_type>& adj_matrix_col_value_output) { if constexpr (GraphViewType::is_adj_matrix_transposed) { copy_to_matrix_major(handle, graph_view, vertex_first, vertex_last, vertex_value_input_first, adj_matrix_col_value_output); } else { copy_to_matrix_minor(handle, graph_view, vertex_first, vertex_last, vertex_value_input_first, adj_matrix_col_value_output); } } } // namespace cugraph
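// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the cuGraph sources above): the
// device_allgatherv paths in copy_to_matrix_major/minor first compute per-rank
// receive counts and an exclusive prefix sum of displacements. The standalone
// plain-C++ program below reproduces just that bookkeeping with made-up
// partition sizes, so the offset arithmetic can be checked on the host in
// isolation; it uses no cuGraph or RAFT calls.
// ---------------------------------------------------------------------------
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  std::vector<std::size_t> rx_counts{4, 7, 5, 9};  // hypothetical per-rank vertex partition sizes
  std::vector<std::size_t> displacements(rx_counts.size(), 0);
  for (std::size_t i = 1; i < rx_counts.size(); ++i) {
    displacements[i] = displacements[i - 1] + rx_counts[i - 1];  // exclusive prefix sum
  }
  // Rank i's values land at [displacements[i], displacements[i] + rx_counts[i])
  // in the concatenated output, mirroring matrix_major_value_output.value_data().
  for (std::size_t i = 0; i < rx_counts.size(); ++i) {
    std::cout << "rank " << i << ": offset " << displacements[i]
              << ", count " << rx_counts[i] << '\n';
  }
  return 0;
}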
//#include <thrust/device_ptr.h> //#include <thrust/scan.h> __device__ __forceinline__ void decode_subsequence( std::uint32_t subsequence_size, std::uint32_t current_subsequence, std::uint32_t subsequences_processed, UNIT_TYPE mask, std::uint32_t shift, std::uint32_t start_bit, std::uint32_t &in_pos, UNIT_TYPE* in_ptr, UNIT_TYPE &window, UNIT_TYPE &next, STATE_TYPE &state, std::uint32_t &last_word_unit, std::uint32_t &last_word_bit, std::uint32_t &num_symbols, std::uint32_t &out_pos, SYMBOL_TYPE* out_ptr, std::uint32_t &next_out_pos, const uint* __restrict__ table, const std::uint32_t bits_in_unit, const std::uint32_t number_of_states, std::uint32_t &last_at, STATE_TYPE &last_state, bool overflow, bool write_output) { // current unit in this subsequence std::uint32_t current_unit = 0; // current bit position in unit std::uint32_t at = start_bit; // number of symbols found in this subsequence std::uint32_t num_symbols_l = 0; UNIT_TYPE copy_next = next; auto load_next = [&]() { window = in_ptr[in_pos]; next = in_ptr[in_pos + 1]; copy_next = next; }; // shift to start if(current_subsequence == 0 || subsequences_processed == 0) { copy_next <<= bits_in_unit - at; next >>= at; window >>= at; window |= copy_next; } // perform overflow from previous subsequence if(overflow && current_subsequence > 0 && subsequences_processed == 0) { // decode first symbol const uint hit_p = table[state - number_of_states]; const STATE_TYPE next_state_p = (std::uint16_t) (hit_p & 0x0000FFFF); std::uint32_t taken = hit_p >> 24; state = (next_state_p << taken) | (~(mask << taken) & window); while(state < number_of_states) { std::uint32_t shift_w = window >> taken; ++taken; state = (state << 1) | (~(mask << 1) & shift_w); } if(at == 0) { ++num_symbols_l; if(write_output) { if(out_pos < next_out_pos) { out_ptr[out_pos] = (hit_p & ((std::uint32_t) 0x00FF0000)) >> 16; ++out_pos; } } } if(taken > 0) { copy_next = next; copy_next <<= bits_in_unit - taken; } else copy_next = 0; next >>= taken; window >>= taken; at += taken; window |= copy_next; // overflow if(at > bits_in_unit) { ++in_pos; load_next(); at -= bits_in_unit; window >>= at; next >>= at; copy_next <<= bits_in_unit - at; window |= copy_next; } if(at == bits_in_unit) { ++in_pos; load_next(); at = 0; } } while(current_unit < subsequence_size) { while(at < bits_in_unit) { last_state = state; const uint hit = table[state - number_of_states]; // decode a symbol const STATE_TYPE next_state = (std::uint16_t) (hit & 0x0000FFFF); std::uint32_t taken = hit >> 24; state = (next_state << taken) | (~(mask << taken) & window); while(state < number_of_states) { shift = window >> taken; ++taken; state = (state << 1) | (~(mask << 1) & shift); } ++num_symbols_l; if(write_output) { if(out_pos < next_out_pos) { out_ptr[out_pos] = (hit & ((std::uint32_t) 0x00FF0000)) >> 16; ++out_pos; } } if(taken > 0) { copy_next = next; copy_next <<= bits_in_unit - taken; } else copy_next = 0; next >>= taken; window >>= taken; last_word_bit = at; at += taken; window |= copy_next; last_word_unit = current_unit; } // refill decoder window if necessary ++current_unit; ++in_pos; load_next(); if(at == bits_in_unit) { at = 0; } else { at -= bits_in_unit; window >>= at; next >>= at; copy_next <<= bits_in_unit - at; window |= copy_next; } } num_symbols = num_symbols_l; last_at = at; } __global__ void phase1_decode_subseq( std::uint32_t subsequence_size, std::uint32_t total_num_subsequences, std::uint32_t table_size, UNIT_TYPE* in_ptr, const uint* __restrict__ table, uint4* sync_points, const std::uint32_t 
bits_in_unit, const std::uint32_t number_of_states, const STATE_TYPE initial_state, const std::uint32_t initial_bit) { const std::uint32_t gid = blockDim.x * blockIdx.x + threadIdx.x; if(gid * 4 < total_num_subsequences) { std::uint32_t current_subsequence = gid * 4; std::uint32_t in_pos = gid * subsequence_size * 4; // mask const UNIT_TYPE mask = (UNIT_TYPE) (0) - 1; // shift right const std::uint32_t shift = bits_in_unit - table_size; std::uint32_t out_pos = 0; std::uint32_t next_out_pos = 0; std::uint8_t* out_ptr = 0; // current state STATE_TYPE state = initial_state; // sliding window UNIT_TYPE window = in_ptr[in_pos]; UNIT_TYPE next = in_ptr[in_pos + 1]; // start bit of last codeword in this subsequence std::uint32_t last_word_unit = 0; std::uint32_t last_word_bit = 0; // last state in this subsequence STATE_TYPE last_state; // number of symbols found in this subsequence std::uint32_t num_symbols = 0; // bit position of next codeword std::uint32_t last_at = (gid == 0) ? bits_in_unit - initial_bit : 0; std::uint32_t subsequences_processed = 0; bool synchronised_flag = false; std::uint32_t last_subsequence = blockDim.x * (blockIdx.x + 1) * 4; if(last_subsequence > total_num_subsequences) last_subsequence = total_num_subsequences; auto sync = [&](std::uint32_t i) { if(subsequences_processed >= 4) { uint4 sync_point = sync_points[current_subsequence + i]; if(sync_point.x == last_word_unit && sync_point.y == last_word_bit && sync_point.w == last_state) { synchronised_flag = true; } } }; uint4 s0, s1, s2, s3; bool wrt0 = false; bool wrt1 = false; bool wrt2 = false; bool wrt3 = false; while(subsequences_processed < blockDim.x * 4) { if(!synchronised_flag && current_subsequence < last_subsequence) { decode_subsequence(subsequence_size, current_subsequence, subsequences_processed, mask, shift, last_at, in_pos, in_ptr, window, next, state, last_word_unit, last_word_bit, num_symbols, out_pos, out_ptr, next_out_pos, table, bits_in_unit, number_of_states, last_at, last_state, false, false); sync(0); s0 = {last_word_unit, last_word_bit, num_symbols, last_state}; wrt0 = true; } if(!synchronised_flag && current_subsequence < last_subsequence) { decode_subsequence(subsequence_size, current_subsequence + 1, subsequences_processed + 1, mask, shift, last_at, in_pos, in_ptr, window, next, state, last_word_unit, last_word_bit, num_symbols, out_pos, out_ptr, next_out_pos, table, bits_in_unit, number_of_states, last_at, last_state, false, false); sync(1); s1 = {last_word_unit, last_word_bit, num_symbols, last_state}; wrt1 = true; } if(!synchronised_flag && current_subsequence < last_subsequence) { decode_subsequence(subsequence_size, current_subsequence + 2, subsequences_processed + 2, mask, shift, last_at, in_pos, in_ptr, window, next, state, last_word_unit, last_word_bit, num_symbols, out_pos, out_ptr, next_out_pos, table, bits_in_unit, number_of_states, last_at, last_state, false, false); sync(2); s2 = {last_word_unit, last_word_bit, num_symbols, last_state}; wrt2 = true; } if(!synchronised_flag && current_subsequence < last_subsequence) { decode_subsequence(subsequence_size, current_subsequence + 3, subsequences_processed + 3, mask, shift, last_at, in_pos, in_ptr, window, next, state, last_word_unit, last_word_bit, num_symbols, out_pos, out_ptr, next_out_pos, table, bits_in_unit, number_of_states, last_at, last_state, false, false); sync(3); s3 = {last_word_unit, last_word_bit, num_symbols, last_state}; wrt3 = true; } if(wrt0) { sync_points[current_subsequence] = s0; wrt0 = false; } if(wrt1) { 
sync_points[current_subsequence + 1] = s1; wrt1 = false; } if(wrt2) { sync_points[current_subsequence + 2] = s2; wrt2 = false; } if(wrt3) { sync_points[current_subsequence + 3] = s3; wrt3 = false; } current_subsequence += 4; subsequences_processed += 4; __syncthreads(); } } } __global__ void phase2_synchronise_blocks( std::uint32_t subsequence_size, std::uint32_t total_num_subsequences, std::uint32_t table_size, std::uint32_t num_blocks, UNIT_TYPE* in_ptr, const uint* __restrict__ table, uint4* sync_points, SYMBOL_TYPE* block_synchronised, const std::uint32_t bits_in_unit, const std::uint32_t number_of_states, const STATE_TYPE initial_state) { const std::uint32_t gid = blockIdx.x; const std::uint32_t num_of_seams = num_blocks - 1; if(gid < num_of_seams) { // mask const UNIT_TYPE mask = (UNIT_TYPE) (0) - 1; // shift const std::uint32_t shift = bits_in_unit - table_size; std::uint32_t out_pos = 0; std::uint32_t next_out_pos = 0; std::uint8_t* out_ptr = 0; // jump to first sequence of the block std::uint32_t current_subsequence = (gid + 1) * blockDim.x; // search for synchronised sequences at the end of previous block uint4 sync_point = sync_points[current_subsequence - 1]; // current unit std::uint32_t in_pos = (current_subsequence - 1) * subsequence_size; // start bit of last codeword in this subsequence std::uint32_t last_word_unit = in_pos + sync_point.x; std::uint32_t last_word_bit = sync_point.y; // state STATE_TYPE state = sync_point.w; // last state in this subsequence STATE_TYPE last_state; // number of symbols found in this subsequence std::uint32_t num_symbols = 0; std::uint32_t last_at = sync_point.y; in_pos += sync_point.x; // sliding window UNIT_TYPE window = in_ptr[in_pos]; UNIT_TYPE next = in_ptr[in_pos + 1]; std::uint32_t subsequences_processed = 0; bool synchronised_flag = false; while(subsequences_processed < blockDim.x) { if(!synchronised_flag) { decode_subsequence(subsequence_size, current_subsequence, subsequences_processed, mask, shift, last_at, in_pos, in_ptr, window, next, state, last_word_unit, last_word_bit, num_symbols, out_pos, out_ptr, next_out_pos, table, bits_in_unit, number_of_states, last_at, last_state, true, false); sync_point = sync_points[current_subsequence]; // if sync point detected if(sync_point.x == last_word_unit && sync_point.y == last_word_bit && sync_point.w == last_state) { sync_point.z = num_symbols; block_synchronised[gid + 1] = 1; synchronised_flag = true; } // correct erroneous position data else { sync_point.x = last_word_unit; sync_point.y = last_word_bit; sync_point.z = num_symbols; sync_point.w = last_state; block_synchronised[gid + 1] = 0; } sync_points[current_subsequence] = sync_point; } ++current_subsequence; ++subsequences_processed; __syncthreads(); } } } __global__ void phase3_copy_num_symbols_from_sync_points_to_aux( std::uint32_t total_num_subsequences, const uint4* __restrict__ sync_points, std::uint32_t* subsequence_output_sizes) { const std::uint32_t gid = blockDim.x * blockIdx.x + threadIdx.x; if(gid < total_num_subsequences) { subsequence_output_sizes[gid] = sync_points[gid].z; } } __global__ void phase3_copy_num_symbols_from_aux_to_sync_points( std::uint32_t total_num_subsequences, uint4* sync_points, const std::uint32_t* __restrict__ subsequence_output_sizes) { const std::uint32_t gid = blockDim.x * blockIdx.x + threadIdx.x; if(gid < total_num_subsequences) { sync_points[gid].z = subsequence_output_sizes[gid]; } } __global__ void phase4_decode_write_output( std::uint32_t subsequence_size, std::uint32_t 
total_num_subsequences, std::uint32_t table_size, UNIT_TYPE* in_ptr, SYMBOL_TYPE* out_ptr, std::uint32_t output_size, const uint* __restrict__ table, const uint4* __restrict__ sync_points, const std::uint32_t bits_in_unit, const std::uint32_t number_of_states, const STATE_TYPE initial_state, const std::uint32_t initial_bit) { const std::uint32_t gid = blockDim.x * blockIdx.x + threadIdx.x; if(gid < total_num_subsequences) { // mask const UNIT_TYPE mask = (UNIT_TYPE) (0) - 1; // shift const size_t shift = bits_in_unit - table_size; // start bit of last codeword in this subsequence std::uint32_t last_word_unit = 0; std::uint32_t last_word_bit = 0; // state STATE_TYPE state = initial_state; // number of symbols found in this subsequence std::uint32_t num_symbols = 0; // bit position of next codeword std::uint32_t last_at = 0; // last state in this subsequence STATE_TYPE last_state; std::uint32_t subsequences_processed = 0; std::uint32_t current_subsequence = gid; std::uint32_t in_pos = current_subsequence * subsequence_size; uint4 sync_point = sync_points[current_subsequence]; uint4 next_sync_point = sync_points[current_subsequence + 1]; std::uint32_t out_pos = sync_point.z; std::uint32_t next_out_pos = gid == total_num_subsequences - 1 ? output_size : next_sync_point.z; if(gid > 0) { sync_point = sync_points[current_subsequence - 1]; in_pos = (current_subsequence - 1) * subsequence_size; state = sync_point.w; } // sliding window UNIT_TYPE window = in_ptr[in_pos]; UNIT_TYPE next = in_ptr[in_pos + 1]; // start bit std::uint32_t start = bits_in_unit - initial_bit; if(gid > 0) { in_pos += sync_point.x; start = sync_point.y; window = in_ptr[in_pos]; next = in_ptr[in_pos + 1]; } // overflow from previous subsequence, decode, write output decode_subsequence(subsequence_size, current_subsequence, subsequences_processed, mask, shift, start, in_pos, in_ptr, window, next, state, last_word_unit, last_word_bit, num_symbols, out_pos, out_ptr, next_out_pos, table, bits_in_unit, number_of_states, last_at, last_state, true, true); } } void cuhd::CUHDGPUDecoder::decode( UNIT_TYPE* d_input_buffer, size_t input_size, SYMBOL_TYPE* d_output_buffer, size_t output_size, std::uint32_t* d_table, uint4* d_sync_info, std::uint32_t* d_output_sizes, std::uint8_t* d_sequence_synced, std::uint8_t* h_sequence_synced, STATE_TYPE initial_state, std::uint32_t initial_bit, std::uint32_t number_of_states, size_t max_codeword_length, size_t preferred_subsequence_size, size_t threads_per_block) { size_t num_subseq = SDIV(input_size, preferred_subsequence_size); size_t num_sequences = SDIV(num_subseq, threads_per_block); const std::uint32_t bits_in_unit = sizeof(UNIT_TYPE) * 8; // launch phase 1 (intra-sequence synchronisation) hipLaunchKernelGGL(phase1_decode_subseq, dim3(num_sequences), dim3(threads_per_block), 0, 0, preferred_subsequence_size, num_subseq, max_codeword_length, d_input_buffer, d_table, d_sync_info, bits_in_unit, number_of_states, initial_state, initial_bit); // launch phase 2 (inter-sequence synchronisation) bool blocks_synchronised = true; do { hipLaunchKernelGGL(phase2_synchronise_blocks, dim3(num_sequences), dim3(threads_per_block), 0, 0, preferred_subsequence_size, num_subseq, max_codeword_length, num_sequences, d_input_buffer, d_table, d_sync_info, d_sequence_synced, bits_in_unit, number_of_states, initial_state); // aux->retrieve_sync_data(); hipMemcpy(h_sequence_synced, d_sequence_synced, num_sequences * sizeof(std::uint8_t), hipMemcpyDeviceToHost); bool zero_found = false; for(size_t i = 1; i < 
num_sequences-1; ++i) { if(h_sequence_synced[i] == 0) { zero_found = true; break; } } if(zero_found) { blocks_synchronised = false; } else { blocks_synchronised = true; } } while(!blocks_synchronised); // launch phase 3 (parallel prefix sum) hipLaunchKernelGGL(phase3_copy_num_symbols_from_sync_points_to_aux, dim3(num_sequences), dim3(threads_per_block), 0, 0, num_subseq, d_sync_info, d_output_sizes); //thrust::device_ptr<std::uint32_t> thrust_sync_points(d_output_sizes); //thrust::exclusive_scan(thrust_sync_points, //thrust_sync_points + num_subseq, thrust_sync_points); std::uint32_t *h_output_sizes = (std::uint32_t*) malloc ((num_subseq + 1) * sizeof(std::uint32_t)); h_output_sizes[0] = 0; hipMemcpy(h_output_sizes + 1, d_output_sizes, num_subseq * sizeof(std::uint32_t), hipMemcpyDeviceToHost); for (int i = 1; i < num_subseq; i++) { h_output_sizes[i] += h_output_sizes[i-1]; } hipMemcpy(d_output_sizes, h_output_sizes, num_subseq * sizeof(std::uint32_t), hipMemcpyHostToDevice); free(h_output_sizes); hipLaunchKernelGGL(phase3_copy_num_symbols_from_aux_to_sync_points, dim3(num_sequences), dim3(threads_per_block), 0, 0, num_subseq, d_sync_info, d_output_sizes); // launch phase 4 (final decoding) hipLaunchKernelGGL(phase4_decode_write_output, dim3(num_sequences), dim3(threads_per_block), 0, 0, preferred_subsequence_size, num_subseq, max_codeword_length, d_input_buffer, d_output_buffer, output_size, d_table, d_sync_info, bits_in_unit, number_of_states, initial_state, initial_bit); }
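// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the decoder above): phase 3 turns the
// per-subsequence symbol counts stored in sync_points[i].z into output write
// offsets via an exclusive prefix sum; the hand-rolled host loop in decode()
// is equivalent to the commented-out thrust::exclusive_scan. The standalone
// C++17 program below shows the same transform on made-up counts.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  std::vector<std::uint32_t> num_symbols{5, 3, 8, 2};  // hypothetical sync_points[i].z values
  std::vector<std::uint32_t> out_offsets(num_symbols.size());
  // out_offsets[i] = num_symbols[0] + ... + num_symbols[i-1]; subsequence i
  // starts writing its decoded symbols at this position in the output buffer.
  std::exclusive_scan(num_symbols.begin(), num_symbols.end(), out_offsets.begin(),
                      std::uint32_t{0});
  for (std::size_t i = 0; i < out_offsets.size(); ++i) {
    std::cout << "subsequence " << i << " writes at offset " << out_offsets[i] << '\n';
  }
  return 0;
}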
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_X 8 #define BLOCK_Y 8 #define BLOCK_Z 4 using namespace cv; namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ void SetBit(T &bitmap, unsigned char pos) { bitmap |= (1 << pos); } // Returns the root index of the UFTree __device__ unsigned Find(const int *s_buf, unsigned n) { while (s_buf[n] != n) { n = s_buf[n]; } return n; } __device__ unsigned FindAndCompress(int *s_buf, unsigned n) { unsigned id = n; while (s_buf[n] != n) { n = s_buf[n]; s_buf[id] = n; } return n; } // Merges the UFTrees of a and b, linking one root to the other __device__ void Union(int *s_buf, unsigned a, unsigned b) { bool done; do { a = Find(s_buf, a); b = Find(s_buf, b); if (a < b) { int old = atomicMin(s_buf + b, a); done = (old == b); b = old; } else if (b < a) { int old = atomicMin(s_buf + a, b); done = (old == a); a = old; } else { done = true; } } while (!done); } __global__ void InitLabeling(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels, unsigned int *last_voxel_conn) { unsigned x = (blockIdx.x * BLOCK_X + threadIdx.x) * 2; unsigned y = (blockIdx.y * BLOCK_Y + threadIdx.y) * 2; unsigned z = (blockIdx.z * BLOCK_Z + threadIdx.z) * 2; unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x; unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { #define P0 0x77707770777 unsigned long long P = 0L; if (img[img_index]) { P |= P0; } if (x + 1 < img.x) { if (img[img_index + 1]) { P |= (P0 << 1); } if (y + 1 < img.y && img[img_index + img.stepy / img.elem_size + 1]) { P |= (P0 << 5); } } if (y + 1 < img.y) { if (img[img_index + img.stepy / img.elem_size]) { P |= (P0 << 4); } } if (z + 1 < img.z) { if (img[img_index + img.stepz / img.elem_size]) { P |= (P0 << 16); } if (x + 1 < img.x) { if (img[img_index + img.stepz / img.elem_size + 1]) { P |= (P0 << 17); } //if (y + 1 < img.y && img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1]) { // P |= (P0 << 21); //} } if (y + 1 < img.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) { P |= (P0 << 20); } } } #undef P0 // checks on borders if (x == 0) { P &= 0xEEEEEEEEEEEEEEEE; } if (x + 1 >= img.x) { P &= 0x3333333333333333; } else if (x + 2 >= img.x) { P &= 0x7777777777777777; } if (y == 0) { P &= 0xFFF0FFF0FFF0FFF0; } if (y + 1 >= img.y) { P &= 0x00FF00FF00FF00FF; } else if (y + 2 >= img.y) { P &= 0x0FFF0FFF0FFF0FFF; } if (z == 0) { P &= 0xFFFFFFFFFFFF0000; } if (z + 1 >= img.z) { P &= 0x00000000FFFFFFFF; } //else if (z + 2 >= img.z) { // P &= 0x0000FFFFFFFFFFFF; //} // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors unsigned int connections = 0; bool connected = false; if (P > 0) { // Lower plane unsigned char * plane_data = img.data + img_index - img.stepz; unsigned lower_plane_index = labels_index - 2 * (labels.stepz / labels.elem_size); if (HasBit(P, 0) && plane_data[0 - img.stepy - 1]) { if (connected) { SetBit(connections, 0); } else { labels[labels_index] = lower_plane_index - 2 * (labels.stepy / labels.elem_size) - 2; connected = 
true; } } if ((HasBit(P, 1) && plane_data[0 - img.stepy]) || (HasBit(P, 2) && plane_data[0 - img.stepy + 1])) { if (connected) { SetBit(connections, 1); } else { labels[labels_index] = lower_plane_index - 2 * (labels.stepy / labels.elem_size); connected = true; } } if (HasBit(P, 3) && plane_data[0 - img.stepy + 2]) { if (connected) { SetBit(connections, 2); } else { labels[labels_index] = lower_plane_index - 2 * (labels.stepy / labels.elem_size) + 2; connected = true; } } if ((HasBit(P, 4) && plane_data[-1]) || (HasBit(P, 8) && plane_data[img.stepy - 1])) { if (connected) { SetBit(connections, 3); } else { labels[labels_index] = lower_plane_index - 2; connected = true; } } if ((HasBit(P, 5) && plane_data[0]) || (HasBit(P, 6) && plane_data[1]) || (HasBit(P, 9) && plane_data[img.stepy]) || (HasBit(P, 10) && plane_data[img.stepy + 1])) { if (connected) { SetBit(connections, 4); } else { labels[labels_index] = lower_plane_index; connected = true; } } if ((HasBit(P, 7) && plane_data[2]) || (HasBit(P, 11) && plane_data[img.stepy + 2])) { if (connected) { SetBit(connections, 5); } else { labels[labels_index] = lower_plane_index + 2; connected = true; } } if (HasBit(P, 12) && plane_data[2 * img.stepy - 1]) { if (connected) { SetBit(connections, 6); } else { labels[labels_index] = lower_plane_index + 2 * (labels.stepy / labels.elem_size) - 2; connected = true; } } if ((HasBit(P, 13) && plane_data[2 * img.stepy]) || (HasBit(P, 14) && plane_data[2 * img.stepy + 1])) { if (connected) { SetBit(connections, 7); } else { labels[labels_index] = lower_plane_index + 2 * (labels.stepy / labels.elem_size); connected = true; } } if (HasBit(P, 15) && plane_data[2 * img.stepy + 2]) { if (connected) { SetBit(connections, 8); } else { labels[labels_index] = lower_plane_index + 2 * (labels.stepy / labels.elem_size) + 2; connected = true; } } // Current planes plane_data += img.stepz; if ((HasBit(P, 16) && plane_data[0 - img.stepy - 1]) || (HasBit(P, 32) && plane_data[img.stepz - img.stepy - 1])) { if (connected) { SetBit(connections, 9); } else { labels[labels_index] = labels_index - 2 * (labels.stepy / labels.elem_size) - 2; connected = true; } } if ((HasBit(P, 17) && plane_data[0 - img.stepy]) || (HasBit(P, 18) && plane_data[0 - img.stepy + 1]) || (HasBit(P, 33) && plane_data[img.stepz - img.stepy]) || (HasBit(P, 34) && plane_data[img.stepz - img.stepy + 1])) { if (connected) { SetBit(connections, 10); } else { labels[labels_index] = labels_index - 2 * (labels.stepy / labels.elem_size); connected = true; } } if ((HasBit(P, 19) && plane_data[0 - img.stepy + 2]) || (HasBit(P, 35) && plane_data[img.stepz - img.stepy + 2])) { if (connected) { SetBit(connections, 11); } else { labels[labels_index] = labels_index - 2 * (labels.stepy / labels.elem_size) + 2; connected = true; } } if ((HasBit(P, 20) && plane_data[-1]) || (HasBit(P, 24) && plane_data[img.stepy - 1]) || (HasBit(P, 36) && plane_data[img.stepz - 1]) || (HasBit(P, 40) && plane_data[img.stepz + img.stepy - 1])) { if (connected) { SetBit(connections, 12); } else { labels[labels_index] = labels_index - 2; connected = true; } } } if (!connected) { labels[labels_index] = labels_index; } // Write connections if (x + 1 < labels.x) { labels[labels_index + 1] = connections; } else if (y + 1 < labels.y) { labels[labels_index + labels.stepy / labels.elem_size] = connections; } else if (z + 1 < labels.z) { labels[labels_index + labels.stepz / labels.elem_size] = connections; } else { *last_voxel_conn = connections; } } } __global__ void Merge(cuda::PtrStepSz3i labels, 
unsigned int *last_voxel_conn) { unsigned x = (blockIdx.x * BLOCK_X + threadIdx.x) * 2; unsigned y = (blockIdx.y * BLOCK_Y + threadIdx.y) * 2; unsigned z = (blockIdx.z * BLOCK_Z + threadIdx.z) * 2; unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { unsigned int connections; if (x + 1 < labels.x) { connections = labels[labels_index + 1]; } else if (y + 1 < labels.y) { connections = labels[labels_index + labels.stepy / labels.elem_size]; } else if (z + 1 < labels.z) { connections = labels[labels_index + labels.stepz / labels.elem_size]; } else { connections = *last_voxel_conn; } // Lower plane unsigned lower_plane_index = labels_index - 2 * (labels.stepz / labels.elem_size); if (HasBit(connections, 0)) { Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size + 1)); } if (HasBit(connections, 1)) { Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size)); } if (HasBit(connections, 2)) { Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size - 1)); } if (HasBit(connections, 3)) { Union(labels.data, labels_index, lower_plane_index - 2); } if (HasBit(connections, 4)) { Union(labels.data, labels_index, lower_plane_index); } if (HasBit(connections, 5)) { Union(labels.data, labels_index, lower_plane_index + 2); } if (HasBit(connections, 6)) { Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size - 1)); } if (HasBit(connections, 7)) { Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size)); } if (HasBit(connections, 8)) { Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size + 1)); } // Current planes if (HasBit(connections, 9)) { Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size + 1)); } if (HasBit(connections, 10)) { Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size)); } if (HasBit(connections, 11)) { Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size - 1)); } if (HasBit(connections, 12)) { Union(labels.data, labels_index, labels_index - 2); } } } __global__ void PathCompression(cuda::PtrStepSz3i labels) { unsigned x = 2 * (blockIdx.x * BLOCK_X + threadIdx.x); unsigned y = 2 * (blockIdx.y * BLOCK_Y + threadIdx.y); unsigned z = 2 * (blockIdx.z * BLOCK_Z + threadIdx.z); unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { FindAndCompress(labels.data, labels_index); } } __global__ void FinalLabeling(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels) { unsigned x = 2 * (blockIdx.x * BLOCK_X + threadIdx.x); unsigned y = 2 * (blockIdx.y * BLOCK_Y + threadIdx.y); unsigned z = 2 * (blockIdx.z * BLOCK_Z + threadIdx.z); unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { int label = labels[labels_index] + 1; // Current plane if (img[img_index]) { labels[labels_index] = label; } else { labels[labels_index] = 0; } if (x + 1 < labels.x) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (y + 1 < labels.y) { if (img[img_index + img.stepy + 
1]) labels[labels_index + (labels.stepy / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.stepy / labels.elem_size) + 1] = 0; } } } if (y + 1 < labels.y) { if (img[img_index + img.stepy]) labels[labels_index + (labels.stepy / labels.elem_size)] = label; else { labels[labels_index + (labels.stepy / labels.elem_size)] = 0; } } // Upper plane if (z + 1 < labels.z) { if (img[img_index + img.stepz / img.elem_size]) labels[labels_index + labels.stepz / labels.elem_size] = label; else { labels[labels_index + labels.stepz / labels.elem_size] = 0; } if (x + 1 < labels.x) { if (img[img_index + img.stepz / img.elem_size + 1]) labels[labels_index + labels.stepz / labels.elem_size + 1] = label; else { labels[labels_index + labels.stepz / labels.elem_size + 1] = 0; } if (y + 1 < labels.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1]) labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size) + 1] = label; else { labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size) + 1] = 0; } } } if (y + 1 < labels.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = label; else { labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = 0; } } } } } } class BKE_3D : public GpuLabeling3D<Connectivity3D::CONN_26> { private: dim3 grid_size_; dim3 block_size_; unsigned int* last_voxel_conn_; bool allocated_last_conn_; public: BKE_3D() {} void PerformLabeling() { d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1); // Decide whether last_pixel_ needs specific allocation or not // It only needs it in the case that input volume has 2 or more dimensions equals to 1 allocated_last_conn_ = false; if ((d_img_.x % 2 == 1) && (d_img_.y % 2 == 1) && (d_img_.z % 2 == 1)) { if (d_img_.x > 1 && d_img_.y > 1) { last_voxel_conn_ = reinterpret_cast<unsigned int*>(d_img_labels_.data + (d_img_labels_.z - 1) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 2; } else if (d_img_.x > 1 && d_img_.z > 1) { last_voxel_conn_ = reinterpret_cast<unsigned int*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 1) * d_img_labels_.stepy) + d_img_labels_.x - 2; } else if (d_img_.y > 1 && d_img_.z > 1) { last_voxel_conn_ = reinterpret_cast<unsigned int*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 1; } else { cudaMalloc(&last_voxel_conn_, sizeof(unsigned int)); allocated_last_conn_ = true; } } grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z); block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z); InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_voxel_conn_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); PathCompression << <grid_size_, block_size_ >> > (d_img_labels_); Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_voxel_conn_); //Mat1i 
block_info_final; //d_img_labels_.download(block_info_final); PathCompression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); // d_img_labels_.download(img_labels_); if (allocated_last_conn_) { cudaFree(last_voxel_conn_); } cudaDeviceSynchronize(); } private: void Alloc() { d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1); allocated_last_conn_ = false; if ((d_img_.x % 2 == 1) && (d_img_.y % 2 == 1) && (d_img_.z % 2 == 1)) { if (d_img_.x > 1 && d_img_.y > 1) { last_voxel_conn_ = reinterpret_cast<unsigned int*>(d_img_labels_.data + (d_img_labels_.z - 1) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 2; } else if (d_img_.x > 1 && d_img_.z > 1) { last_voxel_conn_ = reinterpret_cast<unsigned int*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 1) * d_img_labels_.stepy) + d_img_labels_.x - 2; } else if (d_img_.y > 1 && d_img_.z > 1) { last_voxel_conn_ = reinterpret_cast<unsigned int*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 1; } else { cudaMalloc(&last_voxel_conn_, sizeof(unsigned int)); allocated_last_conn_ = true; } } cudaDeviceSynchronize(); } void Dealloc() { if (allocated_last_conn_) { cudaFree(last_voxel_conn_); } cudaDeviceSynchronize(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z); block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z); InitLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_voxel_conn_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); PathCompression << <grid_size_, block_size_ >> > (d_img_labels_); Merge << <grid_size_, block_size_ >> > (d_img_labels_, last_voxel_conn_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); PathCompression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { perf_.start(); Alloc(); perf_.stop(); double alloc_timing = perf_.last(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); perf_.start(); Dealloc(); perf_.stop(); double dealloc_timing = perf_.last(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BKE_3D);
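// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the labeling code above): a sequential
// host-side version of the union-find used by Find/Union in the kernels.
// The device Union replaces the plain root writes with atomicMin and retries
// until the merge sticks, so concurrent merges stay consistent; the ordering
// rule (the smaller index becomes the root) is the same. The input labels
// below are made up for demonstration.
// ---------------------------------------------------------------------------
#include <iostream>
#include <vector>

static unsigned HostFind(const std::vector<int>& s, unsigned n) {
  while (s[n] != static_cast<int>(n)) n = s[n];  // walk up to the tree root
  return n;
}

static void HostUnion(std::vector<int>& s, unsigned a, unsigned b) {
  a = HostFind(s, a);
  b = HostFind(s, b);
  if (a < b) s[b] = a;        // link the larger root under the smaller one
  else if (b < a) s[a] = b;
}

int main() {
  std::vector<int> labels{0, 1, 2, 3, 4};  // every element starts as its own root
  HostUnion(labels, 1, 3);
  HostUnion(labels, 3, 4);
  for (unsigned i = 0; i < labels.size(); ++i) {
    std::cout << i << " -> root " << HostFind(labels, i) << '\n';  // 1, 3, 4 share root 1
  }
  return 0;
}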
//////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Depth bilateral filter namespace kfusion { namespace device { __global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2_inv_half, const float sigma_depth2_inv_half) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= src.cols || y >= src.rows) return; int value = src(y, x); int tx = min(x - ksz / 2 + ksz, src.cols - 1); int ty = min(y - ksz / 2 + ksz, src.rows - 1); float sum1 = 0; float sum2 = 0; for (int cy = max(y - ksz / 2, 0); cy < ty; ++cy) { for (int cx = max(x - ksz / 2, 0); cx < tx; ++cx) { int depth = src(cy, cx); float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy); float color2 = (value - depth) * (value - depth); float weight = __expf(-(space2 * sigma_spatial2_inv_half + color2 * sigma_depth2_inv_half)); sum1 += depth * weight; sum2 += weight; } } dst(y, x) = __float2int_rn(sum1 / sum2); } } // namespace device } // namespace kfusion void kfusion::device::bilateralFilter(const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth) { sigma_depth *= 1000; // meters -> mm dim3 block(64, 16); dim3 grid(divUp(src.cols(), block.x), divUp(src.rows(), block.y)); cudaSafeCall(cudaFuncSetCacheConfig(bilateral_kernel, cudaFuncCachePreferL1)); bilateral_kernel<<<grid, block>>>(src, dst, kernel_size, 0.5f / (sigma_spatial * sigma_spatial), 0.5f / (sigma_depth * sigma_depth)); cudaSafeCall(cudaGetLastError()); }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Depth truncation namespace kfusion { namespace device { __global__ void truncate_depth_kernel(PtrStepSz<ushort> depth, ushort max_dist /*mm*/) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < depth.cols && y < depth.rows) if (depth(y, x) > max_dist) depth(y, x) = 0; } } // namespace device } // namespace kfusion void kfusion::device::truncateDepth(Depth& depth, float max_dist /*meters*/) { dim3 block(64, 16); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y)); truncate_depth_kernel<<<grid, block>>>(depth, static_cast<ushort>(max_dist * 1000.f)); cudaSafeCall(cudaGetLastError()); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Build depth pyramid namespace kfusion { namespace device { __global__ void pyramid_kernel(const PtrStepSz<ushort> src, PtrStepSz<ushort> dst, float sigma_depth_mult3) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= dst.cols || y >= dst.rows) return; const int D = 5; int center = src(2 * y, 2 * x); int tx = min(2 * x - D / 2 + D, src.cols - 1); int ty = min(2 * y - D / 2 + D, src.rows - 1); int cy = max(0, 2 * y - D / 2); int sum = 0; int count = 0; for (; cy < ty; ++cy) for (int cx = max(0, 2 * x - D / 2); cx < tx; ++cx) { int val = src(cy, cx); if (abs(val - center) < sigma_depth_mult3) { sum += val; ++count; } } dst(y, x) = (count == 0) ? 
0 : sum / count; } } // namespace device } // namespace kfusion void kfusion::device::depthPyr(const Depth& source, Depth& pyramid, float sigma_depth) { sigma_depth *= 1000; // meters -> mm dim3 block(64, 16); dim3 grid(divUp(pyramid.cols(), block.x), divUp(pyramid.rows(), block.y)); pyramid_kernel<<<grid, block>>>(source, pyramid, sigma_depth * 3); cudaSafeCall(cudaGetLastError()); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Compute normals namespace kfusion { namespace device { __global__ void compute_normals_kernel(const PtrStepSz<ushort> depth, const Reprojector reproj, PtrStep<Normal> normals) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.cols || y >= depth.rows) return; const float qnan = numeric_limits<float>::quiet_NaN(); Normal n_out = make_float4(qnan, qnan, qnan, 0.f); if (x < depth.cols - 1 && y < depth.rows - 1) { // mm -> meters float z00 = depth(y, x) * 0.001f; float z01 = depth(y, x + 1) * 0.001f; float z10 = depth(y + 1, x) * 0.001f; if (z00 * z01 * z10 != 0) { float3 v00 = reproj(x, y, z00); float3 v01 = reproj(x + 1, y, z01); float3 v10 = reproj(x, y + 1, z10); float3 n = normalized(cross(v01 - v00, v10 - v00)); n_out = make_float4(-n.x, -n.y, -n.z, 0.f); } } normals(y, x) = n_out; } __global__ void mask_depth_kernel(const PtrStep<Normal> normals, PtrStepSz<ushort> depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < depth.cols || y < depth.rows) { float4 n = normals(y, x); if (isnan(n.x)) depth(y, x) = 0; } } } // namespace device } // namespace kfusion void kfusion::device::computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals) { dim3 block(64, 16); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y)); compute_normals_kernel<<<grid, block>>>(depth, reproj, normals); cudaSafeCall(cudaGetLastError()); mask_depth_kernel<<<grid, block>>>(normals, depth); cudaSafeCall(cudaGetLastError()); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Compute computePointNormals namespace kfusion { namespace device { __global__ void points_normals_kernel(const Reprojector reproj, const PtrStepSz<ushort> depth, PtrStep<Point> points, PtrStep<Normal> normals) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.cols || y >= depth.rows) return; const float qnan = numeric_limits<float>::quiet_NaN(); points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan); if (x >= depth.cols - 1 || y >= depth.rows - 1) return; // mm -> meters float z00 = depth(y, x) * 0.001f; float z01 = depth(y, x + 1) * 0.001f; float z10 = depth(y + 1, x) * 0.001f; if (z00 * z01 * z10 != 0) { float3 v00 = reproj(x, y, z00); float3 v01 = reproj(x + 1, y, z01); float3 v10 = reproj(x, y + 1, z10); float3 n = normalized(cross(v01 - v00, v10 - v00)); normals(y, x) = make_float4(-n.x, -n.y, -n.z, 0.f); points(y, x) = make_float4(v00.x, v00.y, v00.z, 0.f); } } } // namespace device } // namespace kfusion void kfusion::device::computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals) { dim3 block(64, 16); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y)); points_normals_kernel<<<grid, block>>>(reproj, depth, points, normals); cudaSafeCall(cudaGetLastError()); } 
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute dists

namespace kfusion {
namespace device {
__global__ void compute_dists_kernel(const PtrStepSz<ushort> depth, Dists dists, float2 finv, float2 c) {
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  if (x < depth.cols && y < depth.rows) {
    float xl = (x - c.x) * finv.x;
    float yl = (y - c.y) * finv.y;
    float lambda = sqrtf(xl * xl + yl * yl + 1);

    dists(y, x) = depth(y, x) * lambda * 0.001f; // meters
  }
}
} // namespace device
} // namespace kfusion

void kfusion::device::compute_dists(const Depth& depth, Dists dists, float2 f, float2 c) {
  dim3 block(64, 16);
  dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y));

  compute_dists_kernel<<<grid, block>>>(depth, dists, make_float2(1.f / f.x, 1.f / f.y), c);
  cudaSafeCall(cudaGetLastError());
}

namespace kfusion {
namespace device {
__global__ void resize_depth_normals_kernel(const PtrStep<ushort> dsrc, const PtrStep<float4> nsrc,
                                            PtrStepSz<ushort> ddst, PtrStep<float4> ndst) {
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  if (x >= ddst.cols || y >= ddst.rows) return;

  const float qnan = numeric_limits<float>::quiet_NaN();

  ushort d = 0;
  float4 n = make_float4(qnan, qnan, qnan, qnan);

  int xs = x * 2;
  int ys = y * 2;

  int d00 = dsrc(ys + 0, xs + 0);
  int d01 = dsrc(ys + 0, xs + 1);
  int d10 = dsrc(ys + 1, xs + 0);
  int d11 = dsrc(ys + 1, xs + 1);

  if (d00 * d01 != 0 && d10 * d11 != 0) {
    d = (d00 + d01 + d10 + d11) / 4;

    float4 n00 = nsrc(ys + 0, xs + 0);
    float4 n01 = nsrc(ys + 0, xs + 1);
    float4 n10 = nsrc(ys + 1, xs + 0);
    float4 n11 = nsrc(ys + 1, xs + 1);

    n.x = (n00.x + n01.x + n10.x + n11.x) * 0.25f;
    n.y = (n00.y + n01.y + n10.y + n11.y) * 0.25f;
    n.z = (n00.z + n01.z + n10.z + n11.z) * 0.25f;
  }
  ddst(y, x) = d;
  ndst(y, x) = n;
}
} // namespace device
} // namespace kfusion

void kfusion::device::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out,
                                         Normals& normals_out) {
  int in_cols = depth.cols();
  int in_rows = depth.rows();

  int out_cols = in_cols / 2;
  int out_rows = in_rows / 2;

  dim3 block(64, 16);
  dim3 grid(divUp(out_cols, block.x), divUp(out_rows, block.y));

  resize_depth_normals_kernel<<<grid, block>>>(depth, normals, depth_out, normals_out);
  cudaSafeCall(cudaGetLastError());
}

namespace kfusion {
namespace device {
__global__ void resize_points_normals_kernel(const PtrStep<Point> vsrc, const PtrStep<Normal> nsrc,
                                             PtrStepSz<Point> vdst, PtrStep<Normal> ndst) {
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;

  if (x >= vdst.cols || y >= vdst.rows) return;

  const float qnan = numeric_limits<float>::quiet_NaN();
  vdst(y, x) = ndst(y, x) = make_float4(qnan, qnan, qnan, 0.f);

  int xs = x * 2;
  int ys = y * 2;

  float3 d00 = tr(vsrc(ys + 0, xs + 0));
  float3 d01 = tr(vsrc(ys + 0, xs + 1));
  float3 d10 = tr(vsrc(ys + 1, xs + 0));
  float3 d11 = tr(vsrc(ys + 1, xs + 1));

  if (!isnan(d00.x * d01.x * d10.x * d11.x)) {
    float3 d = (d00 + d01 + d10 + d11) * 0.25f;
    vdst(y, x) = make_float4(d.x, d.y, d.z, 0.f);

    float3 n00 = tr(nsrc(ys + 0, xs + 0));
    float3 n01 = tr(nsrc(ys + 0, xs + 1));
    float3 n10 = tr(nsrc(ys + 1, xs + 0));
    float3 n11 = tr(nsrc(ys + 1, xs + 1));

    float3 n = (n00 + n01 + n10 + n11) * 0.25f;
    ndst(y, x) = make_float4(n.x, n.y, n.z, 0.f);
  }
}
} // namespace device
} // namespace kfusion

void kfusion::device::resizePointsNormals(const Points& points, const Normals& normals,
                                          Points& points_out, Normals& normals_out) {
  int out_cols = points.cols() / 2;
  int out_rows = points.rows() / 2;

  dim3 block(64, 16);
  dim3 grid(divUp(out_cols, block.x), divUp(out_rows, block.y));

  resize_points_normals_kernel<<<grid, block>>>(points, normals, points_out, normals_out);
  cudaSafeCall(cudaGetLastError());
}

namespace kfusion {
namespace device {
/* calculate for the projected triangle the bounding box in the image domain */
__host__ __device__ void get_bounding_box(float2 v1, float2 v2, float2 v3, int2& min, int2& max) {
  min.x = static_cast<int>(fmin(v1.x, fmin(v2.x, v3.x)));
  min.y = static_cast<int>(fmin(v1.y, fmin(v2.y, v3.y)));
  max.x = static_cast<int>(fmax(v1.x, fmax(v2.x, v3.x)));
  max.y = static_cast<int>(fmax(v1.y, fmax(v2.y, v3.y)));
}

__host__ __device__ __forceinline__ float edge_function(const float2& a, const float2& b, const float2& c) {
  return (c.x - a.x) * (b.y - a.y) - (c.y - a.y) * (b.x - a.x);
}

/* rasterise surface triangles */
__global__ void rasterise_surface_kernel(const Projector proj, const Aff3f vol2cam, const PtrSz<Point> vsrc,
                                         const PtrSz<Normal> nsrc, PtrStepSz<Point> points_out,
                                         PtrStep<Normal> normals_out) {
  int x = (threadIdx.x + blockIdx.x * blockDim.x) * 3;
  if ((x + 2) >= vsrc.size) return;

  /* get vertices and normals */
  float3 v1 = vol2cam * tr(vsrc[x]);
  float3 v2 = vol2cam * tr(vsrc[x + 1]);
  float3 v3 = vol2cam * tr(vsrc[x + 2]);

  /* project vertices onto the image plane */
  float2 coos1 = proj(v1);
  float2 coos2 = proj(v2);
  float2 coos3 = proj(v3);

  /* get 2-d triangle bounding box */
  int2 min;
  int2 max;
  get_bounding_box(coos1, coos2, coos3, min, max);

  /* check for validity of coordinates */
  if (min.x < 0 || min.y < 0 || max.x >= points_out.cols || max.y >= points_out.rows) return;

  /* used for smooth interpolation */
  float area = edge_function(coos1, coos2, coos3);

  /* shade pixels */
  for (int i = min.x; i < max.x; i++) {
    for (int j = min.y; j < max.y; j++) {
      /* coordinates of the centre of the pixel */
      float2 p = make_float2(i + 0.5f, j + 0.5f);

      float w0 = edge_function(coos2, coos3, p) / area;
      float w1 = edge_function(coos3, coos1, p) / area;
      float w2 = edge_function(coos1, coos2, p) / area;

      /* z-test: 0 marks an empty pixel */
      float z = w0 * v1.z + w1 * v2.z + w2 * v3.z;
      if (z < points_out(j, i).z || fabs(points_out(j, i).z) < 1e-7f) {
        points_out(j, i) = make_float4(w0 * v1.x + w1 * v2.x + w2 * v3.x,
                                       w0 * v1.y + w1 * v2.y + w2 * v3.y, z, 0.f);
      }
    }
  }
  /* derive normals from neighbouring rasterised points (the bbox check above
     keeps j + 1 and i + 1 inside the image) */
  for (int i = min.x; i < max.x; i++) {
    for (int j = min.y; j < max.y; j++) {
      float3 p00 = tr(points_out(j, i));
      float3 p10 = tr(points_out(j + 1, i));
      float3 p01 = tr(points_out(j, i + 1));

      float3 n = normalized(cross(p10 - p00, p01 - p00));
      normals_out(j, i) = make_float4(n.x, n.y, n.z, 1.f);
    }
  }
}
} // namespace device
} // namespace kfusion

void kfusion::device::rasteriseSurface(const Projector& proj, const Aff3f& vol2cam, const Surface& surface,
                                       Points& points_out, Normals& normals_out) {
  dim3 block(256);
  dim3 grid(divUp(surface.vertices.size() / 3, block.x));

  rasterise_surface_kernel<<<grid, block>>>(proj, vol2cam, surface.vertices, surface.normals, points_out,
                                            normals_out);
  cudaSafeCall(cudaGetLastError());
}
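// Worked example of the barycentric weights computed in rasterise_surface_kernel:
// edge_function(a, b, c) returns a signed parallelogram area, so for the triangle
// A = (0,0), B = (4,0), C = (0,4) and pixel centre P = (1,1),
//   area = edge_function(A, B, C) = -16,
//   w0 = edge_function(B, C, P) / area = -8 / -16 = 0.50,
//   w1 = edge_function(C, A, P) / area = -4 / -16 = 0.25,
//   w2 = edge_function(A, B, P) / area = -4 / -16 = 0.25.
// The weights sum to 1 and interpolate any per-vertex attribute (here the
// camera-space x, y and the depth z used for the z-test) at P; a negative weight
// would mean P lies outside the triangle.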
/*!
* \file nms.cu * \brief NMS Operator * \author Yanghao Li */ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include "../tensor/sort_op.h" #include <map> #include <vector> #include <string> #include <utility> #include <ctime> #include <iterator> #include "../operator_common.h" #include "../mshadow_op.h" #include "./nms-inl.h" #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) #define FRCNN_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { namespace { // copy score and init order // dets (n, 5); score (n, ); order (n, ) // count should be n (total anchors or proposals) template<typename Dtype> __global__ void CopyScoreKernel(const int count, const Dtype* dets, Dtype* score, int* order) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { score[index] = dets[index * 5 + 4]; order[index] = index; } } // reorder proposals according to order and keep the top_n proposals // prev_dets (n, 5); order (n, ); dets (n, 5) // count should be output anchor numbers (top_n) template<typename Dtype> __global__ void ReorderProposalsKernel(const int count, const Dtype* prev_dets, const int* order, Dtype* dets) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { const int order_i = order[index]; for (int j = 0; j < 5; j ++) { dets[index * 5 + j] = prev_dets[order_i * 5 + j]; } } } __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, uint64_t *dev_mask) { const int threadsPerBlock = sizeof(uint64_t) * 8; const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; uint64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, 
block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _nms(const mshadow::Tensor<gpu, 2>& boxes, const float nms_overlap_thresh, int *keep, int *num_out, uint64_t *mask_dev, uint64_t *mask_host) { /* @input boxes: (pre_nms_top_n, 5) @return keep @return num_out @tmp mask_dev @tmp mask_host */ const int threadsPerBlock = sizeof(uint64_t) * 8; const int boxes_num = boxes.size(0); const int boxes_dim = boxes.size(1); float* boxes_dev = boxes.dptr_; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); // TODO: need to be rewritten FRCNN_CUDA_CHECK(cudaMemcpy(mask_host, mask_dev, sizeof(uint64_t) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<uint64_t> remv(col_blocks); memset(&remv[0], 0, sizeof(uint64_t) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep[num_to_keep++] = i; uint64_t *p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; } // copy proposals to output // dets (top_n, 5); keep (top_n, ); out (top_n, ) // count should be top_n (total anchors or proposals) template<typename Dtype> __global__ void PrepareOutput(const int count, const Dtype* dets, const int* keep, const int out_size, const int batchIdx, Dtype* out, Dtype* score) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x) { // out[index * 5] = batchIdx; if (index < out_size) { int keep_i = keep[index]; for (int j = 0; j < 4; ++j) { out[index * 4 + j] = dets[keep_i * 5 + j]; } score[index] = dets[keep_i * 5 + 4]; } else { //int keep_i = keep[index % out_size]; for (int j = 0; j < 4; ++j) { out[index * 4 + j] = 0.0f; } score[index] = 0; } } } } // namespace } // namespace cuda } // namespace mshadow namespace mxnet { namespace op { template<typename xpu> class NMSGPUOp : public Operator{ public: explicit NMSGPUOp(NMSParam param) { this->param_ = param; } virtual void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data, const std::vector<TBlob> &aux_states) { using namespace mshadow; using namespace mshadow::expr; using namespace mshadow::cuda; CHECK_EQ(in_data.size(), 1); CHECK_EQ(out_data.size(), 2); CHECK_GT(req.size(), 1); // CHECK_EQ(req[proposal::kOut], kWriteTo); Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 3> proposals = in_data[nms::kBBox].get<xpu, 3, float>(s); // batch_idx, rois_idx, 5(x1, y1, x2, y2, score) Tensor<xpu, 3> out = out_data[nms::kOut].get<xpu, 3, float>(s); // batch_idx, rois_idx, 4(x1, y1, x2, y2) Tensor<xpu, 3> out_score = out_data[nms::kScore].get<xpu, 3, float>(s); // batch_idx, rois_idx, 1(score) uint64_t WORKSPACE_LIMIT = 1024 * 1024 * param_.workspace; // 256 MB should be sufficient Tensor<xpu, 1, uint8_t> workspace = ctx.requested[nms::kTempSpace].get_space_typed<xpu, 1, uint8_t>(Shape1(WORKSPACE_LIMIT), s); uint64_t allocated_bytes = 0ULL; uint64_t allocated_bytes_outside_loop = 0ULL; int nbatch = proposals.size(0); int count = proposals.size(1); // set to -1 for 
max int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count; rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count); int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n); /* copy anchors for all images in batch */ for (int i = 0; i < nbatch; i++) { float* batch_proposals = proposals.dptr_ + i * 5 * count; /* copy score to a continuous memory */ dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock); dim3 dimBlock(kMaxThreadsPerBlock); Tensor<xpu, 1> score(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(count)); allocated_bytes += count * sizeof(float); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; Tensor<xpu, 1, int> order(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(count)); allocated_bytes += count * sizeof(int); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; CheckLaunchParam(dimGrid, dimBlock, "CopyScore"); CopyScoreKernel<<<dimGrid, dimBlock>>>( count, batch_proposals, score.dptr_, order.dptr_); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); if (!param_.already_sorted) { /* argsort score, save order */ thrust::stable_sort_by_key(thrust::device, score.dptr_, score.dptr_ + score.size(0), order.dptr_, thrust::greater<float>()); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); } /* Reorder proposals according to order */ Tensor<xpu, 2> ordered_proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape2(rpn_pre_nms_top_n, 5)); allocated_bytes += rpn_pre_nms_top_n * 5 * sizeof(float); CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit"; dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals"); ReorderProposalsKernel<<<dimGrid, dimBlock>>>( rpn_pre_nms_top_n, batch_proposals, order.dptr_, ordered_proposals.dptr_); FRCNN_CUDA_CHECK(cudaPeekAtLastError()); /* perform nms */ std::vector<int> _keep(rpn_pre_nms_top_n); int out_size = 0; const int boxes_num = rpn_pre_nms_top_n; const int col_blocks = DIVUP(boxes_num, sizeof(uint64_t) * 8); // take special care when allocate memory of 8-byte alignment. 
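// Every carve-out so far is a whole number of floats/ints, so allocated_bytes is a
// multiple of 4 and (allocated_bytes % sizeof(uint64_t)) is either 0 or 4; adding
// that remainder below therefore rounds the offset up to the next 8-byte boundary,
// keeping the uint64_t mask tensor aligned (assuming workspace.dptr_ itself is
// 8-byte aligned, which holds for CUDA device allocations).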
      allocated_bytes += allocated_bytes % sizeof(uint64_t);
      Tensor<xpu, 1, uint64_t> mask_tensor(reinterpret_cast<uint64_t *>(workspace.dptr_ + allocated_bytes),
                                           Shape1(boxes_num * col_blocks));
      allocated_bytes += boxes_num * col_blocks * sizeof(uint64_t);
      CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";

      // the following line does not need to change, since it is the only place that requires host workspace
      Tensor<cpu, 1, uint64_t> mask_host_tensor =
          ctx.requested[nms::kTempSpace].get_host_space_typed<1, uint64_t>(Shape1(boxes_num * col_blocks));
      uint64_t *mask_dev = mask_tensor.dptr_;
      uint64_t *mask_host = mask_host_tensor.dptr_;
      _nms(ordered_proposals, param_.threshold, &_keep[0], &out_size, mask_dev, mask_host);

      /* copy nms result to gpu */
      Tensor<xpu, 1, int> keep(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes),
                               Shape1(_keep.size()));
      allocated_bytes += _keep.size() * sizeof(int);
      CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";

      FRCNN_CUDA_CHECK(cudaMemcpy(keep.dptr_, &_keep[0], sizeof(int) * _keep.size(),
                                  cudaMemcpyHostToDevice));  // less than 64K

      /* copy results after nms */
      dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
      CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
      PrepareOutput<<<dimGrid, dimBlock>>>(
          rpn_post_nms_top_n, ordered_proposals.dptr_, keep.dptr_, out_size, i,
          out.dptr_ + i * 4 * rpn_post_nms_top_n, out_score.dptr_ + i * rpn_post_nms_top_n);
      FRCNN_CUDA_CHECK(cudaPeekAtLastError());

      // recycle all bytes allocated within the loop
      allocated_bytes = allocated_bytes_outside_loop;
    }
  }

  virtual void Backward(const OpContext &ctx,
                        const std::vector<TBlob> &out_grad,
                        const std::vector<TBlob> &in_data,
                        const std::vector<TBlob> &out_data,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(in_grad.size(), 1);
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 3> gbbox = in_grad[nms::kBBox].get<xpu, 3, real_t>(s);
    Assign(gbbox, req[nms::kBBox], 0);
  }

 private:
  NMSParam param_;
};  // class NMSGPUOp

template<>
Operator* CreateOp<gpu>(NMSParam param) {
  return new NMSGPUOp<gpu>(param);
}

}  // namespace op
}  // namespace mxnet
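// A small worked example of the bitmask layout consumed by _nms(): with
// boxes_num = 100 and threadsPerBlock = 64, col_blocks = DIVUP(100, 64) = 2, and
// grid block (row r, column c) compares boxes [64r, 64r+63] against [64c, 64c+63].
// Bit i of mask[b * 2 + c] is set iff box b overlaps box 64c + i beyond the
// threshold (within the diagonal blocks only boxes after b are marked). The host
// scan then keeps box b only if no previously kept box has set b's bit, and ORs
// the kept box's mask row into remv so everything it suppresses stays suppressed.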
#include <hash/unordered_multiset.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/fill.h> #include <thrust/find.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/pair.h> #include <thrust/transform.h> namespace cudf { namespace { std::unique_ptr<column> search_ordered(table_view const& haystack, table_view const& needles, bool find_first, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS( column_order.empty() or static_cast<std::size_t>(haystack.num_columns()) == column_order.size(), "Mismatch between number of columns and column order."); CUDF_EXPECTS(null_precedence.empty() or static_cast<std::size_t>(haystack.num_columns()) == null_precedence.size(), "Mismatch between number of columns and null precedence."); // Allocate result column auto result = make_numeric_column( data_type{type_to_id<size_type>()}, needles.num_rows(), mask_state::UNALLOCATED, stream, mr); auto const out_it = result->mutable_view().data<size_type>(); // Handle empty inputs if (haystack.num_rows() == 0) { CUDF_CUDA_TRY( cudaMemsetAsync(out_it, 0, needles.num_rows() * sizeof(size_type), stream.value())); return result; } // This utility will ensure all corresponding dictionary columns have matching keys. // It will return any new dictionary columns created as well as updated table_views. auto const matched = dictionary::detail::match_dictionaries({haystack, needles}, stream); // Prepare to flatten the structs column auto const has_null_elements = has_nested_nulls(haystack) or has_nested_nulls(needles); auto const flatten_nullability = has_null_elements ? structs::detail::column_nullability::FORCE : structs::detail::column_nullability::MATCH_INCOMING; // 0-table_view, 1-column_order, 2-null_precedence, 3-validity_columns auto const t_flattened = structs::detail::flatten_nested_columns( matched.second.front(), column_order, null_precedence, flatten_nullability); auto const values_flattened = structs::detail::flatten_nested_columns(matched.second.back(), {}, {}, flatten_nullability); auto const t_d = table_device_view::create(t_flattened, stream); auto const values_d = table_device_view::create(values_flattened, stream); auto const& lhs = find_first ? *t_d : *values_d; auto const& rhs = find_first ? *values_d : *t_d; auto const& column_order_flattened = t_flattened.orders(); auto const& null_precedence_flattened = t_flattened.null_orders(); auto const column_order_dv = detail::make_device_uvector_async(column_order_flattened, stream); auto const null_precedence_dv = detail::make_device_uvector_async(null_precedence_flattened, stream); auto const count_it = thrust::make_counting_iterator<size_type>(0); auto const comp = row_lexicographic_comparator(nullate::DYNAMIC{has_null_elements}, lhs, rhs, column_order_dv.data(), null_precedence_dv.data()); auto const do_search = [find_first](auto&&... 
args) { if (find_first) { thrust::lower_bound(std::forward<decltype(args)>(args)...); } else { thrust::upper_bound(std::forward<decltype(args)>(args)...); } }; do_search(rmm::exec_policy(stream), count_it, count_it + haystack.num_rows(), count_it, count_it + needles.num_rows(), out_it, comp); return result; } struct contains_scalar_dispatch { template <typename Element> bool operator()(column_view const& haystack, scalar const& needle, rmm::cuda_stream_view stream) { CUDF_EXPECTS(haystack.type() == needle.type(), "scalar and column types must match"); using Type = device_storage_type_t<Element>; using ScalarType = cudf::scalar_type_t<Element>; auto d_haystack = column_device_view::create(haystack, stream); auto s = static_cast<const ScalarType*>(&needle); if (haystack.has_nulls()) { auto found_iter = thrust::find(rmm::exec_policy(stream), d_haystack->pair_begin<Type, true>(), d_haystack->pair_end<Type, true>(), thrust::make_pair(s->value(stream), true)); return found_iter != d_haystack->pair_end<Type, true>(); } else { auto found_iter = thrust::find(rmm::exec_policy(stream), // d_haystack->begin<Type>(), d_haystack->end<Type>(), s->value(stream)); return found_iter != d_haystack->end<Type>(); } } }; template <> bool contains_scalar_dispatch::operator()<cudf::list_view>(column_view const&, scalar const&, rmm::cuda_stream_view) { CUDF_FAIL("list_view type not supported yet"); } template <> bool contains_scalar_dispatch::operator()<cudf::struct_view>(column_view const& haystack, scalar const& needle, rmm::cuda_stream_view stream) { CUDF_EXPECTS(haystack.type() == needle.type(), "scalar and column types must match"); auto const scalar_table = static_cast<struct_scalar const*>(&needle)->view(); CUDF_EXPECTS(haystack.num_children() == scalar_table.num_columns(), "struct scalar and structs column must have the same number of children"); for (size_type i = 0; i < haystack.num_children(); ++i) { CUDF_EXPECTS(haystack.child(i).type() == scalar_table.column(i).type(), "scalar and column children types must match"); } // Prepare to flatten the structs column and scalar. auto const has_null_elements = has_nested_nulls(table_view{std::vector<column_view>{ haystack.child_begin(), haystack.child_end()}}) || has_nested_nulls(scalar_table); auto const flatten_nullability = has_null_elements ? structs::detail::column_nullability::FORCE : structs::detail::column_nullability::MATCH_INCOMING; // Flatten the input structs column, only materialize the bitmask if there is null in the input. auto const haystack_flattened = structs::detail::flatten_nested_columns(table_view{{haystack}}, {}, {}, flatten_nullability); auto const needle_flattened = structs::detail::flatten_nested_columns(scalar_table, {}, {}, flatten_nullability); // The struct scalar only contains the struct member columns. // Thus, if there is any null in the input, we must exclude the first column in the flattened // table of the input column from searching because that column is the materialized bitmask of // the input structs column. 
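// For example, a STRUCT<INT32, FLOAT32> column with nulls flattens (under FORCE
// nullability) to [materialized validity column, INT32 child, FLOAT32 child], so
// skipping the first column below leaves exactly the member columns that should
// participate in the row comparison; the scalar side never gains such a column
// because a struct scalar stores only its member columns.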
auto const haystack_flattened_content = haystack_flattened.flattened_columns(); auto const haystack_flattened_children = table_view{std::vector<column_view>{ haystack_flattened_content.begin() + static_cast<size_type>(has_null_elements), haystack_flattened_content.end()}}; auto const d_haystack_children_ptr = table_device_view::create(haystack_flattened_children, stream); auto const d_needle_ptr = table_device_view::create(needle_flattened, stream); auto const start_iter = thrust::make_counting_iterator<size_type>(0); auto const end_iter = start_iter + haystack.size(); auto const comp = row_equality_comparator(nullate::DYNAMIC{has_null_elements}, *d_haystack_children_ptr, *d_needle_ptr, null_equality::EQUAL); auto const found_iter = thrust::find_if( rmm::exec_policy(stream), start_iter, end_iter, [comp] __device__(auto const idx) { return comp(idx, 0); // compare haystack[idx] == val[0]. }); return found_iter != end_iter; } template <> bool contains_scalar_dispatch::operator()<cudf::dictionary32>(column_view const& haystack, scalar const& needle, rmm::cuda_stream_view stream) { auto dict_col = cudf::dictionary_column_view(haystack); // first, find the needle in the dictionary's key set auto index = cudf::dictionary::detail::get_index(dict_col, needle, stream); // if found, check the index is actually in the indices column return index->is_valid(stream) ? cudf::type_dispatcher(dict_col.indices().type(), contains_scalar_dispatch{}, dict_col.indices(), *index, stream) : false; } } // namespace namespace detail { bool contains(column_view const& haystack, scalar const& needle, rmm::cuda_stream_view stream) { if (haystack.is_empty()) { return false; } if (not needle.is_valid(stream)) { return haystack.has_nulls(); } return cudf::type_dispatcher( haystack.type(), contains_scalar_dispatch{}, haystack, needle, stream); } struct multi_contains_dispatch { template <typename Element> std::unique_ptr<column> operator()(column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { std::unique_ptr<column> result = make_numeric_column(data_type{type_to_id<bool>()}, needles.size(), copy_bitmask(needles), needles.null_count(), stream, mr); if (needles.is_empty()) { return result; } mutable_column_view result_view = result.get()->mutable_view(); if (haystack.is_empty()) { thrust::fill( rmm::exec_policy(stream), result_view.begin<bool>(), result_view.end<bool>(), false); return result; } auto hash_set = cudf::detail::unordered_multiset<Element>::create(haystack, stream); auto device_hash_set = hash_set.to_device(); auto d_needles_ptr = column_device_view::create(needles, stream); auto d_needles = *d_needles_ptr; if (needles.has_nulls()) { thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(needles.size()), result_view.begin<bool>(), [device_hash_set, d_needles] __device__(size_t index) { return d_needles.is_null_nocheck(index) || device_hash_set.contains(d_needles.element<Element>(index)); }); } else { thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(needles.size()), result_view.begin<bool>(), [device_hash_set, d_needles] __device__(size_t index) { return device_hash_set.contains(d_needles.element<Element>(index)); }); } return result; } }; template <> std::unique_ptr<column> multi_contains_dispatch::operator()<list_view>( column_view const&, column_view const&, rmm::cuda_stream_view, 
rmm::mr::device_memory_resource*) { CUDF_FAIL("list_view type not supported"); } template <> std::unique_ptr<column> multi_contains_dispatch::operator()<struct_view>( column_view const&, column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) { CUDF_FAIL("struct_view type not supported"); } template <> std::unique_ptr<column> multi_contains_dispatch::operator()<dictionary32>( column_view const& haystack_in, column_view const& needles_in, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { dictionary_column_view const haystack(haystack_in); dictionary_column_view const needles(needles_in); // first combine keys so both dictionaries have the same set auto needles_matched = dictionary::detail::add_keys(needles, haystack.keys(), stream); auto const needles_view = dictionary_column_view(needles_matched->view()); auto haystack_matched = dictionary::detail::set_keys(haystack, needles_view.keys(), stream); auto const haystack_view = dictionary_column_view(haystack_matched->view()); // now just use the indices for the contains column_view const haystack_indices = haystack_view.get_indices_annotated(); column_view const needles_indices = needles_view.get_indices_annotated(); return cudf::type_dispatcher(haystack_indices.type(), multi_contains_dispatch{}, haystack_indices, needles_indices, stream, mr); } std::unique_ptr<column> contains(column_view const& haystack, column_view const& needles, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(haystack.type() == needles.type(), "DTYPE mismatch"); return cudf::type_dispatcher( haystack.type(), multi_contains_dispatch{}, haystack, needles, stream, mr); } std::unique_ptr<column> lower_bound(table_view const& haystack, table_view const& needles, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return search_ordered(haystack, needles, true, column_order, null_precedence, stream, mr); } std::unique_ptr<column> upper_bound(table_view const& haystack, table_view const& needles, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return search_ordered(haystack, needles, false, column_order, null_precedence, stream, mr); } } // namespace detail // external APIs std::unique_ptr<column> lower_bound(table_view const& haystack, table_view const& needles, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::lower_bound( haystack, needles, column_order, null_precedence, rmm::cuda_stream_default, mr); } std::unique_ptr<column> upper_bound(table_view const& haystack, table_view const& needles, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::upper_bound( haystack, needles, column_order, null_precedence, rmm::cuda_stream_default, mr); } bool contains(column_view const& haystack, scalar const& needle) { CUDF_FUNC_RANGE(); return detail::contains(haystack, needle, rmm::cuda_stream_default); } std::unique_ptr<column> contains(column_view const& haystack, column_view const& needles, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::contains(haystack, needles, rmm::cuda_stream_default, mr); } } // namespace cudf
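// Semantics of the two ordered-search entry points, as a one-column sketch: for a
// haystack column sorted ascending as {10, 20, 20, 30} and needles {20, 25},
//   lower_bound -> {1, 3}  (first offset at which the needle could be inserted
//                           while keeping the haystack sorted)
//   upper_bound -> {3, 3}  (one past the last equal row)
// Multi-column tables behave the same way under the lexicographic comparator
// built from column_order and null_precedence.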
#include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/winograd.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_sparse_mmcsr<float>(const int M, const int N, const int K, const float alpha, const float* A, const int nnz, const float* B_nonzero_buf, const int* B_idx_pointer_buf, const int* B_nonzero_idx_buf, const float beta,float* C){ // For cuSPARSE, dense matrix A & C is column major, so we can instead compute C'=B'*A' // A & C are intrinsically transposed since c/c++ array is row major // as sparse B is expressed in CSR format, and CSC of B'is the same with CSR of B // use cusparse<t>cscmm if available, if not, transpose B (transposing may not affect the performance as above reason) //NON-BLOCKING NON-BLOCKING NON-BLOCKING CUSPARSE_CHECK(cusparseScsrmm(Caffe::cusparse_handle(),CUSPARSE_OPERATION_TRANSPOSE, //N,M,K,nnz, &alpha, K,M,N,nnz, &alpha, Caffe::cusparse_matdescr(), B_nonzero_buf, B_idx_pointer_buf, B_nonzero_idx_buf, A,K,&beta,C,N )); } template <> void caffe_gpu_sparse_mmcsr<double>(const int M, const int N, const int K, const double alpha, const double* A, const int nnz, const double* B_nonzero_buf, const int* B_idx_pointer_buf, const int* B_nonzero_idx_buf, const double beta,double* C){ //NON-BLOCKING NON-BLOCKING NON-BLOCKING CUSPARSE_CHECK(cusparseDcsrmm(Caffe::cusparse_handle(),CUSPARSE_OPERATION_TRANSPOSE, N,M,K,nnz, &alpha, Caffe::cusparse_matdescr(), B_nonzero_buf, B_idx_pointer_buf, B_nonzero_idx_buf, A,K,&beta,C,N )); } template <> void caffe_gpu_sparse_dense2csr<float>(const int M, const int N, const float* A, int* nnzPerRow, float* A_nonzero_buf, int* A_idx_pointer_buf, int* A_nonzero_idx_buf, int *nnz_total){ //cusparse<t>nnz() NON-BLOCKING //int nnz_total = 0; CUSPARSE_CHECK(cusparseSnnz(Caffe::cusparse_handle(), CUSPARSE_DIRECTION_COLUMN, N,M, Caffe::cusparse_matdescr(), A,N, nnzPerRow,//per row for c style row-major matrix nnz_total )); CUSPARSE_CHECK(cusparseSdense2csc(Caffe::cusparse_handle(), N,M, Caffe::cusparse_matdescr(), A,N, nnzPerRow,//per row for c style row-major matrix A_nonzero_buf,A_nonzero_idx_buf,A_idx_pointer_buf )); } template <> void caffe_gpu_sparse_dense2csr<double>(const int M, const int N, const double* A, int* nnzPerRow, double* 
A_nonzero_buf, int* A_idx_pointer_buf, int* A_nonzero_idx_buf,int *nnz_total){ //cusparse<t>nnz() NON-BLOCKING //int nnz_total = 0; CUSPARSE_CHECK(cusparseDnnz(Caffe::cusparse_handle(), CUSPARSE_DIRECTION_COLUMN, N,M, Caffe::cusparse_matdescr(), A,N, nnzPerRow,//per row for c style row-major matrix nnz_total )); CUSPARSE_CHECK(cusparseDdense2csc(Caffe::cusparse_handle(), N,M, Caffe::cusparse_matdescr(), A,N, nnzPerRow,//per row for c style row-major matrix A_nonzero_buf,A_nonzero_idx_buf,A_idx_pointer_buf )); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<unsigned long>(const int N, const unsigned long alpha, const unsigned long* X, unsigned long* Y) { NOT_IMPLEMENTED; } template <typename Dtype> __global__ void zerout_kernel(void * mutable_gpu_data, int count, Dtype thre){ //Dtype thre = Dtype(th); Dtype* data_ptr_tmp = static_cast<Dtype*>(mutable_gpu_data); // for(int i=0;i<count;i++){ // if(data_ptr_tmp[i]<thre && data_ptr_tmp[i]>(-thre)){ // data_ptr_tmp[i]=0; // } // } int tid = threadIdx.x + blockDim.x*blockIdx.x; while(tid<count){ if(data_ptr_tmp[tid]<=thre && data_ptr_tmp[tid]>=(-thre)){ data_ptr_tmp[tid] = 0; } tid += gridDim.x*blockDim.x; } } template <typename Dtype> __global__ void zerout_kernel(int count, const Dtype *x, Dtype *y, Dtype thre){ int tid = threadIdx.x + blockDim.x*blockIdx.x; while(tid<count){ if(x[tid]<=thre && x[tid]>=(-thre)){ y[tid] = 0; } else { y[tid] = x[tid]; } tid += gridDim.x*blockDim.x; } } template <typename Dtype> __global__ void zerout_kernel2(int count, Dtype *x, const Dtype *thresholds, int thresholds_len, Dtype weight){ int tid = threadIdx.x + blockDim.x*blockIdx.x; while(tid<count){ Dtype thre = thresholds[tid%thresholds_len]*weight; if(x[tid]<=thre && x[tid]>=(-thre)){ x[tid] = 0; } tid += gridDim.x*blockDim.x; } } template <typename Dtype> void caffe_gpu_zerout(void * mutable_gpu_data, const int count, Dtype th){ zerout_kernel<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, (Dtype *)mutable_gpu_data, (Dtype *)mutable_gpu_data, th); } template <typename Dtype> void caffe_gpu_zerout(int count, const Dtype *x, Dtype *y, Dtype thre){ zerout_kernel<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, x, y, thre); } template <typename Dtype> void caffe_gpu_zerout(int count, Dtype *x, const Dtype *thresholds, int thresholds_len, Dtype weight){ zerout_kernel2<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, x, thresholds, thresholds_len, weight); } template void 
caffe_gpu_zerout<int>(void * mutable_gpu_data, const int count, int th); template void caffe_gpu_zerout<unsigned int>(void * mutable_gpu_data, const int count, unsigned int th); template void caffe_gpu_zerout<long>(void * mutable_gpu_data, const int count, long th); template void caffe_gpu_zerout<unsigned long>(void * mutable_gpu_data, const int count, unsigned long th); template void caffe_gpu_zerout<float>(void * mutable_gpu_data, const int count, float th); template void caffe_gpu_zerout<double>(void * mutable_gpu_data, const int count, double th); template void caffe_gpu_zerout<int>(int count, const int *x, int *y, int th); template void caffe_gpu_zerout<unsigned int>(int count, const unsigned int *x, unsigned int *y, unsigned int th); template void caffe_gpu_zerout<long>(int count, const long *x, long *y, long th); template void caffe_gpu_zerout<float>(int count, const float *x, float *y, float th); template void caffe_gpu_zerout<double>(int count, const double *x, double *y, double th); template void caffe_gpu_zerout<float>(int count, float *x, const float *thresholds, int thresholds_len, float weight); template void caffe_gpu_zerout<double>(int count, double *x, const double *thresholds, int thresholds_len, double weight); /*template <typename Dtype> __global__ void if_zerout_fiber_kernel( int I, int J, int K, const Dtype *x, Dtype * y, int mode, Dtype thre) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if (0 == mode) { int nfiber = J*K; while (tid < nfiber) { int is_zero = 1; for (int i = 0; i < I; ++i) { if (x[i*J*K + tid] > thre || x[i*J*K + tid] < -thre) { is_zero = 0; break; } } y[tid] = is_zero; tid += gridDim.x*blockDim.x; } } else if (1 == mode) { int nfiber = J*K; while (tid < nfiber) { int is_zero = 1; for (int i = 0; i < I; ++i) { if (x[i*J*K + tid] > thre || x[i*J*K + tid] < -thre) { is_zero = 0; break; } } y[tid] = is_zero; tid += gridDim.x*blockDim.x; } } else { } }*/ template <typename Dtype> __global__ void shrinkage_kernel(void * mutable_gpu_data, int count, Dtype thre){ //Dtype thre = Dtype(th); Dtype* data_ptr_tmp = static_cast<Dtype*>(mutable_gpu_data); // for(int i=0;i<count;i++){ // if(data_ptr_tmp[i]<thre && data_ptr_tmp[i]>(-thre)){ // data_ptr_tmp[i]=0; // } // } int tid = threadIdx.x + blockDim.x*blockIdx.x; while(tid<count){ if(data_ptr_tmp[tid]<thre && data_ptr_tmp[tid]>(-thre)){ data_ptr_tmp[tid] = 0; } else if (data_ptr_tmp[tid] > 0) { data_ptr_tmp[tid] -= thre; } else { data_ptr_tmp[tid] += thre; } tid += gridDim.x*blockDim.x; } } template <typename Dtype> void caffe_gpu_shrinkage(void * mutable_gpu_data, const int count, Dtype th){ shrinkage_kernel<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(mutable_gpu_data, count, th); } template void caffe_gpu_shrinkage<int>(void * mutable_gpu_data, const int count, int th); template void caffe_gpu_shrinkage<unsigned int>(void * mutable_gpu_data, const int count, unsigned int th); template void caffe_gpu_shrinkage<long>(void * mutable_gpu_data, const int count, long th); template void caffe_gpu_shrinkage<float>(void * mutable_gpu_data, const int count, float th); template void caffe_gpu_shrinkage<double>(void * mutable_gpu_data, const int count, double th); template <typename Dtype> __global__ void if_zerout_kernel(const int n, const Dtype * x, Dtype *y, Dtype thre){ int tid = threadIdx.x + blockDim.x*blockIdx.x; while(tid<n){ y[tid] = (x[tid]<=thre && x[tid]>=(-thre)) ? 
1 : 0; tid += gridDim.x*blockDim.x; } } template <typename Dtype> __global__ void if_zerout_kernel(const int n, const Dtype * x, Dtype *y, const Dtype *thresholds, int thresholds_len, Dtype weight){ int tid = threadIdx.x + blockDim.x*blockIdx.x; while(tid<n){ Dtype thre = thresholds[tid%thresholds_len]*weight; y[tid] = (x[tid]<=thre && x[tid]>=(-thre)) ? 1 : 0; tid += gridDim.x*blockDim.x; } } template <typename Dtype> void caffe_gpu_if_zerout(const int n, const Dtype * x, Dtype *y, Dtype th){ if_zerout_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n, x, y, th); } template <typename Dtype> void caffe_gpu_if_zerout(const int n, const Dtype * x, Dtype *y, const Dtype *thresholds, int thresholds_len, Dtype weight) { if_zerout_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n, x, y, thresholds, thresholds_len, weight); } template void caffe_gpu_if_zerout<int>(const int n, const int * x, int *y, int th); template void caffe_gpu_if_zerout<unsigned int>(const int n, const unsigned int* x, unsigned int *y, unsigned int th); template void caffe_gpu_if_zerout<long>(const int n, const long* x, long *y, long th); template void caffe_gpu_if_zerout<float>(const int n, const float * x, float *y, float th); template void caffe_gpu_if_zerout<double>(const int n, const double* x, double *y, double th); template void caffe_gpu_if_zerout<float>(const int n, const float * x, float *y, const float *thresholds, int thresholds_len, float weight); template void caffe_gpu_if_zerout<double>(const int n, const double* x, double *y, const double *thresholds, int thresholds_len, double weight); template <typename Dtype> __global__ void if_nonzerout_kernel(const int n, const Dtype * x, Dtype *y, Dtype thre){ int tid = threadIdx.x + blockDim.x*blockIdx.x; while(tid<n){ y[tid] = (x[tid]<=thre && x[tid]>=(-thre)) ? 
0 : 1; tid += gridDim.x*blockDim.x; } } template <typename Dtype> void caffe_gpu_if_nonzerout(const int n, const Dtype * x, Dtype *y, Dtype th){ if_nonzerout_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n, x, y, th); } template void caffe_gpu_if_nonzerout<int>(const int n, const int * x, int *y, int th); template void caffe_gpu_if_nonzerout<unsigned int>(const int n, const unsigned int* x, unsigned int *y, unsigned int th); template void caffe_gpu_if_nonzerout<long>(const int n, const long* x, long*y, long th); template void caffe_gpu_if_nonzerout<unsigned long>(const int n, const unsigned long* x, unsigned long*y, unsigned long th); template void caffe_gpu_if_nonzerout<float>(const int n, const float * x, float *y, float th); template void caffe_gpu_if_nonzerout<double>(const int n, const double* x, double *y, double th); //template <> //void caffe_gpu_zerout<int>(void * mutable_gpu_data, int count, int th){ // zerout_kernel<<<32768,256>>>(mutable_gpu_data, count, th); //} // //template <> //void caffe_gpu_zerout<float>(void * mutable_gpu_data, int count, float th){ // zerout_kernel<<<32768,256>>>(mutable_gpu_data, count, th); //} // //template <> //void caffe_gpu_zerout<double>(void * mutable_gpu_data, int count, double th){ // zerout_kernel<<<32768,256>>>(mutable_gpu_data, count, th); //} void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<unsigned long>(const int N, const unsigned long alpha, unsigned long *X) { NOT_IMPLEMENTED; } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<unsigned long>(const int n, const unsigned long* x, const unsigned long* y, unsigned long * out) { NOT_IMPLEMENTED; } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y, int stride) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, stride, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y, int stride) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, stride, y)); } template <> void caffe_gpu_asum<unsigned long>(const int n, const unsigned long* x, unsigned long* y, int stride) { NOT_IMPLEMENTED; } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int 
n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } //Usage: dim3 block(c,1); dim3 thread(1,n); col_group_lasso_kernel<<<block,thread>>>(n,c,x,y); template <typename Dtype> __global__ void col_group_lasso_kernel(const int n, const int c, const Dtype *x, Dtype* y){ int n_offset = 0; //initialize y while(n_offset<n){ //int idx1 = (n_offset+threadIdx.y)*gridDim.x+blockIdx.x; int idx1 = (n_offset+threadIdx.y)*c+blockIdx.x; if(n_offset+threadIdx.y < n){//BUG: THE N MUST BE MULTIPLE TIMES OF BLOCKDIM.Y IN CURRENT IMPLEMENTATION !!! y[idx1] = x[idx1]*x[idx1]; } n_offset += blockDim.y; } __syncthreads(); //sum along columns n_offset=0; Dtype res = 0; while(n_offset<n){ int len = (n_offset + blockDim.y)<n ? blockDim.y : (n-n_offset);//valid threads to process while(len/2>0){ if(threadIdx.y<len/2){ //int idx1 = (n_offset+threadIdx.y)*gridDim.x+blockIdx.x; //int idx2 = (n_offset+threadIdx.y+(len+1)/2)*gridDim.x+blockIdx.x; int idx1 = (n_offset+threadIdx.y)*c+blockIdx.x; int idx2 = (n_offset+threadIdx.y+(len+1)/2)*c+blockIdx.x; y[idx1] += y[idx2]; } __syncthreads(); len=(len+1)/2; } //res += y[n_offset*gridDim.x+blockIdx.x]; res += y[n_offset*c+blockIdx.x]; n_offset += blockDim.y; } __syncthreads(); //copy n_offset=0; while(n_offset<n){ //int idx1 = (n_offset+threadIdx.y)*gridDim.x+blockIdx.x; int idx1 = (n_offset+threadIdx.y)*c + blockIdx.x; if(n_offset+threadIdx.y < n){ if(res){ y[idx1] = Dtype(sqrt(res)); }else{ y[idx1] = Dtype(0); } } n_offset += blockDim.y; } } //Usage: dim3 block(1,n); dim3 thread(c,1); row_group_lasso_kernel<<<block,thread>>>(n,c,x,y); template <typename Dtype> __global__ void row_group_lasso_kernel(const int n, const int c, const Dtype *x, Dtype* y){ int c_offset = 0; //initialize y while(c_offset<c){ //int idx1 = blockIdx.y * blockDim.x + c_offset + threadIdx.x; int idx1 = blockIdx.y * c + c_offset + threadIdx.x; if(c_offset + threadIdx.x < c){//WITHOUT THIS: THE C MUST BE MULTIPLE TIMES OF BLOCKDIM.X IN CURRENT IMPLEMENTATION !!! y[idx1] = x[idx1]*x[idx1]; } c_offset += blockDim.x; } __syncthreads(); //sum along rows c_offset=0; Dtype res = 0; while(c_offset<c){ int len = (c_offset + blockDim.x)<c ? blockDim.x : (c-c_offset);//valid threads to process while(len/2>0){ if(threadIdx.x<len/2){ //int idx1 = blockIdx.y * blockDim.x + c_offset + threadIdx.x; //int idx2 = blockIdx.y * blockDim.x + c_offset + threadIdx.x + (len+1)/2; int idx1 = blockIdx.y * c + c_offset + threadIdx.x; int idx2 = blockIdx.y * c + c_offset + threadIdx.x + (len+1)/2; y[idx1] += y[idx2]; } __syncthreads(); len=(len+1)/2; } //res += y[blockIdx.y * blockDim.x + c_offset]; res += y[blockIdx.y * c + c_offset]; c_offset += blockDim.x; } __syncthreads(); //copy c_offset=0; while(c_offset<c){ //int idx1 = blockIdx.y * blockDim.x + c_offset + threadIdx.x; int idx1 = blockIdx.y * c + c_offset + threadIdx.x; if(c_offset + threadIdx.x < c){ if(res){ y[idx1] = Dtype(sqrt(res)); }else{ y[idx1] = Dtype(0); } } c_offset += blockDim.x; } } #define XOFFSET(idx) ((idx)%blk_size_c) #define YOFFSET(idx) ((idx)/blk_size_c) //Usage: dim3 block(a,b); dim3 thread(get_threads_per_block,1); col_group_lasso_kernel<<<block,thread,sharemembytesize>>>(n,c,x,y); //one-D thread block processes two-D data block template <typename Dtype> __global__ void block_group_lasso_kernel(const int n, const int c, const Dtype *x, Dtype* y){ int c_offset = 0; const int blk_size_n = n%gridDim.y ? 
n/gridDim.y+1 : n/gridDim.y; const int blk_size_c = c%gridDim.x ? c/gridDim.x+1 : c/gridDim.x; //extern __shared__ Dtype shared_mem[]; //initialize 1D shared memory while(c_offset<blk_size_n*blk_size_c){ int offset_x = XOFFSET(c_offset + threadIdx.x); int offset_y = YOFFSET(c_offset + threadIdx.x); int x_pos = blockIdx.x * blk_size_c + XOFFSET(c_offset + threadIdx.x); int y_pos = blockIdx.y * blk_size_n + YOFFSET(c_offset + threadIdx.x); int idx1 = y_pos * c + x_pos; //if(x_pos < c && y_pos < n){//WITHOUT THIS: THE C MUST BE MULTIPLE TIMES OF BLOCKDIM.X IN CURRENT IMPLEMENTATION !!! if(offset_x < blk_size_c && offset_y < blk_size_n){//WITHOUT THIS: THE C MUST BE MULTIPLE TIMES OF BLOCKDIM.X IN CURRENT IMPLEMENTATION !!! y[idx1] = x[idx1]*x[idx1]; //shared_mem[c_offset + threadIdx.x] = x[idx1]*x[idx1]; }//else{ //shared_mem[c_offset + threadIdx.x] = 0; //} c_offset += blockDim.x; } __syncthreads(); //sum along block c_offset=0; Dtype res = 0; while(c_offset<blk_size_n*blk_size_c){ int len = (c_offset + blockDim.x)<blk_size_n*blk_size_c ? blockDim.x : (blk_size_n*blk_size_c-c_offset);//valid threads to process while(len/2>0){ if(threadIdx.x<len/2){ int x_pos = blockIdx.x * blk_size_c + XOFFSET(c_offset + threadIdx.x); int y_pos = blockIdx.y * blk_size_n + YOFFSET(c_offset + threadIdx.x); int idx1 = y_pos * c + x_pos; //Dtype sum_elem1 = (x_pos < c && y_pos < n) ? y[idx1] : 0; x_pos = blockIdx.x * blk_size_c + XOFFSET(c_offset + threadIdx.x + (len+1)/2); y_pos = blockIdx.y * blk_size_n + YOFFSET(c_offset + threadIdx.x + (len+1)/2); int idx2 = y_pos * c + x_pos; //Dtype sum_elem2 = (x_pos < c && y_pos < n) ? y[idx2] : 0; //BUG: we must ALWAYS store this data. Use shared memory with size of blk_size_n*blk_size_c!!! y[idx1] += y[idx2]; //shared_mem[c_offset+threadIdx.x] = sum_elem1+sum_elem2; //int idx1 = c_offset + threadIdx.x; //int idx2 = c_offset + threadIdx.x + (len+1)/2; //shared_mem[idx1] += shared_mem[idx2]; } __syncthreads(); len=(len+1)/2; } //res += y[blockIdx.y * c + c_offset]; int x_pos = blockIdx.x * blk_size_c + XOFFSET(c_offset); int y_pos = blockIdx.y * blk_size_n + YOFFSET(c_offset); int idx1 = y_pos * c + x_pos; res += y[idx1]; //res += shared_mem[c_offset]; c_offset += blockDim.x; } __syncthreads(); //copy c_offset=0; while(c_offset<blk_size_n*blk_size_c){ int offset_x = XOFFSET(c_offset + threadIdx.x); int offset_y = YOFFSET(c_offset + threadIdx.x); int x_pos = blockIdx.x * blk_size_c + XOFFSET(c_offset + threadIdx.x); int y_pos = blockIdx.y * blk_size_n + YOFFSET(c_offset + threadIdx.x); int idx1 = y_pos * c + x_pos; //if(x_pos < c && y_pos < n){ if(offset_x < blk_size_c && offset_y < blk_size_n){ if(res){ y[idx1] = Dtype(sqrt(res)); }else{ y[idx1] = Dtype(0); } } c_offset += blockDim.x; } } template <> void caffe_gpu_bar_group_lasso<int>(const int n, const int c, const int* x, int* y, bool along_column_or_row){ NOT_IMPLEMENTED; } template <> void caffe_gpu_bar_group_lasso<unsigned int>(const int n, const int c, const unsigned int* x, unsigned int* y, bool along_column_or_row){ NOT_IMPLEMENTED; } template <> void caffe_gpu_bar_group_lasso<long>(const int n, const int c, const long* x, long* y, bool along_column_or_row){ NOT_IMPLEMENTED; } template <> void caffe_gpu_bar_group_lasso<float>(const int n, const int c, const float* x, float* y, bool along_column_or_row){ int threads_per_block = Caffe::get_threads_per_block(); //LOG(INFO)<<"threads_per_block "<<threads_per_block; if(along_column_or_row){ dim3 block(c,1); dim3 thread(1,n>threads_per_block ? 
threads_per_block:n );//CAFFE_CUDA_NUM_THREADS col_group_lasso_kernel<<<block,thread>>>(n,c,x,y); }else{ dim3 block(1,n); dim3 thread(c>threads_per_block ? threads_per_block:c, 1);//CAFFE_CUDA_NUM_THREADS row_group_lasso_kernel<<<block,thread>>>(n,c,x,y); } CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_bar_group_lasso<double>(const int n, const int c, const double* x, double* y, bool along_column_or_row){ int threads_per_block = Caffe::get_threads_per_block(); //LOG(INFO)<<"threads_per_block "<<threads_per_block; if(along_column_or_row){ dim3 block(c,1); dim3 thread(1,n>threads_per_block ? threads_per_block:n );//CAFFE_CUDA_NUM_THREADS col_group_lasso_kernel<<<block,thread>>>(n,c,x,y); }else{ dim3 block(1,n); dim3 thread(c>threads_per_block ? threads_per_block:c, 1);//CAFFE_CUDA_NUM_THREADS row_group_lasso_kernel<<<block,thread>>>(n,c,x,y); } CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_block_group_lasso<float>(const int n, const int c, const int blk_size_n, const int blk_size_c, const float *x, float* y){ CHECK_LE(blk_size_n,n); CHECK_LE(blk_size_c,c); CHECK_EQ(n%blk_size_n,0); CHECK_EQ(c%blk_size_c,0); int threads_per_block = Caffe::get_threads_per_block(); //int shared_mem_bytes_per_block = Caffe::get_shared_mem_bytes_per_block(); const int blk_num_n = (n+blk_size_n-1)/blk_size_n; const int blk_num_c = (c+blk_size_c-1)/blk_size_c; const int blk_size = blk_size_n*blk_size_c; //const int sharedmem_bytes = blk_size*sizeof(float)*2; //CHECK_GE(shared_mem_bytes_per_block,sharedmem_bytes); dim3 block(blk_num_c,blk_num_n); dim3 thread(blk_size>threads_per_block?threads_per_block:blk_size, 1); //LOG(INFO)<< "blk_size_n:" << blk_size_n // << " blk_size_c:" << blk_size_c // << " blk_num_n:" << blk_num_n // << " blk_num_c:" << blk_num_c; //block_group_lasso_kernel<<<block,thread,sharedmem_bytes>>>(n, c,x,y); block_group_lasso_kernel<<<block,thread>>>(n, c,x,y); CUDA_POST_KERNEL_CHECK; } template <> void caffe_gpu_block_group_lasso<double>(const int n, const int c, const int blk_size_n, const int blk_size_c, const double *x, double* y){ NOT_IMPLEMENTED; } template <> void caffe_gpu_block_group_lasso<int>(const int n, const int c, const int blk_size_n, const int blk_size_c, const int *x, int* y){ NOT_IMPLEMENTED; } template <> void caffe_gpu_block_group_lasso<unsigned int>(const int n, const int c, const int blk_size_n, const int blk_size_c, const unsigned int *x, unsigned int* y){ NOT_IMPLEMENTED; } template <> void caffe_gpu_block_group_lasso<long>(const int n, const int c, const int blk_size_n, const int blk_size_c, const long *x, long* y){ NOT_IMPLEMENTED; } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // 
NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <typename Dtype> __global__ void div_checkzero_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = b[index] ? 
(a[index] / b[index]) : Dtype(0); } } template <typename Dtype> __global__ void inv_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = 1 / a[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div_checkzero<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_checkzero_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div_checkzero<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_checkzero_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_inv<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) inv_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_inv<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) inv_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) 
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(eltwise_multi, y[index] = y[index]*x[index] ) void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } template <typename Dtype> __global__ void scan_kernel(Dtype *g_odata, const Dtype *g_idata, int n) { __shared__ Dtype temp[2*64]; // allocated on invocation int thid = threadIdx.x; int pout = 0, pin = 1; // Load input into shared memory. // This is exclusive scan, so shift right by one // and set first element to 0 temp[pout*n + thid] = (thid > 0) ? 
g_idata[thid-1] : 0; __syncthreads(); for (int offset = 1; offset < n; offset *= 2) { pout = 1 - pout; // swap double buffer indices pin = 1 - pout; if (thid >= offset) temp[pout*n+thid] = temp[pin*n+thid] + temp[pin*n+thid - offset]; else temp[pout*n+thid] = temp[pin*n+thid]; __syncthreads(); } g_odata[thid] = temp[pout*n+thid]; // write output } // input matrix row major, output matrix col major when new_m >= n template <typename Dtype> __global__ void impose_sparsity_copyin_kernel( const Dtype *weight, double *weight_temp, const double *A, double *A_temp, const Dtype *mask, int m, int n, double impose_factor, int repeat) { int tid = threadIdx.x + blockDim.x*blockIdx.x; while (tid < repeat*(m + n)) { int i = tid/(m + n); int j = tid%(m + n); if (j < m) { if (mask[i*m + j] == 0) { for (int k = 0; k < n; ++k) { A_temp[(i*n + k)*(m + n) + j] = impose_factor*A[j*n + k]; } } else { for (int k = 0; k < n; ++k) { A_temp[(i*n + k)*(m + n) + j] = 0; } } //weight_temp[i*(m + n) + j] = 0; ///// } else { /*for (int k = 0; k < n; ++k) { A_temp[(i*n + k)*(m + n) + j] = 0; } A_temp[(i*n + j - m)*(m + n) + j] = 1;*/ weight_temp[i*(m + n) + j] = weight[i*n + j - m]; } tid += gridDim.x*blockDim.x; } } template <typename Dtype> __global__ void impose_sparsity_copyout_kernel( Dtype *weight, const double *weight_temp, int m, int n, int repeat) { int tid = threadIdx.x + blockDim.x*blockIdx.x; while (tid < repeat*n) { int i = tid/n; int j = tid%n; weight[i*n + j] = weight_temp[i*(m + n) + j]; tid += gridDim.x*blockDim.x; } } template <typename Dtype> void caffe_gpu_impose_sparsity( Dtype *weight, double *weight_temp, double **weight_temp_ptr, const double *A, double *A_temp, double **A_temp_ptr, Dtype *mask, double impose_factor, int M, int N, int repeat) { impose_sparsity_copyin_kernel<<<CAFFE_GET_BLOCKS(repeat*(M*M + N*N)), CAFFE_CUDA_NUM_THREADS>>>( weight, weight_temp, A, A_temp, mask, M*M, N*N, impose_factor, repeat); int info; CUBLAS_CHECK(cublasDgelsBatched( Caffe::cublas_handle(), CUBLAS_OP_N, M*M + N*N, N*N, 1, A_temp_ptr, M*M + N*N, weight_temp_ptr, M*M + N*N, &info, NULL, repeat)); if (0 != info) { LOG(FATAL) << info << "th parameter is invalid"; } impose_sparsity_copyout_kernel<<<CAFFE_GET_BLOCKS(repeat*N*N), CAFFE_CUDA_NUM_THREADS>>>( weight, weight_temp, M*M, N*N, repeat); } template void caffe_gpu_impose_sparsity<double>( double *weight, double *weight_temp, double **weight_temp_ptr, const double *A, double *A_temp, double **A_temp_ptr, double *mask, double impose_factor, int M, int N, int repeat); template void caffe_gpu_impose_sparsity<float>( float *weight, double *weight_temp, double **weight_temp_ptr, const double *A, double *A_temp, double **A_temp_ptr, float *mask, double impose_factor, int M, int N, int repeat); } // namespace caffe
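// Illustrative sketch (not part of the original source): a host-side reference for
// the semantics of block_group_lasso_kernel above, assuming it is launched as in
// caffe_gpu_block_group_lasso (n and c exact multiples of the block sizes, as the
// CHECK_EQs require). For each blk_size_n x blk_size_c tile of the row-major n x c
// matrix x, every element of y inside that tile receives the tile's L2 norm.
#include <cmath>

static void block_group_lasso_cpu_reference(
    int n, int c, int blk_size_n, int blk_size_c, const float* x, float* y) {
  for (int bn = 0; bn < n / blk_size_n; ++bn) {
    for (int bc = 0; bc < c / blk_size_c; ++bc) {
      float sum = 0.f;  // sum of squares over one tile
      for (int i = 0; i < blk_size_n; ++i) {
        for (int j = 0; j < blk_size_c; ++j) {
          const float v = x[(bn * blk_size_n + i) * c + (bc * blk_size_c + j)];
          sum += v * v;
        }
      }
      const float norm = std::sqrt(sum);  // broadcast the tile norm to every element
      for (int i = 0; i < blk_size_n; ++i) {
        for (int j = 0; j < blk_size_c; ++j) {
          y[(bn * blk_size_n + i) * c + (bc * blk_size_c + j)] = norm;
        }
      }
    }
  }
}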
#include <cuda.h> #include <cuda_runtime.h> namespace { #define CUDA_1D_KERNEL_LOOP(i, n) \ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // The number of cuda threads to use. 512 is used for backward compatibility constexpr int ROI_CUDA_NUM_THREADS = 512; // The maximum number of blocks to use in the default kernel call. constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096; /** * @brief Compute the number of blocks needed to run N threads. */ inline int ROI_GET_BLOCKS(const int N) { return std::max( std::min( (N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS, ROI_MAXIMUM_NUM_BLOCKS), // Use at least 1 block, since CUDA does not allow empty block 1); } template <typename T> __device__ T bilinear_interpolate( const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForwardKernel( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high, const int /*index*/ /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty *w1 = *w2 = *w3 = *w4 = 0.; *x_low = *x_high = *y_low = *y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = (T)*y_low; } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = (T)*x_low; } else { *x_high = *x_low + 1; } T ly = y - *y_low; T lx = x - *x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <> inline __device__ double gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return val; } template <typename T> __global__ void RoIAlignBackwardKernel( const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * 
pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { /* atomicAdd( offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd( offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd( offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd( offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); */ gpu_atomic_add( static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low); gpu_atomic_add( static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high); gpu_atomic_add( static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low); gpu_atomic_add( static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward } // namespace at::Tensor ROIAlign_Forward_CUDA( const at::Tensor input, const at::Tensor rois, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(input.is_contiguous()); AT_ASSERT(rois.is_contiguous()); AT_ASSERT(input.ndimension() == 4); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto proposals = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); // Output Tensor is (num_rois, C, pooled_height, pooled_width) auto output = torch::zeros({proposals, channels, pooled_height, pooled_width}, input.options()); auto count = output.numel(); AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_Forward_CUDA", ([&] { RoIAlignForwardKernel<scalar_t> <<<ROI_GET_BLOCKS(count), ROI_CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, input.data<scalar_t>(), static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.data<scalar_t>(), output.data<scalar_t>()); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return output; } at::Tensor ROIAlign_Backward_CUDA( const at::Tensor rois, const at::Tensor grad_output, int64_t b_size, int64_t channels, int64_t height, int64_t width, int64_t pooled_height, int64_t pooled_width, double spatial_scale, int64_t sampling_ratio) { AT_ASSERT(rois.is_contiguous()); AT_ASSERT(rois.ndimension() == 2); AT_ASSERT(rois.size(1) == 5); auto roi_cols = rois.size(1); AT_ASSERT(roi_cols == 4 || roi_cols == 5); // Output Tensor is (num_rois, C, 
pooled_height, pooled_width) // gradient wrt input features auto grad_in = torch::zeros({b_size, channels, height, width}, rois.options()); auto num_rois = rois.size(0); auto count = grad_output.numel(); AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlign_Backward_CUDA", ([&] { RoIAlignBackwardKernel<scalar_t> <<<ROI_GET_BLOCKS(count), ROI_CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, grad_output.data<scalar_t>(), num_rois, static_cast<scalar_t>(spatial_scale), channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_in.data<scalar_t>(), rois.data<scalar_t>()); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return grad_in; }
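// Illustrative sketch (not from the original source): a host-side reference of the
// bilinear interpolation used by the RoIAlign kernels above, convenient for unit
// testing the device path. It mirrors the same boundary handling: samples more than
// one pixel outside the feature map contribute zero, and coordinates at the high
// edge are clamped to the last row/column.
static float bilinear_interpolate_cpu(
    const float* data, int height, int width, float y, float x) {
  if (y < -1.f || y > height || x < -1.f || x > width) return 0.f;  // out of range
  if (y <= 0.f) y = 0.f;
  if (x <= 0.f) x = 0.f;
  int y_low = (int)y, x_low = (int)x;
  int y_high, x_high;
  if (y_low >= height - 1) { y_high = y_low = height - 1; y = (float)y_low; }
  else { y_high = y_low + 1; }
  if (x_low >= width - 1) { x_high = x_low = width - 1; x = (float)x_low; }
  else { x_high = x_low + 1; }
  const float ly = y - y_low, lx = x - x_low;
  const float hy = 1.f - ly, hx = 1.f - lx;
  return hy * hx * data[y_low * width + x_low] + hy * lx * data[y_low * width + x_high] +
         ly * hx * data[y_high * width + x_low] + ly * lx * data[y_high * width + x_high];
}
// e.g., on the 2x2 map {0, 1, 2, 3}, sampling (y, x) = (0.5, 0.5) yields 1.5.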
template <int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_forward_kernel( const scalar_t* input, const scalar_t* filters, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, scalar_t* output) { const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int featureIdx = blockIdx.y; const int filterIdx = featureIdx / numFiltersInBlock; const int IOOffset = numFeatures * sequenceLength * batchIdx + featureIdx * sequenceLength; const scalar_t* inputFeature = &input[IOOffset]; scalar_t* outputFeature = &output[IOOffset]; const scalar_t* inputFilter = &filters[filterIdx * FS]; assert(blockDim.x == SB); scalar_t filter[FS]; #pragma unroll for (int i = 0; i < FS; ++i) { filter[i] = inputFilter[i]; } __shared__ scalar_t temp[SB + FS]; zeroSharedMem<FS, SB, padding_l>(temp); const int numIterations = divUp<int, int>(sequenceLength, SB); for (int i = 0; i < numIterations; ++i) { // Read input into shared memory const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding_l>( inputFeature, inputOffset, sequenceLength, i, numIterations, (numIterations == 1), temp); __syncthreads(); scalar_t out = 0; #pragma unroll for (int j = 0; j < FS; ++j) { out += filter[j] * temp[tid + j]; } // Write output const int outputOffset = inputOffset; if ((outputOffset + tid) < sequenceLength) { outputFeature[outputOffset + tid] = out; } __syncthreads(); } } template <int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_grad_wrt_input_kernel( const scalar_t* input, const scalar_t* filters, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, scalar_t* output) { // input grad kernel is similar to forward kernel const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int featureIdx = blockIdx.y; const int filterIdx = featureIdx / numFiltersInBlock; const int IOOffset = numFeatures * sequenceLength * batchIdx + featureIdx * sequenceLength; const scalar_t* inputFeature = &input[IOOffset]; scalar_t* outputFeature = &output[IOOffset]; const scalar_t* inputFilter = &filters[filterIdx * FS]; assert(blockDim.x == SB); scalar_t filter[FS]; // The only change is loading the filter in reverse #pragma unroll for (int i = 0; i < FS; ++i) { filter[i] = inputFilter[FS - i - 1]; } __shared__ scalar_t temp[SB + FS]; const int padding = FS - padding_l - 1; zeroSharedMem<FS, SB, padding>(temp); __syncthreads(); const int numIterations = divUp<int, int>(sequenceLength, SB); for (int i = 0; i < numIterations; ++i) { // Read input into shared memory const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding>( inputFeature, inputOffset, sequenceLength, i, numIterations, false, temp); __syncthreads(); scalar_t out = 0; #pragma unroll for (int j = 0; j < FS; ++j) { out += filter[j] * temp[tid + j]; } // Write output const int outputOffset = inputOffset; if ((outputOffset + tid) < sequenceLength) { outputFeature[outputOffset + tid] = out; } __syncthreads(); } } // This is by far the most expensive kernel in terms of time taken. 
// Can be 16x slower than the forward or grad_wrt_input when filter size is 31 template <int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_grad_wrt_weights_firstpass_short_kernel( const scalar_t* input, const scalar_t* gradInput, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, int numHeads, float* output) { const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int filterIdx = blockIdx.y; const int numIterations = divUp<int, int>(sequenceLength, SB); float* tempOutputGradWeight = &output[filterIdx * FS * minibatch]; assert(blockDim.x == SB); __shared__ scalar_t tempInput[SB + FS]; __shared__ scalar_t tempGradInput[SB + FS]; // local weight accumulation float accumWeights[FS]; // Initialize memory for (int i = 0; i < FS; ++i) { accumWeights[i] = float(0.0); } // loop over each sequence within filterblock for (int idxInFilterBlock = 0; idxInFilterBlock < numFiltersInBlock; ++idxInFilterBlock) { const int featureOffset = batchIdx * numFeatures * sequenceLength + (filterIdx * numFiltersInBlock + idxInFilterBlock) * sequenceLength; const scalar_t* inputFeature = &input[featureOffset]; const scalar_t* gradInputFeature = &gradInput[featureOffset]; zeroSharedMem<FS, SB, padding_l>(tempInput); zeroSharedMem<FS, SB, (FS / 2)>(tempGradInput); __syncthreads(); for (int i = 0; i < numIterations; ++i) { const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding_l>( inputFeature, inputOffset, sequenceLength, i, numIterations, false, tempInput); load_input_to_shared<FS, SB, (FS / 2)>( gradInputFeature, inputOffset, sequenceLength, i, numIterations, false, tempGradInput); __syncthreads(); const int gradIndex = (FS / 2) + tid; scalar_t tempGrad = tempGradInput[gradIndex]; #pragma unroll for (int j = 0; j < FS; j++) { const int inputIndex = tid + j; accumWeights[j] += tempInput[inputIndex] * tempGrad; } __syncthreads(); } } // Row-major sum for (int filterWeightIdx = 0; filterWeightIdx < FS; ++filterWeightIdx) { float temp; if (tid < sequenceLength) { temp = accumWeights[filterWeightIdx]; } else { temp = float(0.0); } const int outputOffset = filterWeightIdx * minibatch + batchIdx; temp = blockReduce(temp); if (tid == 0) { tempOutputGradWeight[outputOffset] = temp; } } } template <int FS, int SB, typename scalar_t> __global__ void lightconv_grad_wrt_weights_secondpass_short_kernel( const float* input, const int minibatch, const int numFiltersInBlock, scalar_t* output) { assert(blockDim.x == SB); const int tid = threadIdx.x; const int filterIdx = blockIdx.x; const int filterWeightIdx = blockIdx.y; const int inputOffset = filterIdx * FS * minibatch + filterWeightIdx * minibatch; const float* tempInput = &input[inputOffset]; // read into shared memory for reduction int readIndex = tid; float sum = 0.0; while (readIndex < minibatch) { sum += tempInput[readIndex]; readIndex += SB; } float temp = blockReduce(sum); if (tid == 0) { output[blockIdx.x * FS + blockIdx.y] = temp; } } // This is by far the most expensive kernel in terms of time taken. 
// Can be 16x slower than the forward or grad_wrt_input when filter size is 31 template <int FS, int SB, int padding_l, typename scalar_t> __global__ void lightconv_grad_wrt_weights_firstpass_kernel( const scalar_t* input, const scalar_t* gradInput, int minibatch, int sequenceLength, int numFeatures, int numFiltersInBlock, float* output) { assert(blockDim.x == SB); const int tid = threadIdx.x; const int batchIdx = blockIdx.x; const int featureIdx = blockIdx.y; const int filterIdx = featureIdx / numFiltersInBlock; const int idxInFilterBlock = featureIdx % numFiltersInBlock; const int numIterations = divUp<int, int>(sequenceLength, SB); float temp; __shared__ scalar_t tempInput[SB + FS]; __shared__ scalar_t tempGradInput[SB + FS]; zeroSharedMem<FS, SB, padding_l>(tempInput); zeroSharedMem<FS, SB, (FS / 2)>(tempGradInput); __syncthreads(); float accumWeights[FS]; for (int i = 0; i < FS; ++i) { accumWeights[i] = float(0.0); } const int IOOffset = batchIdx * numFeatures * sequenceLength + featureIdx * sequenceLength; const scalar_t* inputFeature = &input[IOOffset]; const scalar_t* gradInputFeature = &gradInput[IOOffset]; float* tempOutputGradWeight = &output[filterIdx * FS * minibatch * numFiltersInBlock]; for (int i = 0; i < numIterations; ++i) { const int inputOffset = i * SB; load_input_to_shared<FS, SB, padding_l>( inputFeature, inputOffset, sequenceLength, i, numIterations, false, tempInput); load_input_to_shared<FS, SB, (FS / 2)>( gradInputFeature, inputOffset, sequenceLength, i, numIterations, false, tempGradInput); __syncthreads(); #pragma unroll for (int j = 0; j < FS; ++j) { accumWeights[j] += tempInput[tid + j] * tempGradInput[tid + (FS / 2)]; } __syncthreads(); } // Row-major sum for (int filterWeightIdx = 0; filterWeightIdx < FS; ++filterWeightIdx) { // Write to shared memory before reduction if (tid < sequenceLength) { temp = accumWeights[filterWeightIdx]; } else { temp = float(0.0); } temp = blockReduce(temp); const int outputOffset = filterWeightIdx * minibatch * numFiltersInBlock + batchIdx * numFiltersInBlock + idxInFilterBlock; if (tid == 0) { tempOutputGradWeight[outputOffset] = temp; } } } template <int FS, int SB, typename scalar_t> __global__ void lightconv_grad_wrt_weights_secondpass_kernel( const float* input, const int minibatch, const int numFiltersInBlock, scalar_t* output) { assert(blockDim.x == SB); const int tid = threadIdx.x; // What is the id within a minibatch const int filterIdx = blockIdx.x; const int filterWeightIdx = blockIdx.y; const int inputOffset = filterIdx * FS * minibatch * numFiltersInBlock + filterWeightIdx * minibatch * numFiltersInBlock; const float* tempInput = &input[inputOffset]; int readIndex = tid; float sum = float(0.0); while (readIndex < (minibatch * numFiltersInBlock)) { sum += tempInput[readIndex]; readIndex += SB; } float temp = blockReduce(sum); if (tid == 0) { output[blockIdx.x * FS + blockIdx.y] = temp; } }
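// Illustrative sketch (not in the original file): a scalar CPU reference of the
// depthwise convolution that lightconv_forward_kernel computes for one feature row.
// Output position t accumulates filter[j] * input[t + j - padding_l] with zero
// padding outside [0, sequenceLength), which is what zeroSharedMem plus
// load_input_to_shared arrange in shared memory on the device.
static void lightconv_forward_cpu(
    const float* input, const float* filter, int FS, int padding_l,
    int sequenceLength, float* output) {
  for (int t = 0; t < sequenceLength; ++t) {
    float out = 0.f;
    for (int j = 0; j < FS; ++j) {
      const int idx = t + j - padding_l;  // shifted tap position
      if (idx >= 0 && idx < sequenceLength) out += filter[j] * input[idx];
    }
    output[t] = out;
  }
}
// The grad-wrt-input kernel is the transpose of this operation: the same loop with
// the filter reversed and the padding mirrored (padding = FS - padding_l - 1).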
#include <cuda.h> #include <cuda_runtime.h> #if CUDA_VERSION >= 9000 && __CUDA_ARCH__ >= 300 #undef __shfl #define __shfl(var, srcLane, width) __shfl_sync(0xFFFFFFFFu, var, srcLane, width) #endif #include "cryptonight.h" #define LONG_SHL32 19 // 1<<19 (uint32_t* index) #define LONG_SHL64 18 // 1<<18 (uint64_t* index) #define LONG_LOOPS32 0x80000U #include "cn_aes.cuh" __global__ void cryptonight_gpu_phase1(const uint32_t threads, uint32_t * __restrict__ d_long_state, uint32_t * __restrict__ ctx_state, uint32_t * __restrict__ ctx_key1) { __shared__ uint32_t sharedMemory[1024]; const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x) >> 3; if(thread < threads) { cn_aes_gpu_init(sharedMemory); __syncthreads(); const uint32_t sub = (threadIdx.x & 0x7U) << 2; uint32_t *longstate = &d_long_state[(thread << LONG_SHL32) + sub]; uint32_t __align__(8) key[40]; MEMCPY8(key, &ctx_key1[thread * 40U], 20); uint32_t __align__(8) text[4]; MEMCPY8(text, &ctx_state[thread * 50U + sub + 16U], 2); for(int i = 0; i < LONG_LOOPS32; i += 32) { cn_aes_pseudo_round_mut(sharedMemory, text, key); MEMCPY8(&longstate[i], text, 2); } } } // -------------------------------------------------------------------------------------------------------------- __device__ __forceinline__ ulonglong2 cuda_mul128(const uint64_t multiplier, const uint64_t multiplicand) { ulonglong2 product; product.x = __umul64hi(multiplier, multiplicand); product.y = multiplier * multiplicand; return product; } static __forceinline__ __device__ void operator += (ulonglong2 &a, const ulonglong2 b) { a.x += b.x; a.y += b.y; } static __forceinline__ __device__ ulonglong2 operator ^ (const ulonglong2 &a, const ulonglong2 &b) { return make_ulonglong2(a.x ^ b.x, a.y ^ b.y); } __device__ __forceinline__ void MUL_SUM_XOR_DST_0(const uint64_t m, uint4 &a, void* far_dst) { ulonglong2 d = AS_UL2(far_dst); ulonglong2 p = cuda_mul128(m, d.x); p += AS_UL2(&a); AS_UL2(&a) = p ^ d; AS_UL2(far_dst) = p; } __global__ #if __CUDA_ARCH__ >= 500 //__launch_bounds__(128,12) /* force 40 regs to allow -l ...x32 */ #endif void cryptonight_gpu_phase2(const uint32_t threads, const uint16_t bfactor, const uint32_t partidx, uint64_t * __restrict__ d_long_state, uint32_t * __restrict__ d_ctx_a, uint32_t * __restrict__ d_ctx_b) { __shared__ __align__(16) uint32_t sharedMemory[1024]; cn_aes_gpu_init(sharedMemory); __syncthreads(); const uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x; if (thread < threads) { const uint32_t batchsize = ITER >> (2 + bfactor); const uint32_t start = partidx * batchsize; const uint32_t end = start + batchsize; void * ctx_a = (void*)(&d_ctx_a[thread << 2U]); void * ctx_b = (void*)(&d_ctx_b[thread << 2U]); uint4 A = AS_UINT4(ctx_a); // ld.global.u32.v4 uint4 B = AS_UINT4(ctx_b); uint64_t * long_state = &d_long_state[thread << LONG_SHL64]; for (int i = start; i < end; i++) // end = 262144 { uint4 C; uint32_t j = (A.x & E2I_MASK) >> 3; cn_aes_single_round_b((uint8_t*)sharedMemory, &long_state[j], A, &C); AS_UINT4(&long_state[j]) = C ^ B; // st.global.u32.v4 MUL_SUM_XOR_DST_0((AS_UL2(&C)).x, A, &long_state[(C.x & E2I_MASK) >> 3]); j = (A.x & E2I_MASK) >> 3; cn_aes_single_round_b((uint8_t*)sharedMemory, &long_state[j], A, &B); AS_UINT4(&long_state[j]) = C ^ B; MUL_SUM_XOR_DST_0((AS_UL2(&B)).x, A, &long_state[(B.x & E2I_MASK) >> 3]); } if (bfactor) { AS_UINT4(ctx_a) = A; AS_UINT4(ctx_b) = B; } } } // -------------------------------------------------------------------------------------------------------------- __device__ __forceinline__ void 
store_variant1(uint64_t* long_state, uint4 Z) { const uint32_t tmp = (Z.z >> 24); // __byte_perm(src, 0, 0x7773); const uint32_t index = (((tmp >> 3) & 6u) | (tmp & 1u)) << 1; Z.z = (Z.z & 0x00ffffffu) | ((tmp ^ ((0x75310u >> index) & 0x30u)) << 24); AS_UINT4(long_state) = Z; } __device__ __forceinline__ void store_variant2(uint64_t* long_state, uint4 Z) { const uint32_t tmp = (Z.z >> 24); // __byte_perm(src, 0, 0x7773); const uint32_t index = (((tmp >> 4) & 6u) | (tmp & 1u)) << 1; Z.z = (Z.z & 0x00ffffffu) | ((tmp ^ ((0x75312u >> index) & 0x30u)) << 24); AS_UINT4(long_state) = Z; } __device__ __forceinline__ void MUL_SUM_XOR_DST_1(const uint64_t m, uint4 &a, void* far_dst, uint64_t tweak) { ulonglong2 d = AS_UL2(far_dst); ulonglong2 p = cuda_mul128(m, d.x); p += AS_UL2(&a); AS_UL2(&a) = p ^ d; p.y = p.y ^ tweak; AS_UL2(far_dst) = p; } __global__ void monero_gpu_phase2(const uint32_t threads, const uint16_t bfactor, const uint32_t partidx, uint64_t * __restrict__ d_long_state, uint32_t * __restrict__ d_ctx_a, uint32_t * __restrict__ d_ctx_b, uint64_t * __restrict__ d_tweak) { __shared__ __align__(16) uint32_t sharedMemory[1024]; cn_aes_gpu_init(sharedMemory); __syncthreads(); const uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x; if (thread < threads) { const uint32_t batchsize = ITER >> (2 + bfactor); const uint32_t start = partidx * batchsize; const uint32_t end = start + batchsize; uint64_t tweak = d_tweak[thread]; void * ctx_a = (void*)(&d_ctx_a[thread << 2]); void * ctx_b = (void*)(&d_ctx_b[thread << 2]); uint4 A = AS_UINT4(ctx_a); // ld.global.u32.v4 uint4 B = AS_UINT4(ctx_b); uint64_t * long_state = &d_long_state[thread << LONG_SHL64]; for (int i = start; i < end; i++) // end = 262144 { uint4 C; uint32_t j = (A.x & E2I_MASK) >> 3; cn_aes_single_round_b((uint8_t*)sharedMemory, &long_state[j], A, &C); store_variant1(&long_state[j], C ^ B); // st.global MUL_SUM_XOR_DST_1((AS_UL2(&C)).x, A, &long_state[(C.x & E2I_MASK) >> 3], tweak); j = (A.x & E2I_MASK) >> 3; cn_aes_single_round_b((uint8_t*)sharedMemory, &long_state[j], A, &B); store_variant1(&long_state[j], C ^ B); MUL_SUM_XOR_DST_1((AS_UL2(&B)).x, A, &long_state[(B.x & E2I_MASK) >> 3], tweak); } if (bfactor) { AS_UINT4(ctx_a) = A; AS_UINT4(ctx_b) = B; } } } // -------------------------------------------------------------------------------------------------------------- __global__ void stellite_gpu_phase2(const uint32_t threads, const uint16_t bfactor, const uint32_t partidx, uint64_t * __restrict__ d_long_state, uint32_t * __restrict__ d_ctx_a, uint32_t * __restrict__ d_ctx_b, uint64_t * __restrict__ d_tweak) { __shared__ __align__(16) uint32_t sharedMemory[1024]; cn_aes_gpu_init(sharedMemory); __syncthreads(); const uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x; if (thread < threads) { const uint32_t batchsize = ITER >> (2 + bfactor); const uint32_t start = partidx * batchsize; const uint32_t end = start + batchsize; uint64_t tweak = d_tweak[thread]; void * ctx_a = (void*)(&d_ctx_a[thread << 2]); void * ctx_b = (void*)(&d_ctx_b[thread << 2]); uint4 A = AS_UINT4(ctx_a); // ld.global.u32.v4 uint4 B = AS_UINT4(ctx_b); uint64_t * long_state = &d_long_state[thread << LONG_SHL64]; for (int i = start; i < end; i++) // end = 262144 { uint4 C; uint32_t j = (A.x & E2I_MASK) >> 3; cn_aes_single_round_b((uint8_t*)sharedMemory, &long_state[j], A, &C); store_variant2(&long_state[j], C ^ B); // st.global MUL_SUM_XOR_DST_1((AS_UL2(&C)).x, A, &long_state[(C.x & E2I_MASK) >> 3], tweak); j = (A.x & E2I_MASK) >> 3; 
cn_aes_single_round_b((uint8_t*)sharedMemory, &long_state[j], A, &B); store_variant2(&long_state[j], C ^ B); MUL_SUM_XOR_DST_1((AS_UL2(&B)).x, A, &long_state[(B.x & E2I_MASK) >> 3], tweak); } if (bfactor) { AS_UINT4(ctx_a) = A; AS_UINT4(ctx_b) = B; } } } // -------------------------------------------------------------------------------------------------------------- __global__ void cryptonight_gpu_phase3(const uint32_t threads, const uint32_t * __restrict__ d_long_state, uint32_t * __restrict__ d_ctx_state, const uint32_t * __restrict__ d_ctx_key2) { __shared__ uint32_t sharedMemory[1024]; cn_aes_gpu_init(sharedMemory); __syncthreads(); const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x) >> 3; if(thread < threads) { const int sub = (threadIdx.x & 7) << 2; const uint32_t *longstate = &d_long_state[(thread << LONG_SHL32) + sub]; uint32_t key[40], text[4]; MEMCPY8(key, d_ctx_key2 + thread * 40, 20); MEMCPY8(text, d_ctx_state + thread * 50 + sub + 16, 2); for(int i = 0; i < LONG_LOOPS32; i += 32) { #pragma unroll for(int j = 0; j < 4; ++j) text[j] ^= longstate[i + j]; cn_aes_pseudo_round_mut(sharedMemory, text, key); } MEMCPY8(d_ctx_state + thread * 50 + sub + 16, text, 2); } } // -------------------------------------------------------------------------------------------------------------- extern int device_bfactor[MAX_GPUS]; __host__ void cryptonight_core_cuda(int thr_id, uint32_t blocks, uint32_t threads, uint64_t *d_long_state, uint32_t *d_ctx_state, uint32_t *d_ctx_a, uint32_t *d_ctx_b, uint32_t *d_ctx_key1, uint32_t *d_ctx_key2, int variant, uint64_t *d_ctx_tweak) { dim3 grid(blocks); dim3 block(threads); dim3 block4(threads << 2); dim3 block8(threads << 3); const uint16_t bfactor = (uint16_t) device_bfactor[thr_id]; const uint32_t partcount = 1U << bfactor; const uint32_t throughput = (uint32_t) (blocks*threads); const int bsleep = bfactor ? 100 : 0; const int dev_id = device_map[thr_id]; cryptonight_gpu_phase1 <<<grid, block8>>> (throughput, (uint32_t*) d_long_state, d_ctx_state, d_ctx_key1); exit_if_cudaerror(thr_id, __FUNCTION__, __LINE__); if(partcount > 1) usleep(bsleep); for (uint32_t i = 0; i < partcount; i++) { dim3 b = device_sm[dev_id] >= 300 ? block4 : block; if (variant == 0) cryptonight_gpu_phase2 <<<grid, b>>> (throughput, bfactor, i, d_long_state, d_ctx_a, d_ctx_b); else if (variant == 1 || cryptonight_fork == 8) monero_gpu_phase2 <<<grid, b>>> (throughput, bfactor, i, d_long_state, d_ctx_a, d_ctx_b, d_ctx_tweak); else if (variant == 2 && cryptonight_fork == 3) stellite_gpu_phase2 <<<grid, b>>> (throughput, bfactor, i, d_long_state, d_ctx_a, d_ctx_b, d_ctx_tweak); exit_if_cudaerror(thr_id, __FUNCTION__, __LINE__); if(partcount > 1) usleep(bsleep); } //cudaDeviceSynchronize(); //exit_if_cudaerror(thr_id, __FUNCTION__, __LINE__); cryptonight_gpu_phase3 <<<grid, block8>>> (throughput, (uint32_t*) d_long_state, d_ctx_state, d_ctx_key2); exit_if_cudaerror(thr_id, __FUNCTION__, __LINE__); }
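// Illustrative sketch (not part of the original source): cuda_mul128 above packs the
// 128-bit product with the HIGH 64 bits in .x and the LOW 64 bits in .y. A host
// reference using the GCC/Clang extension unsigned __int128 makes that convention
// explicit and is handy for cross-checking the device math.
#include <cstdint>

struct u128 { uint64_t hi, lo; };

static u128 mul128_host(uint64_t a, uint64_t b) {
  const unsigned __int128 p = (unsigned __int128)a * b;
  return { (uint64_t)(p >> 64),  // matches product.x = __umul64hi(a, b)
           (uint64_t)p };        // matches product.y = a * b
}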
#include <assert.h> #include <iostream> #include <iomanip> #include <chrono> #include <vector> #include <algorithm> #include <stdlib.h> #include <cuda.h> using namespace std; // to avoid integer overflow, n should not exceed this constant constexpr int kMaxN = 31; // kLimit = 100 million means we can use up to 3.2GB of RAM on each GPU to store // results before sorting; if more memory is needed, we detect and bail constexpr int64_t kLimit = 100000000; // set to "true" if you want each solution printed constexpr bool kPrint = false; static_assert(sizeof(int64_t) == 8, "int64_t is not 8 bytes"); static_assert(sizeof(int32_t) == 4, "int32_t is not 4 bytes"); static_assert(sizeof(int8_t) == 1, "int8_t is not 1 byte"); constexpr int64_t lsb = 1; constexpr int32_t lsb32 = 1; constexpr int div_up(int p, int q) { return (p + (q - 1)) / q; }; // a naive find-first-set implementation __device__ int ffsll(int64_t x) { for (int i = 0; i < 64; i++) if ((x >> i) & 1) return (i+1); return 0; }; // This type represents a solution by letting pos[m-1] be // the position of the closing m, for m = 1, 2, ... n. template <int n> using Positions = array<int8_t, n>; // All planar sequences (including duplicates) are stored in a vector for sorting. template <int n> using Results = vector<Positions<n>>; // 8-byte alignment probably helps with concurrent writes to adjacent instances template <int n> using PositionsGPUAligned = int64_t[(n + 7) / 8]; template <int n> using PositionsGPU = int8_t[div_up(n, 8) * 8]; // at depth k in the search, the bits of availability[k+1] represent the still // unused m, thus helping ensure that each (m, m) pair is placed just once template <int n> using Availability = int32_t[2 * n + 1]; // at depth k in the search, the bits of open[2*k+2] represent the positions // of all pairs that are open above; open[2*k+3] is the same for below; // k ranges over 0, 1, ..., 2n-1 template <int n> using Open = int64_t[4 * n + 2]; template <int n> using Stack = int8_t[24 * n]; template <int n> void print(const Positions<n>& pos); // For some reason, 4 is the magic number that gives us the best performance in this algorithm constexpr int kThreadsPerBlock = 4; // subdivide the search tree for this many logical threads // due to the naive math below, numbers like 2^r-1 work much better than 2^r constexpr int kNumLogicalThreads = 16383; // To do: Run on CPU (right now this only runs on GPU). template <int n> __device__ void dfs(int64_t* p_result, Availability<n> &availability, Open<n> &open, Stack<n> &stack, PositionsGPUAligned<n>& pgpualigned, const int32_t logical_thread_index) { constexpr int two_n = 2 * n; constexpr int64_t msb = lsb << (int64_t)(n - 1); constexpr int64_t nn1 = lsb << (2 * n - 1); PositionsGPU<n> &pos = *((PositionsGPU<n>*)(&pgpualigned[0])); // initially none of the numbers 1, 2, ..., n have been placed; // this is represented by setting bits 0..n-1 to 1 in avail availability[0] = msb | (msb - 1); open[0] = 0; open[1] = 0; int top = 0; int8_t k, m, d, num_open; // The following "push" and "pop" should be lambdas, but unfortunately CUDA C++ does not // yet support reference capture in lambdas that can run on both CPU and GPU. // Hoping for a compiler fix soon.
#define push(k, m, d, num_open) do { \ stack[top++] = k; \ stack[top++] = m; \ stack[top++] = d; \ stack[top++] = num_open; \ } while (0) #define pop(k, m, d, num_open) do { \ num_open = stack[--top]; \ d = stack[--top]; \ m = stack[--top]; \ k = stack[--top]; \ } while (0) // every solution starts out by opening a below-pair at position 0 push(0, -1, 0, 0); while (top) { pop(k, m, d, num_open); int64_t* openings = open + 2 * k + 2; openings[0] = openings[-2]; openings[1] = openings[-1]; int32_t avail = availability[k]; // On CPU, this macro trick improves performance by over 10% by letting the compiler // take advantage of the fact that d can only be 0 or 1. // Makes no difference on GPU. #define place_macro(d) do { \ if (m>=0) { \ pos[m] = k; \ avail ^= (lsb32 << m); \ openings[d] &= (openings[d] - 1); \ } else { \ openings[d] |= (nn1 >> k); \ ++num_open; \ } \ } while (0) if (d) { place_macro(1); } else { place_macro(0); } ++k; availability[k] = avail; if (k == two_n) { // this is equivalent to results.push_back(pos); // p_result[0] is a counter; after it follow the data // atomic increment of the counter in device memory (i.e., RAM DIMMs on the GPU board) int64_t cnt = atomicAdd((unsigned long long*)p_result, (unsigned long long)1); if (cnt < kLimit) { constexpr int kAlignedCnt = (n + 7) / 8; int64_t* dst = p_result + 1 + (kAlignedCnt * cnt); #pragma unroll for (int i=0; i<kAlignedCnt; ++i) { dst[i] = pgpualigned[i]; } } // if cnt reaches or exceeds kLimit, that will be detected and the program will fail } else { // A super-naive way to divide the work across threads. A hash of the current state at k_limit // determines whether the current thread should be pursuing a completion from this state or not. // The depth k_limit is chosen empirically to be both shallow enough so it's quick to reach and // deep enough to allow plenty of concurrency. This seems to work remarkably well in practice. constexpr int8_t k_limit = (n > 19 ? (8 + (n / 3)) : (n - 5)); if (kNumLogicalThreads > 1 && k == k_limit && // multiply by a nice Mersenne prime to divide the work evenly across the threads... it works well... uint64_t(131071 * (openings[1] - openings[0]) + avail) % kNumLogicalThreads != logical_thread_index) { // some other thread will work on this continue; } // Now push on the stack the children of the current node in the search tree. int8_t offset = k - two_n - 2; for (d=0; d<2; ++d) { if (openings[d]) { // if there is an opening, try closing it //m = offset + __ffsll(openings[d]); m = offset + ffsll(openings[d]); // m could be -1, for example if the decision at pos k-1 was to open; // only m from 0 ..
n - 1 are useful if (((unsigned)m < n) && ((avail >> m) & 1)) { if (m || k <= n) { // this dedups L <==> R reversal twins push(k, m, d, num_open); } } } } if (num_open < n) { push(k, -1, 1, num_open); push(k, -1, 0, num_open); } } } } template <int n> __global__ void dfs_gpu(int64_t* p_result) { __shared__ Availability<n> availability[kThreadsPerBlock]; // PositionsGPU<n> pos; __shared__ Open<n> open[kThreadsPerBlock]; // there are 2*n positions with 3 decisions per position and 4 bytes per decision on the stack __shared__ Stack<n> stack[kThreadsPerBlock]; __shared__ PositionsGPUAligned<n> pgpualigned[kThreadsPerBlock]; // the size of the arrays above adds up to ~2KB for n=32 // this bodes well for fitting thousands of threads inside on-chip memory // assume 1D grid of 1D blocks of threads const int32_t result_index = blockIdx.x * kThreadsPerBlock + threadIdx.x; dfs<n>(p_result, availability[threadIdx.x], open[threadIdx.x], stack[threadIdx.x], pgpualigned[threadIdx.x], result_index); } // Sort the vector of solution sequences and count the unique ones. // Optionally print each unique one. template <int n> int64_t unique_count(Results<n> &results) { int64_t total = results.size(); int64_t unique = total; sort(results.begin(), results.end()); if (kPrint && total) { print<n>(results[0]); } for (int i=1; i<total; ++i) { if (results[i] == results[i-1]) { --unique; } else if (kPrint) { print<n>(results[i]); } } return unique; } // Return number of milliseconds elapsed since Jan 1, 1970 00:00 GMT. long unixtime() { using namespace chrono; return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count(); } // Start and manage the computation on GPU device "device" template <int n> void run_gpu_d(int64_t* count, Results<n>& final_results) { assert(sizeof(int64_t) == 8); constexpr int64_t kAlignedCnt = (n + 7) / 8; int64_t* results_device; cudaMalloc((void**)&results_device, (1 + kLimit * kAlignedCnt) * sizeof(int64_t)); cudaMemcpy(results_device, count, sizeof(int64_t), cudaMemcpyHostToDevice); int blocks_x = div_up(kNumLogicalThreads, kThreadsPerBlock); dim3 blocks(blocks_x); dfs_gpu<n><<<blocks, kThreadsPerBlock>>>(results_device); cudaMemcpy(count, results_device, sizeof(int64_t), cudaMemcpyDeviceToHost); if (*count >= kLimit) { cout << "Result for n = " << n << " will be bogus because the GPU exceeded " << kLimit << " solutions.\n"; } int64_t r_count = *count; int64_t data_size = (1 + r_count * kAlignedCnt) * sizeof(int64_t); int64_t* results_host = (int64_t*) malloc (data_size); cudaMemcpy(results_host, results_device, data_size, cudaMemcpyDeviceToHost); cudaFree(results_device); // Compact results into a vector for (int i=0; i<r_count; ++i) { Positions<n> pos; PositionsGPU<n>& gpos = *((PositionsGPU<n>*)(results_host + 1 + (kAlignedCnt * i))); for (int j=0; j<n; ++j) { pos[j] = gpos[j]; } final_results.push_back(pos); } free(results_host); } // Start a CPU thread to manage each GPU device and wait for the computation to end. template <int n> void run_gpu(const int64_t* known_results) { cout << "\n"; cout << "------\n"; cout << unixtime() << " Computing PL(2, " << n << ")\n"; if (n > kMaxN) { cout << unixtime() << " Sorry, n = " << n << " exceeds the max allowed " << kMaxN << "\n"; return; } int64_t count = 0; int64_t total; Results<n> final_results; run_gpu_d<n>(&count, final_results); // Sort and unique count on CPU.
total = unique_count<n>(final_results); cout << unixtime() << " Result " << total << " for n = " << n; if (n < 0 || n >= 64 || known_results[n] == -1) { cout << " is NEW"; } else if (known_results[n] == total) { cout << " MATCHES previously published result"; } else { cout << " MISMATCHES previously published result " << known_results[n]; } cout << "\n------\n\n"; } void init_known_results(int64_t (&known_results)[64]) { for (int i=0; i<64; ++i) { known_results[i] = 0; } // There are no published results for n >= 29 for (int i = 29; i<64; ++i) { if (i % 4 == 3 || i % 4 == 0) { known_results[i] = -1; } } known_results[3] = 1; known_results[4] = 0; known_results[7] = 0; known_results[8] = 4; known_results[11] = 16; known_results[12] = 40; known_results[15] = 194; known_results[16] = 274; known_results[19] = 2384; known_results[20] = 4719; known_results[23] = 31856; known_results[24] = 62124; known_results[27] = 426502; known_results[28] = 817717; } template <int n> void print(const Positions<n>& pos) { cout << unixtime() << " Sequence "; int s[2 * n]; for (int i=0; i<2*n; ++i) { s[i] = -1; } for (int m=1; m<=n; ++m) { int k2 = pos[m-1]; int k1 = k2 - m - 1; assert(0 <= k1); assert(k2 < 2*n); assert(s[k1] == -1); assert(s[k2] == -1); s[k1] = s[k2] = m; } for (int i=0; i<2*n; ++i) { const int64_t m = s[i]; assert(0 <= m); assert(m <= n); cout << setw(3) << m; } cout << "\n"; } int main(int argc, char **argv) { int64_t known_results[64]; init_known_results(known_results); /* we cannot do 3 and 4 anymore due to unrolling run_gpu<3>(known_results); run_gpu<4>(known_results); */ run_gpu<7>(known_results); run_gpu<8>(known_results); run_gpu<11>(known_results); run_gpu<12>(known_results); run_gpu<15>(known_results); //run_gpu<16>(known_results); //run_gpu<19>(known_results); //run_gpu<20>(known_results); //run_gpu<23>(known_results); //run_gpu<24>(known_results); //run_gpu<27>(known_results); //run_gpu<28>(known_results); return 0; }
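// Illustrative sketch (not in the original file): the device ffsll above is a naive
// loop, and the CUDA intrinsic __ffsll is commented out at its call site. On the
// host, the same 1-based find-first-set can be cross-checked against the GCC/Clang
// builtin:
static int ffsll_host(int64_t x) {
  for (int i = 0; i < 64; i++)
    if ((x >> i) & 1) return i + 1;  // 1-based index of the lowest set bit
  return 0;                          // 0 when no bit is set
}

static void ffsll_selftest() {
  assert(ffsll_host(0) == 0);
  assert(ffsll_host(1) == 1);
  assert(ffsll_host(int64_t(1) << 40) == 41);
  assert(ffsll_host(10) == __builtin_ffsll(10));  // both return 2 (bit 1 is set)
}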
#include "dragon/core/context_cuda.h" #include "dragon/utils/device/common_cub.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { #define LDG(x, i) __ldg(x + i) #define LDG2(x, i) convert::To<AccT>(__ldg(x + i)) template <typename T, typename AccT, StorageOrder kOrder> __global__ void _GroupNorm( const int NxCxS, const int G, const int D, const int S, const T* x, const AccT* mu, const AccT* rsig, const AccT* gamma, const AccT* beta, T* y) { const int C = G * D; CUDA_1D_KERNEL_LOOP(i, NxCxS) { const int ng = kOrder == StorageOrder::NCHW ? i / (D * S) : i / (C * S) * G + (i / D % G); const int c = kOrder == StorageOrder::NCHW ? i / S % C : i % C; y[i] = convert::To<T>( fma((convert::To<AccT>(x[i]) - __ldg(mu + ng)) * __ldg(rsig + ng), __ldg(gamma + c), __ldg(beta + c))); } } template <typename T, typename AccT, StorageOrder kOrder> __global__ void _GroupNormWGrad( const int N, const int G, const int D, const int S, const T* x, const AccT* mu, const AccT* rsig, const T* dy, AccT* dgamma, AccT* dbeta) { const int GxD = G * D; const int NxS = N * S; __shared__ typename BlockReduce<AccT>::TempStorage dg_storage; __shared__ typename BlockReduce<AccT>::TempStorage db_storage; CUDA_2D_KERNEL_LOOP1(i, GxD) { AccT dg_val = AccT(0), db_val = AccT(0); CUDA_2D_KERNEL_LOOP2(j, NxS) { const int n = j / S; const int ng = n * G + i / D; const int idx = kOrder == StorageOrder::NCHW ? (n * GxD + i) * S + j % S : j * GxD + i; dg_val += LDG2(dy, idx) * (LDG2(x, idx) - LDG(mu, ng)) * LDG(rsig, ng); db_val += LDG2(dy, idx); } dg_val = BlockReduce<AccT>(dg_storage).Sum(dg_val); db_val = BlockReduce<AccT>(db_storage).Sum(db_val); if (threadIdx.x == 0) { dgamma[i] = dg_val; dbeta[i] = db_val; } } } template <typename T, typename AccT, StorageOrder kOrder> __global__ void _GroupNormInternalGrad( const int N, const int G, const int D, const int S, const T* x, const AccT* gamma, const T* dy, AccT* ds, AccT* db) { const int NxG = N * G; const int DxS = D * S; __shared__ typename BlockReduce<AccT>::TempStorage ds_storage; __shared__ typename BlockReduce<AccT>::TempStorage db_storage; CUDA_2D_KERNEL_LOOP1(i, NxG) { AccT ds_val = AccT(0), db_val = AccT(0); CUDA_2D_KERNEL_LOOP2(j, DxS) { const int c = i % G * D + j / S; const int idx = kOrder == StorageOrder::NCHW ? i * DxS + j : (i / G * S + j % S) * G * D + c; ds_val += LDG(gamma, c) * LDG2(dy, idx) * LDG2(x, idx); db_val += LDG(gamma, c) * LDG2(dy, idx); } ds_val = BlockReduce<AccT>(ds_storage).Sum(ds_val); db_val = BlockReduce<AccT>(db_storage).Sum(db_val); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; } } } template <typename T, typename AccT, StorageOrder kOrder> __global__ void _GroupNormGrad( const int NxCxS, const int G, const int D, const int S, const T* x, const AccT* mu, const AccT* rsig, const AccT* gamma, const AccT* ds, const AccT* db, const T* dy, T* dx) { const int C = G * D; const AccT denom = AccT(1) / AccT(D * S); CUDA_1D_KERNEL_LOOP(i, NxCxS) { const int ng = kOrder == StorageOrder::NCHW ? i / (D * S) : i / (C * S) * G + (i / D % G); const int c = kOrder == StorageOrder::NCHW ? 
i / S % C : i % C; const AccT u = fma(LDG(db, ng), LDG(mu, ng), -LDG(ds, ng)) * (LDG2(x, i) - LDG(mu, ng)) * math::utils::Cube(LDG(rsig, ng)); const AccT v = LDG(db, ng) * LDG(rsig, ng); dx[i] = convert::To<T>( LDG(gamma, c) * LDG2(dy, i) * LDG(rsig, ng) + (u - v) * denom); } } #undef LDG #undef LDG2 } // namespace /* ------------------- Launcher Separator ------------------- */ #define DISPATCH_GROUPNORM_KERNEL(name, T, AccT, kBlocks, kThreads, ...) \ if (data_format == "NCHW") { \ name<T, AccT, StorageOrder::NCHW> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else if (data_format == "NHWC") { \ name<T, AccT, StorageOrder::NHWC> \ <<<kBlocks, kThreads, 0, ctx->cuda_stream()>>>(__VA_ARGS__); \ } else { \ LOG(FATAL) << "Unknown DataFormat: " << data_format; \ } #define DEFINE_KERNEL_LAUNCHER(T, AccT) \ template <> \ void GroupNorm<T, AccT, CUDAContext>( \ const int N, \ const int G, \ const int D, \ const int S, \ const string& data_format, \ const T* x, \ const AccT* mu, \ const AccT* rsig, \ const AccT* gamma, \ const AccT* beta, \ T* y, \ CUDAContext* ctx) { \ const auto NxCxS = N * G * D * S; \ DISPATCH_GROUPNORM_KERNEL( \ _GroupNorm, \ math::ScalarType<T>::type, \ AccT, \ CUDA_BLOCKS(NxCxS), \ CUDA_THREADS, \ NxCxS, \ G, \ D, \ S, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ mu, \ rsig, \ gamma, \ beta, \ reinterpret_cast<math::ScalarType<T>::type*>(y)); \ } #define DEFINE_GRAD_KERNEL_LAUNCHER(T, AccT) \ template <> \ void GroupNormGrad<T, AccT, CUDAContext>( \ const int N, \ const int G, \ const int D, \ const int S, \ const string& data_format, \ const T* x, \ const AccT* mu, \ const AccT* rsig, \ const AccT* gamma, \ const T* dy, \ AccT* ds, \ AccT* db, \ AccT* dgamma, \ AccT* dbeta, \ T* dx, \ CUDAContext* ctx) { \ const auto NxCxS = N * G * D * S; \ DISPATCH_GROUPNORM_KERNEL( \ _GroupNormWGrad, \ math::ScalarType<T>::type, \ AccT, \ G* D, \ CUDA_THREADS, \ N, \ G, \ D, \ S, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ mu, \ rsig, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ dgamma, \ dbeta); \ DISPATCH_GROUPNORM_KERNEL( \ _GroupNormInternalGrad, \ math::ScalarType<T>::type, \ AccT, \ N* G, \ CUDA_THREADS, \ N, \ G, \ D, \ S, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ gamma, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ ds, \ db); \ DISPATCH_GROUPNORM_KERNEL( \ _GroupNormGrad, \ math::ScalarType<T>::type, \ AccT, \ CUDA_BLOCKS(NxCxS), \ CUDA_THREADS, \ NxCxS, \ G, \ D, \ S, \ reinterpret_cast<const math::ScalarType<T>::type*>(x), \ mu, \ rsig, \ gamma, \ ds, \ db, \ reinterpret_cast<const math::ScalarType<T>::type*>(dy), \ reinterpret_cast<math::ScalarType<T>::type*>(dx)); \ } DEFINE_KERNEL_LAUNCHER(float16, float); DEFINE_KERNEL_LAUNCHER(float, float); DEFINE_KERNEL_LAUNCHER(double, double); DEFINE_GRAD_KERNEL_LAUNCHER(float16, float); DEFINE_GRAD_KERNEL_LAUNCHER(float, float); DEFINE_GRAD_KERNEL_LAUNCHER(double, double); #undef DEFINE_KERNEL_LAUNCHER #undef DEFINE_GRAD_KERNEL_LAUNCHER #undef DISPATCH_GROUPNORM_KERNEL } // namespace kernels } // namespace dragon #endif // USE_CUDA
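// Illustrative sketch (not part of the original source): a host reference of the
// NCHW forward path in _GroupNorm above. mu and rsig hold the per-(n, g) mean and
// reciprocal standard deviation over the D*S elements of each group; gamma and beta
// are per channel. y = (x - mu) * rsig * gamma + beta, matching the kernel's fma().
static void group_norm_nchw_cpu(
    int N, int G, int D, int S, const float* x, const float* mu, const float* rsig,
    const float* gamma, const float* beta, float* y) {
  const int C = G * D;
  for (int i = 0; i < N * C * S; ++i) {
    const int ng = i / (D * S);  // flattened (n, g) index, as in the kernel
    const int c = (i / S) % C;   // channel index
    y[i] = (x[i] - mu[ng]) * rsig[ng] * gamma[c] + beta[c];
  }
}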
#include <algorithm>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/gather.h>

namespace cuba {
namespace gpu {

////////////////////////////////////////////////////////////////////////////////////
// Type alias
////////////////////////////////////////////////////////////////////////////////////

template <int N> using Vecxd = Vec<Scalar, N>;
template <int N> using GpuVecxd = GpuVec<Vecxd<N>>;

using PxPBlockPtr = BlockPtr<Scalar, PDIM, PDIM>;
using LxLBlockPtr = BlockPtr<Scalar, LDIM, LDIM>;
using PxLBlockPtr = BlockPtr<Scalar, PDIM, LDIM>;
using Px1BlockPtr = BlockPtr<Scalar, PDIM, 1>;
using Lx1BlockPtr = BlockPtr<Scalar, LDIM, 1>;

////////////////////////////////////////////////////////////////////////////////////
// Constants
////////////////////////////////////////////////////////////////////////////////////

constexpr int BLOCK_ACTIVE_ERRORS = 512;
constexpr int BLOCK_MAX_DIAGONAL = 512;
constexpr int BLOCK_COMPUTE_SCALE = 512;

__constant__ Scalar c_camera[5];

#define FX() c_camera[0]
#define FY() c_camera[1]
#define CX() c_camera[2]
#define CY() c_camera[3]
#define BF() c_camera[4]

////////////////////////////////////////////////////////////////////////////////////
// Type definitions
////////////////////////////////////////////////////////////////////////////////////

struct LessRowId {
  __device__ bool operator()(const Vec3i& lhs, const Vec3i& rhs) const {
    if (lhs[0] == rhs[0]) return lhs[1] < rhs[1];
    return lhs[0] < rhs[0];
  }
};

struct LessColId {
  __device__ bool operator()(const Vec3i& lhs, const Vec3i& rhs) const {
    if (lhs[1] == rhs[1]) return lhs[0] < rhs[0];
    return lhs[1] < rhs[1];
  }
};

template <typename T, int ROWS, int COLS>
struct MatView {
  __device__ inline T& operator()(int i, int j) { return data[j * ROWS + i]; }
  __device__ inline MatView(T* data) : data(data) {}
  T* data;
};

template <typename T, int ROWS, int COLS>
struct ConstMatView {
  __device__ inline T operator()(int i, int j) const { return data[j * ROWS + i]; }
  __device__ inline ConstMatView(const T* data) : data(data) {}
  const T* data;
};

template <typename T, int ROWS, int COLS>
struct Matx {
  using View = MatView<T, ROWS, COLS>;
  using ConstView = ConstMatView<T, ROWS, COLS>;
  __device__ inline T& operator()(int i, int j) { return data[j * ROWS + i]; }
  __device__ inline T operator()(int i, int j) const { return data[j * ROWS + i]; }
  __device__ inline operator View() { return View(data); }
  __device__ inline operator ConstView() const { return ConstView(data); }
  T data[ROWS * COLS];
};

using MatView2x3d = MatView<Scalar, 2, 3>;
using MatView2x6d = MatView<Scalar, 2, 6>;
using MatView3x3d = MatView<Scalar, 3, 3>;
using MatView3x6d = MatView<Scalar, 3, 6>;
using ConstMatView3x3d = ConstMatView<Scalar, 3, 3>;

////////////////////////////////////////////////////////////////////////////////////
// Host functions
////////////////////////////////////////////////////////////////////////////////////

static int divUp(int total, int grain) { return (total + grain - 1) / grain; }

////////////////////////////////////////////////////////////////////////////////////
// Device functions (template matrix and vector operations)
////////////////////////////////////////////////////////////////////////////////////

// assignment operations
using AssignOP = void(*)(Scalar*, Scalar);
__device__ inline void ASSIGN(Scalar* address, Scalar value) { *address = value; }
__device__ inline void ACCUM(Scalar* address, Scalar value) { *address += value; }
__device__ inline void DEACCUM(Scalar* address, Scalar value) { *address -= value; }
__device__ inline void ACCUM_ATOMIC(Scalar* address, Scalar value) { atomicAdd(address, value); }
__device__ inline void DEACCUM_ATOMIC(Scalar* address, Scalar value) { atomicAdd(address, -value); }

// recursive dot product for inline expansion
template <int N>
__device__ inline Scalar dot_(const Scalar* a, const Scalar* b) {
  return dot_<N - 1>(a, b) + a[N - 1] * b[N - 1];
}

template <>
__device__ inline Scalar dot_<1>(const Scalar* a, const Scalar* b) { return a[0] * b[0]; }

// recursive dot product for inline expansion (strided access pattern)
template <int N, int S1, int S2>
__device__ inline Scalar dot_stride_(const Scalar* a, const Scalar* b) {
  static_assert(S1 == PDIM || S1 == LDIM, "S1 must be PDIM or LDIM");
  static_assert(S2 == 1 || S2 == PDIM, "S2 must be 1 or PDIM");
  return dot_stride_<N - 1, S1, S2>(a, b) + a[S1 * (N - 1)] * b[S2 * (N - 1)];
}

template <>
__device__ inline Scalar dot_stride_<1, PDIM, 1>(const Scalar* a, const Scalar* b) { return a[0] * b[0]; }

template <>
__device__ inline Scalar dot_stride_<1, LDIM, 1>(const Scalar* a, const Scalar* b) { return a[0] * b[0]; }

template <>
__device__ inline Scalar dot_stride_<1, PDIM, PDIM>(const Scalar* a, const Scalar* b) { return a[0] * b[0]; }

// matrix(transposed)-vector product: b = AT*x
template <int M, int N, AssignOP OP = ASSIGN>
__device__ inline void MatTMulVec(const Scalar* A, const Scalar* x, Scalar* b, Scalar omega) {
#pragma unroll
  for (int i = 0; i < M; i++) OP(b + i, omega * dot_<N>(A + i * N, x));
}

// matrix(transposed)-matrix product: C = AT*B
template <int L, int M, int N, AssignOP OP = ASSIGN>
__device__ inline void MatTMulMat(const Scalar* A, const Scalar* B, Scalar* C, Scalar omega) {
#pragma unroll
  for (int i = 0; i < N; i++) MatTMulVec<L, M, OP>(A, B + i * M, C + i * L, omega);
}

// matrix-vector product: b = A*x
template <int M, int N, int S = 1, AssignOP OP = ASSIGN>
__device__ inline void MatMulVec(const Scalar* A, const Scalar* x, Scalar* b) {
#pragma unroll
  for (int i = 0; i < M; i++) OP(b + i, dot_stride_<N, M, S>(A + i, x));
}

// matrix-matrix product: C = A*B
template <int L, int M, int N, AssignOP OP = ASSIGN>
__device__ inline void MatMulMat(const Scalar* A, const Scalar* B, Scalar* C) {
#pragma unroll
  for (int i = 0; i < N; i++) MatMulVec<L, M, 1, OP>(A, B + i * M, C + i * L);
}

// matrix-matrix(transposed) product: C = A*BT
template <int L, int M, int N, AssignOP OP = ASSIGN>
__device__ inline void MatMulMatT(const Scalar* A, const Scalar* B, Scalar* C) {
#pragma unroll
  for (int i = 0; i < N; i++) MatMulVec<L, M, N, OP>(A, B + i, C + i * L);
}

// squared L2 norm
template <int N>
__device__ inline Scalar squaredNorm(const Scalar* x) { return dot_<N>(x, x); }

template <int N>
__device__ inline Scalar squaredNorm(const Vecxd<N>& x) { return squaredNorm<N>(x.data); }

// L2 norm
template <int N>
__device__ inline Scalar norm(const Scalar* x) { return sqrt(squaredNorm<N>(x)); }

template <int N>
__device__ inline Scalar norm(const Vecxd<N>& x) { return norm<N>(x.data); }

////////////////////////////////////////////////////////////////////////////////////
// Device functions
////////////////////////////////////////////////////////////////////////////////////

__device__ static inline void cross(const Vec4d& a, const Vec3d& b, Vec3d& c) {
  c[0] = a[1] * b[2] - a[2] * b[1];
  c[1] = a[2] * b[0] - a[0] * b[2];
  c[2] = a[0] * b[1] - a[1] * b[0];
}

__device__ inline void rotate(const Vec4d& q, const Vec3d&
Xw, Vec3d& Xc) { Vec3d tmp1, tmp2; cross(q, Xw, tmp1); tmp1[0] += tmp1[0]; tmp1[1] += tmp1[1]; tmp1[2] += tmp1[2]; cross(q, tmp1, tmp2); Xc[0] = Xw[0] + q[3] * tmp1[0] + tmp2[0]; Xc[1] = Xw[1] + q[3] * tmp1[1] + tmp2[1]; Xc[2] = Xw[2] + q[3] * tmp1[2] + tmp2[2]; } __device__ inline void projectW2C(const Vec4d& q, const Vec3d& t, const Vec3d& Xw, Vec3d& Xc) { rotate(q, Xw, Xc); Xc[0] += t[0]; Xc[1] += t[1]; Xc[2] += t[2]; } template <int MDIM> __device__ inline void projectC2I(const Vec3d& Xc, Vecxd<MDIM>& p) { } template <> __device__ inline void projectC2I<2>(const Vec3d& Xc, Vec2d& p) { const Scalar invZ = 1 / Xc[2]; p[0] = FX() * invZ * Xc[0] + CX(); p[1] = FY() * invZ * Xc[1] + CY(); } template <> __device__ inline void projectC2I<3>(const Vec3d& Xc, Vec3d& p) { const Scalar invZ = 1 / Xc[2]; p[0] = FX() * invZ * Xc[0] + CX(); p[1] = FY() * invZ * Xc[1] + CY(); p[2] = p[0] - BF() * invZ; } __device__ inline void quaternionToRotationMatrix(const Vec4d& q, MatView3x3d R) { const Scalar x = q[0]; const Scalar y = q[1]; const Scalar z = q[2]; const Scalar w = q[3]; const Scalar tx = 2 * x; const Scalar ty = 2 * y; const Scalar tz = 2 * z; const Scalar twx = tx * w; const Scalar twy = ty * w; const Scalar twz = tz * w; const Scalar txx = tx * x; const Scalar txy = ty * x; const Scalar txz = tz * x; const Scalar tyy = ty * y; const Scalar tyz = tz * y; const Scalar tzz = tz * z; R(0, 0) = 1 - (tyy + tzz); R(0, 1) = txy - twz; R(0, 2) = txz + twy; R(1, 0) = txy + twz; R(1, 1) = 1 - (txx + tzz); R(1, 2) = tyz - twx; R(2, 0) = txz - twy; R(2, 1) = tyz + twx; R(2, 2) = 1 - (txx + tyy); } template <int MDIM> __device__ void computeJacobians(const Vec3d& Xc, const Vec4d& q, MatView<Scalar, MDIM, PDIM> JP, MatView<Scalar, MDIM, LDIM> JL) { } template <> __device__ void computeJacobians<2>(const Vec3d& Xc, const Vec4d& q, MatView2x6d JP, MatView2x3d JL) { const Scalar X = Xc[0]; const Scalar Y = Xc[1]; const Scalar Z = Xc[2]; const Scalar invZ = 1 / Z; const Scalar x = invZ * X; const Scalar y = invZ * Y; const Scalar fu = FX(); const Scalar fv = FY(); const Scalar fu_invZ = fu * invZ; const Scalar fv_invZ = fv * invZ; Matx<Scalar, 3, 3> R; quaternionToRotationMatrix(q, R); JL(0, 0) = -fu_invZ * (R(0, 0) - x * R(2, 0)); JL(0, 1) = -fu_invZ * (R(0, 1) - x * R(2, 1)); JL(0, 2) = -fu_invZ * (R(0, 2) - x * R(2, 2)); JL(1, 0) = -fv_invZ * (R(1, 0) - y * R(2, 0)); JL(1, 1) = -fv_invZ * (R(1, 1) - y * R(2, 1)); JL(1, 2) = -fv_invZ * (R(1, 2) - y * R(2, 2)); JP(0, 0) = +fu * x * y; JP(0, 1) = -fu * (1 + x * x); JP(0, 2) = +fu * y; JP(0, 3) = -fu_invZ; JP(0, 4) = 0; JP(0, 5) = +fu_invZ * x; JP(1, 0) = +fv * (1 + y * y); JP(1, 1) = -fv * x * y; JP(1, 2) = -fv * x; JP(1, 3) = 0; JP(1, 4) = -fv_invZ; JP(1, 5) = +fv_invZ * y; } template <> __device__ void computeJacobians<3>(const Vec3d& Xc, const Vec4d& q, MatView3x6d JP, MatView3x3d JL) { const Scalar X = Xc[0]; const Scalar Y = Xc[1]; const Scalar Z = Xc[2]; const Scalar invZ = 1 / Z; const Scalar invZZ = invZ * invZ; const Scalar fu = FX(); const Scalar fv = FY(); const Scalar bf = BF(); Matx<Scalar, 3, 3> R; quaternionToRotationMatrix(q, R); JL(0, 0) = -fu * R(0, 0) * invZ + fu * X * R(2, 0) * invZZ; JL(0, 1) = -fu * R(0, 1) * invZ + fu * X * R(2, 1) * invZZ; JL(0, 2) = -fu * R(0, 2) * invZ + fu * X * R(2, 2) * invZZ; JL(1, 0) = -fv * R(1, 0) * invZ + fv * Y * R(2, 0) * invZZ; JL(1, 1) = -fv * R(1, 1) * invZ + fv * Y * R(2, 1) * invZZ; JL(1, 2) = -fv * R(1, 2) * invZ + fv * Y * R(2, 2) * invZZ; JL(2, 0) = JL(0, 0) - bf * R(2, 0) * invZZ; JL(2, 1) = JL(0, 1) 
- bf * R(2, 1) * invZZ; JL(2, 2) = JL(0, 2) - bf * R(2, 2) * invZZ; JP(0, 0) = X * Y * invZZ * fu; JP(0, 1) = -(1 + (X * X * invZZ)) * fu; JP(0, 2) = Y * invZ * fu; JP(0, 3) = -1 * invZ * fu; JP(0, 4) = 0; JP(0, 5) = X * invZZ * fu; JP(1, 0) = (1 + Y * Y * invZZ) * fv; JP(1, 1) = -X * Y * invZZ * fv; JP(1, 2) = -X * invZ * fv; JP(1, 3) = 0; JP(1, 4) = -1 * invZ * fv; JP(1, 5) = Y * invZZ * fv; JP(2, 0) = JP(0, 0) - bf * Y * invZZ; JP(2, 1) = JP(0, 1) + bf * X * invZZ; JP(2, 2) = JP(0, 2); JP(2, 3) = JP(0, 3); JP(2, 4) = 0; JP(2, 5) = JP(0, 5) - bf * invZZ; } __device__ inline void Sym3x3Inv(ConstMatView3x3d A, MatView3x3d B) { const Scalar A00 = A(0, 0); const Scalar A01 = A(0, 1); const Scalar A11 = A(1, 1); const Scalar A02 = A(2, 0); const Scalar A12 = A(1, 2); const Scalar A22 = A(2, 2); const Scalar det = A00 * A11 * A22 + A01 * A12 * A02 + A02 * A01 * A12 - A00 * A12 * A12 - A02 * A11 * A02 - A01 * A01 * A22; const Scalar invDet = 1 / det; const Scalar B00 = invDet * (A11 * A22 - A12 * A12); const Scalar B01 = invDet * (A02 * A12 - A01 * A22); const Scalar B11 = invDet * (A00 * A22 - A02 * A02); const Scalar B02 = invDet * (A01 * A12 - A02 * A11); const Scalar B12 = invDet * (A02 * A01 - A00 * A12); const Scalar B22 = invDet * (A00 * A11 - A01 * A01); B(0, 0) = B00; B(0, 1) = B01; B(0, 2) = B02; B(1, 0) = B01; B(1, 1) = B11; B(1, 2) = B12; B(2, 0) = B02; B(2, 1) = B12; B(2, 2) = B22; } __device__ inline void skew1(Scalar x, Scalar y, Scalar z, MatView3x3d M) { M(0, 0) = +0; M(0, 1) = -z; M(0, 2) = +y; M(1, 0) = +z; M(1, 1) = +0; M(1, 2) = -x; M(2, 0) = -y; M(2, 1) = +x; M(2, 2) = +0; } __device__ inline void skew2(Scalar x, Scalar y, Scalar z, MatView3x3d M) { const Scalar xx = x * x; const Scalar yy = y * y; const Scalar zz = z * z; const Scalar xy = x * y; const Scalar yz = y * z; const Scalar zx = z * x; M(0, 0) = -yy - zz; M(0, 1) = +xy; M(0, 2) = +zx; M(1, 0) = +xy; M(1, 1) = -zz - xx; M(1, 2) = +yz; M(2, 0) = +zx; M(2, 1) = +yz; M(2, 2) = -xx - yy; } __device__ inline void addOmega(Scalar a1, ConstMatView3x3d O1, Scalar a2, ConstMatView3x3d O2, MatView3x3d R) { R(0, 0) = 1 + a1 * O1(0, 0) + a2 * O2(0, 0); R(1, 0) = 0 + a1 * O1(1, 0) + a2 * O2(1, 0); R(2, 0) = 0 + a1 * O1(2, 0) + a2 * O2(2, 0); R(0, 1) = 0 + a1 * O1(0, 1) + a2 * O2(0, 1); R(1, 1) = 1 + a1 * O1(1, 1) + a2 * O2(1, 1); R(2, 1) = 0 + a1 * O1(2, 1) + a2 * O2(2, 1); R(0, 2) = 0 + a1 * O1(0, 2) + a2 * O2(0, 2); R(1, 2) = 0 + a1 * O1(1, 2) + a2 * O2(1, 2); R(2, 2) = 1 + a1 * O1(2, 2) + a2 * O2(2, 2); } __device__ inline void rotationMatrixToQuaternion(ConstMatView3x3d R, Vec4d& q) { Scalar t = R(0, 0) + R(1, 1) + R(2, 2); if (t > 0) { t = sqrt(t + 1); q[3] = Scalar(0.5) * t; t = Scalar(0.5) / t; q[0] = (R(2, 1) - R(1, 2)) * t; q[1] = (R(0, 2) - R(2, 0)) * t; q[2] = (R(1, 0) - R(0, 1)) * t; } else { int i = 0; if (R(1, 1) > R(0, 0)) i = 1; if (R(2, 2) > R(i, i)) i = 2; int j = (i + 1) % 3; int k = (j + 1) % 3; t = sqrt(R(i, i) - R(j, j) - R(k, k) + 1); q[i] = Scalar(0.5) * t; t = Scalar(0.5) / t; q[3] = (R(k, j) - R(j, k)) * t; q[j] = (R(j, i) + R(i, j)) * t; q[k] = (R(k, i) + R(i, k)) * t; } } __device__ inline void multiplyQuaternion(const Vec4d& a, const Vec4d& b, Vec4d& c) { c[3] = a[3] * b[3] - a[0] * b[0] - a[1] * b[1] - a[2] * b[2]; c[0] = a[3] * b[0] + a[0] * b[3] + a[1] * b[2] - a[2] * b[1]; c[1] = a[3] * b[1] + a[1] * b[3] + a[2] * b[0] - a[0] * b[2]; c[2] = a[3] * b[2] + a[2] * b[3] + a[0] * b[1] - a[1] * b[0]; } __device__ inline void normalizeQuaternion(const Vec4d& a, Vec4d& b) { Scalar invn = 1 / norm(a); 
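// Flipping the sign of invn when the scalar part a[3] is negative maps q and
// -q (which represent the same rotation) to one canonical form with w >= 0.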
if (a[3] < 0) invn = -invn; for (int i = 0; i < 4; i++) b[i] = invn * a[i]; } __device__ inline void updateExp(const Scalar* update, Vec4d& q, Vec3d& t) { Vec3d omega(update); Vec3d upsilon(update + 3); const Scalar theta = norm(omega); Matx<Scalar, 3, 3> O1, O2; skew1(omega[0], omega[1], omega[2], O1); skew2(omega[0], omega[1], omega[2], O2); Scalar R[9], V[9]; if (theta < Scalar(0.00001)) { addOmega(Scalar(1.0), O1, Scalar(0.5), O2, R); addOmega(Scalar(0.5), O1, Scalar(1)/6, O2, V); } else { const Scalar a1 = sin(theta) / theta; const Scalar a2 = (1 - cos(theta)) / (theta * theta); const Scalar a3 = (theta - sin(theta)) / (pow(theta, 3)); addOmega(a1, O1, a2, O2, R); addOmega(a2, O1, a3, O2, V); } rotationMatrixToQuaternion(R, q); MatMulVec<3, 3>(V, upsilon.data, t.data); } __device__ inline void updatePose(const Vec4d& q1, const Vec3d& t1, Vec4d& q2, Vec3d& t2) { Vec3d u; rotate(q1, t2, u); for (int i = 0; i < 3; i++) t2[i] = t1[i] + u[i]; Vec4d r; multiplyQuaternion(q1, q2, r); normalizeQuaternion(r, q2); } template <int N> __device__ inline void copy(const Scalar* src, Scalar* dst) { for (int i = 0; i < N; i++) dst[i] = src[i]; } __device__ inline Vec3i makeVec3i(int i, int j, int k) { Vec3i vec; vec[0] = i; vec[1] = j; vec[2] = k; return vec; } //////////////////////////////////////////////////////////////////////////////////// // Kernel functions //////////////////////////////////////////////////////////////////////////////////// template <int MDIM> __global__ void computeActiveErrorsKernel(int nedges, const Vec4d* qs, const Vec3d* ts, const Vec3d* Xws, const Vecxd<MDIM>* measurements, const Scalar* omegas, const Vec2i* edge2PL, Vecxd<MDIM>* errors, Vec3d* Xcs, Scalar* chi) { using Vecmd = Vecxd<MDIM>; const int sharedIdx = threadIdx.x; __shared__ Scalar cache[BLOCK_ACTIVE_ERRORS]; Scalar sumchi = 0; for (int iE = blockIdx.x * blockDim.x + threadIdx.x; iE < nedges; iE += gridDim.x * blockDim.x) { const Vec2i index = edge2PL[iE]; const int iP = index[0]; const int iL = index[1]; const Vec4d& q = qs[iP]; const Vec3d& t = ts[iP]; const Vec3d& Xw = Xws[iL]; const Vecmd& measurement = measurements[iE]; // project world to camera Vec3d Xc; projectW2C(q, t, Xw, Xc); // project camera to image Vecmd proj; projectC2I(Xc, proj); // compute residual Vecmd error; for (int i = 0; i < MDIM; i++) error[i] = proj[i] - measurement[i]; errors[iE] = error; Xcs[iE] = Xc; sumchi += omegas[iE] * squaredNorm(error); } cache[sharedIdx] = sumchi; __syncthreads(); for (int stride = BLOCK_ACTIVE_ERRORS / 2; stride > 0; stride >>= 1) { if (sharedIdx < stride) cache[sharedIdx] += cache[sharedIdx + stride]; __syncthreads(); } if (sharedIdx == 0) atomicAdd(chi, cache[0]); } template <int MDIM> __global__ void constructQuadraticFormKernel(int nedges, const Vec3d* Xcs, const Vec4d* qs, const Vecxd<MDIM>* errors, const Scalar* omegas, const Vec2i* edge2PL, const int* edge2Hpl, const uint8_t* flags, PxPBlockPtr Hpp, Px1BlockPtr bp, LxLBlockPtr Hll, Lx1BlockPtr bl, PxLBlockPtr Hpl) { using Vecmd = Vecxd<MDIM>; const int iE = blockIdx.x * blockDim.x + threadIdx.x; if (iE >= nedges) return; const Scalar omega = omegas[iE]; const int iP = edge2PL[iE][0]; const int iL = edge2PL[iE][1]; const int iPL = edge2Hpl[iE]; const int flag = flags[iE]; const Vec4d& q = qs[iP]; const Vec3d& Xc = Xcs[iE]; const Vecmd& error = errors[iE]; // compute Jacobians Scalar JP[MDIM * PDIM]; Scalar JL[MDIM * LDIM]; computeJacobians<MDIM>(Xc, q, JP, JL); if (!(flag & EDGE_FLAG_FIXED_P)) { // Hpp += = JPT*Ω*JP MatTMulMat<PDIM, MDIM, PDIM, 
ACCUM_ATOMIC>(JP, JP, Hpp.at(iP), omega); // bp += = JPT*Ω*r MatTMulVec<PDIM, MDIM, ACCUM_ATOMIC>(JP, error.data, bp.at(iP), omega); } if (!(flag & EDGE_FLAG_FIXED_L)) { // Hll += = JLT*Ω*JL MatTMulMat<LDIM, MDIM, LDIM, ACCUM_ATOMIC>(JL, JL, Hll.at(iL), omega); // bl += = JLT*Ω*r MatTMulVec<LDIM, MDIM, ACCUM_ATOMIC>(JL, error.data, bl.at(iL), omega); } if (!flag) { // Hpl += = JPT*Ω*JL MatTMulMat<PDIM, MDIM, LDIM, ASSIGN>(JP, JL, Hpl.at(iPL), omega); } } template <int DIM> __global__ void maxDiagonalKernel(int size, const Scalar* D, Scalar* maxD) { const int sharedIdx = threadIdx.x; __shared__ Scalar cache[BLOCK_MAX_DIAGONAL]; Scalar maxVal = 0; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const int j = i / DIM; const int k = i % DIM; const Scalar* ptrBlock = D + j * DIM * DIM; maxVal = max(maxVal, ptrBlock[k * DIM + k]); } cache[sharedIdx] = maxVal; __syncthreads(); for (int stride = BLOCK_MAX_DIAGONAL / 2; stride > 0; stride >>= 1) { if (sharedIdx < stride) cache[sharedIdx] = max(cache[sharedIdx], cache[sharedIdx + stride]); __syncthreads(); } if (sharedIdx == 0) maxD[blockIdx.x] = cache[0]; } template <int DIM> __global__ void addLambdaKernel(int size, Scalar* D, Scalar lambda, Scalar* backup) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= size) return; const int j = i / DIM; const int k = i % DIM; Scalar* ptrBlock = D + j * DIM * DIM; backup[i] = ptrBlock[k * DIM + k]; ptrBlock[k * DIM + k] += lambda; } template <int DIM> __global__ void restoreDiagonalKernel(int size, Scalar* D, const Scalar* backup) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= size) return; const int j = i / DIM; const int k = i % DIM; Scalar* ptrBlock = D + j * DIM * DIM; ptrBlock[k * DIM + k] = backup[i]; } __global__ void computeBschureKernel(int cols, LxLBlockPtr Hll, LxLBlockPtr invHll, Lx1BlockPtr bl, PxLBlockPtr Hpl, const int* HplColPtr, const int* HplRowInd, Px1BlockPtr bsc, PxLBlockPtr Hpl_invHll) { const int colId = blockIdx.x * blockDim.x + threadIdx.x; if (colId >= cols) return; Scalar iHll[LDIM * LDIM]; Scalar Hpl_iHll[PDIM * LDIM]; Sym3x3Inv(Hll.at(colId), iHll); copy<LDIM * LDIM>(iHll, invHll.at(colId)); for (int i = HplColPtr[colId]; i < HplColPtr[colId + 1]; i++) { MatMulMat<6, 3, 3>(Hpl.at(i), iHll, Hpl_iHll); MatMulVec<6, 3, 1, DEACCUM_ATOMIC>(Hpl_iHll, bl.at(colId), bsc.at(HplRowInd[i])); copy<PDIM * LDIM>(Hpl_iHll, Hpl_invHll.at(i)); } } __global__ void initializeHschurKernel(int rows, PxPBlockPtr Hpp, PxPBlockPtr Hsc, const int* HscRowPtr) { const int rowId = blockIdx.x * blockDim.x + threadIdx.x; if (rowId >= rows) return; copy<PDIM * PDIM>(Hpp.at(rowId), Hsc.at(HscRowPtr[rowId])); } __global__ void computeHschureKernel(int size, const Vec3i* mulBlockIds, PxLBlockPtr Hpl_invHll, PxLBlockPtr Hpl, PxPBlockPtr Hschur) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= size) return; const Vec3i index = mulBlockIds[tid]; Scalar A[PDIM * LDIM]; Scalar B[PDIM * LDIM]; copy<PDIM * LDIM>(Hpl_invHll.at(index[0]), A); copy<PDIM * LDIM>(Hpl.at(index[1]), B); MatMulMatT<6, 3, 6, DEACCUM_ATOMIC>(A, B, Hschur.at(index[2])); } __global__ void findHschureMulBlockIndicesKernel(int cols, const int* HplColPtr, const int* HplRowInd, const int* HscRowPtr, const int* HscColInd, Vec3i* mulBlockIds, int* nindices) { const int colId = blockIdx.x * blockDim.x + threadIdx.x; if (colId >= cols) return; const int i0 = HplColPtr[colId]; const int i1 = HplColPtr[colId + 1]; for (int i = i0; i < i1; i++) { const int iP1 = 
HplRowInd[i]; int k = HscRowPtr[iP1]; for (int j = i; j < i1; j++) { const int iP2 = HplRowInd[j]; while (HscColInd[k] < iP2) k++; const int pos = atomicAdd(nindices, 1); mulBlockIds[pos] = makeVec3i(i, j, k); } } } __global__ void permuteNnzPerRowKernel(int size, const int* srcRowPtr, const int* P, int* nnzPerRow) { const int rowId = blockIdx.x * blockDim.x + threadIdx.x; if (rowId >= size) return; nnzPerRow[P[rowId]] = srcRowPtr[rowId + 1] - srcRowPtr[rowId]; } __global__ void permuteColIndKernel(int size, const int* srcRowPtr, const int* srcColInd, const int* P, int* dstColInd, int* dstMap, int* nnzPerRow) { const int rowId = blockIdx.x * blockDim.x + threadIdx.x; if (rowId >= size) return; const int i0 = srcRowPtr[rowId]; const int i1 = srcRowPtr[rowId + 1]; const int permRowId = P[rowId]; for (int srck = i0; srck < i1; srck++) { const int dstk = nnzPerRow[permRowId]++; dstColInd[dstk] = P[srcColInd[srck]]; dstMap[dstk] = srck; } } __global__ void schurComplementPostKernel(int cols, LxLBlockPtr invHll, Lx1BlockPtr bl, PxLBlockPtr Hpl, const int* HplColPtr, const int* HplRowInd, Px1BlockPtr xp, Lx1BlockPtr xl) { const int colId = blockIdx.x * blockDim.x + threadIdx.x; if (colId >= cols) return; Scalar cl[LDIM]; copy<LDIM>(bl.at(colId), cl); for (int i = HplColPtr[colId]; i < HplColPtr[colId + 1]; i++) MatTMulVec<3, 6, DEACCUM>(Hpl.at(i), xp.at(HplRowInd[i]), cl, 1); MatMulVec<3, 3>(invHll.at(colId), cl, xl.at(colId)); } __global__ void updatePosesKernel(int size, Px1BlockPtr xp, Vec4d* qs, Vec3d* ts) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= size) return; Vec4d expq; Vec3d expt; updateExp(xp.at(i), expq, expt); updatePose(expq, expt, qs[i], ts[i]); } __global__ void updateLandmarksKernel(int size, Lx1BlockPtr xl, Vec3d* Xws) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= size) return; const Scalar* dXw = xl.at(i); Vec3d& Xw = Xws[i]; Xw[0] += dXw[0]; Xw[1] += dXw[1]; Xw[2] += dXw[2]; } __global__ void computeScaleKernel(const Scalar* x, const Scalar* b, Scalar* scale, Scalar lambda, int size) { const int sharedIdx = threadIdx.x; __shared__ Scalar cache[BLOCK_COMPUTE_SCALE]; Scalar sum = 0; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) sum += x[i] * (lambda * x[i] + b[i]); cache[sharedIdx] = sum; __syncthreads(); for (int stride = BLOCK_COMPUTE_SCALE / 2; stride > 0; stride >>= 1) { if (sharedIdx < stride) cache[sharedIdx] += cache[sharedIdx + stride]; __syncthreads(); } if (sharedIdx == 0) atomicAdd(scale, cache[0]); } __global__ void convertBSRToCSRKernel(int size, const Scalar* src, Scalar* dst, const int* map) { const int dstk = blockIdx.x * blockDim.x + threadIdx.x; if (dstk >= size) return; dst[dstk] = src[map[dstk]]; } __global__ void nnzPerColKernel(const Vec3i* blockpos, int nblocks, int* nnzPerCol) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= nblocks) return; const int colId = blockpos[i][1]; atomicAdd(&nnzPerCol[colId], 1); } __global__ void setRowIndKernel(const Vec3i* blockpos, int nblocks, int* rowInd, int* indexPL) { const int k = blockIdx.x * blockDim.x + threadIdx.x; if (k >= nblocks) return; const int rowId = blockpos[k][0]; const int edgeId = blockpos[k][2]; rowInd[k] = rowId; indexPL[edgeId] = k; } //////////////////////////////////////////////////////////////////////////////////// // Public functions //////////////////////////////////////////////////////////////////////////////////// void waitForKernelCompletion() { CUDA_CHECK(cudaDeviceSynchronize()); } void 
setCameraParameters(const Scalar* camera) { CUDA_CHECK(cudaMemcpyToSymbol(c_camera, camera, sizeof(Scalar) * 5)); } void exclusiveScan(const int* src, int* dst, int size) { auto ptrSrc = thrust::device_pointer_cast(src); auto ptrDst = thrust::device_pointer_cast(dst); thrust::exclusive_scan(ptrSrc, ptrSrc + size, ptrDst); } void buildHplStructure(GpuVec3i& blockpos, GpuHplBlockMat& Hpl, GpuVec1i& indexPL, GpuVec1i& nnzPerCol) { const int nblocks = Hpl.nnz(); const int block = 1024; const int grid = divUp(nblocks, block); int* colPtr = Hpl.outerIndices(); int* rowInd = Hpl.innerIndices(); auto ptrBlockPos = thrust::device_pointer_cast(blockpos.data()); thrust::sort(ptrBlockPos, ptrBlockPos + nblocks, LessColId()); CUDA_CHECK(cudaMemset(nnzPerCol, 0, sizeof(int) * (Hpl.cols() + 1))); nnzPerColKernel<<<grid, block>>>(blockpos, nblocks, nnzPerCol); exclusiveScan(nnzPerCol, colPtr, Hpl.cols() + 1); setRowIndKernel<<<grid, block>>>(blockpos, nblocks, rowInd, indexPL); } void findHschureMulBlockIndices(const GpuHplBlockMat& Hpl, const GpuHscBlockMat& Hsc, GpuVec3i& mulBlockIds) { const int block = 1024; const int grid = divUp(Hpl.cols(), block); DeviceBuffer<int> nindices(1); nindices.fillZero(); findHschureMulBlockIndicesKernel<<<grid, block>>>(Hpl.cols(), Hpl.outerIndices(), Hpl.innerIndices(), Hsc.outerIndices(), Hsc.innerIndices(), mulBlockIds, nindices); CUDA_CHECK(cudaGetLastError()); auto ptrSrc = thrust::device_pointer_cast(mulBlockIds.data()); thrust::sort(ptrSrc, ptrSrc + mulBlockIds.size(), LessRowId()); } template <int M> Scalar computeActiveErrors_(const GpuVec4d& qs, const GpuVec3d& ts, const GpuVec3d& Xws, const GpuVecxd<M>& measurements, const GpuVec1d& omegas, const GpuVec2i& edge2PL, GpuVecxd<M>& errors, GpuVec3d& Xcs, Scalar* chi) { const int nedges = measurements.ssize(); const int block = BLOCK_ACTIVE_ERRORS; const int grid = 16; if (nedges <= 0) return 0; CUDA_CHECK(cudaMemset(chi, 0, sizeof(Scalar))); computeActiveErrorsKernel<M><<<grid, block>>>(nedges, qs, ts, Xws, measurements, omegas, edge2PL, errors, Xcs, chi); CUDA_CHECK(cudaGetLastError()); Scalar h_chi = 0; CUDA_CHECK(cudaMemcpy(&h_chi, chi, sizeof(Scalar), cudaMemcpyDeviceToHost)); return h_chi; } Scalar computeActiveErrors(const GpuVec4d& qs, const GpuVec3d& ts, const GpuVec3d& Xws, const GpuVec2d& measurements, const GpuVec1d& omegas, const GpuVec2i& edge2PL, GpuVec2d& errors, GpuVec3d& Xcs, Scalar* chi) { return computeActiveErrors_(qs, ts, Xws, measurements, omegas, edge2PL, errors, Xcs, chi); } Scalar computeActiveErrors(const GpuVec4d& qs, const GpuVec3d& ts, const GpuVec3d& Xws, const GpuVec3d& measurements, const GpuVec1d& omegas, const GpuVec2i& edge2PL, GpuVec3d& errors, GpuVec3d& Xcs, Scalar* chi) { return computeActiveErrors_(qs, ts, Xws, measurements, omegas, edge2PL, errors, Xcs, chi); } template <int M> void constructQuadraticForm_(const GpuVec3d& Xcs, const GpuVec4d& qs, const GpuVecxd<M>& errors, const GpuVec1d& omegas, const GpuVec2i& edge2PL, const GpuVec1i& edge2Hpl, const GpuVec1b& flags, GpuPxPBlockVec& Hpp, GpuPx1BlockVec& bp, GpuLxLBlockVec& Hll, GpuLx1BlockVec& bl, GpuHplBlockMat& Hpl) { const int nedges = errors.ssize(); const int block = 512; const int grid = divUp(nedges, block); if (nedges <= 0) return; constructQuadraticFormKernel<M><<<grid, block>>>(nedges, Xcs, qs, errors, omegas, edge2PL, edge2Hpl, flags, Hpp, bp, Hll, bl, Hpl); CUDA_CHECK(cudaGetLastError()); } void constructQuadraticForm(const GpuVec3d& Xcs, const GpuVec4d& qs, const GpuVec2d& errors, const GpuVec1d& omegas, const 
GpuVec2i& edge2PL, const GpuVec1i& edge2Hpl, const GpuVec1b& flags, GpuPxPBlockVec& Hpp, GpuPx1BlockVec& bp, GpuLxLBlockVec& Hll, GpuLx1BlockVec& bl, GpuHplBlockMat& Hpl) { constructQuadraticForm_(Xcs, qs, errors, omegas, edge2PL, edge2Hpl, flags, Hpp, bp, Hll, bl, Hpl); } void constructQuadraticForm(const GpuVec3d& Xcs, const GpuVec4d& qs, const GpuVec3d& errors, const GpuVec1d& omegas, const GpuVec2i& edge2PL, const GpuVec1i& edge2Hpl, const GpuVec1b& flags, GpuPxPBlockVec& Hpp, GpuPx1BlockVec& bp, GpuLxLBlockVec& Hll, GpuLx1BlockVec& bl, GpuHplBlockMat& Hpl) { constructQuadraticForm_(Xcs, qs, errors, omegas, edge2PL, edge2Hpl, flags, Hpp, bp, Hll, bl, Hpl); } template <typename T, int DIM> Scalar maxDiagonal_(const DeviceBlockVector<T, DIM, DIM>& D, Scalar* maxD) { constexpr int block = BLOCK_MAX_DIAGONAL; constexpr int grid = 4; const int size = D.size() * DIM; maxDiagonalKernel<DIM><<<grid, block>>>(size, D.values(), maxD); CUDA_CHECK(cudaGetLastError()); Scalar tmpMax[grid]; CUDA_CHECK(cudaMemcpy(tmpMax, maxD, sizeof(Scalar) * grid, cudaMemcpyDeviceToHost)); Scalar maxv = 0; for (int i = 0; i < grid; i++) maxv = std::max(maxv, tmpMax[i]); return maxv; } Scalar maxDiagonal(const GpuPxPBlockVec& Hpp, Scalar* maxD) { return maxDiagonal_(Hpp, maxD); } Scalar maxDiagonal(const GpuLxLBlockVec& Hll, Scalar* maxD) { return maxDiagonal_(Hll, maxD); } template <typename T, int DIM> void addLambda_(DeviceBlockVector<T, DIM, DIM>& D, Scalar lambda, DeviceBlockVector<T, DIM, 1>& backup) { const int size = D.size() * DIM; const int block = 1024; const int grid = divUp(size, block); addLambdaKernel<DIM><<<grid, block>>>(size, D.values(), lambda, backup.values()); CUDA_CHECK(cudaGetLastError()); } void addLambda(GpuPxPBlockVec& Hpp, Scalar lambda, GpuPx1BlockVec& backup) { addLambda_(Hpp, lambda, backup); } void addLambda(GpuLxLBlockVec& Hll, Scalar lambda, GpuLx1BlockVec& backup) { addLambda_(Hll, lambda, backup); } template <typename T, int DIM> void restoreDiagonal_(DeviceBlockVector<T, DIM, DIM>& D, const DeviceBlockVector<T, DIM, 1>& backup) { const int size = D.size() * DIM; const int block = 1024; const int grid = divUp(size, block); restoreDiagonalKernel<DIM><<<grid, block>>>(size, D.values(), backup.values()); CUDA_CHECK(cudaGetLastError()); } void restoreDiagonal(GpuPxPBlockVec& Hpp, const GpuPx1BlockVec& backup) { restoreDiagonal_(Hpp, backup); } void restoreDiagonal(GpuLxLBlockVec& Hll, const GpuLx1BlockVec& backup) { restoreDiagonal_(Hll, backup); } void computeBschure(const GpuPx1BlockVec& bp, const GpuHplBlockMat& Hpl, const GpuLxLBlockVec& Hll, const GpuLx1BlockVec& bl, GpuPx1BlockVec& bsc, GpuLxLBlockVec& invHll, GpuPxLBlockVec& Hpl_invHll) { const int cols = Hll.size(); const int block = 256; const int grid = divUp(cols, block); bp.copyTo(bsc); computeBschureKernel<<<grid, block>>>(cols, Hll, invHll, bl, Hpl, Hpl.outerIndices(), Hpl.innerIndices(), bsc, Hpl_invHll); CUDA_CHECK(cudaGetLastError()); } void computeHschure(const GpuPxPBlockVec& Hpp, const GpuPxLBlockVec& Hpl_invHll, const GpuHplBlockMat& Hpl, const GpuVec3i& mulBlockIds, GpuHscBlockMat& Hsc) { const int nmulBlocks = mulBlockIds.ssize(); const int block = 256; const int grid1 = divUp(Hsc.rows(), block); const int grid2 = divUp(nmulBlocks, block); Hsc.fillZero(); initializeHschurKernel<<<grid1, block>>>(Hsc.rows(), Hpp, Hsc, Hsc.outerIndices()); computeHschureKernel<<<grid2, block>>>(nmulBlocks, mulBlockIds, Hpl_invHll, Hpl, Hsc); CUDA_CHECK(cudaGetLastError()); } void convertHschureBSRToCSR(const GpuHscBlockMat& HscBSR, 
const GpuVec1i& BSR2CSR, GpuVec1d& HscCSR) { const int size = HscCSR.ssize(); const int block = 1024; const int grid = divUp(size, block); convertBSRToCSRKernel<<<grid, block>>>(size, HscBSR.values(), HscCSR, BSR2CSR); } void twistCSR(int size, int nnz, const int* srcRowPtr, const int* srcColInd, const int* P, int* dstRowPtr, int* dstColInd, int* dstMap, int* nnzPerRow) { const int block = 512; const int grid = divUp(size, block); permuteNnzPerRowKernel<<<grid, block>>>(size, srcRowPtr, P, nnzPerRow); exclusiveScan(nnzPerRow, dstRowPtr, size + 1); CUDA_CHECK(cudaMemcpy(nnzPerRow, dstRowPtr, sizeof(int) * (size + 1), cudaMemcpyDeviceToDevice)); permuteColIndKernel<<<grid, block>>>(size, srcRowPtr, srcColInd, P, dstColInd, dstMap, nnzPerRow); } void permute(int size, const Scalar* src, Scalar* dst, const int* P) { auto ptrSrc = thrust::device_pointer_cast(src); auto ptrDst = thrust::device_pointer_cast(dst); auto ptrMap = thrust::device_pointer_cast(P); thrust::gather(ptrMap, ptrMap + size, ptrSrc, ptrDst); } void schurComplementPost(const GpuLxLBlockVec& invHll, const GpuLx1BlockVec& bl, const GpuHplBlockMat& Hpl, const GpuPx1BlockVec& xp, GpuLx1BlockVec& xl) { const int block = 1024; const int grid = divUp(Hpl.cols(), block); schurComplementPostKernel<<<grid, block>>>(Hpl.cols(), invHll, bl, Hpl, Hpl.outerIndices(), Hpl.innerIndices(),xp, xl); CUDA_CHECK(cudaGetLastError()); } void updatePoses(const GpuPx1BlockVec& xp, GpuVec4d& qs, GpuVec3d& ts) { const int block = 256; const int grid = divUp(xp.size(), block); updatePosesKernel<<<grid, block>>>(xp.size(), xp, qs, ts); CUDA_CHECK(cudaGetLastError()); } void updateLandmarks(const GpuLx1BlockVec& xl, GpuVec3d& Xws) { const int block = 1024; const int grid = divUp(xl.size(), block); updateLandmarksKernel<<<grid, block>>>(xl.size(), xl, Xws); CUDA_CHECK(cudaGetLastError()); } void computeScale(const GpuVec1d& x, const GpuVec1d& b, Scalar* scale, Scalar lambda) { const int block = BLOCK_COMPUTE_SCALE; const int grid = 4; CUDA_CHECK(cudaMemset(scale, 0, sizeof(Scalar))); computeScaleKernel<<<grid, block>>>(x, b, scale, lambda, x.ssize()); CUDA_CHECK(cudaGetLastError()); } } // namespace gpu } // namespace cuba
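The computeBschure / computeHschure / schurComplementPost sequence above eliminates the landmark blocks to form the reduced camera system. A scalar (1x1-block) sketch of the same algebra, for intuition only and not part of the original file:

// Scalar illustration of the Schur-complement pipeline: solve
// [[Hpp, Hpl], [Hpl, Hll]] * [xp, xl]^T = [bp, bl]^T by eliminating xl first.
#include <cassert>
#include <cmath>

int main() {
  double Hpp = 4.0, Hpl = 1.0, Hll = 2.0, bp = 6.0, bl = 4.0;
  const double invHll = 1.0 / Hll;             // Sym3x3Inv in the real code
  const double bsc = bp - Hpl * invHll * bl;   // computeBschure
  const double Hsc = Hpp - Hpl * invHll * Hpl; // computeHschure
  const double xp = bsc / Hsc;                 // reduced (camera) solve
  const double xl = invHll * (bl - Hpl * xp);  // schurComplementPost
  assert(std::fabs(Hpp * xp + Hpl * xl - bp) < 1e-12);
  assert(std::fabs(Hpl * xp + Hll * xl - bl) < 1e-12);
  return 0;
}

In the real solver the same four steps run blockwise over the sparse Hpl structure, with DEACCUM/DEACCUM_ATOMIC supplying the subtractions.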
// standard headers required by the host code below
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include <sys/time.h>

#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932f
#define A 1103515245
#define C 12345
#define M INT_MAX
#define SCALE_FACTOR 300.0f

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 256
#endif

#include "kernel_find_index.h"
#include "kernel_likelihood.h"
#include "kernel_normalize_weights.h"
#include "kernel_sum.h"

#ifndef FLT_MAX
#define FLT_MAX 3.40282347e+38
#endif

/*****************************
 * GET_TIME
 * returns a long long representing the time
 *****************************/
long long get_time() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (tv.tv_sec * 1000000) + tv.tv_usec;
}

/* Returns the number of seconds elapsed between the two specified times */
float elapsed_time(long long start_time, long long end_time) {
  return (float) (end_time - start_time) / (1000 * 1000);
}

/**
 * Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
 * @see http://en.wikipedia.org/wiki/Linear_congruential_generator
 * @note This function is thread-safe
 * @param seed The seed array
 * @param index The specific index of the seed to be advanced
 * @return a uniformly distributed number [0, 1)
 */
float randu(int * seed, int index) {
  int num = A * seed[index] + C;
  seed[index] = num % M;
  return fabs(seed[index] / ((float) M));
}

/**
 * Generates a normally distributed random number using the Box-Muller transformation
 * @note This function is thread-safe
 * @param seed The seed array
 * @param index The specific index of the seed to be advanced
 * @return a float representing a random number generated using the Box-Muller algorithm
 * @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
 */
float randn(int * seed, int index) {
  /* Box-Muller algorithm */
  float u = randu(seed, index);
  float v = randu(seed, index);
  float cosine = cos(2 * PI * v);
  float rt = -2 * log(u);
  return sqrt(rt) * cosine;
}

/**
 * Takes in a float and returns an integer that approximates to that float
 * @return the value rounded to the nearest integer (fractional parts >= .5 round up)
 */
float roundFloat(float value) {
  int newValue = (int) (value);
  if (value - newValue < .5)
    return newValue;
  else
    return newValue + 1;
}

/**
 * Set values of the 3D array to a newValue if that value is equal to the testValue
 * @param testValue The value to be replaced
 * @param newValue The value to replace testValue with
 * @param array3D The image vector
 * @param dimX The x dimension of the frame
 * @param dimY The y dimension of the frame
 * @param dimZ The number of frames
 */
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
  int x, y, z;
  for (x = 0; x < *dimX; x++) {
    for (y = 0; y < *dimY; y++) {
      for (z = 0; z < *dimZ; z++) {
        if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
          array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
      }
    }
  }
}

/**
 * Sets values of 3D matrix using randomly generated numbers from a normal distribution
 * @param array3D The video to be modified
 * @param dimX The x dimension of the frame
 * @param dimY The y dimension of the frame
 * @param dimZ The number of frames
 * @param seed The seed array
 */
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
  int x, y, z;
  for (x = 0; x < *dimX; x++) {
    for (y = 0; y < *dimY; y++) {
      for (z = 0; z < *dimZ; z++) {
        array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
      }
    }
  }
}

/**
 * Fills a radius x radius matrix representing the disk
 * @param disk The pointer to the disk to be made
 * @param radius The radius of the disk to be made
 */
void strelDisk(int * disk, int radius) {
  int diameter = radius * 2 - 1;
  int x, y;
  for (x = 0; x < diameter; x++) {
    for (y = 0; y < diameter; y++) {
      float distance = sqrt(pow((float) (x - radius + 1), 2) + pow((float) (y - radius + 1), 2));
      if (distance < radius)
        disk[x * diameter + y] = 1;
      else
        disk[x * diameter + y] = 0;
    }
  }
}

/**
 * Dilates the provided video
 * @param matrix The video to be dilated
 * @param posX The x location of the pixel to be dilated
 * @param posY The y location of the pixel to be dilated
 * @param posZ The z location of the pixel to be dilated
 * @param dimX The x dimension of the frame
 * @param dimY The y dimension of the frame
 * @param dimZ The number of frames
 * @param error The error radius
 */
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
  int startX = posX - error;
  while (startX < 0)
    startX++;
  int startY = posY - error;
  while (startY < 0)
    startY++;
  int endX = posX + error;
  while (endX > dimX)
    endX--;
  int endY = posY + error;
  while (endY > dimY)
    endY--;
  int x, y;
  for (x = startX; x < endX; x++) {
    for (y = startY; y < endY; y++) {
      float distance = sqrt(pow((float) (x - posX), 2) + pow((float) (y - posY), 2));
      if (distance < error)
        matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
    }
  }
}

/**
 * Dilates the target matrix using the radius as a guide
 * @param matrix The reference matrix
 * @param dimX The x dimension of the video
 * @param dimY The y dimension of the video
 * @param dimZ The z dimension of the video
 * @param error The error radius to be dilated
 * @param newMatrix The target matrix
 */
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
  int x, y, z;
  for (z = 0; z < dimZ; z++) {
    for (x = 0; x < dimX; x++) {
      for (y = 0; y < dimY; y++) {
        if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
          dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
        }
      }
    }
  }
}

/**
 * Fills a 2D array describing the offsets of the disk object
 * @param se The disk object
 * @param numOnes The number of ones in the disk
 * @param neighbors The array that will contain the offsets
 * @param radius The radius used for dilation
 */
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
  int x, y;
  int neighY = 0;
  int center = radius - 1;
  int diameter = radius * 2 - 1;
  for (x = 0; x < diameter; x++) {
    for (y = 0; y < diameter; y++) {
      if (se[x * diameter + y]) {
        neighbors[neighY * 2] = (int) (y - center);
        neighbors[neighY * 2 + 1] = (int) (x - center);
        neighY++;
      }
    }
  }
}

/**
 * The synthetic video sequence we will work with here is composed of a
 * single moving object, circular in shape (fixed radius)
 * The motion here is a linear motion
 * the foreground intensity and the background intensity are known
 * the image is corrupted with zero mean Gaussian noise
 * @param I The video itself
 * @param IszX The x dimension of the video
 * @param IszY The y dimension of the video
 * @param Nfr The number of frames of the video
 * @param seed The seed array used for number generation
 */
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
  int k;
  int max_size = IszX * IszY * Nfr;
  /* get object centers */
  int x0 = (int) roundFloat(IszY / 2.0);
  int y0 = (int) roundFloat(IszX / 2.0);
  I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
  /* move point */
  int xk, yk, pos;
  for (k = 1; k < Nfr; k++) {
    xk = abs(x0 + (k - 1));
    yk = abs(y0 - 2 * (k - 1));
    pos = yk * IszY * Nfr + xk * Nfr + k;
    if (pos >= max_size)
      pos = 0;
    I[pos] = 1;
  }
  /* dilate matrix */
  unsigned char * newMatrix = (unsigned char *) calloc(IszX * IszY * Nfr, sizeof(unsigned char));
  imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
  int x, y;
  for (x = 0; x < IszX; x++) {
    for (y = 0; y < IszY; y++) {
      for (k = 0; k < Nfr; k++) {
        I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
      }
    }
  }
  free(newMatrix);
  /* define background, add noise */
  setIf(0, 100, I, &IszX, &IszY, &Nfr);
  setIf(1, 228, I, &IszX, &IszY, &Nfr);
  /* add noise */
  addNoise(I, &IszX, &IszY, &Nfr, seed);
}

/**
 * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
 * @note This function uses sequential search
 * @param CDF The CDF
 * @param lengthCDF The length of CDF
 * @param value The value to be found
 * @return The index of value in the CDF; if value is never found, returns the last index
 */
int findIndex(float * CDF, int lengthCDF, float value) {
  int index = -1;
  int x;
  for (x = 0; x < lengthCDF; x++) {
    if (CDF[x] >= value) {
      index = x;
      break;
    }
  }
  if (index == -1) {
    return lengthCDF - 1;
  }
  return index;
}

/**
 * The implementation of the particle filter using CUDA for many frames
 * @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
 * @param I The video to be run
 * @param IszX The x dimension of the video
 * @param IszY The y dimension of the video
 * @param Nfr The number of frames
 * @param seed The seed array used for random number generation
 * @param Nparticles The number of particles to be used
 */
int particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
  int max_size = IszX * IszY * Nfr;
  // original particle centroid
  float xe = roundFloat(IszY / 2.0);
  float ye = roundFloat(IszX / 2.0);
  // expected object locations, compared to center
  int radius = 5;
  int diameter = radius * 2 - 1;
  int * disk = (int*) calloc(diameter * diameter, sizeof (int));
  strelDisk(disk, radius);
  int countOnes = 0;
  int x, y;
  for (x = 0; x < diameter; x++) {
    for (y = 0; y < diameter; y++) {
      if (disk[x * diameter + y] == 1)
        countOnes++;
    }
  }
  int * objxy = (int *) calloc(countOnes * 2, sizeof(int));
  getneighbors(disk, countOnes, objxy, radius);
  // initial weights are all equal (1/Nparticles)
  float * weights = (float *) calloc(Nparticles, sizeof(float));
  for (x = 0; x < Nparticles; x++) {
    weights[x] = 1 / ((float) (Nparticles));
  }
  /****************************************************************
   ************** B E G I N A L L O C A T E *******************
   ****************************************************************/
  float * likelihood = (float *) calloc(Nparticles + 1, sizeof (float));
  float * arrayX = (float *) calloc(Nparticles, sizeof (float));
  float * arrayY = (float *) calloc(Nparticles, sizeof (float));
  float * xj = (float *) calloc(Nparticles, sizeof (float));
  float * yj = (float *) calloc(Nparticles, sizeof (float));
  float * CDF = (float *) calloc(Nparticles, sizeof(float));
  // GPU copies of arrays
  int * ind = (int*) calloc(countOnes * Nparticles, sizeof(int));
  float * u = (float *) calloc(Nparticles, sizeof(float));
  // Donnie - this loop is different because in this kernel, arrayX and arrayY
  // are set equal to xj before every iteration, so effectively, arrayX and
  // arrayY will be set to xe and ye before
the first iteration. for (x = 0; x < Nparticles; x++) { xj[x] = xe; yj[x] = ye; } long long offload_start = get_time(); int num_blocks = (Nparticles + BLOCK_SIZE - 1) / BLOCK_SIZE; #ifdef DEBUG printf("BLOCK_SIZE=%d \n",BLOCK_SIZE); #endif float* likelihood_GPU; float* arrayX_GPU; float* arrayY_GPU; float* xj_GPU; float* yj_GPU; float* CDF_GPU; float* partial_sums_GPU; float* u_GPU; int* objxy_GPU; int* ind_GPU; int* seed_GPU; float* weights_GPU; unsigned char* I_GPU; cudaMalloc((void**)&likelihood_GPU, (Nparticles + 1)*sizeof(float)); cudaMalloc((void**)&arrayX_GPU, Nparticles*sizeof(float)); cudaMalloc((void**)&arrayY_GPU, Nparticles*sizeof(float)); cudaMalloc((void**)&xj_GPU, Nparticles*sizeof(float)); cudaMalloc((void**)&yj_GPU, Nparticles*sizeof(float)); cudaMemcpy(xj_GPU, xj, Nparticles*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(yj_GPU, yj, Nparticles*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&CDF_GPU, Nparticles*sizeof(float)); cudaMalloc((void**)&u_GPU, Nparticles*sizeof(float)); //cudaMemcpy(u_GPU, u, Nparticles*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&ind_GPU, countOnes*Nparticles*sizeof(int)); //cudaMemcpy(ind_GPU, ind, countOnes*Nparticles*sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**)&weights_GPU, Nparticles*sizeof(float)); // memory copy is not needed, because all the weights are updated first before // they are read in the likelihood kernel. // Just be consistent with the original cuda version cudaMemcpy(weights_GPU, weights, Nparticles*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&I_GPU, IszX * IszY * Nfr * sizeof(unsigned char)); cudaMemcpy(I_GPU, I, IszX * IszY * Nfr * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMalloc((void**)&seed_GPU, Nparticles*sizeof(int)); cudaMemcpy(seed_GPU, seed, Nparticles*sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**)&partial_sums_GPU, (Nparticles+1)*sizeof(float)); //cudaMemcpy(partial_sums_GPU, likelihood, (Nparticles+1)*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&objxy_GPU, 2*countOnes*sizeof(int)); cudaMemcpy(objxy_GPU, objxy, 2*countOnes*sizeof(int), cudaMemcpyHostToDevice); for (int k = 1; k < Nfr; k++) { /****************** L I K E L I H O O D ************************************/ kernel_likelihood<<<num_blocks, BLOCK_SIZE>>>( arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, weights_GPU, seed_GPU, partial_sums_GPU, Nparticles, countOnes, IszY, Nfr, k, max_size); #ifdef DEBUG float * sum = (float *) calloc(Nparticles + 1, sizeof (float)); cudaMemcpy(sum, partial_sums_GPU, (Nparticles+1)*sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < Nparticles+1; i++) printf("%f ", sum[i]); printf("\n"); #endif kernel_sum<<<1, 1>>>(partial_sums_GPU, Nparticles); #ifdef DEBUG // this shows the sum of all partial_sum results cudaMemcpy(sum, partial_sums_GPU, sizeof(float), cudaMemcpyDeviceToHost); printf("kernel sum: frame=%d partial_sums[0]=%f\n", k, sum[0]); #endif kernel_normalize_weights<<<num_blocks, BLOCK_SIZE>>>( weights_GPU, partial_sums_GPU, CDF_GPU, u_GPU, seed_GPU, Nparticles ); kernel_find_index<<<num_blocks, BLOCK_SIZE>>>( arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles ); }//end loop cudaMemcpy(arrayX, arrayX_GPU, Nparticles*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(arrayY, arrayY_GPU, Nparticles*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(weights, weights_GPU, Nparticles*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(likelihood_GPU); cudaFree(arrayX_GPU); 
cudaFree(arrayY_GPU); cudaFree(xj_GPU); cudaFree(yj_GPU); cudaFree(CDF_GPU); cudaFree(partial_sums_GPU); cudaFree(objxy_GPU); cudaFree(u_GPU); cudaFree(ind_GPU); cudaFree(seed_GPU); cudaFree(weights_GPU); cudaFree(I_GPU); long long offload_end = get_time(); printf("Device offloading time: %lf (s)\n", elapsed_time(offload_start, offload_end)); xe = 0; ye = 0; // estimate the object location by expected values for (x = 0; x < Nparticles; x++) { xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; } float distance = sqrt(pow((float) (xe - (int) roundFloat(IszY / 2.0)), 2) + pow((float) (ye - (int) roundFloat(IszX / 2.0)), 2)); //Output results FILE *fid; fid=fopen("output.txt", "w+"); if( fid == NULL ){ printf( "The file was not opened for writing\n" ); return -1; } fprintf(fid, "XE: %lf\n", xe); fprintf(fid, "YE: %lf\n", ye); fprintf(fid, "distance: %lf\n", distance); fclose(fid); //free regular memory free(likelihood); free(arrayX); free(arrayY); free(xj); free(yj); free(CDF); free(ind); free(u); return 0; } int main(int argc, char * argv[]) { const char* usage = "float.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>"; //check number of arguments if (argc != 9) { printf("%s\n", usage); return 0; } //check args deliminators if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) { printf("%s\n", usage); return 0; } int IszX, IszY, Nfr, Nparticles; //converting a string to a integer if (sscanf(argv[2], "%d", &IszX) == EOF) { printf("ERROR: dimX input is incorrect"); return 0; } if (IszX <= 0) { printf("dimX must be > 0\n"); return 0; } //converting a string to a integer if (sscanf(argv[4], "%d", &IszY) == EOF) { printf("ERROR: dimY input is incorrect"); return 0; } if (IszY <= 0) { printf("dimY must be > 0\n"); return 0; } //converting a string to a integer if (sscanf(argv[6], "%d", &Nfr) == EOF) { printf("ERROR: Number of frames input is incorrect"); return 0; } if (Nfr <= 0) { printf("number of frames must be > 0\n"); return 0; } //converting a string to a integer if (sscanf(argv[8], "%d", &Nparticles) == EOF) { printf("ERROR: Number of particles input is incorrect"); return 0; } if (Nparticles <= 0) { printf("Number of particles must be > 0\n"); return 0; } #ifdef DEBUG printf("dimX=%d dimY=%d Nfr=%d Nparticles=%d\n", IszX, IszY, Nfr, Nparticles); #endif //establish seed int * seed = (int *) calloc(Nparticles, sizeof(int)); int i; for (i = 0; i < Nparticles; i++) seed[i] = i+1; // seed[i] = time(0) * i; //calloc matrix unsigned char * I = (unsigned char *) calloc(IszX * IszY * Nfr, sizeof(unsigned char)); long long start = get_time(); //call video sequence videoSequence(I, IszX, IszY, Nfr, seed); long long endVideoSequence = get_time(); printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence)); //call particle filter particleFilter(I, IszX, IszY, Nfr, seed, Nparticles); long long endParticleFilter = get_time(); printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter)); printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter)); free(seed); free(I); return 0; }
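The per-frame loop in particleFilter above chains likelihood evaluation, weight normalization, and resampling; the kernel bodies live in the included kernel_*.h headers and are not shown here. Assuming the usual systematic-resampling scheme (a single shared uniform offset u1 and thresholds u1 + j/N searched against the CDF, just as findIndex does sequentially), this is a minimal host-side sketch of that step:

/* Host-side sketch of CDF-based resampling (an assumption-labeled
   illustration; the GPU version is in kernel_normalize_weights.h and
   kernel_find_index.h, which are not shown here). */
#include <stdio.h>

static int find_index(const float * CDF, int lengthCDF, float value) {
  int x;
  for (x = 0; x < lengthCDF; x++)
    if (CDF[x] >= value) return x;
  return lengthCDF - 1; /* same fallback as findIndex() above */
}

int main(void) {
  float weights[4] = {0.1f, 0.4f, 0.3f, 0.2f}; /* already normalized */
  float CDF[4];
  int j;
  CDF[0] = weights[0];
  for (j = 1; j < 4; j++) CDF[j] = CDF[j - 1] + weights[j];
  const float u1 = 0.05f; /* one uniform draw in [0, 1/N) shared by all particles */
  for (j = 0; j < 4; j++) {
    const float u = u1 + (float) j / 4;
    printf("particle %d resamples from index %d\n", j, find_index(CDF, 4, u));
  }
  return 0;
}

Particles with large weights cover wide CDF intervals and are therefore duplicated, while low-weight particles are dropped.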
extern "C" { #include <stdint.h> #include <memory.h> } #include <cuda_helper.h> #include <cuda_vectors.h> #define TPB52 1024 #define TPB50 384 #define NPT 2 #define NBN 2 static uint32_t *d_nonces[MAX_GPUS]; static uint32_t *h_nonces[MAX_GPUS]; __constant__ uint2 c_message48[6]; __constant__ uint2 c_mid[17]; __constant__ uint2 keccak_round_constants[24] = { { 0x00000001, 0x00000000 }, { 0x00008082, 0x00000000 }, { 0x0000808a, 0x80000000 }, { 0x80008000, 0x80000000 }, { 0x0000808b, 0x00000000 }, { 0x80000001, 0x00000000 }, { 0x80008081, 0x80000000 }, { 0x00008009, 0x80000000 }, { 0x0000008a, 0x00000000 }, { 0x00000088, 0x00000000 }, { 0x80008009, 0x00000000 }, { 0x8000000a, 0x00000000 }, { 0x8000808b, 0x00000000 }, { 0x0000008b, 0x80000000 }, { 0x00008089, 0x80000000 }, { 0x00008003, 0x80000000 }, { 0x00008002, 0x80000000 }, { 0x00000080, 0x80000000 }, { 0x0000800a, 0x00000000 }, { 0x8000000a, 0x80000000 }, { 0x80008081, 0x80000000 }, { 0x00008080, 0x80000000 }, { 0x80000001, 0x00000000 }, { 0x80008008, 0x80000000 } }; __device__ __forceinline__ uint2 chi(const uint2 a,const uint2 b,const uint2 c) { // keccak chi uint2 result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0xD2;" : "=r"(result.x) : "r"(a.x), "r"(b.x),"r"(c.x)); //0xD2 = 0xF0 ^ ((~0xCC) & 0xAA) asm ("lop3.b32 %0, %1, %2, %3, 0xD2;" : "=r"(result.y) : "r"(a.y), "r"(b.y),"r"(c.y)); //0xD2 = 0xF0 ^ ((~0xCC) & 0xAA) #else result = a ^ (~b) & c; #endif return result; } __device__ __forceinline__ uint64_t xor5(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e) { uint64_t result; asm("xor.b64 %0, %1, %2;" : "=l"(result) : "l"(d) ,"l"(e)); asm("xor.b64 %0, %0, %1;" : "+l"(result) : "l"(c)); asm("xor.b64 %0, %0, %1;" : "+l"(result) : "l"(b)); asm("xor.b64 %0, %0, %1;" : "+l"(result) : "l"(a)); return result; } #if __CUDA_ARCH__ <= 500 __global__ __launch_bounds__(TPB50, 2) #else __global__ __launch_bounds__(TPB52, 1) #endif void keccak256_gpu_hash_80(uint32_t threads, uint32_t startNonce, uint32_t *resNounce, const uint2 highTarget) { uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x; uint2 s[25], t[5], v, w, u[5]; #if __CUDA_ARCH__ > 500 uint64_t step = gridDim.x * blockDim.x; uint64_t maxNonce = startNonce + threads; for(uint64_t nounce = startNonce + thread; nounce<maxNonce;nounce+=step) { #else uint32_t nounce = startNonce+thread; if(thread<threads) { #endif s[ 9] = make_uint2(c_message48[0].x,cuda_swab32(nounce)); s[10] = keccak_round_constants[0]; t[ 4] = c_message48[1]^s[ 9]; /* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */ u[ 0] = t[4] ^ c_mid[ 0]; u[ 1] = c_mid[ 1] ^ ROL2(t[4],1); u[ 2] = c_mid[ 2]; /* thetarho pi: b[..] = rotl(a[..] 
^ d[...], ..)*/ s[ 7] = ROL2(s[10]^u[0], 3); s[10] = c_mid[ 3]; w = c_mid[ 4]; s[20] = c_mid[ 5]; s[ 6] = ROL2(s[ 9]^u[2],20); s[ 9] = c_mid[ 6]; s[22] = c_mid[ 7]; s[14] = ROL2(u[0],18); s[ 2] = c_mid[ 8]; s[12] = ROL2(u[1],25); s[13] = c_mid[ 9]; s[19] = ROR8(u[1]); s[23] = ROR2(u[0],23); s[15] = c_mid[10]; s[ 4] = c_mid[11]; s[24] = c_mid[12]; s[21] = ROR2(c_message48[2]^u[1], 9); s[ 8] = c_mid[13]; s[16] = ROR2(c_message48[3]^u[0],28); s[ 5] = ROL2(c_message48[4]^u[1],28); s[ 3] = ROL2(u[1],21); s[18] = c_mid[14]; s[17] = c_mid[15]; s[11] = c_mid[16]; /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ v = c_message48[5]^u[0]; s[ 0] = chi(v,w,s[ 2]); s[ 1] = chi(w,s[ 2],s[ 3]); s[ 2] = chi(s[ 2],s[ 3],s[ 4]); s[ 3] = chi(s[ 3],s[ 4],v); s[ 4] = chi(s[ 4],v,w); v = s[ 5]; w = s[ 6]; s[ 5] = chi(v,w,s[ 7]); s[ 6] = chi(w,s[ 7],s[ 8]); s[ 7] = chi(s[ 7],s[ 8],s[ 9]); s[ 8] = chi(s[ 8],s[ 9],v);s[ 9] = chi(s[ 9],v,w); v = s[10]; w = s[11]; s[10] = chi(v,w,s[12]); s[11] = chi(w,s[12],s[13]); s[12] = chi(s[12],s[13],s[14]); s[13] = chi(s[13],s[14],v);s[14] = chi(s[14],v,w); v = s[15]; w = s[16]; s[15] = chi(v,w,s[17]); s[16] = chi(w,s[17],s[18]); s[17] = chi(s[17],s[18],s[19]); s[18] = chi(s[18],s[19],v);s[19] = chi(s[19],v,w); v = s[20]; w = s[21]; s[20] = chi(v,w,s[22]); s[21] = chi(w,s[22],s[23]); s[22] = chi(s[22],s[23],s[24]); s[23] = chi(s[23],s[24],v);s[24] = chi(s[24],v,w); /* iota: a[0,0] ^= round constant */ s[ 0] ^=keccak_round_constants[ 0]; #if __CUDA_ARCH__ > 500 #pragma unroll 22 #else #pragma unroll 4 #endif for (int i = 1; i < 23; i++) { #pragma unroll for(int j=0;j<5;j++) { t[ j] = vectorize(xor5(devectorize(s[ j]),devectorize(s[j+5]),devectorize(s[j+10]),devectorize(s[j+15]),devectorize(s[j+20]))); } /*theta*/ #pragma unroll for(int j=0;j<5;j++) { u[j] = ROL2(t[j], 1); } s[ 4] = xor3x(s[ 4], t[3], u[0]);s[ 9] = xor3x(s[ 9], t[3], u[0]);s[14] = xor3x(s[14], t[3], u[0]);s[19] = xor3x(s[19], t[3], u[0]);s[24] = xor3x(s[24], t[3], u[0]); s[ 0] = xor3x(s[ 0], t[4], u[1]);s[ 5] = xor3x(s[ 5], t[4], u[1]);s[10] = xor3x(s[10], t[4], u[1]);s[15] = xor3x(s[15], t[4], u[1]);s[20] = xor3x(s[20], t[4], u[1]); s[ 1] = xor3x(s[ 1], t[0], u[2]);s[ 6] = xor3x(s[ 6], t[0], u[2]);s[11] = xor3x(s[11], t[0], u[2]);s[16] = xor3x(s[16], t[0], u[2]);s[21] = xor3x(s[21], t[0], u[2]); s[ 2] = xor3x(s[ 2], t[1], u[3]);s[ 7] = xor3x(s[ 7], t[1], u[3]);s[12] = xor3x(s[12], t[1], u[3]);s[17] = xor3x(s[17], t[1], u[3]);s[22] = xor3x(s[22], t[1], u[3]); s[ 3] = xor3x(s[ 3], t[2], u[4]);s[ 8] = xor3x(s[ 8], t[2], u[4]);s[13] = xor3x(s[13], t[2], u[4]);s[18] = xor3x(s[18], t[2], u[4]);s[23] = xor3x(s[23], t[2], u[4]); /*rho pi: b[..] = rotl(a[..] 
^ d[...], ..)*/ v = s[ 1]; s[ 1] = ROL2(s[ 6],44); s[ 6] = ROL2(s[ 9],20); s[ 9] = ROL2(s[22],61); s[22] = ROL2(s[14],39); s[14] = ROL2(s[20],18); s[20] = ROL2(s[ 2],62); s[ 2] = ROL2(s[12],43); s[12] = ROL2(s[13],25); s[13] = ROL8(s[19]); s[19] = ROR8(s[23]); s[23] = ROL2(s[15],41); s[15] = ROL2(s[ 4],27); s[ 4] = ROL2(s[24],14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[ 8],55); s[ 8] = ROL2(s[16],45); s[16] = ROL2(s[ 5],36); s[ 5] = ROL2(s[ 3],28); s[ 3] = ROL2(s[18],21); s[18] = ROL2(s[17],15); s[17] = ROL2(s[11],10); s[11] = ROL2(s[ 7], 6); s[ 7] = ROL2(s[10], 3); s[10] = ROL2(v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ #pragma unroll for(int j=0;j<25;j+=5) { v=s[j];w=s[j + 1];s[j] = chi(s[j],s[j+1],s[j+2]);s[j+1] = chi(s[j+1],s[j+2],s[j+3]);s[j+2]=chi(s[j+2],s[j+3],s[j+4]);s[j+3]=chi(s[j+3],s[j+4],v);s[j+4]=chi(s[j+4],v,w); } /* iota: a[0,0] ^= round constant */ s[ 0] ^=keccak_round_constants[ i]; } /* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */ #pragma unroll 5 for(int j=0;j<5;j++) { t[ j] = xor3x(xor3x(s[j+0],s[j+5],s[j+10]), s[j+15], s[j+20]); } s[24] = xor3x(s[24],t[3],ROL2(t[0],1)); s[18] = xor3x(s[18],t[2],ROL2(t[4],1)); s[ 0] = xor3x(s[ 0],t[4],ROL2(t[1],1)); /* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */ s[24] = ROL2(s[24],14); s[18] = ROL2(s[18],21); if (devectorize(chi(s[18],s[24],s[ 0])) <= devectorize(highTarget)) { // if(chi(s[18].x,s[24].x,s[0].x)<=highTarget.x) { // if(chi(s[18].y,s[24].y,s[0].y)<=highTarget.y) { const uint32_t tmp = atomicExch(&resNounce[0], nounce); if (tmp != UINT32_MAX) resNounce[1] = tmp; // return; // } } } } __host__ void keccak256_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t* resNonces, const uint2 highTarget) { uint32_t tpb; dim3 grid; if (device_sm[device_map[thr_id]] <= 500) { tpb = TPB50; grid.x = (threads + tpb-1)/tpb; } else { tpb = TPB52; grid.x = (threads + (NPT*tpb)-1)/(NPT*tpb); } const dim3 block(tpb); keccak256_gpu_hash_80<<<grid, block>>>(threads, startNonce, d_nonces[thr_id], highTarget); // cudaThreadSynchronize(); cudaMemcpy(h_nonces[thr_id], d_nonces[thr_id], NBN*sizeof(uint32_t), cudaMemcpyDeviceToHost); memcpy(resNonces, h_nonces[thr_id], NBN*sizeof(uint32_t)); } #if 0 #if __CUDA_ARCH__ <= 500 __global__ __launch_bounds__(TPB50, 2) #else __global__ __launch_bounds__(TPB52, 1) #endif void keccak256_gpu_hash_32(uint32_t threads, uint2* outputHash) { uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x; uint2 s[25], t[5], v, w, u[5]; if(thread < threads) { #pragma unroll 25 for (int i = 0; i<25; i++) { if (i<4) s[i] = __ldg(&outputHash[i*threads+thread]); else s[i] = make_uint2(0, 0); } s[4] = keccak_round_constants[ 0]; s[16] = make_uint2(0, 0x80000000); #if __CUDA_ARCH__ > 500 #pragma unroll #else #pragma unroll 4 #endif for (uint32_t i = 0; i < 23; i++) { /*theta*/ #pragma unroll 5 for(int j=0; j<5; j++) { t[ j] = vectorize(xor5(devectorize(s[ j]),devectorize(s[j+5]),devectorize(s[j+10]),devectorize(s[j+15]),devectorize(s[j+20]))); } /*theta*/ #pragma unroll 5 for(int j=0; j<5; j++) { u[j] = ROL2(t[j], 1); } s[ 4] = xor3x(s[ 4], t[3], u[0]);s[ 9] = xor3x(s[ 9], t[3], u[0]);s[14] = xor3x(s[14], t[3], u[0]);s[19] = xor3x(s[19], t[3], u[0]);s[24] = xor3x(s[24], t[3], u[0]); s[ 0] = xor3x(s[ 0], t[4], u[1]);s[ 5] = xor3x(s[ 5], t[4], u[1]);s[10] = xor3x(s[10], t[4], u[1]);s[15] = xor3x(s[15], t[4], u[1]);s[20] = xor3x(s[20], t[4], u[1]); s[ 1] = xor3x(s[ 1], t[0], u[2]);s[ 6] = xor3x(s[ 6], t[0], u[2]);s[11] = xor3x(s[11], t[0], u[2]);s[16] = xor3x(s[16], t[0], u[2]);s[21] = xor3x(s[21], t[0], u[2]); s[ 
2] = xor3x(s[ 2], t[1], u[3]);s[ 7] = xor3x(s[ 7], t[1], u[3]);s[12] = xor3x(s[12], t[1], u[3]);s[17] = xor3x(s[17], t[1], u[3]);s[22] = xor3x(s[22], t[1], u[3]); s[ 3] = xor3x(s[ 3], t[2], u[4]);s[ 8] = xor3x(s[ 8], t[2], u[4]);s[13] = xor3x(s[13], t[2], u[4]);s[18] = xor3x(s[18], t[2], u[4]);s[23] = xor3x(s[23], t[2], u[4]); /*rho pi: b[..] = rotl(a[..] ^ d[...], ..)*/ v = s[ 1]; s[ 1] = ROL2(s[ 6],44); s[ 6] = ROL2(s[ 9],20); s[ 9] = ROL2(s[22],61); s[22] = ROL2(s[14],39); s[14] = ROL2(s[20],18); s[20] = ROL2(s[ 2],62); s[ 2] = ROL2(s[12],43); s[12] = ROL2(s[13],25); s[13] = ROL8(s[19]); s[19] = ROR8(s[23]); s[23] = ROL2(s[15],41); s[15] = ROL2(s[ 4],27); s[ 4] = ROL2(s[24],14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[ 8],55); s[ 8] = ROL2(s[16],45); s[16] = ROL2(s[ 5],36); s[ 5] = ROL2(s[ 3],28); s[ 3] = ROL2(s[18],21); s[18] = ROL2(s[17],15); s[17] = ROL2(s[11],10); s[11] = ROL2(s[ 7], 6); s[ 7] = ROL2(s[10], 3); s[10] = ROL2(v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ #pragma unroll 5 for(int j=0; j<25; j+=5) { v=s[j];w=s[j + 1]; s[j] = chi(v,w,s[j+2]); s[j+1] = chi(w,s[j+2],s[j+3]); s[j+2]=chi(s[j+2],s[j+3],s[j+4]); s[j+3]=chi(s[j+3],s[j+4],v); s[j+4]=chi(s[j+4],v,w); } /* iota: a[0,0] ^= round constant */ s[ 0] ^=keccak_round_constants[ i]; } /* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */ #pragma unroll 5 for(int j=0;j<5;j++) { t[ j] = xor3x(xor3x(s[j+0],s[j+5],s[j+10]), s[j+15], s[j+20]); } /* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */ #pragma unroll 5 for(int j=0;j<5;j++) { u[j] = ROL2(t[j],1); } /* thetarho pi: b[..] = rotl(a[..] ^ d[...], ..) //There's no need to perform theta and -store- the result since it's unique for each a[..]*/ s[ 4] = xor3x(s[24], t[3], u[0]); s[ 0] = xor3x(s[ 0], t[4], u[1]); s[ 1] = xor3x(s[ 6], t[0], u[2]); s[ 2] = xor3x(s[12], t[1], u[3]); s[ 3] = xor3x(s[18], t[2], u[4]); s[ 1] = ROR2(s[ 1],20); s[ 2] = ROR2(s[ 2],21); s[ 3] = ROL2(s[ 3],21); s[ 4] = ROL2(s[ 4],14); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ outputHash[0*threads+thread] = chi(s[ 0],s[ 1],s[ 2]) ^ keccak_round_constants[23]; outputHash[1*threads+thread] = chi(s[ 1],s[ 2],s[ 3]); outputHash[2*threads+thread] = chi(s[ 2],s[ 3],s[ 4]); outputHash[3*threads+thread] = chi(s[ 3],s[ 4],s[ 0]); } } __host__ void keccak256_cpu_hash_32(const int thr_id,const uint32_t threads, uint2* d_hash) { uint32_t tpb = TPB52; if (device_sm[device_map[thr_id]] == 500) tpb = TPB50; const dim3 grid((threads + tpb-1)/tpb); const dim3 block(tpb); keccak256_gpu_hash_32 <<<grid, block>>> (threads, d_hash); } #endif __host__ void keccak256_setBlock_80(uint64_t *endiandata) { uint64_t midstate[17], s[25]; uint64_t t[5], u[5]; s[10] = 1; //(uint64_t)make_uint2(1, 0); s[16] = ((uint64_t)1)<<63; //(uint64_t)make_uint2(0, 0x80000000); t[0] = endiandata[0] ^ endiandata[5] ^ s[10]; t[1] = endiandata[1] ^ endiandata[6] ^ s[16]; t[2] = endiandata[2] ^ endiandata[7]; t[3] = endiandata[3] ^ endiandata[8]; midstate[ 0] = ROTL64(t[1], 1); //u[0] -partial u[1] = t[ 0] ^ ROTL64(t[2], 1); //u[1] u[2] = t[ 1] ^ ROTL64(t[3], 1); //u[2] midstate[ 1] = t[ 2]; //u[3] -partial midstate[ 2] = t[ 3] ^ ROTL64(t[0], 1); //u[4] midstate[ 3] = ROTL64(endiandata[1]^u[1], 1); //v midstate[ 4] = ROTL64(endiandata[6]^u[1], 44); midstate[ 5] = ROTL64(endiandata[2]^u[2], 62); midstate[ 6] = ROTL64(u[2], 61); midstate[ 7] = ROTL64(midstate[2], 39); midstate[ 8] = ROTL64(u[2], 43); midstate[ 9] = ROTL64(midstate[2], 8); midstate[10] = ROTL64(endiandata[4]^midstate[ 2],27); midstate[11] = ROTL64(midstate[2], 14); midstate[12] = ROTL64(u[1], 2); 
midstate[13] = ROTL64(s[16] ^ u[1], 45); midstate[14] = ROTL64(u[2],15); midstate[15] = ROTL64(u[1],10); midstate[16] = ROTL64(endiandata[7]^u[2], 6); CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_mid, midstate,17*sizeof(uint64_t), 0, cudaMemcpyHostToDevice)); // pass only what's needed uint64_t message48[6]; message48[0] = endiandata[9]; message48[1] = endiandata[4]; message48[2] = endiandata[8]; message48[3] = endiandata[5]; message48[4] = endiandata[3]; message48[5] = endiandata[0]; CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_message48, message48, 6*sizeof(uint64_t), 0, cudaMemcpyHostToDevice)); } __host__ void keccak256_cpu_init(int thr_id) { CUDA_SAFE_CALL(cudaMalloc(&d_nonces[thr_id], NBN*sizeof(uint32_t))); //CUDA_SAFE_CALL(cudaMallocHost(&h_nonces[thr_id], NBN*sizeof(uint32_t))); h_nonces[thr_id] = (uint32_t*) malloc(NBN * sizeof(uint32_t)); if(h_nonces[thr_id] == NULL) { gpulog(LOG_ERR,thr_id,"Host memory allocation failed"); exit(EXIT_FAILURE); } } __host__ void keccak256_setOutput(int thr_id) { CUDA_SAFE_CALL(cudaMemset(d_nonces[thr_id], 0xff, NBN*sizeof(uint32_t))); } __host__ void keccak256_cpu_free(int thr_id) { cudaFree(d_nonces[thr_id]); //cudaFreeHost(h_nonces[thr_id]); free(h_nonces[thr_id]); }
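// For reference when reading the unrolled kernel above: one full
// Keccak-f[1600] round in plain, self-contained C, using the conventional
// rotation (rotc) and lane-permutation (piln) tables. This is an
// illustrative host-side sketch, not part of the miner; the kernel above
// precomputes most of round 0 into c_mid/c_message48 and keeps rounds 1..22
// fully unrolled with uint2 arithmetic.
static const int keccakf_rotc[24] = {
	1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
	27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 };
static const int keccakf_piln[24] = {
	10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
	15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 };

static uint64_t rotl64_ref(uint64_t v, int n) {
	return (v << n) | (v >> (64 - n)); // n is always 1..63 here
}

static void keccakf_round_reference(uint64_t s[25], uint64_t rc)
{
	uint64_t bc[5], t;
	/* theta: column parities, then xor every lane with d[x] */
	for (int i = 0; i < 5; i++)
		bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^ s[i + 20];
	for (int i = 0; i < 5; i++) {
		t = bc[(i + 4) % 5] ^ rotl64_ref(bc[(i + 1) % 5], 1);
		for (int j = 0; j < 25; j += 5)
			s[j + i] ^= t;
	}
	/* rho + pi: rotate and permute the lanes in one pass */
	t = s[1];
	for (int i = 0; i < 24; i++) {
		int j = keccakf_piln[i];
		bc[0] = s[j];
		s[j] = rotl64_ref(t, keccakf_rotc[i]);
		t = bc[0];
	}
	/* chi: a[i,j] ^= ~a[i,j+1] & a[i,j+2], row-wise */
	for (int j = 0; j < 25; j += 5) {
		for (int i = 0; i < 5; i++)
			bc[i] = s[j + i];
		for (int i = 0; i < 5; i++)
			s[j + i] = bc[i] ^ (~bc[(i + 1) % 5] & bc[(i + 2) % 5]);
	}
	/* iota: fold in the round constant */
	s[0] ^= rc;
}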
namespace anakin {
namespace saber {
namespace {

template <ReduceType type>
class ReOp {
public:
    __device__ static float compute(float a, float b) {
        return -1.f;
    }
};

template <>
__device__ float ReOp<Reduce_max>::compute(float a, float b) {
    return ((a > b) ? a : b);
}

template <>
__device__ float ReOp<Reduce_min>::compute(float a, float b) {
    return ((a > b) ? b : a);
}

template <>
__device__ float ReOp<Reduce_sum>::compute(float a, float b) {
    return a + b;
}

template <>
__device__ float ReOp<Reduce_avg>::compute(float a, float b) {
    return a + b;
}

template <>
__device__ float ReOp<Reduce_prod>::compute(float a, float b) {
    return a * b;
}

template <int nDim>
class IndexCompute {
public:
    __device__ static int input_idx(const int* in_stride,
            const int* out_stride, int out_idx);
};

template <>
__device__ int IndexCompute<4>::input_idx(
        const int* in_stride, const int* out_stride, int out_idx) {
    int i0 = out_idx / out_stride[0];
    int i1 = (out_idx % out_stride[0]) / out_stride[1];
    int i2 = (out_idx % out_stride[1]) / out_stride[2];
    int i3 = (out_idx % out_stride[2]) / out_stride[3];
    int idx = i0 * in_stride[0] + i1 * in_stride[1]
            + i2 * in_stride[2] + i3 * in_stride[3];
    return idx;
}

template <>
__device__ int IndexCompute<3>::input_idx(
        const int* in_stride, const int* out_stride, int out_idx) {
    int i0 = out_idx / out_stride[0];
    int i1 = (out_idx % out_stride[0]) / out_stride[1];
    int i2 = (out_idx % out_stride[1]) / out_stride[2];
    int idx = i0 * in_stride[0] + i1 * in_stride[1] + i2 * in_stride[2];
    return idx;
}

template <>
__device__ int IndexCompute<2>::input_idx(
        const int* in_stride, const int* out_stride, int out_idx) {
    int i0 = out_idx / out_stride[0];
    int i1 = (out_idx % out_stride[0]) / out_stride[1];
    int idx = i0 * in_stride[0] + i1 * in_stride[1];
    return idx;
}

template <>
__device__ int IndexCompute<1>::input_idx(
        const int* in_stride, const int* out_stride, int out_idx) {
    int i0 = out_idx / out_stride[0];
    int idx = i0 * in_stride[0];
    return idx;
}

// There is still plenty of room for optimization here: this is the right
// class in which to implement a proper parallel reduction. The compute
// function runs within a single block, so shuffle instructions could be
// used. tdim is the number of threads per block.
template <int rdim, int tdim, ReduceType type>
class ReduceCompute {
public:
    __device__ static float compute(
            const int* dims, const int* rdims, const int* in_stride,
            const float* in_data, int in_idx) {
        return 0;
    }
};

template <int tdim, ReduceType type>
class ReduceCompute<1, tdim, type> {
public:
    __device__ static float compute(
            const int* dims, const int* rdims, const int* in_stride,
            const float* in_data, int in_idx) {
        // int tid = threadIdx.x;
        float res = in_data[in_idx];
        int idx = in_idx + in_stride[rdims[0]];
        // here is the reduction op.
        for (int i = 1; i < dims[rdims[0]]; ++i) {
            res = ReOp<type>::compute(res, in_data[idx]);
            idx += in_stride[rdims[0]];
        }
        return res;
    }
};

template <int tdim, ReduceType type>
class ReduceCompute<2, tdim, type> {
public:
    __device__ static float compute(
            const int* dims, const int* rdims, const int* in_stride,
            const float* in_data, int in_idx) {
        float res0 = 0.f;
        int idx0 = in_idx;
        for (int i = 0; i < dims[rdims[0]]; ++i) {
            float res1 = in_data[idx0];
            int idx1 = idx0 + in_stride[rdims[1]];
            for (int j = 1; j < dims[rdims[1]]; ++j) {
                res1 = ReOp<type>::compute(res1, in_data[idx1]);
                idx1 += in_stride[rdims[1]];
            }
            idx0 += in_stride[rdims[0]];
            if (i == 0) {
                res0 = res1;
            } else {
                res0 = ReOp<type>::compute(res0, res1);
            }
        }
        return res0;
    }
};

template <int tdim, ReduceType type>
class ReduceCompute<3, tdim, type> {
public:
    __device__ static float compute(
            const int* dims, const int* rdims, const int* in_stride,
            const float* in_data, int in_idx) {
        float res0 = 0.f;
        int idx0 = in_idx;
        for (int i = 0; i < dims[rdims[0]]; ++i) {
            float res1 = 0.f;
            int idx1 = idx0;
            for (int j = 0; j < dims[rdims[1]]; ++j) {
                float res2 = in_data[idx1];
                int idx2 = idx1 + in_stride[rdims[2]];
                for (int k = 1; k < dims[rdims[2]]; ++k) {
                    res2 = ReOp<type>::compute(res2, in_data[idx2]);
                    idx2 += in_stride[rdims[2]];
                }
                if (j == 0) {
                    res1 = res2;
                } else {
                    res1 = ReOp<type>::compute(res1, res2);
                }
                idx1 += in_stride[rdims[1]];
            }
            if (i == 0) {
                res0 = res1;
            } else {
                res0 = ReOp<type>::compute(res0, res1);
            }
            idx0 += in_stride[rdims[0]];
        }
        return res0;
    }
};

template <int tdim, ReduceType type>
class ReduceCompute<4, tdim, type> {
public:
    __device__ static float compute(
            const int* dims, const int* rdims, const int* in_stride,
            const float* in_data, int in_idx) {
        float res0 = 0.f;
        int idx0 = in_idx;
        for (int i = 0; i < dims[rdims[0]]; ++i) {
            float res1 = 0.f;
            int idx1 = idx0;
            for (int j = 0; j < dims[rdims[1]]; ++j) {
                float res2 = 0.f;
                int idx2 = idx1;
                for (int k = 0; k < dims[rdims[2]]; ++k) {
                    float res3 = in_data[idx2];
                    int idx3 = idx2 + in_stride[rdims[3]];
                    // start at 1: res3 already holds the first element
                    // (the original loop started at 0, reducing one extra
                    // element past the slice)
                    for (int u = 1; u < dims[rdims[3]]; ++u) {
                        res3 = ReOp<type>::compute(res3, in_data[idx3]);
                        idx3 += in_stride[rdims[3]];
                    }
                    if (k == 0) {
                        res2 = res3;
                    } else {
                        res2 = ReOp<type>::compute(res2, res3);
                    }
                    idx2 += in_stride[rdims[2]];
                }
                if (j == 0) {
                    res1 = res2;
                } else {
                    res1 = ReOp<type>::compute(res1, res2);
                }
                idx1 += in_stride[rdims[1]];
            }
            if (i == 0) {
                res0 = res1;
            } else {
                res0 = ReOp<type>::compute(res0, res1);
            }
            idx0 += in_stride[rdims[0]];
        }
        return res0;
    }
};

template <typename dtype, ReduceType type, int nDim, int rDim>
__global__ void reduce(
        const dtype* src, dtype* dst,
        const int* rdim, const int* dims,
        const int* i_stride, const int* o_stride, int out_size) {
    int reduce_size = 1;
    for (int i = 0; i < rDim; ++i) {
        reduce_size *= dims[rdim[i]];
    }
    float reduce_size_1 = 1.f / ((float)reduce_size);
    int bid = blockIdx.x;
    int out_idx = bid;
    //init;
    int in_idx = IndexCompute<nDim>::input_idx(i_stride, o_stride, out_idx);
    float res = ReduceCompute<rDim, CUDA_NUM_THREADS, type>::compute(
            dims, rdim, i_stride, src, in_idx);
    dst[out_idx] = res;
    if (Reduce_avg == type) {
        dst[out_idx] *= reduce_size_1;
    }
}

__global__ void reduce_unknow(
        const float* src, float* dst,
        const int* rdim, const int* dims,
        const int* i_stride, const int* o_stride, int out_size) {
    return;
}

template <typename dtype, ReduceType type, int nDim, int rDim>
__global__ void reduce_all(
        const dtype* src, dtype* dst,
        const int* rdim, const int* dims,
        const int* i_stride, const int* o_stride, int out_size) {
    int reduce_size = 1;
    for (int i = 0; i < rDim; ++i) {
        reduce_size *= dims[rdim[i]];
    }
    float reduce_size_1 = 1.f / ((float)reduce_size);
    //init;
    float res = src[0];
    for (int i = 1; i < reduce_size; ++i) {
        res = ReOp<type>::compute(res, src[i]);
    }
    dst[0] = res;
    if (Reduce_avg == type) {
        dst[0] *= reduce_size_1;
    }
}

} // anonymous namespace

#define REG_REDUCE_TYPE_KERNEL(REDUCE_TYPE) \
    _kernel_direct_map[REDUCE_TYPE] = { \
        {reduce_unknow}, \
        {reduce_unknow, \
         reduce_all<float, REDUCE_TYPE, 1, 1>}, \
        {reduce_unknow, \
         reduce<float, REDUCE_TYPE, 2, 1>, \
         reduce_all<float, REDUCE_TYPE, 2, 2>}, \
        {reduce_unknow, \
         reduce<float, REDUCE_TYPE, 3, 1>, \
         reduce<float, REDUCE_TYPE, 3, 2>, \
         reduce_all<float, REDUCE_TYPE, 3, 3>}, \
        {reduce_unknow, \
         reduce<float, REDUCE_TYPE, 4, 1>, \
         reduce<float, REDUCE_TYPE, 4, 2>, \
         reduce<float, REDUCE_TYPE, 4, 3>, \
         reduce_all<float, REDUCE_TYPE, 4, 4>}}

template <typename dtype>
void async_copy_to_buffer(Buffer<NV> &buffer, dtype* data,
        unsigned long size, cudaStream_t stream) {
    buffer.re_alloc(size * sizeof(dtype));
    cudaMemcpyAsync(buffer.get_data_mutable(), data,
            size * sizeof(dtype), cudaMemcpyHostToDevice, stream);
}

template <>
SaberStatus SaberReduce<NV, AK_FLOAT>::create(
        const std::vector<Tensor<NV>*>& inputs,
        std::vector<Tensor<NV>*>& outputs,
        ReduceParam<NV>& param, Context<NV>& ctx) {
    this->_ctx = &ctx;
    if (_template_reduction) {
        auto stream = _ctx->get_compute_stream();
        auto i_stride = inputs[0]->get_stride();
        auto o_stride = outputs[0]->get_stride();
        std::vector<int> ndim(inputs[0]->valid_shape());
        async_copy_to_buffer<int>(_rdim_b,
                param.reduce_dim.data(), param.reduce_dim.size(), stream);
        async_copy_to_buffer<int>(_ndim_b,
                inputs[0]->valid_shape().data(),
                inputs[0]->valid_shape().size(), stream);
        async_copy_to_buffer<int>(_i_stride_b, i_stride.data(), i_stride.size(), stream);
        async_copy_to_buffer<int>(_o_stride_b, o_stride.data(), o_stride.size(), stream);
        return SaberSuccess;
    } else {
        return _impl->create(inputs, outputs, param, ctx);
    }
}

template <>
SaberStatus SaberReduce<NV, AK_FLOAT>::init(
        const std::vector<Tensor<NV>*>& inputs,
        std::vector<Tensor<NV>*>& outputs,
        ReduceParam<NV>& param, Context<NV>& ctx) {
    this->_ctx = &ctx;
    if (param.reduce_type == Reduce_avg) {
        _template_reduction = true;
    }
    if (_template_reduction) {
        REG_REDUCE_TYPE_KERNEL(Reduce_avg);
        REG_REDUCE_TYPE_KERNEL(Reduce_min);
        REG_REDUCE_TYPE_KERNEL(Reduce_max);
        REG_REDUCE_TYPE_KERNEL(Reduce_sum);
        REG_REDUCE_TYPE_KERNEL(Reduce_prod);
    } else {
        _impl = new VenderReduce<NV, AK_FLOAT>;
        _impl->init(inputs, outputs, param, ctx);
    }
    return create(inputs, outputs, param, ctx);
}

template <>
SaberStatus SaberReduce<NV, AK_FLOAT>::dispatch(
        const std::vector<Tensor<NV>*>& inputs,
        std::vector<Tensor<NV>*>& outputs,
        ReduceParam<NV>& param) {
    if (_template_reduction) {
        int out_size = outputs[0]->valid_size();
        _kernel_direct_map[param.reduce_type]
                [inputs[0]->dims()]
                [param.reduce_dim.size()]
                <<<out_size, 1, 0, _ctx->get_compute_stream()>>>(
                (const float*)inputs[0]->data(),
                (float*)outputs[0]->mutable_data(),
                (const int*)_rdim_b.get_data(),
                (const int*)_ndim_b.get_data(),
                (const int*)_i_stride_b.get_data(),
                (const int*)_o_stride_b.get_data(),
                outputs[0]->valid_size());
        return SaberSuccess;
    } else {
        return _impl->dispatch(inputs, outputs, param);
    }
}

template class SaberReduce<NV, AK_FLOAT>;
DEFINE_OP_TEMPLATE(SaberReduce, ReduceParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberReduce, ReduceParam, NV, AK_INT8);
} // namespace saber.
} // namespace anakin.
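// A standalone host-side mirror of IndexCompute<4>::input_idx above, to make
// the stride bookkeeping concrete. The shapes are hypothetical: input
// {2, 3, 4, 5} reduced over axis 2, with the reduced axis kept as size 1 in
// the output, so output shape {2, 3, 1, 5}. Each output index is decomposed
// with the output strides and recombined with the input strides, giving the
// first element of its reduction slice; ReduceCompute<1,...> then walks
// dims[2] = 4 elements spaced in_stride[2] = 5 apart.
#include <cstdio>

static int input_idx4(const int* in_stride, const int* out_stride, int out_idx) {
    int i0 = out_idx / out_stride[0];
    int i1 = (out_idx % out_stride[0]) / out_stride[1];
    int i2 = (out_idx % out_stride[1]) / out_stride[2];
    int i3 = (out_idx % out_stride[2]) / out_stride[3];
    return i0 * in_stride[0] + i1 * in_stride[1]
         + i2 * in_stride[2] + i3 * in_stride[3];
}

int main() {
    int in_stride[4]  = {60, 20, 5, 1};  // strides of {2, 3, 4, 5}
    int out_stride[4] = {15, 5, 5, 1};   // strides of {2, 3, 1, 5}
    for (int o = 0; o < 6; ++o)          // e.g. out 5 -> input offset 20
        printf("out %d -> first input element %d\n", o,
               input_idx4(in_stride, out_stride, o));
    return 0;
}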
#define SEQ_LEN 33 __device__ inline double firstRepeatOffsetProb(const double probMult, const int maxRepeatOffset) { if (probMult < 1 || probMult > 1) return (1 - probMult) / (1 - pow(probMult, (double)maxRepeatOffset)); else return 1.0 / maxRepeatOffset; } __device__ void maskProbableLetters(const int size, unsigned char *seqBeg, const float *probabilities, const unsigned char *maskTable) { const double minMaskProb = 0.5; for (int i=0; i<size; i++) if (probabilities[i] >= minMaskProb) seqBeg[i] = maskTable[seqBeg[i]]; } __device__ int calcRepeatProbs(float *letterProbs, const unsigned char *seqBeg, const int size, const int maxRepeatOffset, const double *likelihoodRatioMatrix, // 64 by 64 matrix, const double b2b, const double f2f0, const double f2b, const double b2fLast_inv, const double *pow_lkp, double *foregroundProbs, const int scaleStepSize, double *scaleFactors) { double backgroundProb = 1.0; for (int k=0; k < size ; k++) { const int v0 = seqBeg[k]; const int k_cap = k < maxRepeatOffset ? k : maxRepeatOffset; const int pad1 = k_cap - 1; const int pad2 = maxRepeatOffset - k_cap; // maxRepeatOffset - k, then 0 when k > maxRepeatOffset const int pad3 = k - k_cap; // 0 , then maxRepeatOffset - k when k > maxRepeatOffset double accu = 0; for (int i = 0; i < k; i++) { const int idx1 = pad1 - i; const int idx2 = pad2 + i; const int idx3 = pad3 + i; const int v1 = seqBeg[idx3]; accu += foregroundProbs[idx1]; foregroundProbs[idx1] = ( (f2f0 * foregroundProbs[idx1]) + (backgroundProb * pow_lkp[idx2]) ) * likelihoodRatioMatrix[v0*size+v1]; } backgroundProb = (backgroundProb * b2b) + (accu * f2b); if (k % scaleStepSize == scaleStepSize - 1) { const double scale = 1 / backgroundProb; scaleFactors[k / scaleStepSize] = scale; for (int i=0; i< k_cap; i++) foregroundProbs[i] = foregroundProbs[i] * scale; backgroundProb = 1; } letterProbs[k] = (float)(backgroundProb); } double accu = 0; for (int i=0 ; i < maxRepeatOffset; i++) { accu += foregroundProbs[i]; foregroundProbs[i] = f2b; } const double fTot = backgroundProb * b2b + accu * f2b; backgroundProb = b2b; const double fTot_inv = 1/ fTot ; for (int k=(size-1) ; k >= 0 ; k--){ double nonRepeatProb = letterProbs[k] * backgroundProb * fTot_inv; letterProbs[k] = 1 - (float)(nonRepeatProb); //const int k_cap = std::min(k, maxRepeatOffset); const int k_cap = k < maxRepeatOffset ? k : maxRepeatOffset; if (k % scaleStepSize == scaleStepSize - 1) { const double scale = scaleFactors[k/ scaleStepSize]; for (int i=0; i< k_cap; i++) foregroundProbs[i] = foregroundProbs[i] * scale; backgroundProb *= scale; } const double c0 = f2b * backgroundProb; const int v0= seqBeg[k]; double accu = 0; for (int i = 0; i < k_cap; i++) { const int v1 = seqBeg[k-(i+1)]; const double f = foregroundProbs[i] * likelihoodRatioMatrix[v0*size+v1]; accu += pow_lkp[k_cap-(i+1)]*f; foregroundProbs[i] = c0 + f2f0 * f; } const double p = k > maxRepeatOffset ? 1. 
: pow_lkp[maxRepeatOffset - k]*b2fLast_inv; backgroundProb = (b2b * backgroundProb) + accu*p; } const double bTot = backgroundProb; return (fabs(fTot - bTot) > fmax(fTot, bTot) / 1e6); } __global__ void maskSequences(unsigned char * seqs, const double * likelihoodRatioMatrix, const unsigned char * maskTable, const int size , const int maxRepeatOffset , const double repeatProb , const double repeatEndProb , const double repeatOffsetProbDecay , const double firstGapProb , const double otherGapProb , const double minMaskProb , int seqs_len ) { int gid = blockIdx.x*blockDim.x+threadIdx.x; if (gid >= seqs_len) return; unsigned char* seqBeg = seqs+gid*33; float probabilities[SEQ_LEN]; const double b2b = 1 - repeatProb; const double f2f0 = 1 - repeatEndProb; const double f2b = repeatEndProb; const double b2fGrowth = 1 / repeatOffsetProbDecay; const double b2fLast = repeatProb * firstRepeatOffsetProb(b2fGrowth, maxRepeatOffset); const double b2fLast_inv = 1 / b2fLast ; double p = b2fLast; double ar_1[50]; for (int i=0 ; i < maxRepeatOffset; i++){ ar_1[i] = p ; p *= b2fGrowth; } const int scaleStepSize = 16; double scaleFactors[SEQ_LEN / scaleStepSize]; double foregroundProbs[50]; for (int i=0 ; i < maxRepeatOffset; i++){ foregroundProbs[i] = 0; }; const int err = calcRepeatProbs(probabilities,seqBeg, size, maxRepeatOffset, likelihoodRatioMatrix, b2b, f2f0, f2b, b2fLast_inv,ar_1,foregroundProbs,scaleStepSize, scaleFactors); //if (err) printf("tantan: warning: possible numeric inaccuracy\n"); maskProbableLetters(size,seqBeg, probabilities, maskTable); } auto_ptr<Masking> Masking::instance; const uint8_t Masking::bit_mask = 128; Masking::Masking(const Score_matrix &score_matrix) { const double lambda = score_matrix.lambda(); // 0.324032 for (unsigned i = 0; i < size; ++i) { mask_table_x_[i] = value_traits.mask_char; mask_table_bit_[i] = (uint8_t)i | bit_mask; for (unsigned j = 0; j < size; ++j) if (i < value_traits.alphabet_size && j < value_traits.alphabet_size) likelihoodRatioMatrix_[i][j] = exp(lambda * score_matrix(i, j)); } std::copy(likelihoodRatioMatrix_, likelihoodRatioMatrix_ + size, probMatrixPointers_); int firstGapCost = score_matrix.gap_extend() + score_matrix.gap_open(); firstGapProb_ = exp(-lambda * firstGapCost); otherGapProb_ = exp(-lambda * score_matrix.gap_extend()); firstGapProb_ /= (1 - otherGapProb_); } void Masking::operator()(Letter *seq, size_t len) const { tantan::maskSequences((tantan::uchar*)seq, (tantan::uchar*)(seq + len), 50, (tantan::const_double_ptr*)probMatrixPointers_, 0.005, 0.05, 0.9, 0, 0, 0.5, (const tantan::uchar*)mask_table_x_); } unsigned char* Masking::call_opt(Sequence_set &seqs) const { const int n = seqs.get_length(); int total = 0; for (int i=0; i < n; i++) total += seqs.length(i); printf("There are %d sequences and the total sequence length is %d\n", n, total); unsigned char *seqs_device = NULL; posix_memalign((void**)&seqs_device, 1024, total); unsigned char *p = seqs_device; for (int i=0; i < n; i++) { memcpy(p, seqs.ptr(i), seqs.length(i)); p += seqs.length(i); } double *probMat_device = NULL; posix_memalign((void**)&probMat_device, 1024, size*size*sizeof(double)); for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) probMat_device[i*size+j] = probMatrixPointers_[i][j]; unsigned char *mask_table_device = NULL; posix_memalign((void**)&mask_table_device, 1024, size*sizeof(unsigned char)); for (int i = 0; i < size; i++) mask_table_device[i] = mask_table_x_[i]; int len = 33; printf("Timing the mask sequences on device...\n"); Timer t; t.start(); 
const int size = len;
  const int maxRepeatOffset = 50;
  const double repeatProb = 0.005;
  const double repeatEndProb = 0.05;
  const double repeatOffsetProbDecay = 0.9;
  const double firstGapProb = 0;
  const double otherGapProb = 0;
  const double minMaskProb = 0.5;
  const int seqs_len = n;

  unsigned char* d_seqs;
  hipMalloc((void**)&d_seqs, total);
  hipMemcpy(d_seqs, seqs_device, total, hipMemcpyHostToDevice);

  unsigned char* d_maskTable;
  // allocate exactly the `size` bytes that are copied below
  hipMalloc((void**)&d_maskTable, size * sizeof(unsigned char));
  hipMemcpy(d_maskTable, mask_table_device, size, hipMemcpyHostToDevice);

  double* d_probMat;
  // the matrix holds size*size doubles, not size*size bytes
  hipMalloc((void**)&d_probMat, size * size * sizeof(double));
  hipMemcpy(d_probMat, probMat_device, sizeof(double) * size * size, hipMemcpyHostToDevice);

  dim3 grids((seqs_len + 127) / 128);  // ceiling division over 128-thread blocks
  dim3 threads(128);
  hipLaunchKernelGGL(maskSequences, grids, threads, 0, 0,
      d_seqs, d_probMat, d_maskTable, size, maxRepeatOffset,
      repeatProb, repeatEndProb, repeatOffsetProbDecay,
      firstGapProb, otherGapProb, minMaskProb, seqs_len);

  hipMemcpy(seqs_device, d_seqs, total, hipMemcpyDeviceToHost);
  hipFree(d_seqs);
  hipFree(d_maskTable);
  hipFree(d_probMat);

  message_stream << "Total time (maskSequences) on the device = "
                 << t.getElapsedTimeInMicroSec() / 1e6 << " s" << std::endl;
  free(probMat_device);
  free(mask_table_device);
  return seqs_device;
}

void Masking::call_opt(Letter *seq, size_t len) const {
  // CPU
  tantale::maskSequences((tantan::uchar*)seq, (tantan::uchar*)(seq + len),
      50,
      (tantan::const_double_ptr*)probMatrixPointers_,
      0.005, 0.05, 0.9, 0, 0, 0.5,
      (const tantan::uchar*)mask_table_x_);
}

void Masking::mask_bit(Letter *seq, size_t len) const {
  tantan::maskSequences((tantan::uchar*)seq, (tantan::uchar*)(seq + len),
      50,
      (tantan::const_double_ptr*)probMatrixPointers_,
      0.005, 0.05, 0.9, 0, 0, 0.5,
      (const tantan::uchar*)mask_table_bit_);
}

void Masking::bit_to_hard_mask(Letter *seq, size_t len, size_t &n) const {
  for (size_t i = 0; i < len; ++i)
    if (seq[i] & bit_mask) {
      seq[i] = value_traits.mask_char;
      ++n;
    }
}

void Masking::remove_bit_mask(Letter *seq, size_t len) const {
  for (size_t i = 0; i < len; ++i)
    if (seq[i] & bit_mask)
      seq[i] &= ~bit_mask;
}

void mask_worker(Atomic<size_t> *next, Sequence_set *seqs, const Masking *masking, bool hard_mask) {
  size_t i;
  int cnt = 0;
  while ((i = (*next)++) < seqs->get_length()) {
    if (hard_mask)
      //masking->operator()(seqs->ptr(i), seqs->length(i));
      masking->call_opt(seqs->ptr(i), seqs->length(i));
    else
      masking->mask_bit(seqs->ptr(i), seqs->length(i));
    //cnt++;
    //if (cnt == 2) break;
  }
}

void mask_seqs(Sequence_set &seqs, const Masking &masking, bool hard_mask) {
  assert(hard_mask==true);
  const int n = seqs.get_length();
  printf("Timing the mask sequences on CPU...\n");
  Timer total;
  total.start();
#if not defined(_OPENMP)
  Thread_pool threads;
  Atomic<size_t> next(0);
  for (size_t i = 0; i < config.threads_; ++i)
    threads.push_back(launch_thread(mask_worker, &next, &seqs, &masking, hard_mask));
  threads.join_all();
#else
  #pragma omp parallel for num_threads(config.threads_)
  for (int i=0; i < n; i++) {
    masking.call_opt(seqs.ptr(i), seqs.length(i));
  }
#endif
  message_stream << "Total time (maskSequences) on the CPU = "
                 << total.getElapsedTimeInMicroSec() / 1e6 << " s" << std::endl;

  // on the device
  unsigned char* seqs_device = masking.call_opt(seqs);
  printf("Verify the sequences...\n");
  unsigned char* p = seqs_device;
  int error = 0;
  for (int i = 0; i < n; i++) {
    // memcmp, not strncmp: sequence letters are raw codes and may contain
    // zero bytes, which would terminate a strncmp comparison early
    if (0 != memcmp(p, seqs.ptr(i), seqs.length(i))) {
      printf("error at i=%d length=%zu\n", i, seqs.length(i));
      printf("host=");
      char* s = seqs.ptr(i);
      for (int j = 0; j < seqs.length(i); j++) {
        printf("%02d", s[j]);
      }
      printf("\ndevice=");
      for (int j = 0; j < seqs.length(i); j++)
        printf("%02d", *(seqs_device+i*33+j));
      printf("\n");
      error++;
    }
    p += seqs.length(i);
  }
  if (error == 0) printf("Success\n");
}
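// Note on firstRepeatOffsetProb above: it is the normaliser of a truncated
// geometric distribution over first-repeat offsets k = 0 .. N-1 (with
// N = maxRepeatOffset and p = probMult), where offset k has weight p^k.
// Requiring
//
//     sum_{k=0}^{N-1} c * p^k  =  c * (1 - p^N) / (1 - p)  =  1
//
// gives c = (1 - p) / (1 - p^N), and the p -> 1 limit of that expression is
// 1/N -- exactly the two branches of the function (the `probMult < 1 ||
// probMult > 1` test is simply a p != 1 check written to dodge a direct
// floating-point equality comparison).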
#define _SIZE_T_DEFINED #include <cuda.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include "float.h" #include <builtin_types.h> #include <vector_functions.h> #include <math.h> #include "../NeuralNetwork/Activation/ActivationFunction.cu" extern "C" { __global__ void LSTMDeltaKernelBPTT( float* deltas, float* cellStates, float* previousCellStates, float* cellStateErrors, float* nextCellStateErrors, float* outputGateDeltas, float* forgetGateDeltas, float* nextForgetGateDeltas, float* inputGateDeltas, float* nextInputGateDeltas, float* cellInputDeltas, float* cellInputActivations, float* cellStateActivations, float* outputGateActivations, float* nextForgetGateActivations, float* inputGateActivations, float* cellInputActivationDerivatives, float* cellStateActivationDerivatives, float* outputGateActivationDerivatives, float* forgetGateActivationDerivatives, float* inputGateActivationDerivatives, float* cellInputWeights, float* outputGateWeights, float* forgetGateWeights, float* inputGateWeights, int inputCount, int cellCount, int cellsPerBlock ) { int memoryBlockId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (memoryBlockId < cellCount / cellsPerBlock) { outputGateDeltas[memoryBlockId] = 0; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { outputGateDeltas[memoryBlockId] += cellStateActivations[cellId] * deltas[cellId]; } outputGateDeltas[memoryBlockId] *= outputGateActivationDerivatives[memoryBlockId]; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { int relativeCellId = cellId - (memoryBlockId * cellsPerBlock); int peepHoleWeightId = (memoryBlockId * (inputCount + cellCount + cellsPerBlock + 1)) + inputCount + cellCount + relativeCellId; cellStateErrors[cellId] = deltas[cellId] * outputGateActivations[memoryBlockId] * cellStateActivationDerivatives[cellId] + nextCellStateErrors[cellId] * nextForgetGateActivations[memoryBlockId] + nextInputGateDeltas[memoryBlockId] * inputGateWeights[peepHoleWeightId] + nextForgetGateDeltas[memoryBlockId] * forgetGateWeights[peepHoleWeightId] + outputGateDeltas[memoryBlockId] * outputGateWeights[peepHoleWeightId]; cellInputDeltas[cellId] = inputGateActivations[memoryBlockId] * cellInputActivationDerivatives[cellId] * cellStateErrors[cellId]; } inputGateDeltas[memoryBlockId] = 0; forgetGateDeltas[memoryBlockId] = 0; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { inputGateDeltas[memoryBlockId] += cellStateErrors[cellId] * cellInputActivations[cellId]; forgetGateDeltas[memoryBlockId] += cellStateErrors[cellId] * previousCellStates[cellId]; } inputGateDeltas[memoryBlockId] *= inputGateActivationDerivatives[memoryBlockId]; forgetGateDeltas[memoryBlockId] *= forgetGateActivationDerivatives[memoryBlockId]; } } __global__ void LSTMGateGradientKernelBPTT( float *input, float *previousOutput, float *cellStates, float *inputGateDeltas, float *forgetGateDeltas, float *outputGateDeltas, float* outputGateWeightGradient, float* inputGateWeightGradient, float* forgetGateWeightGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerGate = inputCount + previousOutputCount + 
cellsPerBlock + 1; if (weightId < weightsPerGate * previousOutputCount / cellsPerBlock) { int fromId = weightId % weightsPerGate; int toId = weightId / weightsPerGate; //calculate output gate weight gradient int isFromInputUnit = fromId >= 0 && fromId < inputCount; int isFromPreviousOutputUnit = (fromId >= inputCount) && (fromId < inputCount + previousOutputCount); int isPeephole = (fromId >= inputCount + previousOutputCount) && (fromId < inputCount + previousOutputCount + cellsPerBlock); int isFromBiasUnit = fromId == (inputCount + previousOutputCount + cellsPerBlock); float inputFromWeight = isFromInputUnit * input[isFromInputUnit * fromId] + isFromPreviousOutputUnit * previousOutput[isFromPreviousOutputUnit * (fromId - inputCount)] + isPeephole * cellStates[isPeephole * (toId * cellsPerBlock + (fromId - inputCount - previousOutputCount))] + isFromBiasUnit * 1; outputGateWeightGradient[weightId] = outputGateDeltas[toId] * inputFromWeight; inputGateWeightGradient[weightId] = inputGateDeltas[toId] * inputFromWeight; forgetGateWeightGradient[weightId] = forgetGateDeltas[toId] * inputFromWeight; } } __global__ void LSTMCellInputGradientKernelBPTT( float *input, float *previousOutput, float *cellInputDeltas, float *cellInputWeightGradient, int inputCount, int previousOutputCount, int cellsPerBlock ) { int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerCell = inputCount + previousOutputCount + 1; if (weightId < weightsPerCell * previousOutputCount) { int fromId = weightId % weightsPerCell; int toId = weightId / weightsPerCell; int isFromInputUnit = fromId >= 0 && fromId < inputCount; int isFromPreviousOutputUnit = (fromId >= inputCount) && (fromId < inputCount + previousOutputCount); int isFromBiasUnit = fromId == (inputCount + previousOutputCount); float inputFromWeight = isFromInputUnit * input[isFromInputUnit * fromId] + isFromPreviousOutputUnit * previousOutput[isFromPreviousOutputUnit * (fromId - inputCount)] + isFromBiasUnit * 1; cellInputWeightGradient[weightId] = cellInputDeltas[toId] * inputFromWeight; } } __global__ void LSTMDeltaBackKernelBPTT( ActivationFunctionEnum prevLayerActivationFunction, float *prevWeighedInputPtr, float *prevDeltaPtr, float* cellInputDeltas, float* outputGateDeltas, float* forgetGateDeltas, float* inputGateDeltas, float *cellInputWeights, float *inputGateWeights, float *forgetGateWeights, float *outputGateWeights, int prevLayerNeurons, int cellCount, int cellsPerBlock ) { int neuronId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; int weightsPerCell = prevLayerNeurons + cellCount + 1; int weightsPerGate = prevLayerNeurons + cellCount + cellsPerBlock + 1; if (neuronId < prevLayerNeurons) { int memoryBlockCount = cellCount / cellsPerBlock; float prevNeuronActivationDerivative = EvaluateDerivative(prevLayerActivationFunction, prevWeighedInputPtr[neuronId]); for (int memoryBlockId = 0; memoryBlockId < memoryBlockCount; memoryBlockId++) { int gateWeightId = memoryBlockId * weightsPerGate + neuronId; for (int cellId = 0; cellId < cellsPerBlock; cellId++) { int cellWeightId = (memoryBlockId * cellsPerBlock + cellId) * weightsPerCell + neuronId; prevDeltaPtr[neuronId] += prevNeuronActivationDerivative * cellInputDeltas[memoryBlockId * cellsPerBlock + cellId] * cellInputWeights[cellWeightId]; } prevDeltaPtr[neuronId] += 
prevNeuronActivationDerivative * inputGateDeltas[memoryBlockId] * inputGateWeights[gateWeightId]; prevDeltaPtr[neuronId] += prevNeuronActivationDerivative * forgetGateDeltas[memoryBlockId] * forgetGateWeights[gateWeightId]; prevDeltaPtr[neuronId] += prevNeuronActivationDerivative * outputGateDeltas[memoryBlockId] * outputGateWeights[gateWeightId]; } } } /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /* /* ORIGINAL FROM KAREL */ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ __global__ void LSTMDeltaKernel( float *cellStateErrors, float *outputGateDeltas, float *cellStates, float *outputGateActivations, float *outputGateActivationDerivatives, float *deltas, int cellCount, int cellsPerBlock ) { int memoryBlockId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (memoryBlockId < cellCount / cellsPerBlock) { float outputGateDeltaSum = 0.0; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { float delta = deltas[cellId]; cellStateErrors[cellId] = outputGateActivations[memoryBlockId] * delta; outputGateDeltaSum += cellStates[cellId] * delta; } outputGateDeltas[memoryBlockId] = outputGateActivationDerivatives[memoryBlockId] * outputGateDeltaSum; } } __global__ void LSTMDeltaBackKernel( ActivationFunctionEnum prevLayerActivationFunction, float *prevWeighedInputPtr, float *prevDeltaPtr, float *cellStateErrors, float *previousCellStates, float *inputGateActivations, float *cellInputActivationDerivatives, float *inputGateActivationDerivatives, float *forgetGateActivationDerivatives, float *cellInputWeights, float *inputGateWeights, float *forgetGateWeights, float *outputGateWeights, float *outputGateDeltas, int prevLayerNeurons, int cellCount, int cellsPerBlock ) { int neuronId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (neuronId < prevLayerNeurons) { float delta = 0.0f; for (int memoryBlockId = 0; memoryBlockId < cellCount / cellsPerBlock; memoryBlockId++) { float inputGateError = 0.0f; float forgetGateError = 0.0f; for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { inputGateError += inputGateActivationDerivatives[memoryBlockId] * cellStateErrors[cellId] * inputGateActivations[memoryBlockId]; forgetGateError += 
forgetGateActivationDerivatives[memoryBlockId] * cellStateErrors[cellId] * previousCellStates[cellId]; // cell input error delta += cellInputWeights[cellId * (prevLayerNeurons + cellCount + 1) + neuronId] * inputGateActivations[memoryBlockId] * cellStateErrors[cellId] * cellInputActivationDerivatives[cellId]; } delta += inputGateWeights[memoryBlockId * (prevLayerNeurons + cellCount + cellsPerBlock + 1) + neuronId] * inputGateError; delta += forgetGateWeights[memoryBlockId * (prevLayerNeurons + cellCount + cellsPerBlock + 1) + neuronId] * forgetGateError; delta += outputGateWeights[memoryBlockId * (prevLayerNeurons + cellCount + cellsPerBlock + 1) + neuronId] * outputGateDeltas[memoryBlockId]; } prevDeltaPtr[neuronId] = delta * EvaluateDerivative(prevLayerActivationFunction, prevWeighedInputPtr[neuronId]); } } }
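// Reading aid for LSTMDeltaKernelBPTT above, in standard LSTM-with-peepholes
// BPTT notation (an interpretation of the code, not taken from its authors):
// per memory block, with h = cell-state squashing, g = cell-input squashing,
// sigma = gate activation, and "t+1" corresponding to the next* buffers:
//
//   delta_o(t)     = sigma'(net_o(t)) * sum_c h(s_c(t)) * delta_c(t)
//   eps_s(c,t)     = delta_c(t) * y_o(t) * h'(s_c(t))
//                  + eps_s(c,t+1) * y_f(t+1)
//                  + w_peep_i(c) * delta_i(t+1)
//                  + w_peep_f(c) * delta_f(t+1)
//                  + w_peep_o(c) * delta_o(t)
//   delta_cin(c,t) = y_i(t) * g'(net_c(t)) * eps_s(c,t)
//   delta_i(t)     = sigma'(net_i(t)) * sum_c eps_s(c,t) * g(net_c(t))
//   delta_f(t)     = sigma'(net_f(t)) * sum_c eps_s(c,t) * s_c(t-1)
//
// The gradient kernels then multiply these deltas by whatever feeds each
// weight: an input unit, a previous-output unit, a peephole cell state, or
// the constant 1 for the bias.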
#include <cudf/utilities/traits.hpp> #include <cmath> namespace cudf { namespace binops { namespace compiled { // All binary operations namespace ops { struct Add { template <typename T1, typename T2> CUDA_DEVICE_CALLABLE auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(lhs + rhs) { return lhs + rhs; } }; struct Sub { template <typename T1, typename T2> CUDA_DEVICE_CALLABLE auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(lhs - rhs) { return lhs - rhs; } }; struct Mul { template <typename TypeLhs, typename TypeRhs> static constexpr inline bool is_supported() { return has_common_type_v<TypeLhs, TypeRhs> or // FIXME: without the following line, compilation error // _deps/libcudacxx-src/include/cuda/std/detail/libcxx/include/chrono(917): error: // identifier "cuda::std::__3::ratio<(long)86400000000l, (long)1l> ::num" is undefined in // device code (is_duration<TypeLhs>() and std::is_integral<TypeRhs>()) or (std::is_integral<TypeLhs>() and is_duration<TypeRhs>()) or (is_fixed_point<TypeLhs>() and is_numeric<TypeRhs>()) or (is_numeric<TypeLhs>() and is_fixed_point<TypeRhs>()); } template <typename T1, typename T2, std::enable_if_t<is_supported<T1, T2>()>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(lhs * rhs) { return lhs * rhs; } }; struct Div { template <typename TypeLhs, typename TypeRhs> static constexpr inline bool is_supported() { return has_common_type_v<TypeLhs, TypeRhs> or // FIXME: without this, compilation error on chrono:917 (is_duration<TypeLhs>() and (std::is_integral<TypeRhs>() or is_duration<TypeRhs>())) or (is_fixed_point<TypeLhs>() and is_numeric<TypeRhs>()) or (is_numeric<TypeLhs>() and is_fixed_point<TypeRhs>()); } template <typename T1, typename T2, std::enable_if_t<is_supported<T1, T2>()>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(lhs / rhs) { return lhs / rhs; } }; struct TrueDiv { template <typename T1, typename T2> CUDA_DEVICE_CALLABLE auto operator()(T1 const& lhs, T2 const& rhs) -> decltype((static_cast<double>(lhs) / static_cast<double>(rhs))) { return (static_cast<double>(lhs) / static_cast<double>(rhs)); } }; struct FloorDiv { template <typename T1, typename T2> CUDA_DEVICE_CALLABLE auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(floor(static_cast<double>(lhs) / static_cast<double>(rhs))) { return floor(static_cast<double>(lhs) / static_cast<double>(rhs)); } }; struct Mod { template <typename TypeLhs, typename TypeRhs> static constexpr inline bool is_supported() { return has_common_type_v<TypeLhs, TypeRhs> or // FIXME: without this, compilation error //_deps/libcudacxx-src/include/cuda/std/detail/libcxx/include/chrono(1337): // error : expression must have integral or unscoped enum type (is_duration<TypeLhs>() and (std::is_integral<TypeRhs>() or is_duration<TypeRhs>())); } template <typename T1, typename T2, std::enable_if_t<is_supported<T1, T2>()>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(T1 const& lhs, T2 const& rhs) -> decltype(lhs % rhs) { return lhs % rhs; } template <typename T1, typename T2, std::enable_if_t<(std::is_same_v<float, std::common_type_t<T1, T2>>)>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(T1 const& lhs, T2 const& rhs) -> float { return fmodf(static_cast<float>(lhs), static_cast<float>(rhs)); } template <typename T1, typename T2, std::enable_if_t<(std::is_same_v<double, std::common_type_t<T1, T2>>)>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(T1 const& lhs, T2 const& rhs) -> double { return 
fmod(static_cast<double>(lhs), static_cast<double>(rhs)); } }; struct PMod { // Ideally, these two specializations - one for integral types and one for non integral // types shouldn't be required, as std::fmod should promote integral types automatically // to double and call the std::fmod overload for doubles. Sadly, doing this in jitified // code does not work - it is having trouble deciding between float/double overloads template <typename TypeLhs, typename TypeRhs, std::enable_if_t<(std::is_integral_v<std::common_type_t<TypeLhs, TypeRhs>>)>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) { using common_t = std::common_type_t<TypeLhs, TypeRhs>; common_t xconv = static_cast<common_t>(x); common_t yconv = static_cast<common_t>(y); auto rem = xconv % yconv; if constexpr (std::is_signed_v<decltype(rem)>) if (rem < 0) rem = (rem + yconv) % yconv; return rem; } template < typename TypeLhs, typename TypeRhs, std::enable_if_t<(std::is_floating_point_v<std::common_type_t<TypeLhs, TypeRhs>>)>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) { using common_t = std::common_type_t<TypeLhs, TypeRhs>; common_t xconv = static_cast<common_t>(x); common_t yconv = static_cast<common_t>(y); auto rem = std::fmod(xconv, yconv); if (rem < 0) rem = std::fmod(rem + yconv, yconv); return rem; } }; struct PyMod { template <typename TypeLhs, typename TypeRhs, std::enable_if_t<(std::is_integral_v<std::common_type_t<TypeLhs, TypeRhs>>)>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(((x % y) + y) % y) { return ((x % y) + y) % y; } template < typename TypeLhs, typename TypeRhs, std::enable_if_t<(std::is_floating_point_v<std::common_type_t<TypeLhs, TypeRhs>>)>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> double { double x1 = static_cast<double>(x); double y1 = static_cast<double>(y); return fmod(fmod(x1, y1) + y1, y1); } template <typename TypeLhs, typename TypeRhs, std::enable_if_t<(is_duration<TypeLhs>())>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(((x % y) + y) % y) { return ((x % y) + y) % y; } }; struct Pow { template <typename TypeLhs, typename TypeRhs, std::enable_if_t<(std::is_convertible_v<TypeLhs, double> and std::is_convertible_v<TypeRhs, double>)>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> double { return pow(static_cast<double>(x), static_cast<double>(y)); } }; struct LogBase { template <typename TypeLhs, typename TypeRhs, std::enable_if_t<(std::is_convertible_v<TypeLhs, double> and std::is_convertible_v<TypeRhs, double>)>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> double { return (std::log(static_cast<double>(x)) / std::log(static_cast<double>(y))); } }; struct ATan2 { template <typename TypeLhs, typename TypeRhs, std::enable_if_t<(std::is_convertible_v<TypeLhs, double> and std::is_convertible_v<TypeRhs, double>)>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> double { return std::atan2(static_cast<double>(x), static_cast<double>(y)); } }; struct ShiftLeft { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x << y) { return (x << y); } }; struct ShiftRight { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x >> y) { return (x >> y); } }; struct ShiftRightUnsigned { template < typename TypeLhs, typename TypeRhs, 
std::enable_if_t<(std::is_integral_v<TypeLhs> and not is_boolean<TypeLhs>())>* = nullptr> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(static_cast<std::make_unsigned_t<TypeLhs>>(x) >> y) { return (static_cast<std::make_unsigned_t<TypeLhs>>(x) >> y); } }; struct BitwiseAnd { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x & y) { return (x & y); } }; struct BitwiseOr { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x | y) { return (x | y); } }; struct BitwiseXor { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x ^ y) { return (x ^ y); } }; struct LogicalAnd { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x && y) { return (x && y); } }; struct LogicalOr { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x || y) { return (x || y); } }; struct Equal { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x == y) { return (x == y); } }; struct NotEqual { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x != y) { return (x != y); } }; struct Less { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x < y) { return (x < y); } }; struct Greater { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x > y) { return (x > y); } }; struct LessEqual { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x <= y) { return (x <= y); } }; struct GreaterEqual { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x >= y) { return (x >= y); } }; struct NullEquals { template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()( TypeLhs x, TypeRhs y, bool lhs_valid, bool rhs_valid, bool& output_valid) -> decltype(x == y) { output_valid = true; if (!lhs_valid && !rhs_valid) return true; if (lhs_valid && rhs_valid) return x == y; return false; } // To allow std::is_invocable_v = true template <typename TypeLhs, typename TypeRhs> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(x == y); }; struct NullMax { template <typename TypeLhs, typename TypeRhs, typename common_t = std::common_type_t<TypeLhs, TypeRhs>> CUDA_DEVICE_CALLABLE auto operator()( TypeLhs x, TypeRhs y, bool lhs_valid, bool rhs_valid, bool& output_valid) -> decltype(static_cast<common_t>(static_cast<common_t>(x) > static_cast<common_t>(y) ? x : y)) { output_valid = true; auto const x_conv = static_cast<common_t>(x); auto const y_conv = static_cast<common_t>(y); if (!lhs_valid && !rhs_valid) { output_valid = false; return common_t{}; } else if (lhs_valid && rhs_valid) { return (x_conv > y_conv) ? x_conv : y_conv; } else if (lhs_valid) return x_conv; else return y_conv; } // To allow std::is_invocable_v = true template <typename TypeLhs, typename TypeRhs, typename common_t = std::common_type_t<TypeLhs, TypeRhs>> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(static_cast<common_t>(static_cast<common_t>(x) > static_cast<common_t>(y) ? 
x : y)); }; struct NullMin { template <typename TypeLhs, typename TypeRhs, typename common_t = std::common_type_t<TypeLhs, TypeRhs>> CUDA_DEVICE_CALLABLE auto operator()( TypeLhs x, TypeRhs y, bool lhs_valid, bool rhs_valid, bool& output_valid) -> decltype(static_cast<common_t>(static_cast<common_t>(x) < static_cast<common_t>(y) ? x : y)) { output_valid = true; auto const x_conv = static_cast<common_t>(x); auto const y_conv = static_cast<common_t>(y); if (!lhs_valid && !rhs_valid) { output_valid = false; return common_t{}; } else if (lhs_valid && rhs_valid) { return (x_conv < y_conv) ? x_conv : y_conv; } else if (lhs_valid) return x_conv; else return y_conv; } // To allow std::is_invocable_v = true template <typename TypeLhs, typename TypeRhs, typename common_t = std::common_type_t<TypeLhs, TypeRhs>> CUDA_DEVICE_CALLABLE auto operator()(TypeLhs x, TypeRhs y) -> decltype(static_cast<common_t>(static_cast<common_t>(x) < static_cast<common_t>(y) ? x : y)); }; } // namespace ops } // namespace compiled } // namespace binops } // namespace cudf
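// The practical difference between Mod and PMod/PyMod above is only the sign
// convention for negative operands: C++ '%' truncates toward zero, while the
// P*Mod operators return the non-negative representative. A standalone
// host-side check of the integral PMod branch (plain ints, outside the cudf
// type-dispatch machinery; pmod_int is a hypothetical name):
#include <cstdio>

static int pmod_int(int x, int y) {
    int rem = x % y;                    // -7 % 3 == -1 under C++ truncation
    if (rem < 0) rem = (rem + y) % y;   // shift into [0, y)
    return rem;
}

int main() {
    printf("-7 %% 3      = %d\n", -7 % 3);          // prints -1 (Mod)
    printf("pmod(-7, 3) = %d\n", pmod_int(-7, 3));  // prints  2 (PMod/PyMod)
    return 0;
}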
#include <math.h> // required for fabs() #include <float.h> // required for DBL_EPSILON // Externally Defined Routines // extern "C" __host__ __device__ double xChebyshev_Tn_Series(double x, const double a[], int degree); // Internally Defined Routines // __host__ __device__ double Fresnel_Auxiliary_Cosine_Integral( double x ); __host__ __device__ double xFresnel_Auxiliary_Cosine_Integral( double x ); __host__ __device__ static double Chebyshev_Expansion_0_1(double x); __host__ __device__ static double Chebyshev_Expansion_1_3(double x); __host__ __device__ static double Chebyshev_Expansion_3_5(double x); __host__ __device__ static double Chebyshev_Expansion_5_7(double x); __host__ __device__ static double Asymptotic_Series( double x ); // Internally Defined Constants // static double const sqrt_2pi = 2.506628274631000502415765284811045253006; //////////////////////////////////////////////////////////////////////////////// // double xFresnel_Auxiliary_Cosine_Integral( double x ) // // // // Description: // // The Fresnel auxiliary cosine integral, f(x), is the integral from 0 to // // infinity of the integrand // // sqrt(2/pi) exp(-2xt) cos(t^2) dt // // where x >= 0. // // // // Arguments: // // double x The argument of the Fresnel auxiliary cosine integral // // f() where x >= 0. // // // // Return Value: // // The value of the Fresnel auxiliary cosine integral f evaluated at // // x >= 0. // // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = xFresnel_Auxiliary_Cosine_Integral( x ); // //////////////////////////////////////////////////////////////////////////////// __host__ __device__ double xFresnel_Auxiliary_Cosine_Integral( double x ) { if (x == 0.0) return 0.5; if (x <= 1.0) return Chebyshev_Expansion_0_1(x); if (x <= 3.0) return Chebyshev_Expansion_1_3(x); if (x <= 5.0) return Chebyshev_Expansion_3_5(x); if (x <= 7.0) return Chebyshev_Expansion_5_7(x); return Asymptotic_Series( x ); } //////////////////////////////////////////////////////////////////////////////// // static double Chebyshev_Expansion_0_1( double x ) // // // // Description: // // Evaluate the Fresnel auxiliary cosine integral, f(x), on the interval // // 0 < x <= 1 using the Chebyshev interpolation formula. // // // // Arguments: // // double x The argument of the Fresnel auxiliary cosine integral // // where 0 < x <= 1. // // // // Return Value: // // The value of the Fresnel auxiliary cosine integral f evaluated at // // x where 0 < x <= 1. 
// // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = Chebyshev_Expansion_0_1(x); // //////////////////////////////////////////////////////////////////////////////// __host__ __device__ static double Chebyshev_Expansion_0_1( double x ) { static double const c[] = { +4.200987560240514577713e-1, -9.358785913634965235904e-2, -7.642539415723373644927e-3, +4.958117751796130135544e-3, -9.750236036106120253456e-4, +1.075201474958704192865e-4, -4.415344769301324238886e-6, -7.861633919783064216022e-7, +1.919240966215861471754e-7, -2.175775608982741065385e-8, +1.296559541430849437217e-9, +2.207205095025162212169e-11, -1.479219615873704298874e-11, +1.821350127295808288614e-12, -1.228919312990171362342e-13, +2.227139250593818235212e-15, +5.734729405928016301596e-16, -8.284965573075354177016e-17, +6.067422701530157308321e-18, -1.994908519477689596319e-19, -1.173365630675305693390e-20 }; static const int degree = sizeof(c) / sizeof(double) - 1; static const double midpoint = 0.5; static const double scale = 0.5; return xChebyshev_Tn_Series( (x - midpoint) / scale, c, degree ); } //////////////////////////////////////////////////////////////////////////////// // static double Chebyshev_Expansion_1_3( double x ) // // // // Description: // // Evaluate the Fresnel auxiliary cosine integral, f(x), on the interval // // 1 < x <= 3 using the Chebyshev interpolation formula. // // // // Arguments: // // double x The argument of the Fresnel auxiliary cosine integral // // where 1 < x <= 3. // // // // Return Value: // // The value of the Fresnel auxiliary cosine integral f evaluated at // // x where 1 < x <= 3. // // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = Chebyshev_Expansion_1_3(x); // //////////////////////////////////////////////////////////////////////////////// __host__ __device__ static double Chebyshev_Expansion_1_3( double x ) { static double const c[] = { +2.098677278318224971989e-1, -9.314234883154103266195e-2, +1.739905936938124979297e-2, -2.454274824644285136137e-3, +1.589872606981337312438e-4, +4.203943842506079780413e-5, -2.018022256093216535093e-5, +5.125709636776428285284e-6, -9.601813551752718650057e-7, +1.373989484857155846826e-7, -1.348105546577211255591e-8, +2.745868700337953872632e-10, +2.401655517097260106976e-10, -6.678059547527685587692e-11, +1.140562171732840809159e-11, -1.401526517205212219089e-12, +1.105498827380224475667e-13, +2.040731455126809208066e-16, -1.946040679213045143184e-15, +4.151821375667161733612e-16, -5.642257647205149369594e-17, +5.266176626521504829010e-18, -2.299025577897146333791e-19, -2.952226367506641078731e-20, +8.760405943193778149078e-21 }; static const int degree = sizeof(c) / sizeof(double) - 1; static const double midpoint = 2.0; return xChebyshev_Tn_Series( (x - midpoint), c, degree ); } //////////////////////////////////////////////////////////////////////////////// // static double Chebyshev_Expansion_3_5( double x ) // // // // Description: // // Evaluate the Fresnel auxiliary cosine integral, g(x), on the interval // // 3 < x <= 5 using the Chebyshev interpolation formula. // // // // Arguments: // // double x The argument of the Fresnel auxiliary cosine integral // // where 3 < x <= 5. // // // // Return Value: // // The value of the Fresnel auxiliary cosine integral f evaluated at // // x where 3 < x <= 5. 
// // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = Chebyshev_Expansion_3_5(x); // //////////////////////////////////////////////////////////////////////////////// __host__ __device__ static double Chebyshev_Expansion_3_5( double x ) { static double const c[] = { +1.025703371090289562388e-1, -2.569833023232301400495e-2, +3.160592981728234288078e-3, -3.776110718882714758799e-4, +4.325593433537248833341e-5, -4.668447489229591855730e-6, +4.619254757356785108280e-7, -3.970436510433553795244e-8, +2.535664754977344448598e-9, -2.108170964644819803367e-11, -2.959172018518707683013e-11, +6.727219944906606516055e-12, -1.062829587519902899001e-12, +1.402071724705287701110e-13, -1.619154679722651005075e-14, +1.651319588396970446858e-15, -1.461704569438083772889e-16, +1.053521559559583268504e-17, -4.760946403462515858756e-19, -1.803784084922403924313e-20, +7.873130866418738207547e-21 }; static const int degree = sizeof(c) / sizeof(double) - 1; static const double midpoint = 4.0; return xChebyshev_Tn_Series( (x - midpoint), c, degree ); } //////////////////////////////////////////////////////////////////////////////// // static double Chebyshev_Expansion_5_7( double x ) // // // // Description: // // Evaluate the Fresnel auxiliary cosine integral, g(x), on the interval // // 5 < x <= 7 using the Chebyshev interpolation formula. // // // // Arguments: // // double x The argument of the Fresnel auxiliary cosine integral // // where 5 < x <= 7. // // // // Return Value: // // The value of the Fresnel auxiliary cosine integral f evaluated at // // x where 5 < x <= 7. // // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = Chebyshev_Expansion_5_7(x); // //////////////////////////////////////////////////////////////////////////////// __host__ __device__ static double Chebyshev_Expansion_5_7( double x ) { static double const c[] = { +6.738667333400589274018e-2, -1.128146832637904868638e-2, +9.408843234170404670278e-4, -7.800074103496165011747e-5, +6.409101169623350885527e-6, -5.201350558247239981834e-7, +4.151668914650221476906e-8, -3.242202015335530552721e-9, +2.460339340900396789789e-10, -1.796823324763304661865e-11, +1.244108496436438952425e-12, -7.950417122987063540635e-14, +4.419142625999150971878e-15, -1.759082736751040110146e-16, -1.307443936270786700760e-18, +1.362484141039320395814e-18, -2.055236564763877250559e-19, +2.329142055084791308691e-20, -2.282438671525884861970e-21 }; static const int degree = sizeof(c) / sizeof(double) - 1; static const double midpoint = 6.0; return xChebyshev_Tn_Series( (x - midpoint), c, degree ); } //////////////////////////////////////////////////////////////////////////////// // static double Asymptotic_Series( double x ) // // // // Description: // // For a large argument x, the auxiliary Fresnel cosine integral, f(x), // // can be expressed as the asymptotic series // // f(x) ~ 1/(x*sqrt(2pi))[1 - 3/4x^4 + 105/16x^8 + ... + // // (4j-1)!!/(-4x^4)^j + ... ] // // // // Arguments: // // double x The argument of the Fresnel auxiliary cosine integral // // where x > 7. // // // // Return Value: // // The value of the Fresnel auxiliary cosine integral f evaluated at // // x where x > 7. 
// // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = Asymptotic_Series( x ); // //////////////////////////////////////////////////////////////////////////////// #define NUM_ASYMPTOTIC_TERMS 35 __host__ __device__ static double Asymptotic_Series( double x ) { double x2 = x * x; double x4 = -4.0 * x2 * x2; double xn = 1.0; double factorial = 1.0; double f = 0.0; double term[NUM_ASYMPTOTIC_TERMS + 1]; double epsilon = DBL_EPSILON / 4.0; int j = 3; int i = 0; term[0] = 1.0; term[NUM_ASYMPTOTIC_TERMS] = 0.0; for (i = 1; i < NUM_ASYMPTOTIC_TERMS; i++) { factorial *= ( (double)j * (double)(j - 2)); xn *= x4; term[i] = factorial / xn; j += 4; if (fabs(term[i]) >= fabs(term[i-1])) { i--; break; } if (fabs(term[i]) <= epsilon) break; } for (; i >= 0; i--) f += term[i]; return f / (x * sqrt_2pi); }
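/* Worked expansion (added sketch, not part of the original library): writing
 * out the first terms of the asymptotic series documented above,
 *
 *     f(x) ~ 1/(x*sqrt(2*pi)) * [ 1 - 3/(4*x^4) + 105/(16*x^8) - ... ],
 *
 * where the j-th term is (4j-1)!!/(-4*x^4)^j. The host-only helper below
 * evaluates the 3-term truncation directly; it can serve as a sanity check
 * against Asymptotic_Series() for large x (e.g. x = 10). */
static double Fresnel_Aux_Cosine_Asymptotic_3Term( double x )
{
    static const double sqrt_2pi_local =
        2.506628274631000502415765284811045253006;
    const double x4 = x * x * x * x;
    /* j = 0, 1, 2: 1, 3!!/(-4*x^4), 7!!/(-4*x^4)^2 = 105/(16*x^8) */
    const double series = 1.0 - 3.0 / (4.0 * x4) + 105.0 / (16.0 * x4 * x4);
    return series / (x * sqrt_2pi_local);
}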
#include <algorithm>  // std::min (added; MegBA project headers for JetVector/JV3/JM33/Wrapper are omitted in this extract)
#include <limits>     // std::numeric_limits
namespace MegBA { namespace geo { namespace { template <typename T> __global__ void AngleAxisToRotationKernel( const int nItem, const int N, const T *valueDevicePtr0, const T *valueDevicePtr1, const T *valueDevicePtr2, const T *gradDevicePtr0, const T *gradDevicePtr1, const T *gradDevicePtr2, T *R0, T *R1, T *R2, T *R3, T *R4, T *R5, T *R6, T *R7, T *R8, T *gradDevicePtrR0, T *gradDevicePtrR1, T *gradDevicePtrR2, T *gradDevicePtrR3, T *gradDevicePtrR4, T *gradDevicePtrR5, T *gradDevicePtrR6, T *gradDevicePtrR7, T *gradDevicePtrR8) { unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= nItem) return; const T angle_axis_x = valueDevicePtr0[idx]; const T angle_axis_y = valueDevicePtr1[idx]; const T angle_axis_z = valueDevicePtr2[idx]; const T theta2 = angle_axis_x * angle_axis_x + angle_axis_y * angle_axis_y + angle_axis_z * angle_axis_z; if (theta2 > std::numeric_limits<T>::epsilon()) { const T theta = Wrapper::sqrtG<T>::call(theta2); // sqrt double const T wx = angle_axis_x / theta; const T wy = angle_axis_y / theta; const T wz = angle_axis_z / theta; T sintheta, costheta; Wrapper::sincosG<T>::call(theta, &sintheta, &costheta); const T one_minor_costheta = T(1.0) - costheta; const T wx_mul_one_minor_costheta = wx * one_minor_costheta; const T wy_mul_one_minor_costheta = wy * one_minor_costheta; const T wz_mul_one_minor_costheta = wz * one_minor_costheta; const T wx_mul_wy_mul_one_minor_costheta = wy * wx_mul_one_minor_costheta; const T wx_mul_wz_mul_one_minor_costheta = wz * wx_mul_one_minor_costheta; const T wy_mul_wz_mul_one_minor_costheta = wz * wy_mul_one_minor_costheta; const T wx_mul_sintheta = wx * sintheta; const T wy_mul_sintheta = wy * sintheta; const T wz_mul_sintheta = wz * sintheta; // clang-format on const T reciprocal_theta = 1 / theta; const T tmp1 = sintheta * reciprocal_theta; const T tmpwx = tmp1 * (wx * wx - T(1.0)); const T tmpwy = tmp1 * (wy * wy - T(1.0)); const T tmpwz = tmp1 * (wz * wz - T(1.0)); for (int i = 0; i < N; ++i) { unsigned int index = idx + i * nItem; const T dv_angle_axis_x = gradDevicePtr0[index]; const T dv_angle_axis_y = gradDevicePtr1[index]; const T dv_angle_axis_z = gradDevicePtr2[index]; const T dv_tmp1 = (angle_axis_x * dv_angle_axis_x + angle_axis_y * dv_angle_axis_y + angle_axis_z * dv_angle_axis_z); const T dv_theta = reciprocal_theta * dv_tmp1; const T dv_wx = reciprocal_theta * (dv_angle_axis_x - angle_axis_x * reciprocal_theta * dv_theta); const T dv_wy = reciprocal_theta * (dv_angle_axis_y - angle_axis_y * reciprocal_theta * dv_theta); const T dv_wz = reciprocal_theta * (dv_angle_axis_z - angle_axis_z * reciprocal_theta * dv_theta); gradDevicePtrR0[index] = tmpwx * dv_tmp1 + 2 * wx_mul_one_minor_costheta * dv_wx; gradDevicePtrR4[index] = tmpwy * dv_tmp1 + 2 * wy_mul_one_minor_costheta * dv_wy; gradDevicePtrR8[index] = tmpwz * dv_tmp1 + 2 * wz_mul_one_minor_costheta * dv_wz; gradDevicePtrR1[index] = (wz * costheta + wx * wy_mul_sintheta) * dv_theta + sintheta * dv_wz + wy_mul_one_minor_costheta * dv_wx + wx_mul_one_minor_costheta * dv_wy; gradDevicePtrR5[index] = (wx * costheta + wy * wz_mul_sintheta) * dv_theta + sintheta * dv_wx + wz_mul_one_minor_costheta * dv_wy + wy_mul_one_minor_costheta * dv_wz; gradDevicePtrR6[index] = (wy * costheta + wx * wz_mul_sintheta) * dv_theta + sintheta * dv_wy + wz_mul_one_minor_costheta * dv_wx + wx_mul_one_minor_costheta * dv_wz; gradDevicePtrR2[index] = (-wy * costheta + wx * wz_mul_sintheta) * dv_theta - sintheta * dv_wy + wz_mul_one_minor_costheta * dv_wx + wx_mul_one_minor_costheta * 
dv_wz; gradDevicePtrR3[index] = (-wz * costheta + wx * wy_mul_sintheta) * dv_theta - sintheta * dv_wz + wy_mul_one_minor_costheta * dv_wx + wx_mul_one_minor_costheta * dv_wy; gradDevicePtrR7[index] = (-wx * costheta + wy * wz_mul_sintheta) * dv_theta - sintheta * dv_wx + wz_mul_one_minor_costheta * dv_wy + wy_mul_one_minor_costheta * dv_wz; } R0[idx] = costheta + wx * wx_mul_one_minor_costheta; R1[idx] = wz_mul_sintheta + wx_mul_wy_mul_one_minor_costheta; R2[idx] = -wy_mul_sintheta + wx_mul_wz_mul_one_minor_costheta; R3[idx] = -wz_mul_sintheta + wx_mul_wy_mul_one_minor_costheta; R4[idx] = costheta + wy * wy_mul_one_minor_costheta; R5[idx] = wx_mul_sintheta + wy_mul_wz_mul_one_minor_costheta; R6[idx] = wy_mul_sintheta + wx_mul_wz_mul_one_minor_costheta; R7[idx] = -wx_mul_sintheta + wy_mul_wz_mul_one_minor_costheta; R8[idx] = costheta + wz * wz_mul_one_minor_costheta; } else { // Near zero, we switch to using the first order Taylor expansion. for (int i = 0; i < N; ++i) { unsigned int index = idx + i * nItem; const T dv_angle_axis_x = gradDevicePtr0[index]; const T dv_angle_axis_y = gradDevicePtr1[index]; const T dv_angle_axis_z = gradDevicePtr2[index]; gradDevicePtrR0[index] = 0; gradDevicePtrR1[index] = dv_angle_axis_z; gradDevicePtrR2[index] = -dv_angle_axis_y; gradDevicePtrR3[index] = -dv_angle_axis_z; gradDevicePtrR4[index] = 0; gradDevicePtrR5[index] = dv_angle_axis_x; gradDevicePtrR6[index] = dv_angle_axis_y; gradDevicePtrR7[index] = -dv_angle_axis_x; gradDevicePtrR8[index] = 0; } R0[idx] = T(1.0); R1[idx] = angle_axis_z; R2[idx] = -angle_axis_y; R3[idx] = -angle_axis_z; R4[idx] = T(1.0); R5[idx] = angle_axis_x; R6[idx] = angle_axis_y; R7[idx] = -angle_axis_x; R8[idx] = T(1.0); } } template <typename T> __global__ void AngleAxisToRotationKernelFastGradKernel( const int nItem, const int N, const T *valueDevicePtr0, const T *valueDevicePtr1, const T *valueDevicePtr2, const int grad_position0, const int grad_position1, const int grad_position2, T *R0, T *R1, T *R2, T *R3, T *R4, T *R5, T *R6, T *R7, T *R8, T *gradDevicePtrR0, T *gradDevicePtrR1, T *gradDevicePtrR2, T *gradDevicePtrR3, T *gradDevicePtrR4, T *gradDevicePtrR5, T *gradDevicePtrR6, T *gradDevicePtrR7, T *gradDevicePtrR8) { unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= nItem) return; const T angle_axis_x = valueDevicePtr0[idx]; const T angle_axis_y = valueDevicePtr1[idx]; const T angle_axis_z = valueDevicePtr2[idx]; const T theta2 = angle_axis_x * angle_axis_x + angle_axis_y * angle_axis_y + angle_axis_z * angle_axis_z; if (theta2 > std::numeric_limits<T>::epsilon()) { const T theta = Wrapper::sqrtG<T>::call(theta2); // sqrt double const T wx = angle_axis_x / theta; const T wy = angle_axis_y / theta; const T wz = angle_axis_z / theta; T sintheta, costheta; Wrapper::sincosG<T>::call(theta, &sintheta, &costheta); const T one_minor_costheta = T(1.0) - costheta; const T wx_mul_one_minor_costheta = wx * one_minor_costheta; const T wy_mul_one_minor_costheta = wy * one_minor_costheta; const T wz_mul_one_minor_costheta = wz * one_minor_costheta; const T wx_mul_wy_mul_one_minor_costheta = wy * wx_mul_one_minor_costheta; const T wx_mul_wz_mul_one_minor_costheta = wz * wx_mul_one_minor_costheta; const T wy_mul_wz_mul_one_minor_costheta = wz * wy_mul_one_minor_costheta; const T wx_mul_sintheta = wx * sintheta; const T wy_mul_sintheta = wy * sintheta; const T wz_mul_sintheta = wz * sintheta; const T reciprocal_theta = 1 / theta; const T tmp1 = sintheta * reciprocal_theta; const T tmpwx = tmp1 * (wx * wx - T(1.0)); 
const T tmpwy = tmp1 * (wy * wy - T(1.0)); const T tmpwz = tmp1 * (wz * wz - T(1.0)); for (int i = 0; i < N; ++i) { unsigned int index = idx + i * nItem; const T dv_angle_axis_x = i == grad_position0 ? 1 : 0; const T dv_angle_axis_y = i == grad_position1 ? 1 : 0; const T dv_angle_axis_z = i == grad_position2 ? 1 : 0; const T dv_tmp1 = (angle_axis_x * dv_angle_axis_x + angle_axis_y * dv_angle_axis_y + angle_axis_z * dv_angle_axis_z); const T dv_theta = reciprocal_theta * dv_tmp1; const T dv_wx = reciprocal_theta * (dv_angle_axis_x - angle_axis_x * reciprocal_theta * dv_theta); const T dv_wy = reciprocal_theta * (dv_angle_axis_y - angle_axis_y * reciprocal_theta * dv_theta); const T dv_wz = reciprocal_theta * (dv_angle_axis_z - angle_axis_z * reciprocal_theta * dv_theta); gradDevicePtrR0[index] = tmpwx * dv_tmp1 + 2 * wx_mul_one_minor_costheta * dv_wx; gradDevicePtrR4[index] = tmpwy * dv_tmp1 + 2 * wy_mul_one_minor_costheta * dv_wy; gradDevicePtrR8[index] = tmpwz * dv_tmp1 + 2 * wz_mul_one_minor_costheta * dv_wz; gradDevicePtrR1[index] = (wz * costheta + wx * wy_mul_sintheta) * dv_theta + sintheta * dv_wz + wy_mul_one_minor_costheta * dv_wx + wx_mul_one_minor_costheta * dv_wy; gradDevicePtrR5[index] = (wx * costheta + wy * wz_mul_sintheta) * dv_theta + sintheta * dv_wx + wz_mul_one_minor_costheta * dv_wy + wy_mul_one_minor_costheta * dv_wz; gradDevicePtrR6[index] = (wy * costheta + wx * wz_mul_sintheta) * dv_theta + sintheta * dv_wy + wz_mul_one_minor_costheta * dv_wx + wx_mul_one_minor_costheta * dv_wz; gradDevicePtrR2[index] = (-wy * costheta + wx * wz_mul_sintheta) * dv_theta - sintheta * dv_wy + wz_mul_one_minor_costheta * dv_wx + wx_mul_one_minor_costheta * dv_wz; gradDevicePtrR3[index] = (-wz * costheta + wx * wy_mul_sintheta) * dv_theta - sintheta * dv_wz + wy_mul_one_minor_costheta * dv_wx + wx_mul_one_minor_costheta * dv_wy; gradDevicePtrR7[index] = (-wx * costheta + wy * wz_mul_sintheta) * dv_theta - sintheta * dv_wx + wz_mul_one_minor_costheta * dv_wy + wy_mul_one_minor_costheta * dv_wz; } R0[idx] = costheta + wx * wx_mul_one_minor_costheta; R1[idx] = wz_mul_sintheta + wx_mul_wy_mul_one_minor_costheta; R2[idx] = -wy_mul_sintheta + wx_mul_wz_mul_one_minor_costheta; R3[idx] = -wz_mul_sintheta + wx_mul_wy_mul_one_minor_costheta; R4[idx] = costheta + wy * wy_mul_one_minor_costheta; R5[idx] = wx_mul_sintheta + wy_mul_wz_mul_one_minor_costheta; R6[idx] = wy_mul_sintheta + wx_mul_wz_mul_one_minor_costheta; R7[idx] = -wx_mul_sintheta + wy_mul_wz_mul_one_minor_costheta; R8[idx] = costheta + wz * wz_mul_one_minor_costheta; } else { // Near zero, we switch to using the first order Taylor expansion. for (int i = 0; i < N; ++i) { unsigned int index = idx + i * nItem; const T dv_angle_axis_x = i == grad_position0 ? 1 : 0; const T dv_angle_axis_y = i == grad_position1 ? 1 : 0; const T dv_angle_axis_z = i == grad_position2 ? 
1 : 0; gradDevicePtrR0[index] = 0; gradDevicePtrR1[index] = dv_angle_axis_z; gradDevicePtrR2[index] = -dv_angle_axis_y; gradDevicePtrR3[index] = -dv_angle_axis_z; gradDevicePtrR4[index] = 0; gradDevicePtrR5[index] = dv_angle_axis_x; gradDevicePtrR6[index] = dv_angle_axis_y; gradDevicePtrR7[index] = -dv_angle_axis_x; gradDevicePtrR8[index] = 0; } R0[idx] = T(1.0); R1[idx] = angle_axis_z; R2[idx] = -angle_axis_y; R3[idx] = -angle_axis_z; R4[idx] = T(1.0); R5[idx] = angle_axis_x; R6[idx] = angle_axis_y; R7[idx] = -angle_axis_x; R8[idx] = T(1.0); } } } template <typename T> JM33<T> AngleAxisToRotationKernelMatrix(const JV3<T> &AxisAngle) { JM33<T> R{}; const MegBA::JetVector<T> &JV_Template = AxisAngle(0, 0); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { R(i, j).initAs(JV_Template); } } bool use_fast_grad{true}; for (int i = 0; i < 3; ++i) use_fast_grad &= AxisAngle(i).getGradPosition() != -1; const auto N = JV_Template.getGradShape(); for (int i = 0; i < MemoryPool::getWorldSize(); ++i) { cudaSetDevice(i); const auto nItem = JV_Template.getItemNum(i); // 512 instead of 1024 for the limitation of registers dim3 block_dim(std::min(decltype(nItem)(512), nItem)); dim3 grid_dim((nItem - 1) / block_dim.x + 1); ASSERT_CUDA_NO_ERROR(); if (use_fast_grad) AngleAxisToRotationKernelFastGradKernel<T><<<grid_dim, block_dim>>>( nItem, N, AxisAngle(0).getCUDAResPtr()[i], AxisAngle(1).getCUDAResPtr()[i], AxisAngle(2).getCUDAResPtr()[i], AxisAngle(0).getGradPosition(), AxisAngle(1).getGradPosition(), AxisAngle(2).getGradPosition(), R(0, 0).getCUDAResPtr()[i], R(1, 0).getCUDAResPtr()[i], R(2, 0).getCUDAResPtr()[i], R(0, 1).getCUDAResPtr()[i], R(1, 1).getCUDAResPtr()[i], R(2, 1).getCUDAResPtr()[i], R(0, 2).getCUDAResPtr()[i], R(1, 2).getCUDAResPtr()[i], R(2, 2).getCUDAResPtr()[i], R(0, 0).getCUDAGradPtr()[i], R(1, 0).getCUDAGradPtr()[i], R(2, 0).getCUDAGradPtr()[i], R(0, 1).getCUDAGradPtr()[i], R(1, 1).getCUDAGradPtr()[i], R(2, 1).getCUDAGradPtr()[i], R(0, 2).getCUDAGradPtr()[i], R(1, 2).getCUDAGradPtr()[i], R(2, 2).getCUDAGradPtr()[i]); else AngleAxisToRotationKernel<T><<<grid_dim, block_dim>>>( nItem, N, AxisAngle(0, 0).getCUDAResPtr()[i], AxisAngle(1, 0).getCUDAResPtr()[i], AxisAngle(2, 0).getCUDAResPtr()[i], AxisAngle(0, 0).getCUDAGradPtr()[i], AxisAngle(1, 0).getCUDAGradPtr()[i], AxisAngle(2, 0).getCUDAGradPtr()[i], R(0, 0).getCUDAResPtr()[i], R(1, 0).getCUDAResPtr()[i], R(2, 0).getCUDAResPtr()[i], R(0, 1).getCUDAResPtr()[i], R(1, 1).getCUDAResPtr()[i], R(2, 1).getCUDAResPtr()[i], R(0, 2).getCUDAResPtr()[i], R(1, 2).getCUDAResPtr()[i], R(2, 2).getCUDAResPtr()[i], R(0, 0).getCUDAGradPtr()[i], R(1, 0).getCUDAGradPtr()[i], R(2, 0).getCUDAGradPtr()[i], R(0, 1).getCUDAGradPtr()[i], R(1, 1).getCUDAGradPtr()[i], R(2, 1).getCUDAGradPtr()[i], R(0, 2).getCUDAGradPtr()[i], R(1, 2).getCUDAGradPtr()[i], R(2, 2).getCUDAGradPtr()[i]); ASSERT_CUDA_NO_ERROR(); } return R; } template <typename T> JM33<T> AngleAxisToRotationKernelMatrix( const Eigen::Map<const JVD<T>> &AxisAngle) { JM33<T> R{}; const MegBA::JetVector<T> &JV_Template = AxisAngle(0, 0); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { R(i, j).initAs(JV_Template); } } bool use_fast_grad{true}; for (int i = 0; i < 3; ++i) use_fast_grad &= AxisAngle(i).getGradPosition() != -1; const auto N = JV_Template.getGradShape(); for (int i = 0; i < MemoryPool::getWorldSize(); ++i) { cudaSetDevice(i); const auto nItem = JV_Template.getItemNum(i); // 512 instead of 1024 for the limitation of registers dim3 block_dim(std::min(decltype(nItem)(512), 
nItem)); dim3 grid_dim((nItem - 1) / block_dim.x + 1); ASSERT_CUDA_NO_ERROR(); if (use_fast_grad) AngleAxisToRotationKernelFastGradKernel<T><<<grid_dim, block_dim>>>( nItem, N, AxisAngle(0).getCUDAResPtr()[i], AxisAngle(1).getCUDAResPtr()[i], AxisAngle(2).getCUDAResPtr()[i], AxisAngle(0).getGradPosition(), AxisAngle(1).getGradPosition(), AxisAngle(2).getGradPosition(), R(0, 0).getCUDAResPtr()[i], R(1, 0).getCUDAResPtr()[i], R(2, 0).getCUDAResPtr()[i], R(0, 1).getCUDAResPtr()[i], R(1, 1).getCUDAResPtr()[i], R(2, 1).getCUDAResPtr()[i], R(0, 2).getCUDAResPtr()[i], R(1, 2).getCUDAResPtr()[i], R(2, 2).getCUDAResPtr()[i], R(0, 0).getCUDAGradPtr()[i], R(1, 0).getCUDAGradPtr()[i], R(2, 0).getCUDAGradPtr()[i], R(0, 1).getCUDAGradPtr()[i], R(1, 1).getCUDAGradPtr()[i], R(2, 1).getCUDAGradPtr()[i], R(0, 2).getCUDAGradPtr()[i], R(1, 2).getCUDAGradPtr()[i], R(2, 2).getCUDAGradPtr()[i]); else AngleAxisToRotationKernel<T><<<grid_dim, block_dim>>>( nItem, N, AxisAngle(0, 0).getCUDAResPtr()[i], AxisAngle(1, 0).getCUDAResPtr()[i], AxisAngle(2, 0).getCUDAResPtr()[i], AxisAngle(0, 0).getCUDAGradPtr()[i], AxisAngle(1, 0).getCUDAGradPtr()[i], AxisAngle(2, 0).getCUDAGradPtr()[i], R(0, 0).getCUDAResPtr()[i], R(1, 0).getCUDAResPtr()[i], R(2, 0).getCUDAResPtr()[i], R(0, 1).getCUDAResPtr()[i], R(1, 1).getCUDAResPtr()[i], R(2, 1).getCUDAResPtr()[i], R(0, 2).getCUDAResPtr()[i], R(1, 2).getCUDAResPtr()[i], R(2, 2).getCUDAResPtr()[i], R(0, 0).getCUDAGradPtr()[i], R(1, 0).getCUDAGradPtr()[i], R(2, 0).getCUDAGradPtr()[i], R(0, 1).getCUDAGradPtr()[i], R(1, 1).getCUDAGradPtr()[i], R(2, 1).getCUDAGradPtr()[i], R(0, 2).getCUDAGradPtr()[i], R(1, 2).getCUDAGradPtr()[i], R(2, 2).getCUDAGradPtr()[i]); ASSERT_CUDA_NO_ERROR(); } return R; } template JM33<float> AngleAxisToRotationKernelMatrix( const JV3<float> &AxisAngle); template JM33<double> AngleAxisToRotationKernelMatrix( const JV3<double> &AxisAngle); template JM33<float> AngleAxisToRotationKernelMatrix( const Eigen::Map<const JVD<float>> &AxisAngle); template JM33<double> AngleAxisToRotationKernelMatrix( const Eigen::Map<const JVD<double>> &AxisAngle); } }
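// Host-side reference (added sketch; the helper name is hypothetical, not part
// of MegBA): Rodrigues' formula R = cos(t)*I + (1 - cos(t))*w*w^T + sin(t)*[w]_x
// for unit axis w = a/|a|, laid out with the same R0..R8 ordering the kernels
// above write (R0, R1, R2 feed R(0,0), R(1,0), R(2,0), i.e. column blocks).
// A unit test can compare this against the value part of the kernel output.
#include <cmath>

inline void AngleAxisToRotationRef(const double a[3], double R[9]) {
  const double theta2 = a[0] * a[0] + a[1] * a[1] + a[2] * a[2];
  if (theta2 <= 1e-15) {
    // Near zero: first-order Taylor expansion, matching the kernels' else-branch.
    R[0] = 1.0;   R[1] = a[2];  R[2] = -a[1];
    R[3] = -a[2]; R[4] = 1.0;   R[5] = a[0];
    R[6] = a[1];  R[7] = -a[0]; R[8] = 1.0;
    return;
  }
  const double theta = std::sqrt(theta2);
  const double wx = a[0] / theta, wy = a[1] / theta, wz = a[2] / theta;
  const double c = std::cos(theta), s = std::sin(theta), m = 1.0 - c;
  R[0] = c + wx * wx * m;        // R(0,0)
  R[1] = wz * s + wx * wy * m;   // R(1,0)
  R[2] = -wy * s + wx * wz * m;  // R(2,0)
  R[3] = -wz * s + wx * wy * m;  // R(0,1)
  R[4] = c + wy * wy * m;        // R(1,1)
  R[5] = wx * s + wy * wz * m;   // R(2,1)
  R[6] = wy * s + wx * wz * m;   // R(0,2)
  R[7] = -wx * s + wy * wz * m;  // R(1,2)
  R[8] = c + wz * wz * m;        // R(2,2)
}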
#include "caffe2/operators/group_norm_op.h" #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/utils/math/reduce.cuh" namespace caffe2 { namespace { template <typename T> __global__ void ComputeFusedParamsCUDAKernel( const int N, const int G, const int K, const T* mu, const T* rsig, const T* gamma, const T* beta, T* scale, T* bias); template <> __global__ void ComputeFusedParamsCUDAKernel<float>( const int N, const int G, const int K, const float* mu, const float* rsig, const float* gamma, const float* beta, float* scale, float* bias) { const int C = G * K; const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x; if (index < N * C) { const int ng = index / K; const int c = index % C; #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) const float scale_val = __ldg(gamma + c) * __ldg(rsig + ng); scale[index] = scale_val; bias[index] = fmaf(-scale_val, __ldg(mu + ng), __ldg(beta + c)); #else const float scale_val = gamma[c] * rsig[ng]; scale[index] = scale_val; bias[index] = fmaf(-scale_val, mu[ng], beta[c]); #endif } } template <typename T, StorageOrder kOrder> __global__ void GroupNormForwardCUDAKernel( const int N, const int C, const int HxW, const T* X, const T* scale, const T* bias, T* Y); template <> __global__ void GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>( const int N, const int C, const int HxW, const float* X, const float* scale, const float* bias, float* Y) { const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x; if (index < N * C * HxW) { const int nc = index / HxW; #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) Y[index] = fmaf(__ldg(X + index), __ldg(scale + nc), __ldg(bias + nc)); #else Y[index] = fmaf(X[index], scale[nc], bias[nc]); #endif } } template <> __global__ void GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>( const int N, const int C, const int HxW, const float* X, const float* scale, const float* bias, float* Y) { const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x; if (index < N * C * HxW) { const int nc = index / (HxW * C) * C + index % C; #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) Y[index] = fmaf(__ldg(X + index), __ldg(scale + nc), __ldg(bias + nc)); #else Y[index] = fmaf(X[index], scale[nc], bias[nc]); #endif } } template <typename T> __global__ void ComputeInternalGradientsNCHWCUDAKernel( const int HxW, const T* dY, const T* X, T* ds, T* db) { __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; const int nc = blockIdx.x; T ds_sum = 0; T db_sum = 0; for (int i = threadIdx.x; i < HxW; i += blockDim.x) { const int index = nc * HxW + i; #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) ds_sum += __ldg(dY + index) * __ldg(X + index); db_sum += __ldg(dY + index); #else ds_sum += dY[index] * X[index]; db_sum += dY[index]; #endif } ds_sum = BlockReduce<T>(ds_storage).Sum(ds_sum); db_sum = BlockReduce<T>(db_storage).Sum(db_sum); if (threadIdx.x == 0) { ds[nc] = ds_sum; db[nc] = db_sum; } } // Math: // Y = gamma * (X - mu) * rsig + beta // let s = gamma * rsig // let b = beta - gamma * mu * rsig // Y = s * X + b // let n = K * HxW // dL/dX = dL/dY * dY/dX = dL/dY * (d(s * X)/dX + db/dX) // d(s * X)/dX = s + X * ds/dX = s + gamma * X * drsig/dX // db/dX = -gamma * u * drsig/dX - gamma * rsig * dmu/dX // drsig/dX = -rsig^3 * (X - mu) / n // dmu/dX = 1 / n template <typename T> __global__ void ComputeYGradientScaleCUDAKernel( const int N, const int G, const int K, const T* rsig, const T* gamma, T* dY_scale) { const int C 
= G * K; const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x; if (index < N * C) { const int ng = index / K; const int c = index % C; #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) dY_scale[index] = __ldg(gamma + c) * __ldg(rsig + ng); #else dY_scale[index] = gamma[c] * rsig[ng]; #endif } } template <typename T> __global__ void ComputeXScaleAndBiasCUDAKernel( const int G, const int K, const T alpha, const T* ds, const T* db, const T* mu, const T* rsig, const T* gamma, T* X_scale, T* bias); template <> __global__ void ComputeXScaleAndBiasCUDAKernel<float>( const int G, const int K, const float alpha, const float* ds, const float* db, const float* mu, const float* rsig, const float* gamma, float* X_scale, float* bias) { __shared__ typename BlockReduce<float>::TempStorage ds_storage; __shared__ typename BlockReduce<float>::TempStorage db_storage; const int n = blockIdx.x; const int g = blockIdx.y; const int ng = n * G + g; float ds_sum = 0; float db_sum = 0; for (int i = threadIdx.x; i < K; i += blockDim.x) { const int index = ng * K + i; const int c = g * K + i; #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) ds_sum += __ldg(ds + index) * __ldg(gamma + c); db_sum += __ldg(db + index) * __ldg(gamma + c); #else ds_sum += ds[index] * gamma[c]; db_sum += db[index] * gamma[c]; #endif } ds_sum = BlockReduce<float>(ds_storage).Sum(ds_sum); db_sum = BlockReduce<float>(db_storage).Sum(db_sum); if (threadIdx.x == 0) { #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) const float x = fmaf(db_sum, __ldg(mu + ng), -ds_sum) * math::utils::Cube<float>(__ldg(rsig + ng)) * alpha; X_scale[ng] = x; bias[ng] = -fmaf(x, __ldg(mu + ng), db_sum * __ldg(rsig + ng) * alpha); #else const float x = fmaf(db_sum, mu[ng], -ds_sum) * math::utils::Cube<float>(rsig[ng]) * alpha; X_scale[ng] = x; bias[ng] = -fmaf(x, mu[ng], db_sum * rsig[ng] * alpha); #endif } } template <typename T, StorageOrder kOrder> __global__ void GroupNormBackwardCUDAKernel( const int N, const int G, const int K, const int HxW, const T* dY_scale, const T* dY, const T* X_scale, const T* X, const T* bias, T* dX); template <> __global__ void GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW>( const int N, const int G, const int K, const int HxW, const float* dY_scale, const float* dY, const float* X_scale, const float* X, const float* bias, float* dX) { const int C = G * K; const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x; if (index < N * C * HxW) { const int nc = index / HxW; const int ng = nc / K; #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) dX[index] = fmaf( __ldg(dY_scale + nc), __ldg(dY + index), fmaf(__ldg(X_scale + ng), __ldg(X + index), __ldg(bias + ng))); #else dX[index] = fmaf(dY_scale[nc], dY[index], fmaf(X_scale[ng], X[index], bias[ng])); #endif } } template <> __global__ void GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC>( const int N, const int G, const int K, const int HxW, const float* dY_scale, const float* dY, const float* X_scale, const float* X, const float* bias, float* dX) { const int C = G * K; const int index = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x; if (index < N * C * HxW) { const int nc = index / (HxW * C) * C + index % C; const int ng = nc / K; #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) dX[index] = fmaf( __ldg(dY_scale + nc), __ldg(dY + index), fmaf(__ldg(X_scale + ng), __ldg(X + index), __ldg(bias + ng))); #else dX[index] = fmaf(dY_scale[nc], dY[index], fmaf(X_scale[ng], X[index], bias[ng])); #endif } } template <typename T> __global__ void GammaBetaBackwardCUDAKernel( 
const int N, const int G, const int K, const T* ds, const T* db, const T* mu, const T* rsig, T* dgamma, T* dbeta); template <> __global__ void GammaBetaBackwardCUDAKernel<float>( const int N, const int G, const int K, const float* ds, const float* db, const float* mu, const float* rsig, float* dgamma, float* dbeta) { __shared__ typename BlockReduce<float>::TempStorage dg_storage; __shared__ typename BlockReduce<float>::TempStorage db_storage; const int C = G * K; const int g = blockIdx.x; const int k = blockIdx.y; const int c = g * K + k; float dg_sum = 0; float db_sum = 0; for (int i = threadIdx.x; i < N; i += blockDim.x) { const int nc = i * C + c; const int ng = i * G + g; #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) dg_sum += fmaf(-__ldg(db + nc), __ldg(mu + ng), __ldg(ds + nc)) * __ldg(rsig + ng); db_sum += __ldg(db + nc); #else dg_sum += fmaf(-db[nc], mu[ng], ds[nc]) * rsig[ng]; db_sum += db[nc]; #endif } dg_sum = BlockReduce<float>(dg_storage).Sum(dg_sum); db_sum = BlockReduce<float>(db_storage).Sum(db_sum); if (threadIdx.x == 0) { dgamma[c] = dg_sum; dbeta[c] = db_sum; } } } // namespace template <> void GroupNormOp<float, CUDAContext>::ComputeFusedParams( const int N, const int G, const int K, const float* mu, const float* rsig, const float* gamma, const float* beta, float* scale, float* bias) { const int M = math::DivUp(N * G * K, CAFFE_CUDA_NUM_THREADS); ComputeFusedParamsCUDAKernel<float> <<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, K, mu, rsig, gamma, beta, scale, bias); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <> void GroupNormOp<float, CUDAContext>::GroupNormForwardNCHW( const int N, const int C, const int HxW, const float* X, const float* scale, const float* bias, float* Y) { const int M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS); GroupNormForwardCUDAKernel<float, StorageOrder::NCHW> <<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, HxW, X, scale, bias, Y); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <> void GroupNormOp<float, CUDAContext>::GroupNormForwardNHWC( const int N, const int C, const int HxW, const float* X, const float* scale, const float* bias, float* Y) { const int M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS); GroupNormForwardCUDAKernel<float, StorageOrder::NHWC> <<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, HxW, X, scale, bias, Y); C10_CUDA_KERNEL_LAUNCH_CHECK(); } // Math: // let: s = gamma * rsig // let: b = beta - mu * gamma * rsig // then: Y = s * X + b template <> bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW( const int N, const int G, const int K, const int HxW, const float* dY_data, const float* X_data, const float* mu_data, const float* rsig_data, const float* gamma_data, float* dX_data, float* dgamma_data, float* dbeta_data) { const int C = G * K; ReinitializeTensor(&ds_, {N, C}, at::dtype<float>().device(CUDA)); ReinitializeTensor(&db_, {N, C}, at::dtype<float>().device(CUDA)); ReinitializeTensor(&dY_scale_, {N, C}, at::dtype<float>().device(CUDA)); ReinitializeTensor(&X_scale_, {N, G}, at::dtype<float>().device(CUDA)); ReinitializeTensor(&bias_, {N, G}, at::dtype<float>().device(CUDA)); float* ds_data = ds_.mutable_data<float>(); float* db_data = db_.mutable_data<float>(); float* dY_scale_data = dY_scale_.mutable_data<float>(); float* X_scale_data = X_scale_.mutable_data<float>(); float* bias_data = bias_.mutable_data<float>(); ComputeInternalGradientsNCHWCUDAKernel<float> <<<N * C, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( HxW, dY_data, 
X_data, ds_data, db_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); // Computes dL/dX. int M = math::DivUp(N * C, CAFFE_CUDA_NUM_THREADS); ComputeYGradientScaleCUDAKernel<float> <<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, K, rsig_data, gamma_data, dY_scale_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); ComputeXScaleAndBiasCUDAKernel<float> <<<dim3(N, G), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( G, K, 1.0f / static_cast<float>(K * HxW), ds_data, db_data, mu_data, rsig_data, gamma_data, X_scale_data, bias_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS); GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW> <<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, K, HxW, dY_scale_data, dY_data, X_scale_data, X_data, bias_data, dX_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); // Computes dL/dgamma and dL/dbeta. GammaBetaBackwardCUDAKernel< float><<<dim3(G, K), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, K, ds_data, db_data, mu_data, rsig_data, dgamma_data, dbeta_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } template <> bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC( const int N, const int G, const int K, const int HxW, const float* dY_data, const float* X_data, const float* mu_data, const float* rsig_data, const float* gamma_data, float* dX_data, float* dgamma_data, float* dbeta_data) { const int C = G * K; ReinitializeTensor(&ds_, {N, C}, at::dtype<float>().device(CUDA)); ReinitializeTensor(&db_, {N, C}, at::dtype<float>().device(CUDA)); ReinitializeTensor(&dY_scale_, {N, C}, at::dtype<float>().device(CUDA)); ReinitializeTensor(&X_scale_, {N, G}, at::dtype<float>().device(CUDA)); ReinitializeTensor(&bias_, {N, G}, at::dtype<float>().device(CUDA)); ReinitializeTensor(&ones_, {HxW}, at::dtype<float>().device(CUDA)); float* ds_data = ds_.mutable_data<float>(); float* db_data = db_.mutable_data<float>(); float* dY_scale_data = dY_scale_.mutable_data<float>(); float* X_scale_data = X_scale_.mutable_data<float>(); float* bias_data = bias_.mutable_data<float>(); float* ones_data = ones_.mutable_data<float>(); math::Set<float, CUDAContext>(HxW, 1.0f, ones_data, &context_); math::Mul<float, CUDAContext>( N * C * HxW, dY_data, X_data, dX_data, &context_); math::GemmStridedBatched<float, CUDAContext>( CblasTrans, CblasNoTrans, N, C, 1, HxW, 1.0f, dX_data, C * HxW, ones_data, 0, 0.0f, ds_data, C, &context_); math::GemmStridedBatched<float, CUDAContext>( CblasTrans, CblasNoTrans, N, C, 1, HxW, 1.0f, dY_data, C * HxW, ones_data, 0, 0.0f, db_data, C, &context_); // Computes dL/dX. int M = math::DivUp(N * C, CAFFE_CUDA_NUM_THREADS); ComputeYGradientScaleCUDAKernel<float> <<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, K, rsig_data, gamma_data, dY_scale_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); ComputeXScaleAndBiasCUDAKernel<float> <<<dim3(N, G), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( G, K, 1.0f / static_cast<float>(K * HxW), ds_data, db_data, mu_data, rsig_data, gamma_data, X_scale_data, bias_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); M = math::DivUp(N * C * HxW, CAFFE_CUDA_NUM_THREADS); GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC> <<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, K, HxW, dY_scale_data, dY_data, X_scale_data, X_data, bias_data, dX_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); // Computes dL/dgamma and dL/dbeta. 
GammaBetaBackwardCUDAKernel< float><<<dim3(G, K), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, K, ds_data, db_data, mu_data, rsig_data, dgamma_data, dbeta_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } REGISTER_CUDA_OPERATOR(GroupNorm, GroupNormOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( GroupNormGradient, GroupNormGradientOp<float, CUDAContext>); } // namespace caffe2
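// Plain C++ reference (added sketch; the function name is hypothetical, not
// part of Caffe2): the fused-parameter folding the kernels above implement.
// From Y = gamma * (X - mu) * rsig + beta, precompute per-(n, c)
//   scale = gamma[c] * rsig[n*G + g]  and  bias = beta[c] - scale * mu[n*G + g],
// so the forward pass reduces to Y = scale * X + bias.
#include <cstddef>

void ComputeFusedParamsRef(std::size_t N, std::size_t G, std::size_t K,
                           const float* mu, const float* rsig,
                           const float* gamma, const float* beta,
                           float* scale, float* bias) {
  const std::size_t C = G * K;
  for (std::size_t index = 0; index < N * C; ++index) {
    const std::size_t ng = index / K;  // flattened (n, g), as in the CUDA kernel
    const std::size_t c = index % C;   // channel index
    scale[index] = gamma[c] * rsig[ng];
    bias[index] = beta[c] - scale[index] * mu[ng];
  }
}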
#include "cupoch/geometry/kdtree_flann.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/registration/feature.h" #include "cupoch/utility/eigenvalue.h" #include "cupoch/utility/console.h" namespace cupoch { namespace registration { __constant__ float PST_RAD_45 = 0.78539816339744830961566084581988; __constant__ float PST_RAD_90 = 1.5707963267948966192313216916398; __constant__ float PST_RAD_135 = 2.3561944901923449288469825374596; __constant__ float PST_RAD_PI_7_8 = 2.7488935718910690836548129603691; namespace { struct compute_shot_functor { compute_shot_functor(const Eigen::Vector3f *points, const Eigen::Vector3f *normals, const int *indices, const float *distance2, float radius, int knn) : points_(points), normals_(normals), indices_(indices), distance2_(distance2), radius_(radius), radius1_2_(radius * 0.5), radius3_4_(radius * 3.0 / 4.0), radius1_4_(radius * 0.25), knn_(knn) {}; const Eigen::Vector3f *points_; const Eigen::Vector3f *normals_; const int *indices_; const float *distance2_; const float radius_; const float radius1_2_; const float radius3_4_; const float radius1_4_; const int knn_; const int n_bins_ = 10; const int min_neighbors_ = 5; const int max_angular_sectors_ = 32; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f, int> compute_shot_lrf(size_t idx, const Eigen::Vector3f& point_i) const { Eigen::Matrix3f cov = Eigen::Matrix3f::Zero(); float w_total = 0.0; int n_nb = 0; for (size_t k = 0; k < knn_; k++) { int idx_knn = indices_[idx * knn_ + k]; if (idx_knn < 0 || idx == idx_knn) continue; const Eigen::Vector3f q = points_[idx_knn] - point_i; const float dist2 = distance2_[idx * knn_ + k]; const float w = radius_ - sqrt(dist2); cov += w * q * q.transpose(); w_total += w; n_nb += 1; } cov /= w_total; auto evecs = utility::FastEigen3x3MinMaxVec(cov); Eigen::Vector3f zaxis = thrust::get<0>(evecs); Eigen::Vector3f xaxis = thrust::get<1>(evecs); int n_px = 0; int n_pz = 0; for (size_t k = 0; k < knn_; k++) { int idx_knn = indices_[idx * knn_ + k]; if (idx_knn < 0 || idx == idx_knn) continue; const Eigen::Vector3f q = points_[idx_knn] - point_i; n_px += int(q.dot(xaxis) >= 0); n_pz += int(q.dot(zaxis) >= 0); } if (n_px < n_nb - n_px) { xaxis *= -1.0; } if (n_pz < n_nb - n_pz) { zaxis *= -1.0; } Eigen::Vector3f yaxis = zaxis.cross(xaxis); return thrust::make_tuple(xaxis, yaxis, zaxis, n_nb); } __device__ Feature<352>::FeatureType operator()(size_t idx) const { Feature<352>::FeatureType ft = Feature<352>::FeatureType::Zero(); const Eigen::Vector3f point_i = points_[idx]; const Eigen::Vector3f normal_i = normals_[idx]; auto lrf = compute_shot_lrf(idx, point_i); const int n_nb = thrust::get<3>(lrf); if (n_nb < min_neighbors_) { return ft; } for (size_t k = 0; k < knn_; k++) { int idx_knn = indices_[idx * knn_ + k]; if (idx_knn < 0 || idx == idx_knn) continue; const Eigen::Vector3f q = points_[idx_knn] - point_i; const float dist = sqrt(distance2_[idx * knn_ + k]); if (dist == 0) continue; const float cos_desc = min(max(thrust::get<2>(lrf).dot(normal_i), -1.0), 1.0); float bindist = ((1.0 + cos_desc) * n_bins_) / 2.0; float x_lrf = q.dot(thrust::get<0>(lrf)); float y_lrf = q.dot(thrust::get<1>(lrf)); float z_lrf = q.dot(thrust::get<2>(lrf)); if (abs(x_lrf) < 1.0e-30) { x_lrf = 0.0; } if (abs(y_lrf) < 1.0e-30) { y_lrf = 0.0; } if (abs(z_lrf) < 1.0e-30) { z_lrf = 0.0; } unsigned char bit4 = (y_lrf > 0 || (y_lrf == 0.0 && x_lrf < 0)) ? 1 : 0; unsigned char bit3 = (unsigned char) ((x_lrf > 0 || (x_lrf == 0.0 && y_lrf > 0)) ? 
!bit4 : bit4); int desc_index = (bit4 << 3) + (bit3 << 2); desc_index = desc_index << 1; if (x_lrf * y_lrf > 0 || x_lrf == 0.0) { desc_index += (abs(x_lrf) >= abs(y_lrf)) ? 0 : 4; } else { desc_index += (abs(x_lrf) > abs(y_lrf)) ? 4 : 0; } desc_index += z_lrf > 0 ? 1 : 0; // 2 RADII desc_index += (dist > radius1_2_) ? 2 : 0; int step_index = (bindist < 0.0) ? (int)(ceilf(bindist - 0.5)) : (int)(floorf(bindist + 0.5)); int volume_index = desc_index * (n_bins_ + 1); //Interpolation on the cosine (adjacent bins in the histogram) bindist -= step_index; float init_weight = 1 - abs(bindist); if (bindist > 0) { ft[volume_index + ((step_index + 1) % n_bins_)] += bindist; } else { ft[volume_index + ((step_index - 1 + n_bins_) % n_bins_)] += -bindist; } //Interpolation on the distance (adjacent husks) if (dist > radius1_2_) { float radius_dist = (dist - radius3_4_) / radius1_2_; if (dist > radius3_4_){ init_weight += 1 - radius_dist; } else { init_weight += 1 + radius_dist; ft[(desc_index - 2) * (n_bins_ + 1) + step_index] -= radius_dist; } } else { float radius_dist = (dist - radius1_4_) / radius1_2_; if (dist < radius1_4_) { init_weight += 1 + radius_dist; } else { init_weight += 1 - radius_dist; ft[(desc_index + 2) * (n_bins_ + 1) + step_index] += radius_dist; } } //Interpolation on the inclination (adjacent vertical volumes) float inclination_cos = min(max(z_lrf / dist, -1.0), 1.0); float inclination = acos(inclination_cos); if (inclination > PST_RAD_90 || (abs(inclination - PST_RAD_90) < 1e-30 && z_lrf <= 0)) { float inclination_dist = (inclination - PST_RAD_135) / PST_RAD_90; if (inclination > PST_RAD_135) { init_weight += 1 - inclination_dist; } else { init_weight += 1 + inclination_dist; ft[(desc_index + 1) * (n_bins_ + 1) + step_index] -= inclination_dist; } } else { float inclination_dist = (inclination - PST_RAD_45) / PST_RAD_90; if (inclination < PST_RAD_45) { init_weight += 1 + inclination_dist; } else { init_weight += 1 - inclination_dist; ft[(desc_index - 1) * (n_bins_ + 1) + step_index] += inclination_dist; } } if (y_lrf != 0.0 || x_lrf != 0.0) { //Interpolation on the azimuth (adjacent horizontal volumes) float azimuth = atan2(y_lrf, x_lrf); int sel = desc_index >> 2; float angular_sector_span = PST_RAD_45; float angular_sector_start= - PST_RAD_PI_7_8; float azimuth_dist = (azimuth - (angular_sector_start + angular_sector_span * sel)) / angular_sector_span; azimuth_dist = max(-0.5, min(azimuth_dist, 0.5)); if (azimuth_dist > 0) { init_weight += 1 - azimuth_dist; int interp_index = (desc_index + 4) % max_angular_sectors_; ft[interp_index * (n_bins_ + 1) + step_index] += azimuth_dist; } else { int interp_index = (desc_index - 4 + max_angular_sectors_) % max_angular_sectors_; init_weight += 1 + azimuth_dist; ft[interp_index * (n_bins_ + 1) + step_index] -= azimuth_dist; } } ft[volume_index + step_index] += init_weight; } const float ftnorm = ft.norm(); if (ftnorm > 0) { ft /= ftnorm; } return ft; } }; } std::shared_ptr<Feature<352>> ComputeSHOTFeature( const geometry::PointCloud &input, float radius, const geometry::KDTreeSearchParam &search_param) { auto feature = std::make_shared<Feature<352>>(); feature->Resize((int)input.points_.size()); geometry::KDTreeFlann kdtree(input); utility::device_vector<int> indices; utility::device_vector<float> distance2; int knn; switch (search_param.GetSearchType()) { case geometry::KDTreeSearchParam::SearchType::Knn: knn = ((const geometry::KDTreeSearchParamKNN &)search_param).knn_; break; case geometry::KDTreeSearchParam::SearchType::Radius: knn = 
((const geometry::KDTreeSearchParamRadius &)search_param) .max_nn_; break; default: utility::LogError("Unsupport search param type."); return feature; } kdtree.Search(input.points_, search_param, indices, distance2); compute_shot_functor func(thrust::raw_pointer_cast(input.points_.data()), thrust::raw_pointer_cast(input.normals_.data()), thrust::raw_pointer_cast(indices.data()), thrust::raw_pointer_cast(distance2.data()), radius, knn); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(input.points_.size()), feature->data_.begin(), func); return feature; } } }
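// Self-contained illustration (added sketch; simplified to one histogram
// dimension, not cupoch API): the cosine soft-binning used inside
// compute_shot_functor. The descriptor cosine is mapped to a fractional bin
// position in [0, n_bins], rounded to the nearest bin, and the remainder is
// shared with the adjacent bin so each sample votes with total weight 1.
#include <cmath>

void ShotCosineSoftBin(float cos_desc, int n_bins, float* hist /* n_bins + 1 */) {
  float bindist = (1.0f + cos_desc) * n_bins / 2.0f;
  int step_index = (bindist < 0.0f) ? (int)std::ceil(bindist - 0.5f)
                                    : (int)std::floor(bindist + 0.5f);
  bindist -= step_index;  // signed offset from the chosen bin center
  const float init_weight = 1.0f - std::fabs(bindist);
  if (bindist > 0.0f) {
    hist[(step_index + 1) % n_bins] += bindist;
  } else {
    hist[(step_index - 1 + n_bins) % n_bins] += -bindist;
  }
  hist[step_index] += init_weight;  // in the full SHOT code, init_weight also
                                    // accumulates radial/inclination/azimuth terms
}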
/*
* This sample demonstrates how use texture fetches in CUDA * * This sample takes an input PGM image (image_filename) and generates * an output PGM image (image_filename_out). This CUDA kernel performs * a simple 2D transform (rotation) on the texture coordinates (u,v). */ #include <shrUtils.h> // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples #include <shrQATest.h> // includes, kernels #include <simpleTexture_kernel.cu> char *image_filename = "lena_bw.pgm"; char *ref_filename = "ref_rotated.pgm"; float angle = 0.5f; // angle to rotate image by (in radians) #define MIN_EPSILON_ERROR 5e-3f //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors( cudaError err, const char *file, const int line ) { if( cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError( const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // General GPU Device CUDA Initialization int gpuDeviceInit(int devID) { int deviceCount; checkCudaErrors(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n"); exit(-1); } if (devID < 0) devID = 0; if (devID > deviceCount-1) { fprintf(stderr, "\n"); fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount); fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. 
<<\n", devID); fprintf(stderr, "\n"); return -devID; } cudaDeviceProp deviceProp; checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); if (deviceProp.major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(-1); \ } checkCudaErrors( cudaSetDevice(devID) ); printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name); return devID; } // This function returns the best GPU (with maximum GFLOPS) int gpuGetMaxGflopsDeviceId() { int current_device = 0, sm_per_multiproc = 0; int max_compute_perf = 0, max_perf_device = 0; int device_count = 0, best_SM_arch = 0; cudaDeviceProp deviceProp; cudaGetDeviceCount( &device_count ); // Find the best major SM Architecture GPU device while ( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major > 0 && deviceProp.major < 9999) { best_SM_arch = MAX(best_SM_arch, deviceProp.major); } current_device++; } // Find the best CUDA capable GPU device current_device = 0; while( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major == 9999 && deviceProp.minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor); } int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate; if( compute_perf > max_compute_perf ) { // If we find GPU with SM major > 2, search only these if ( best_SM_arch > 2 ) { // If our device==dest_SM_arch, choose this, or else pass if (deviceProp.major == best_SM_arch) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { max_compute_perf = compute_perf; max_perf_device = current_device; } } ++current_device; } return max_perf_device; } // Initialization code to find the best CUDA Device int findCudaDevice(int argc, const char **argv) { cudaDeviceProp deviceProp; int devID = 0; // If the command-line has a device number specified, use it if (checkCmdLineFlag(argc, argv, "device")) { devID = getCmdLineArgumentInt(argc, argv, "device="); if (devID < 0) { printf("Invalid command line parameters\n"); exit(-1); } else { devID = gpuDeviceInit(devID); if (devID < 0) { printf("exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_FAILED); exit(-1); } } } else { // Otherwise pick the device with highest Gflops/s devID = gpuGetMaxGflopsDeviceId(); checkCudaErrors( cudaSetDevice( devID ) ); checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name); } return devID; } // end of CUDA Helper Functions //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { bool bTestResult = true; shrQAStart(argc, argv); int devID = findCudaDevice(argc, (const char **)argv); // load image from disk float* h_data = NULL; unsigned int width, height; char* image_path = sdkFindFilePath(image_filename, argv[0]); if (image_path == NULL) { printf("Unable to source image file: %s\n", image_filename); shrQAFinishExit(argc, (const char **)argv, QA_FAILED); } sdkLoadPGM(image_path, &h_data, &width, &height); unsigned int size = width * height * sizeof(float); printf("Loaded '%s', %d x %d pixels\n", image_filename, width, height); // load reference image from image (output) float *h_data_ref = (float*) malloc(size); char* ref_path = sdkFindFilePath(ref_filename, argv[0]); if (ref_path == NULL) { printf("Unable to find reference image file: %s\n", ref_filename); shrQAFinishExit(argc, (const char **)argv, QA_FAILED); } sdkLoadPGM(ref_path, &h_data_ref, &width, &height); // allocate device memory for result float* d_data = NULL; checkCudaErrors( cudaMalloc( (void**) &d_data, size) ); // allocate array and copy image data cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaArray* cu_array; checkCudaErrors( cudaMallocArray( &cu_array, &channelDesc, width, height )); checkCudaErrors( cudaMemcpyToArray( cu_array, 0, 0, h_data, size, cudaMemcpyHostToDevice)); // set texture parameters tex.addressMode[0] = cudaAddressModeWrap; tex.addressMode[1] = cudaAddressModeWrap; tex.filterMode = cudaFilterModeLinear; tex.normalized = true; // access with normalized texture coordinates // Bind the array to the texture checkCudaErrors( cudaBindTextureToArray( tex, cu_array, channelDesc)); dim3 dimBlock(8, 8, 1); dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1); // warmup transformKernel<<< dimGrid, dimBlock, 0 >>>( d_data, width, height, angle); checkCudaErrors( cudaDeviceSynchronize() ); StopWatchInterface *timer = NULL; sdkCreateTimer( &timer ); sdkStartTimer( &timer ); // execute the kernel transformKernel<<< dimGrid, dimBlock, 0 >>>( d_data, width, height, angle); // check if kernel execution generated an error getLastCudaError("Kernel execution failed"); checkCudaErrors( cudaDeviceSynchronize() ); sdkStopTimer( &timer ); printf("Processing time: %f (ms)\n", sdkGetTimerValue( &timer )); printf("%.2f Mpixels/sec\n", (width*height / (sdkGetTimerValue( &timer ) / 1000.0f)) / 1e6); sdkDeleteTimer( &timer ); // allocate mem for the result on host side float* h_odata = (float*) malloc( size); // copy result from device to host checkCudaErrors( cudaMemcpy( h_odata, d_data, size, cudaMemcpyDeviceToHost) ); // write result to file char output_filename[1024]; strcpy(output_filename, image_path); strcpy(output_filename + strlen(image_path) - 4, "_out.pgm"); sdkSavePGM( output_filename, h_odata, width, height ); printf("Wrote '%s'\n", output_filename); // write regression file if necessary if( checkCmdLineFlag( argc, (const char**) argv, "regression")) { // write file for regression test sdkWriteFile<float>( "./data/regression.dat", h_odata, width*height, 0.0f, false ); } else { // We need to reload the data from disk, because it is inverted upon output sdkLoadPGM(output_filename, &h_odata, &width, &height); printf("Comparing files\n"); printf("\toutput: <%s>\n", output_filename); printf("\treference: <%s>\n", ref_path); bTestResult = compareData( h_odata, h_data_ref, width*height, 
MIN_EPSILON_ERROR, 0.15f ); } checkCudaErrors(cudaFree(d_data)); checkCudaErrors(cudaFreeArray(cu_array)); free(image_path); free(ref_path); cudaDeviceReset(); shrQAFinishExit(argc, (const char **)argv, (bTestResult ? QA_PASSED : QA_FAILED) ); }
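// Reconstructed sketch of simpleTexture_kernel.cu's transformKernel (that file
// is not part of this extract; the actual SDK kernel may differ in details).
// It uses the legacy texture-reference API to match the host code above
// (deprecated in modern CUDA): rotate the normalized (u, v) coordinates about
// the image center by `theta` radians, then sample through the bound texture.
texture<float, 2, cudaReadModeElementType> tex;

__global__ void transformKernel(float* outputData, int width, int height,
                                float theta) {
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;

    float u = x / (float)width - 0.5f;   // shift so rotation is about the center
    float v = y / (float)height - 0.5f;
    float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
    float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;

    // Normalized, wrapped, linearly-filtered fetch (configured on the host side).
    outputData[y * width + x] = tex2D(tex, tu, tv);
}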
#include <cudaconv2.cuh> /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread. * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise * targets: (numColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * Number of filters must be divisible by 16. * Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. */ template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_color(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[numColors*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int blockCaseIdx = blockIdx.x * 16*imgsPerThread; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const int numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeX * imgSizeY; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + loadY * numImages * numModules + loadX; filters += threadIdx.x; targets += pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[numColors][imgsPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 
0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f]; #pragma unroll for (int c = 0; c < numColors; c++) { shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread) * blockIdx.x.y = 1..numImgColors/colorsPerThread * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. 
* * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 region of pixels from 16*imgsPerThread cases. * * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * numImageColors/numGroups must be divisible by colorsPerThread. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are 4-16 color channels. */ template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,16*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const uint numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX; filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x; targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ?
0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: B_YxB_X. * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image.
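* (Illustrative decode of blockIdx.x, with hypothetical numbers not taken from the original source: numImages = 256, B_X = 32, imgsPerThread = 4 gives numImgBlocks = DIVUP(256, 128) = 2, so blockIdx.x = 5 means blockCaseIdx = (5 % 2) * 128 = 128 and imgColorIdx = (5 / 2) * B_Y*colorsPerThread = 2 * B_Y*colorsPerThread.)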
* * threadIdx.x determines case. * threadIdx.y determines color. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs B_Y*colorsPerThread colors of one pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by 16. * * B_X * imgsPerThread must be divisible by 32. * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are >= 16 color channels. */ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv> __global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16 __shared__ float shHidActs[16][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32; const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16; const int numModules = numModulesY * numModulesX; hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ?
0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * B_X; i += 32) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of B_X*imgsPerThread cols, B_X*B_Y/32 rows of 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of B_X*imgsPerThread cols, B_X*B_Y/32 rows of 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) { if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread) * blockIdx.x.y = 1..numImgColors/colorsPerThread * blockIdx.y determines 4x4 image region in target image, also sample * In essence, blockIdx.y.x = 1..numRegions * blockIdx.y.y = 1..overSample * * threadIdx.x determines case. * threadIdx.y determines pixel.
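* * Numeric example for overSample (defined just below; hypothetical sizes, not from the original source): numImgColors = 16, numFilterColors = 8, numGroups = 4 gives overSample = 8*4/16 = 2, so each image channel is connected to two groups and the kernel writes two partial reconstructions per channel (the leading overSample dimension of targets), which presumably get summed downstream.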
* * overSample := numFilterColors*numGroups/numImgColors * ^ this is the number of groups that each color channel is connected to * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (overSample, numImgColors, imgSizeY, imgSizeX, numImages) * * colorIndices: (numGroups, numFilterColors) * * Each block reconstructs one 4x4 region of pixels from 16*imgsPerThread cases. * * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * numFilterColors must be divisible by colorsPerThread. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are 4-16 color channels. */ template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_mediumcolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numFilterColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; __shared__ int shColors[colorsPerThread]; // not really necessary -- can repurpose the other shmems const int numImgBlocks = DIVUP(numImages,16*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread; const int numRegionsX = DIVUP(imgSizeX, 4); const int numRegions = numRegionsX * numRegionsX; const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int overSample = gridDim.y / numRegions; const int blockSample = blockIdx.y / numRegions; const int groupsPerSample = numGroups / overSample; const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockRegionIdx = blockIdx.y % numRegions; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const uint numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX; filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x; targets += blockSample * numImgColors * imgPixels * numImages + pxIdx * numImages + blockCaseIdx
+ threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; if (tidx < colorsPerThread) { shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages; } for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[shColors[c] + i * 16] = scaleTargets * targets[shColors[c] + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[shColors[c] + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: B_YxB_X. * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image, sample idx. * In essence, blockIdx.y.x = 1..imgPixels * blockIdx.y.y = 1..overSample * * threadIdx.x determines case. * threadIdx.y determines color. * * overSample := numFilterColors*numGroups/numImgColors * ^ this is the number of groups that each color channel is connected to * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (overSample, numImgColors, imgSizeY, imgSizeX, numImages) * * colorIndices: (numGroups, numFilterColors) * * Each block reconstructs B_Y*colorsPerThread colors of one pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by 16. * numFilterColors*numGroups must be divisible by numImgColors. * * B_X * imgsPerThread must be divisible by 32. * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are >= 16 color channels.
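* * Unlike the dense kernels above, the write phase here is indirected through colorIndices: shColors caches colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages, i.e. the element offset of each connected channel's image plane, so the final writes scatter into whichever image channels this group is randomly connected to.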
*/ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_manycolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numFilterColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16 __shared__ float shHidActs[16][B_X*imgsPerThread]; __shared__ int shColors[colorsPerThread * B_Y]; // not really necessary -- can repurpose the other shmems const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32; const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16; const int numModules = numModulesY * numModulesX; const int overSample = gridDim.y / imgPixels; const int blockSample = blockIdx.y / imgPixels; const int groupsPerSample = numGroups / overSample; // const int overSample = (numFilterColors * numGroups) / numImgColors; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample; // const int filterColorsPerSample = numFilterColors / overSample; const int blockPixelIdx = blockIdx.y % imgPixels; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += blockSample * numImgColors * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; if (tidx < colorsPerThread * B_Y) { shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages; } for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * B_X; i += 32) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of B_X*imgsPerThread cols, B_X*B_Y/32 rows of 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of B_X*imgsPerThread cols, B_X*B_Y/32 rows of 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) { if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleTargets * targets[shColors[c * B_Y + threadIdx.y] + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * hidActs: (numFilters, numModules, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * targets: (overSample, numImgColors, imgPixels, numImages) * * Note: all of these convolution routines are optimized for the case when * the number of images (i.e. the minibatch size) is a multiple of 128. * Other batch sizes will work, but I made no attempt whatsoever * to make them work fast.
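* * Illustrative call (hypothetical sizes chosen to satisfy the asserts below, not taken from the original source): for a convolutional layer with numImgColors = 3, imgSizeY = imgSizeX = 32, filterSize = 5, paddingStart = -2, moduleStride = 1 (hence numModulesY = numModulesX = 32), numFilters a multiple of 16 and a minibatch of 128, _imgActs(hidActs, filters, targets, 32, 32, 32, -2, 1, 3, 1, 0, 1, true) computes the image reconstruction (in backprop terms, the gradient with respect to the input images), resizing targets to (3*1024, 128) because scaleTargets == 0.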
*/ void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numImages = hidActs.getNumCols(); int numFilters = filters.getNumCols(); int numModules = hidActs.getNumRows() / numFilters; int filterModuleMult = conv ? 1 : numModules; int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors); int filterSize = sqrt((double)filterPixels); int imgPixels = imgSizeY * imgSizeX; int numModulesX = numModules / numModulesY; assert(numImgColors % numGroups == 0); assert(numFilters % (16*numGroups) == 0); assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 4 == 0); assert(filterPixels == filterSize * filterSize); assert(hidActs.getNumRows() == numModules * numFilters); assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels); assert(numModules == numModulesY * numModulesX); assert(hidActs.isContiguous()); assert(filters.isContiguous()); assert(!hidActs.isTrans()); assert(!filters.isTrans()); assert(!targets.isTrans()); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0); // assert changed into if statement by Ian Goodfellow if (paddingStart + (numModulesX-1)*moduleStride + filterSize < imgSizeX) { printf("imgSizeX: %d\n", imgSizeX); printf("Bound on image size: %d\n", paddingStart + (numModulesX-1)*moduleStride+filterSize); printf("paddingStart: %d\n", paddingStart); printf("numModulesX: %d\n", numModulesX); printf("moduleStride: %d\n", moduleStride); printf("filterSize: %d\n", filterSize); assert(false); } assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); assert(moduleStride <= filterSize); assert(targets.isContiguous()); // no stride support here! dim3 blocks; dim3 threads(16,16); int colorsPerThread; int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; if (numFilterColors % 8 == 0) { threads = dim3(32, 4); colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2; imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; assert(numFilterColors % (threads.y * colorsPerThread) == 0); blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels); } else if (numFilterColors > 3) { colorsPerThread = numFilterColors % 4 == 0 ? 
4 : 2; blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } else { blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; if (scaleTargets == 0) { // do not scale or use targets matrix targets.resize(numImgColors*imgPixels, numImages); } else { assert(targets.getNumRows() == numImgColors * imgPixels); assert(targets.getNumCols() == numImages); } if (conv) { // convolutional units if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, false, true>, cudaFuncCachePreferShared); 
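// Note on the dispatch pattern used throughout this function: each cudaFuncSetCacheConfig call names one exact template specialization, and the launch immediately after it must name the same one; baking imgsPerThread/colorsPerThread/scale/checkCaseBounds/conv in as template arguments is what lets the kernels size their shared arrays statically and fully unroll their inner loops.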
conv_img_acts_manycolor<4, 32, 2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, 
cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), 
filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, false, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, false, 
true, true>, cudaFuncCachePreferShared); img_acts_color<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, false, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, false, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, false, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, false, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, false, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, false, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 
2, false, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, false, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { 
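// numFilterColors divisible by 16: dispatch the colorsPerThread == 4 specializations (matching the colorsPerThread chosen when blocks/threads were set up above); otherwise fall back to colorsPerThread == 2.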
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread 
== 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, true, false, 
true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, true, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { 
cudaFuncSetCacheConfig(img_acts_color<4, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, true, true, true>, cudaFuncCachePreferShared); img_acts_color<4, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, true, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, true, false, true>, cudaFuncCachePreferShared); img_acts_color<4, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, true, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, true, true, true>, cudaFuncCachePreferShared); img_acts_color<2, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, true, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { 
cudaFuncSetCacheConfig(img_acts_color<2, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, true, false, true>, cudaFuncCachePreferShared); img_acts_color<2, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else { // local, unshared units if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, 
paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, 
numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, false, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), 
filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, false, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, false, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 1, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, false, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, false, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, false, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, false, false, false>, cudaFuncCachePreferShared); img_acts_color<2, 1, false, false, false><<<blocks, 
threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_color<2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, false, false, false>, cudaFuncCachePreferShared); img_acts_color<2, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else { // do scale
    if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, true, true, false><<<blocks,
threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 1, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, 
true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else { if (imgsPerThread == 8) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, true, true, false>, 
cudaFuncCachePreferShared); img_acts_color<4, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, true, true, false>, cudaFuncCachePreferShared); img_acts_color<4, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<4, 1, true, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<4, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<4, 3, true, false, false>, cudaFuncCachePreferShared); img_acts_color<4, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, true, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, true, true, false>, cudaFuncCachePreferShared); img_acts_color<2, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<2, 1, true, false, 
false>, cudaFuncCachePreferShared); img_acts_color<2, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<2, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_color<2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<2, 3, true, false, false>, cudaFuncCachePreferShared); img_acts_color<2, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } }

    cutilCheckMsg("imgActs: kernel execution failed");
}

void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true);
}

void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                 int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                 float scaleTargets, float scaleOutput) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}

void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false);
}

void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
                  int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
                  float scaleTargets, float scaleOutput) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
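/*
 * Usage sketch for the wrappers above. All sizes here are illustrative
 * assumptions (not taken from any real model): a 32x32 RGB input, 16 filters
 * of size 5x5, stride 1, 2 pixels of zero padding, and a minibatch of 128
 * images (the fast path, since 128 % 128 == 0). NVMatrix's (rows, cols)
 * constructor is assumed.
 *
 *     int imgSizeY = 32, imgSizeX = 32, filterSize = 5;
 *     int paddingStart = -2, moduleStride = 1;
 *     int numModulesY = 32, numModulesX = 32;  // -2 + (32-1)*1 + 5 >= 32, so the modules cover the image
 *     int numImgColors = 3, numFilters = 16, numGroups = 1, numImages = 128;
 *
 *     NVMatrix hidActs(numFilters * numModulesY * numModulesX, numImages);   // (numFilters, numModulesY, numModulesX, numImages)
 *     NVMatrix filters(numImgColors * filterSize * filterSize, numFilters);  // (numColors, filterPixels, numFilters)
 *     NVMatrix targets;                                                      // resized internally when scaleTargets == 0
 *
 *     convImgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
 *                 paddingStart, moduleStride, numImgColors, numGroups);
 *
 * The (scaleTargets, scaleOutput) overloads accumulate into an existing
 * targets matrix, targets = scaleTargets * targets + scaleOutput * result;
 * the default (0, 1) used above simply overwrites targets.
 */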
/*
 * hidActs:         (numFilters, numModulesY, numModulesX, numImages)
 * filters:         (numFilterColors, filterPixels, numFilters)                             if conv
 *                  (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters)   otherwise
 * targets:         (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
 * colorIndices:    (numGroups, numFilterColors)
 *
 * where overSample := (numFilterColors * numGroups) / numImgColors
 *
 * Note: all of these convolution routines are optimized for the case when
 * the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
 * to make them work fast.
 */
void _imgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
                    int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
                    int numImgColors, int numFilterColors, int numGroups,
                    float scaleTargets, float scaleOutput, bool conv) {
    int numImages = hidActs.getNumCols();
    int numFilters = filters.getNumCols();
    // int numFiltersPerGroup = numFilters / numGroups;
    int numModules = hidActs.getNumRows() / numFilters;
    int filterModuleMult = conv ? 1 : numModules;
    int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
    int filterSize = sqrt((double)filterPixels);
    int imgPixels = imgSizeY * imgSizeX;
    int numModulesX = numModules / numModulesY;
    int overSample = (numFilterColors * numGroups) / numImgColors;

    assert(numImgColors % numFilterColors == 0);
    assert(numFilters % (16*numGroups) == 0);
    assert((numFilterColors * numGroups) % numImgColors == 0);
    assert(numGroups > 1);
    assert(numFilterColors > 3 && numFilterColors % 2 == 0);

    assert(filterPixels == filterSize * filterSize);
    assert(hidActs.getNumRows() == numModules * numFilters);
    assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
    assert(numModules == numModulesY * numModulesX);

    assert(hidActs.isContiguous());
    assert(filters.isContiguous());

    assert(!hidActs.isTrans());
    assert(!filters.isTrans());
    assert(!targets.isTrans());
    // These routines don't handle the case when only part of the image is visited in the convolution
    assert(paddingStart <= 0);
    assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
    assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
    assert(moduleStride <= filterSize);

    assert(targets.isContiguous()); // no stride support here!

    dim3 blocks;
    dim3 threads;
    int colorsPerThread;
    int imgsPerThread;
    if (numFilterColors % 8 == 0) {
        threads = dim3(32, 4);
        colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
        imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
        assert(numFilterColors % (threads.y * colorsPerThread) == 0);
        blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), overSample * imgPixels);
    } else if (numFilterColors > 3) {
        imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
        threads = dim3(16, 16);
        colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
        blocks = dim3(DIVUP(numImages,16*imgsPerThread) * (numImgColors / colorsPerThread), overSample * DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
    }
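    /*
     * Worked example of the launch geometry above, with assumed (purely
     * illustrative) sizes: numImgColors = 8, numFilterColors = 8, numGroups = 2,
     * numImages = 128 and a 16x16 image. Then overSample = (8 * 2) / 8 = 2 and
     * the numFilterColors % 8 == 0 branch applies:
     *
     *     threads         = (32, 4)
     *     colorsPerThread = 2    (8 % 16 != 0)
     *     imgsPerThread   = 4    (128 % 128 == 0)
     *     blocks.x        = DIVUP(128, 32*4) * (8 / (4*2)) = 1 * 1 = 1
     *     blocks.y        = overSample * imgPixels = 2 * 256 = 512
     *
     * i.e. blocks are laid out over (image chunk, color chunk) in x and over
     * (oversampled copy, image pixel) in y.
     */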
    bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;

    if (scaleTargets == 0) { // do not scale or use targets matrix
        targets.resize(overSample*numImgColors*imgPixels, numImages);
    } else {
        assert(targets.getNumRows() == overSample * numImgColors * imgPixels);
        assert(targets.getNumCols() == numImages);
    }

    if (conv) { if (scaleTargets == 0) { // do not scale or use targets matrix
    if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups,
scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 
4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); 
} } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } } else { if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { 
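// numFilterColors % 16 != 0: fall back to the 2-colors-per-thread
// specialization. Throughout this dispatcher, each branch pairs a
// cudaFuncSetCacheConfig(..., cudaFuncCachePreferShared) call with the
// identical template specialization it then launches, so the cache
// preference is registered for the exact kernel being run.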
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { 
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>, cudaFuncCachePreferShared); 
img_acts_mediumcolor_sparse_rand<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } else { // do scale if (numFilterColors % 8 == 0) { if (imgsPerThread == 4) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 2) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (imgsPerThread == 8) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else if (imgsPerThread == 4) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if 
(colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor_sparse_rand<2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices, numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput); } } } } } } cutilCheckMsg("imgActsSparse: kernel execution failed"); } void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, true); } void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, true); } void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) { _imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, false); } void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActsSparse(hidActs, 
filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, false); }
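/*
 * A minimal, self-contained sketch of the runtime-to-compile-time dispatch
 * pattern used by _imgActsSparse above: each runtime value is peeled off by
 * one if/else ladder that fixes a template parameter. All names here
 * (kernel_stub, dispatch_imgs, dispatch_bounds) are hypothetical and for
 * illustration only; this is not part of the original library.
 */
#if 0  // illustration only; excluded from compilation
#include <cstdio>

template <int IMGS_PER_THREAD, bool CHECK_BOUNDS>
void kernel_stub(float scaleOutput) {
    // A real implementation would configure the cache and launch the
    // matching __global__ kernel specialization here.
    std::printf("kernel_stub<%d, %d>(%g)\n",
                IMGS_PER_THREAD, (int)CHECK_BOUNDS, scaleOutput);
}

template <bool CHECK_BOUNDS>
void dispatch_imgs(int imgsPerThread, float scaleOutput) {
    // Second ladder: bind imgsPerThread to a template argument.
    if (imgsPerThread == 8) {
        kernel_stub<8, CHECK_BOUNDS>(scaleOutput);
    } else if (imgsPerThread == 4) {
        kernel_stub<4, CHECK_BOUNDS>(scaleOutput);
    } else {
        kernel_stub<2, CHECK_BOUNDS>(scaleOutput);
    }
}

void dispatch_bounds(int imgsPerThread, bool checkCaseBounds, float scaleOutput) {
    // First ladder: bind the bounds check to a template argument.
    if (checkCaseBounds) {
        dispatch_imgs<true>(imgsPerThread, scaleOutput);
    } else {
        dispatch_imgs<false>(imgsPerThread, scaleOutput);
    }
}
#endif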
#include "interpolation_cuda_kernel.cuh" #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #define min(a,b) ((a<b)?(a):(b)) #define max(a,b) ((a>b)?(a):(b)) #define DEBUG (0) #ifndef BLOCKDIMX #define BLOCKDIMX (32) #endif #ifndef BLOCKDIMY #define BLOCKDIMY (16) #endif using at::Half; //forward path of our layer template <typename scalar_t> __global__ void InterpolationLayer_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, scalar_t* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float fx = input2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i ]; float fy = input2[batch_i * input2_b_stride + 1 * input2_c_stride + h_i * input2_h_stride + w_i ]; float x2 = (float)(w_i) + fx; float y2 = (float)(h_i) + fy; if(x2 >= 0.0f && y2 >=0.0f && x2 < (float)w && y2 < (float)h){ int ix2_L = int(x2); int iy2_T = int(y2); int ix2_R = min(ix2_L + 1, w - 1); int iy2_B = min(iy2_T + 1, h - 1); float alpha = x2 - ix2_L; float beta = y2 - iy2_T; for(int c_i = 0 ; c_i < channel ; c_i ++){ float TL = input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_L]; float TR = input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_R]; float BL = input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L]; float BR = input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_R]; output[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i] = (1- alpha ) *(1-beta) *TL + alpha *(1- beta) * TR + (1-alpha) *beta *BL + alpha *beta * BR; } } else{ //the warping data is out of range, we fill it with zeros for(int c_i = 0 ; c_i < channel; c_i ++){ output[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i] = fillvalue; } } } return ; } template <typename scalar_t> __global__ void InterpolationLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ gradoutput, scalar_t* gradinput1, scalar_t* gradinput2 ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + 
threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); if(withinXbounds && withinYbounds){ float fx= input2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i ]; float fy = input2[batch_i * input2_b_stride + 1* input2_c_stride + h_i * input2_h_stride + w_i]; float x2 = float(w_i) + fx; float y2 = float(h_i) + fy; if(x2 >= 0.0f && y2 >= 0.0f && x2 < (float)w && y2 < (float)h){ int ix2_L = int(x2); int iy2_T = int(y2); int ix2_R = min(ix2_L+ 1, w - 1); int iy2_B = min(iy2_T + 1, h - 1); float alpha = x2 - ix2_L; float beta = y2 - iy2_T; for (int c_i = 0 ; c_i < channel; c_i++){ float gradoutput_value = gradoutput[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i]; atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_L], gradoutput_value * ( 1- alpha) * (1- beta)); atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_R], gradoutput_value * alpha * (1-beta)); atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L], gradoutput_value * (1-alpha ) * beta); atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_R], gradoutput_value * alpha * beta); } float gamma = iy2_B - y2; float bot_diff = 0.0f; for(int c_i =0 ; c_i< channel; c_i ++ ){ float temp = 0; temp += gamma * (input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride +ix2_R] - input1[off + c_i* input1_c_stride+ iy2_T * input1_h_stride + ix2_L]); temp += (1 - gamma) *( input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_R] - input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L]); float warped_diff_value = gradoutput[off+ c_i * input1_c_stride+ h_i* input1_h_stride + w_i]; bot_diff += warped_diff_value * temp ; } //the gradients of the x direction/ horizontal direction gradinput2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i] = bot_diff; gamma = ix2_R- x2; bot_diff = 0.0f; for(int c_i = 0 ; c_i < channel;c_i ++ ){ float temp = 0.0f; temp += gamma * (input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L] - input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_L]); temp += (1-gamma) *( input1[off + c_i * input1_c_stride+ iy2_B* input1_h_stride+ix2_R] - input1[off+ c_i* input1_c_stride+ iy2_T * input1_h_stride +ix2_R]); float warped_diff_value = gradoutput[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i]; bot_diff += warped_diff_value * temp; } gradinput2[batch_i * input2_b_stride + 1 * input2_c_stride + h_i * input2_h_stride + w_i]= bot_diff; } } return ; } int InterpolationLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, at::Tensor& input1, at::Tensor& input2, at::Tensor& output ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY 
revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_forward", ([&] { InterpolationLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input1.data<scalar_t>(),input2.data<scalar_t>(),output.data<scalar_t>() ); })); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } int InterpolationLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, at::Tensor& input1, at::Tensor& input2, at::Tensor& gradoutput, at::Tensor& gradinput1, at::Tensor& gradinput2 ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_forward", ([&] { InterpolationLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input1.data<scalar_t>(), input2.data<scalar_t>(), gradoutput.data<scalar_t>(), gradinput1.data<scalar_t>(), gradinput2.data<scalar_t>() ); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpu error in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; }
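/*
 * A minimal CPU reference of the forward path above, to make the math
 * explicit: output(c, y, x) bilinearly samples input1 at (x + fx, y + fy),
 * where fx = input2(0, y, x) and fy = input2(1, y, x), and writes zero when
 * the sample point falls outside the image. The name cpu_interp_forward is
 * hypothetical; this is a documentation sketch, not part of the extension,
 * and it assumes contiguous CHW float buffers for a single batch item.
 */
#if 0  // illustration only; excluded from compilation
static void cpu_interp_forward(const float* input1, const float* input2,
                               float* output, int channel, int h, int w) {
    for (int y = 0; y < h; ++y)
    for (int x = 0; x < w; ++x) {
        float x2 = x + input2[0 * h * w + y * w + x];  // fx
        float y2 = y + input2[1 * h * w + y * w + x];  // fy
        for (int c = 0; c < channel; ++c) {
            float v = 0.0f;                            // zero fill out of range
            if (x2 >= 0.0f && y2 >= 0.0f && x2 < w && y2 < h) {
                int xl = (int)x2, yt = (int)y2;
                int xr = xl + 1 < w ? xl + 1 : w - 1;  // clamp right/bottom
                int yb = yt + 1 < h ? yt + 1 : h - 1;
                float a = x2 - xl, b = y2 - yt;        // alpha, beta
                const float* p = input1 + c * h * w;
                v = (1 - a) * (1 - b) * p[yt * w + xl] + a * (1 - b) * p[yt * w + xr]
                  + (1 - a) * b * p[yb * w + xl] + a * b * p[yb * w + xr];
            }
            output[c * h * w + y * w + x] = v;
        }
    }
}
#endif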
// define to create new layout on device instead of copying from host
//#define CREATE_POISSON_LUT_ON_DEVICE

#define DEBUG_TABLE_GAMMA 0

__device__ void ComputeHydrogenForMultiFlowFit_dev(
    int sId, int flow_ndx, int nucid, float * nucRise,
    float A, float Krate, float gain, float SP, float d,
    int c_dntp_top_ndx, int num_frames, int num_beads,
    float* ival, int nonZeroEmpFrames);

__device__ void ComputeSignalForMultiFlowFit_dev(
    bool useNonZeroEmphasis, int nonZeroEmpFrames, float restrict_clonal,
    int sId, int flow_ndx, float A, float tauB, float etbR, float gain,
    int num_frames, int num_beads, float* non_integer_penalty,
    float* dark_matter, float* pPCA_vals, float* sbg, float* ival, float* output,
    bool useEmphasis = false, float diff = 0.0f,
    float* emphasis = NULL, float* fval = NULL);

__device__ void ComputeMidNucTime_dev(float& tmid, const ConstParams* pCP, int nucId, int flow_ndx);

__device__ void ComputeTauB_dev(float& tauB, const ConstParams* pCP, float etbR, int sId);

__device__ void ComputeEtbR_dev(float& etbR, float R, int sId, int nucid, int absFnum);

__device__ void ComputeSP_dev(float& SP, float Copies, int flow_ndx, int sId);

__device__ void GenerateSmoothingKernelForExponentialTailFit_dev(
    int size, float taub, int exp_start, float* kern, const ConstParams* pCP);

__device__ float CalculateMeanResidualErrorPerFlow(
    int startFrame, const float* fg_buffers, const float* fval,
    const float* weight, const int num_beads, const int num_frames);

__device__ void ModelFunctionEvaluationForExponentialTailFit_dev(
    int tail_start, int num_frames, int num_beads, float A, float taub,
    float dc_offset, float* fval, const ConstParams* pCP, float* tmp_fval = NULL);

__device__ void CalculateResidualForExponentialTailFit_dev(
    float* obs, float* pred, float* start, float* end, float* err, float& residual);

///// Implemented functions

/*
__device__ float CalcNucAvgDarkMatterPerFrame(
    int frame,
    float* darkMatter)
{
    return darkMatter[frame] + CP[sId].darkness[0]; //CP_MULTIFLOWFIT
}

__device__ float CalcPCADarkMatterPerFrame(
    int frame,
    float* pca_vals,
    float* darkMatter)
{
    return darkMatter[frame] + CP[sId].darkness[0]; //CP_MULTIFLOWFIT
}
*/

namespace {
enum ModelFuncEvaluationOutputMode { NoOutput, OneParam, TwoParams };
}

__device__ void Keplar_ModelFuncEvaluationForSingleFlowFit(
    // int * pMonitor,
    const bool twoParamFit,
    const int sId,
    const int flow_ndx,
    const int nucid,
    const float * nucRise,
    float A,
    const float Krate,
    const float tau,
    const float gain,
    const float SP,
    const float d,
    float sens,
    int c_dntp_top_ndx,
    const int num_frames,
    const int num_beads,
    float* fval,
    float* ConstP_deltaFrame,
    int endFrames,
    const ModelFuncEvaluationOutputMode flag,
    float * jac_out = NULL,
    const float * emLeft = NULL,
    const float * emRight = NULL,
    const float frac = 0,
    const float * fval_in = NULL,
    const float * err = NULL,
    float *aa = NULL,
    float *rhs0 = NULL,
    float *krkr = NULL,
    float *rhs1 = NULL,
    float *akr = NULL
    )
{
    // At this point, every thread is an independent bead.
    // We're looping over flows, evaluating how one flow over one bead
    // fits into a particular nucleotide.
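    // The amplitude A is split below into a two-column mixture of the
    // Poisson CDF table: occ_l weights column ileft and occ_r weights
    // column iright, so a fractional A interpolates between integer
    // incorporation counts. The frame loop then integrates dNTP flux in
    // sub-steps, draining totgen (initially SP*A) as incorporation
    // completes.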
if ( A!=A ) A=0.0001f; // safety check if (A < 0.0f) { A = -A; sens = -sens; } else if (A > LAST_POISSON_TABLE_COL) A = LAST_POISSON_TABLE_COL; if ( A<0.0001f ) A = 0.0001f; // safety int ileft, iright; float ifrac, idelta; // step 2 float occ_l,occ_r; float totocc; float totgen; float pact; int i, st; // step 3 float ldt; // step 4 float c_dntp_int; float pact_new; ileft = ( int ) A; idelta = A-ileft; iright = ileft+1; ifrac = 1-idelta; ileft--; iright--; occ_l = ifrac; // lower mixture occ_r = idelta; // upper mixture if (ileft < 0) { occ_l = 0.0; ileft = 0; } if (iright == LAST_POISSON_TABLE_COL) { iright = ileft; occ_r = occ_l; occ_l = 0; } occ_l *= SP; occ_r *= SP; pact = occ_l + occ_r; totocc = SP*A; totgen = totocc; #ifndef POISS_FLOAT4 const float* rptr = precompute_pois_params_streaming (iright); const float* lptr = precompute_pois_params_streaming (ileft); #else const float4 * LUTptr = precompute_pois_LUT_params_streaming (ileft, iright); // atomicAdd(&pMonitor[ileft], 1); #endif // We reuse this constant every loop... float cp_sid_kmax_nucid = CP[sId].kmax[nucid]; float c_dntp_bot = 0.0; // concentration of dNTP in the well float c_dntp_sum = 0.0; float c_dntp_old_rate = 0; float c_dntp_new_rate = 0; float scaled_kr = Krate*CP[sId].molecules_to_micromolar_conversion/d; //CP_SINGLEFLOWFIT float half_kr = Krate*0.5f; // variables used for solving background signal shape float aval = 0.0f; //new Solve HydrogenFlowInWell float one_over_two_tauB = 1.0f; float one_over_one_plus_aval = 1.0f/ (1.0f+aval); float red_hydro_prev; float fval_local = 0.0f; float red_hydro; c_dntp_top_ndx += flow_ndx*num_frames*ISIG_SUB_STEPS_SINGLE_FLOW; float c_dntp_bot_plus_kmax = 1.0f/cp_sid_kmax_nucid; //CP_SINGLEFLOWFIT //for (i=CP[sId].fine_nuc_start[flow_ndx];i < num_frames;i++) //CP_SINGLEFLOWFIT for (i=CP[sId].fine_nuc_start[flow_ndx];i < endFrames; i++) //CP_SINGLEFLOWFIT { if (totgen > 0.0f) { ldt = (ConstP_deltaFrame[i]/( ISIG_SUB_STEPS_SINGLE_FLOW * FRAMESPERSEC)) * half_kr; //CP_SINGLEFLOWFIT for (st=1; (st <= ISIG_SUB_STEPS_SINGLE_FLOW) && (totgen > 0.0f);st++) { // assume instantaneous equilibrium c_dntp_old_rate = c_dntp_new_rate; // All the threads should be grabbing from the same nucRise location. 
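// The next statement computes the bottom-of-well dNTP concentration as the
// incoming top concentration attenuated by the current polymerase activity,
// reusing 1/(c_dntp_bot + kmax) cached from the previous sub-step; the cache
// is then refreshed for the next iteration (Michaelis-Menten style
// saturation in kmax).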
c_dntp_bot = nucRise[c_dntp_top_ndx++]/ (1.0f + scaled_kr*pact*c_dntp_bot_plus_kmax); c_dntp_bot_plus_kmax = 1.0f/ (c_dntp_bot + cp_sid_kmax_nucid); //CP_SINGLEFLOWFIT c_dntp_new_rate = c_dntp_bot*c_dntp_bot_plus_kmax; c_dntp_int = ldt* (c_dntp_new_rate+c_dntp_old_rate); c_dntp_sum += c_dntp_int; // calculate new number of active polymerase #ifndef POISS_FLOAT4 pact_new = poiss_cdf_approx_streaming (c_dntp_sum,rptr) * occ_r; // if (occ_l > 0.0f) pact_new += poiss_cdf_approx_streaming (c_dntp_sum,lptr) * occ_l; #else pact_new = poiss_cdf_approx_float4(c_dntp_sum, LUTptr, occ_l, occ_r); #endif totgen -= ( (pact+pact_new) * 0.5f) * c_dntp_int; pact = pact_new; } if (totgen < 0.0f) totgen = 0.0f; red_hydro = (totocc-totgen); }else{ red_hydro = totocc; } // calculate the 'background' part (the accumulation/decay of the protons in the well // normally accounted for by the background calc) red_hydro *= sens; one_over_two_tauB = 1.0f/ (2.0f*tau); aval = ConstP_deltaFrame[i]*one_over_two_tauB; //CP_SINGLEFLOWFIT one_over_one_plus_aval = 1.0f/ (1.0f+aval); if(i==CP[sId].fine_nuc_start[flow_ndx]) //CP_SINGLEFLOWFIT fval_local = red_hydro; // *one_over_one_plus_aval; else fval_local = red_hydro - red_hydro_prev + (1.0f-aval)*fval_local; // *one_over_one_plus_aval; red_hydro_prev = red_hydro; fval_local *= one_over_one_plus_aval; switch( flag ) { case NoOutput: #ifdef FVAL_L1 fval[i] = fval_local * gain; #else fval[num_beads*i] = fval_local * gain; #endif break; case OneParam: case TwoParams: float weight = emRight != NULL ? frac*emLeft[i*(MAX_POISSON_TABLE_COL)] + (1.0f - frac)*emRight[i*(MAX_POISSON_TABLE_COL)] : emLeft[i*(MAX_POISSON_TABLE_COL)]; int bxi = num_beads * i; float err_bxi = err[bxi]; // Grab this early so that we only get it once. #ifdef FVAL_L1 float jac_tmp = weight * (fval_local*gain - fval_in[i]) * 1000.0f; #else float jac_tmp = weight * (fval_local*gain - fval_in[bxi]) * 1000.0f; #endif if(flag==OneParam){ #ifdef JAC_L1 jac_out[i] = jac_tmp; #else jac_out[bxi] = jac_tmp; #endif *aa += jac_tmp * jac_tmp; if (!twoParamFit) *rhs0 += (jac_tmp * err_bxi); } else { // Two params. #ifdef JAC_L1 float my_jac_out = jac_out[i]; // Only grab it from memory once. #else float my_jac_out = jac_out[bxi]; // Only grab it from memory once. #endif *akr += my_jac_out * jac_tmp; *rhs0 += my_jac_out * err_bxi; *rhs1 += jac_tmp * err_bxi; *krkr += jac_tmp * jac_tmp; } } } } __device__ void Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput( // int * pMonitor, const ConstParams* pCP, const int flow_ndx, const int nucid, const float * nucRise, float A, const float Krate, const float tau, const float gain, const float SP, const float d, float sens, int nucStart, int nucRiseSubSteps, const int num_frames, const int num_beads, float * fval, float* ConstP_deltaFrame, int endFrames ) { // At this point, every thread is an independent bead. // We're looping over flows, evaluating how one flow over one bead // fits into a particular nucleotide. 
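// This "NoOutput" variant follows the same incorporation model as
// Keplar_ModelFuncEvaluationForSingleFlowFit above, but only writes the
// predicted trace into fval; it carries no Jacobian, emphasis, or
// normal-equation accumulators.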
if ( A!=A ) A=0.0001f; // safety check if (A < 0.0f) { A = -A; sens = -sens; } else if (A > LAST_POISSON_TABLE_COL) A = LAST_POISSON_TABLE_COL; if ( A<0.0001f ) A = 0.0001f; // safety #if USE_TABLE_GAMMA int ileft = ( int ) A; float idelta = A-ileft; int iright = ileft+1; float ifrac = 1-idelta; ileft--; iright--; float occ_l = ifrac; // lower mixture float occ_r = idelta; // upper mixture if (ileft < 0) { occ_l = 0.0; ileft = 0; } if (iright == LAST_POISSON_TABLE_COL) { iright = ileft; occ_r = occ_l; occ_l = 0; } occ_l *= SP; occ_r *= SP; float pact = occ_l + occ_r; #ifndef POISS_FLOAT4 const float* rptr = precompute_pois_params_streaming (iright); const float* lptr = precompute_pois_params_streaming (ileft); #else const float4 * LUTptr = precompute_pois_LUT_params_streaming (ileft, iright); #endif #else float pact = SP; #endif // USE_TABLE_GAMMA float totocc = SP*A; float totgen = totocc; // We reuse this constant every loop... float cp_sid_kmax_nucid = pCP->kmax[nucid]; float c_dntp_sum = 0.0; float c_dntp_old_rate = 0; float c_dntp_new_rate = 0; float scaled_kr = Krate*pCP->molecules_to_micromolar_conversion/d; //CP_SINGLEFLOWFIT float half_kr = Krate*0.5f; // variables used for solving background signal shape float aval = 0.0f; //new Solve HydrogenFlowInWell float one_over_two_tauB = 1.0f; float one_over_one_plus_aval = 1.0f/ (1.0f+aval); float red_hydro_prev; float fval_local = 0.0f; float red_hydro; int c_dntp_top_ndx = nucRiseSubSteps * (flow_ndx*num_frames + nucStart); float c_dntp_bot_plus_kmax = 1.0f/cp_sid_kmax_nucid; //CP_SINGLEFLOWFIT bool start_frame = true; for (int i=nucStart; i < endFrames;i++) //CP_SINGLEFLOWFIT { if (totgen > 0.0f) { float ldt = (ConstP_deltaFrame[i]/( nucRiseSubSteps * FRAMESPERSEC)) * half_kr; //CP_SINGLEFLOWFIT for (int st=1; (st <= nucRiseSubSteps) && (totgen > 0.0f);st++) { // assume instantaneous equilibrium c_dntp_old_rate = c_dntp_new_rate; // All the threads should be grabbing from the same nucRise location. 
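// Same quasi-steady-state sub-step as in the routine above; the only
// difference is how the active-polymerase count is refreshed: with
// USE_TABLE_GAMMA it is interpolated from the precomputed Poisson CDF table,
// otherwise PoissonCDF(A, c_dntp_sum) is evaluated directly and scaled by SP.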
// c_dntp_bot is the concentration of dNTP in the well float c_dntp_bot = nucRise[c_dntp_top_ndx++]/ (1.0f + scaled_kr*pact*c_dntp_bot_plus_kmax); c_dntp_bot_plus_kmax = 1.0f/ (c_dntp_bot + cp_sid_kmax_nucid); //CP_SINGLEFLOWFIT c_dntp_new_rate = c_dntp_bot*c_dntp_bot_plus_kmax; float c_dntp_int = ldt* (c_dntp_new_rate+c_dntp_old_rate); c_dntp_sum += c_dntp_int; // calculate new number of active polymerase #if USE_TABLE_GAMMA #ifndef POISS_FLOAT4 float pact_new = poiss_cdf_approx_streaming (c_dntp_sum,rptr) * occ_r; // if (occ_l > 0.0f) pact_new += poiss_cdf_approx_streaming (c_dntp_sum,lptr) * occ_l; #else float pact_new = poiss_cdf_approx_float4(c_dntp_sum, LUTptr, occ_l, occ_r); #if DEBUG_TABLE_GAMMA printf("A=%g, c_dntp_sum=%g, table=%g, calc=%g\n" " calc_interp %g %g %g %g\n" " table_interp %g %g %g %g\n", A, c_dntp_sum, pact_new, PoissonCDF( A, c_dntp_sum ) * SP, PoissonCDF( floor(A), floor(c_dntp_sum*20.f)/20.f ), PoissonCDF( floor(A), ceil(c_dntp_sum*20.f)/20.f ), PoissonCDF( ceil(A), floor(c_dntp_sum*20.f)/20.f ), PoissonCDF( ceil(A), ceil(c_dntp_sum*20.f)/20.f ), LUTptr[(int)(c_dntp_sum*20.f)].x, LUTptr[(int)(c_dntp_sum*20.f)].y, LUTptr[(int)(c_dntp_sum*20.f)].z, LUTptr[(int)(c_dntp_sum*20.f)].w ); #endif // DEBUG_TABLE_GAMMA #endif #else float pact_new = PoissonCDF( A, c_dntp_sum ) * SP; #endif // USE_TABLE_GAMMA totgen -= ( (pact+pact_new) * 0.5f) * c_dntp_int; pact = pact_new; } if (totgen < 0.0f) totgen = 0.0f; red_hydro = (totocc-totgen); }else{ red_hydro = totocc; } // calculate the 'background' part (the accumulation/decay of the protons in the well // normally accounted for by the background calc) red_hydro *= sens; one_over_two_tauB = 1.0f/ (2.0f*tau); aval = ConstP_deltaFrame[i]*one_over_two_tauB; //CP_SINGLEFLOWFIT one_over_one_plus_aval = 1.0f/ (1.0f+aval); if(start_frame) { //CP_SINGLEFLOWFIT fval_local = red_hydro; // *one_over_one_plus_aval; start_frame = false; } else { fval_local = red_hydro - red_hydro_prev + (1.0f-aval)*fval_local; // *one_over_one_plus_aval; } red_hydro_prev = red_hydro; fval_local *= one_over_one_plus_aval; #ifdef FVAL_L1 fval[i] = fval_local * gain; #else fval[num_beads*i] = fval_local * gain; #endif } } __device__ void Fermi_ModelFuncEvaluationForSingleFlowFit( const int sId, const int flow_ndx, const int nucid, const float * const nucRise, float A1, float A2, const float Krate1, const float Krate2, const float tau, const float gain, const float SP, const float d, const float sens_in, int c_dntp_top_ndx, const int num_frames, const int num_beads, const ModelFuncEvaluationOutputMode flag, const float * const emLeft, const float * const emRight, const float frac, const float * const fval_in, const float * const err, float *const aa, float *const rhs0, float *const krkr, float *const rhs1, float *const akr, float *ConstP_deltaFrame, int endFrames ) { float sens1 = sens_in; // At this point, every thread is an independent bead. // We're looping over flows, evaluating how one flow over one bead // fits into a particular nucleotide. 
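// This variant advances two parameter hypotheses, (A1, Krate1) and
// (A2, Krate2), through one shared pass over the frames. Callers perturb one
// parameter by 0.001f, so the per-frame weighted difference
// weight * (fval_local*gain - fval_in) * 1000.0f is a forward
// finite-difference Jacobian column (1/0.001f == 1000.0f): OneParam builds
// the amplitude column only, TwoParams builds both columns plus the cross
// terms needed for the 2x2 normal equations.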
if ( A1!=A1 ) A1=0.0001f; // safety check if (A1 < 0.0f) { A1 = -A1; sens1 = -sens1; } else if (A1 > LAST_POISSON_TABLE_COL) A1 = LAST_POISSON_TABLE_COL; if ( A1<0.0001f ) A1 = 0.0001f; // safety float sens2 = sens_in; if ( A2!=A2 ) A2=0.0001f; // safety check if (A2 < 0.0f) { A2 = -A2; sens2 = -sens2; } else if (A2 > LAST_POISSON_TABLE_COL) A2 = LAST_POISSON_TABLE_COL; if ( A2<0.0001f ) A2 = 0.0001f; // safety #if USE_TABLE_GAMMA int ileft1 = ( int ) A1; float occ_r1 = A1-ileft1; // upper mixture int iright1 = ileft1+1; float occ_l1 = 1-occ_r1; // lower mixture ileft1--; iright1--; if (ileft1 < 0) { occ_l1 = 0.0; ileft1 = 0; } if (iright1 == LAST_POISSON_TABLE_COL) { iright1 = ileft1; occ_r1 = occ_l1; occ_l1 = 0; } occ_l1 *= SP; occ_r1 *= SP; float pact1 = occ_l1 + occ_r1; int ileft2 = ( int ) A2; float occ_r2 = A2-ileft2; // upper mixture int iright2 = ileft2+1; float occ_l2 = 1-occ_r2; // lower mixture ileft2--; iright2--; if (ileft2 < 0) { occ_l2 = 0.0; ileft2 = 0; } if (iright2 == LAST_POISSON_TABLE_COL) { iright2 = ileft2; occ_r2 = occ_l2; occ_l2 = 0; } occ_l2 *= SP; occ_r2 *= SP; float pact2 = occ_l2 + occ_r2; #ifndef POISS_FLOAT4 const float* rptr1 = precompute_pois_params_streaming (iright1); const float* lptr1 = precompute_pois_params_streaming (ileft1); #else const float4 * LUTptr1 = precompute_pois_LUT_params_streaming (ileft1, iright1); #endif #ifndef POISS_FLOAT4 const float* rptr2 = precompute_pois_params_streaming (iright2); const float* lptr2 = precompute_pois_params_streaming (ileft2); #else const float4 * LUTptr2 = precompute_pois_LUT_params_streaming (ileft2, iright2); #endif #else // !USE_TABLE_GAMMA float pact1 = SP; float pact2 = SP; #endif // USE_TABLE_GAMMA const float totocc1 = SP*A1; float totgen1 = totocc1; const float totocc2 = SP*A2; float totgen2 = totocc2; // We reuse this constant every loop... const float cp_sid_kmax_nucid = CP[sId].kmax[nucid]; float c_dntp_sum1 = 0.0; float c_dntp_new_rate1 = 0; const float scaled_kr1 = Krate1*CP[sId].molecules_to_micromolar_conversion/d; //CP_SINGLEFLOWFIT float red_hydro_prev1; c_dntp_top_ndx += flow_ndx*num_frames*ISIG_SUB_STEPS_SINGLE_FLOW; float c_dntp_bot_plus_kmax1 = 1.0f/cp_sid_kmax_nucid; //CP_SINGLEFLOWFIT float c_dntp_sum2 = 0.0; float c_dntp_new_rate2 = 0; float fval_local1 = 0.f; float fval_local2 = 0.f; const float scaled_kr2 = Krate2*CP[sId].molecules_to_micromolar_conversion/d; //CP_SINGLEFLOWFIT float red_hydro_prev2; float c_dntp_bot_plus_kmax2 = 1.0f/cp_sid_kmax_nucid; //CP_SINGLEFLOWFIT int starting_frame = CP[sId].fine_nuc_start[flow_ndx]; //for (int i=starting_frame;i < num_frames;i++) //CP_SINGLEFLOWFIT for (int i=starting_frame;i < endFrames; i++) //CP_SINGLEFLOWFIT { float delta_frame = ConstP_deltaFrame[i]; float red_hydro1 = totocc1; float red_hydro2 = totocc2; // Move memory fetches well ahead of where they're used. #ifdef FVAL_L1 const float fval_in_i = fval_in[i]; #else const float fval_in_i = fval_in[num_beads * i]; #endif if (totgen1 > 0.0f || (totgen2 > 0.f && flag == TwoParams ) ) { //CP_SINGLEFLOWFIT float ldt1 = (delta_frame/( ISIG_SUB_STEPS_SINGLE_FLOW * FRAMESPERSEC)) * (Krate1*0.5f); float ldt2 = (delta_frame/( ISIG_SUB_STEPS_SINGLE_FLOW * FRAMESPERSEC)) * (Krate2*0.5f); for (int st=1; st <= ISIG_SUB_STEPS_SINGLE_FLOW ;st++) { // All the threads should be grabbing from the same nucRise location. 
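// One nucRise fetch per sub-step feeds both hypotheses below, so the two
// Jacobian columns see exactly the same incoming nucleotide trace.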
float nuc_rise = nucRise[ c_dntp_top_ndx++ ]; if ( totgen1 > 0.f ) { // assume instantaneous equilibrium const float c_dntp_old_rate1 = c_dntp_new_rate1; // c_dntp_bot is concentration of dNTP in the well const float c_dntp_bot = nuc_rise / (1.0f + scaled_kr1*pact1*c_dntp_bot_plus_kmax1); c_dntp_bot_plus_kmax1 = 1.0f/ (c_dntp_bot + cp_sid_kmax_nucid); //CP_SINGLEFLOWFIT c_dntp_new_rate1 = c_dntp_bot*c_dntp_bot_plus_kmax1; float c_dntp_int1 = ldt1* (c_dntp_new_rate1+c_dntp_old_rate1); c_dntp_sum1 += c_dntp_int1; // calculate new number of active polymerase #if USE_TABLE_GAMMA #ifndef POISS_FLOAT4 float pact_new1 = poiss_cdf_approx_streaming (c_dntp_sum1,rptr1) * occ_r1; // if (occ_l1 > 0.0f) pact_new1 += poiss_cdf_approx_streaming (c_dntp_sum1,lptr1) * occ_l1; #else float pact_new1 = poiss_cdf_approx_float4(c_dntp_sum1, LUTptr1, occ_l1, occ_r1); #endif #else float pact_new1 = PoissonCDF( A1, c_dntp_sum1 ) * SP; #endif // USE_TABLE_GAMMA totgen1 -= ( (pact1+pact_new1) * 0.5f) * c_dntp_int1; pact1 = pact_new1; } if ( totgen2 > 0.f && flag == TwoParams ) { // assume instantaneous equilibrium const float c_dntp_old_rate2 = c_dntp_new_rate2; // c_dntp_bot is concentration of dNTP in the well const float c_dntp_bot = nuc_rise / (1.0f + scaled_kr2*pact2*c_dntp_bot_plus_kmax2); c_dntp_bot_plus_kmax2 = 1.0f/ (c_dntp_bot + cp_sid_kmax_nucid); //CP_SINGLEFLOWFIT c_dntp_new_rate2 = c_dntp_bot*c_dntp_bot_plus_kmax2; float c_dntp_int2 = ldt2* (c_dntp_new_rate2+c_dntp_old_rate2); c_dntp_sum2 += c_dntp_int2; // calculate new number of active polymerase #if USE_TABLE_GAMMA #ifndef POISS_FLOAT4 float pact_new2 = poiss_cdf_approx_streaming (c_dntp_sum2,rptr2) * occ_r2; // if (occ_l2 > 0.0f) pact_new2 += poiss_cdf_approx_streaming (c_dntp_sum2,lptr2) * occ_l2; #else float pact_new2 = poiss_cdf_approx_float4(c_dntp_sum2, LUTptr2, occ_l2, occ_r2); #endif #else float pact_new2 = PoissonCDF( A2, c_dntp_sum2 ) * SP; #endif // USE_TABLE_GAMMA totgen2 -= ( (pact2+pact_new2) * 0.5f) * c_dntp_int2; pact2 = pact_new2; } } if (totgen1 < 0.0f) totgen1 = 0.0f; red_hydro1 -= totgen1; if ( flag == TwoParams ) { if (totgen2 < 0.0f) totgen2 = 0.0f; red_hydro2 -= totgen2; } } float err_bxi = err[num_beads * i]; // Grab this early so that we only get it once. // calculate the 'background' part (the accumulation/decay of the protons in the well // normally accounted for by the background calc) red_hydro1 *= sens1; // variables used for solving background signal shape const float one_over_two_tauB = 1.0f/ (2.0f*tau); const float aval = delta_frame*one_over_two_tauB; //CP_SINGLEFLOWFIT const float one_over_one_plus_aval = 1.0f/ (1.0f+aval); if( i == starting_frame ) //CP_SINGLEFLOWFIT fval_local1 = red_hydro1; // *one_over_one_plus_aval; else fval_local1 = red_hydro1 - red_hydro_prev1 + (1.0f-aval)*fval_local1; // *one_over_one_plus_aval; red_hydro_prev1 = red_hydro1; fval_local1 *= one_over_one_plus_aval; float weight = emRight != NULL ? 
frac*emLeft[i*(MAX_POISSON_TABLE_COL)] + (1.0f - frac)*emRight[i*(MAX_POISSON_TABLE_COL)] : emLeft[i*(MAX_POISSON_TABLE_COL)]; float jac_1 = weight * (fval_local1*gain - fval_in_i) * 1000.0f; *aa += jac_1 * jac_1; *rhs0 += (jac_1 * err_bxi); if ( flag == TwoParams ) { // calculate the 'background' part (the accumulation/decay of the protons in the well // normally accounted for by the background calc) red_hydro2 *= sens2; if( i == starting_frame ) //CP_SINGLEFLOWFIT fval_local2 = red_hydro2; // *one_over_one_plus_aval; else fval_local2 = red_hydro2 - red_hydro_prev2 + (1.0f-aval)*fval_local2; // *one_over_one_plus_aval; red_hydro_prev2 = red_hydro2; fval_local2 *= one_over_one_plus_aval; float jac_2 = weight * (fval_local2*gain - fval_in_i) * 1000.0f; *akr += jac_1 * jac_2; *rhs1 += jac_2 * err_bxi; *krkr += jac_2 * jac_2; } // end flag == TwoParams } // loop over i } __device__ void ComputeHydrogenForMultiFlowFit_dev( int sId, int flow_ndx, int nucid, float * nucRise, float A, float Krate, float gain, float SP, float d, int c_dntp_top_ndx, int num_frames, int num_beads, float* ival, int nonZeroEmpFrames) { float sens = CP[sId].sens*SENSMULTIPLIER; //CP_MULTIFLOWFIT if (A < 0.0f) { A = -A; sens = -sens; } else if (A > LAST_POISSON_TABLE_COL) A = LAST_POISSON_TABLE_COL; if ( A<0.0001f ) A = 0.0001f; // safety int ileft, iright; float ifrac; // step 2 float occ_l,occ_r; float totocc; float totgen; float pact; int i, st; // step 3 float ldt; // step 4 float c_dntp_int; // initialize diffusion/reaction simulation for this flow ileft = (int) A; iright = ileft + 1; ifrac = iright - A; occ_l = ifrac; occ_r = A - ileft; ileft--; iright--; if (ileft < 0) { occ_l = 0.0; ileft = 0; } if (iright >= LAST_POISSON_TABLE_COL) { iright = ileft = LAST_POISSON_TABLE_COL-1; occ_r = occ_l; occ_l = 0; } occ_l *= SP; occ_r *= SP; pact = occ_l + occ_r; totocc = SP*A; totgen = totocc; #ifndef POISS_FLOAT4 const float* rptr = precompute_pois_params_streaming (iright); const float* lptr = precompute_pois_params_streaming (ileft); #else const float4* LUTptr = precompute_pois_LUT_params_streaming (ileft, iright); #endif float c_dntp_bot = 0.0; // concentration of dNTP in the well float c_dntp_sum = 0.0; float c_dntp_old_rate = 0; float c_dntp_new_rate = 0; float c_dntp_bot_plus_kmax = 1.0f/CP[sId].kmax[nucid]; //CP_MULTIFLOWFIT float scaled_kr = Krate*CP[sId].molecules_to_micromolar_conversion/d; //CP_MULTIFLOWFIT float half_kr = Krate*0.5f; c_dntp_top_ndx += flow_ndx*num_frames*ISIG_SUB_STEPS_MULTI_FLOW; for(i=0;i<CP[sId].coarse_nuc_start[flow_ndx]; i++) { //CP_MULTIFLOWFIT *ival = 0; ival += num_beads; } for (i=CP[sId].coarse_nuc_start[flow_ndx];i < nonZeroEmpFrames;i++) //CP_MULTIFLOWFIT { if (totgen > 0.0f) { ldt = (CP[sId].deltaFrames[i]/( ISIG_SUB_STEPS_MULTI_FLOW * FRAMESPERSEC)) * half_kr; //CP_MULTIFLOWFIT for (st=1; (st <= ISIG_SUB_STEPS_MULTI_FLOW) && (totgen > 0.0f);st++) { // assume instantaneous equilibrium c_dntp_old_rate = c_dntp_new_rate; c_dntp_bot = nucRise[c_dntp_top_ndx++]/ (1.0f + scaled_kr*pact*c_dntp_bot_plus_kmax); c_dntp_bot_plus_kmax = 1.0f/ (c_dntp_bot + CP[sId].kmax[nucid]); //CP_MULTIFLOWFIT c_dntp_new_rate = c_dntp_bot*c_dntp_bot_plus_kmax; c_dntp_int = ldt* (c_dntp_new_rate+c_dntp_old_rate); c_dntp_sum += c_dntp_int; // calculate new number of active polymerase #if USE_TABLE_GAMMA #ifndef POISS_FLOAT4 float pact_new = poiss_cdf_approx_streaming (c_dntp_sum,rptr) * occ_r; // if (occ_l > 0.0f) pact_new += poiss_cdf_approx_streaming (c_dntp_sum,lptr) * occ_l; #else float pact_new = 
poiss_cdf_approx_float4(c_dntp_sum, LUTptr, occ_l, occ_r); #endif #else float pact_new = PoissonCDF( A, c_dntp_sum ) * SP; #endif // USE_TABLE_GAMMA totgen -= ( (pact+pact_new) * 0.5f) * c_dntp_int; pact = pact_new; } if (totgen < 0.0f) totgen = 0.0f; *ival = (totocc-totgen) * sens; }else{ *ival = totocc * sens; } ival += num_beads; } } __device__ void ComputeSignalForMultiFlowFit_dev( bool useNonZeroEmphasis, int nonZeroEmpFrames, float restrict_clonal, int sId, int flow_ndx, float A, float tauB, float etbR, float gain, int num_frames, int num_beads, float* non_integer_penalty, float* dark_matter, float* pPCA_vals, float* sbg, float* ival, float* output, bool useEmphasis, float diff, float* emphasis, float* fval) { float xt; float fval_local, purple_hydr; float clonal_error_term = 0.0f; int i=0; if ((A < restrict_clonal) && (flow_ndx > KEY_LEN)) { int intcall = A + 0.5f; clamp_streaming(intcall, 0, MAGIC_MAX_CLONAL_HP_LEVEL); clonal_error_term = fabs(A - intcall) * non_integer_penalty[intcall]; } float one_over_two_taub = 1.0f / (2.0f*tauB); xt = CP[sId].deltaFrames[i]*one_over_two_taub; //CP_MULTIFLOWFIT float one_over_one_plus_aval = 1.0f/ (1.0f+xt); sbg += flow_ndx*num_frames; purple_hydr = ( *ival + (etbR+xt)*sbg[i])*one_over_one_plus_aval; //fval_local = dark_matter[i]*CP[sId].darkness[0] + //CP_MULTIFLOWFIT fval_local = ApplyDarkMatterToFrame(dark_matter, pPCA_vals, i, num_frames, num_beads, sId); fval_local += purple_hydr*gain + clonal_error_term * ((float) (i&1) - 0.5f); *output = useEmphasis ? (fval_local - *fval)*emphasis[i] / diff : fval_local; output += num_beads; i++; int frames = useNonZeroEmphasis ? nonZeroEmpFrames : num_frames; for (; i<frames; ++i) { xt = CP[sId].deltaFrames[i]*one_over_two_taub; //CP_MULTIFLOWFIT one_over_one_plus_aval = 1.0f/(1.0f+xt); purple_hydr = ((ival[i*num_beads] - ival[(i-1)*num_beads]) + (etbR+xt)*sbg[i] - (etbR-xt) * sbg[i-1]+ (1.0f-xt) * purple_hydr) * one_over_one_plus_aval; fval_local = purple_hydr*gain + ApplyDarkMatterToFrame(dark_matter, pPCA_vals , i, num_frames, num_beads, sId); // dark_matter[i]*CP[sId].darkness[0]; //CP_MULTIFLOWFIT if (i < MAXCLONALMODIFYPOINTSERROR) fval_local += clonal_error_term * ((float) (i&1) - 0.5f); *output = useEmphasis ? 
(fval_local - fval[i*num_beads])*emphasis[i] / diff : fval_local; output += num_beads; } } // smoothing kernel to provide weights for smoothing exponential tail __device__ void GenerateSmoothingKernelForExponentialTailFit_dev( int size, float taubInv, int exp_start, float* kern, const ConstParams* pCP ) { float dt; for (int i=0; i<size; ++i) { dt = (pCP->frameNumber[i+exp_start] - pCP->frameNumber[exp_start + 3])*taubInv; kern[i] = __expf(dt); } } __device__ float ResidualCalculationPerFlow( int startFrame, const float* fg_buffers, const float* fval, const float* emLeft, const float* emRight, const float frac, float* err, const int num_beads, const int nonZeroEmpFrames) { float e; float weight; float wtScale = 0; float residual = 0; int i; for (i=0; i<startFrame; ++i) { weight = (emRight != NULL) ?( frac* (*emLeft) + (1.0f - frac)*emRight[i*(MAX_POISSON_TABLE_COL)]) :( (*emLeft)); emLeft += (MAX_POISSON_TABLE_COL); #if __CUDA_ARCH__ >= 350 *err = e = weight * __ldg(fg_buffers); #else *err = e = weight * (*fg_buffers); #endif residual += e*e; wtScale += weight*weight; err += num_beads; fg_buffers += num_beads; #ifdef FVAL_L1 fval ++; #else fval += num_beads; #endif } for (i=startFrame; i<nonZeroEmpFrames; ++i) { weight = (emRight != NULL) ?( frac* (*emLeft) + (1.0f - frac)*emRight[i*(MAX_POISSON_TABLE_COL)]) :( (*emLeft)); //[i*(MAX_POISSON_TABLE_COL)]; emLeft += (MAX_POISSON_TABLE_COL); #if __CUDA_ARCH__ >= 350 *err = e = weight * (__ldg(fg_buffers) - *fval); #else *err = e = weight * (*fg_buffers - *fval); #endif residual += e*e; wtScale += weight*weight; err += num_beads; fg_buffers += num_beads; #ifdef FVAL_L1 fval ++; #else fval += num_beads; #endif } residual /= wtScale; return residual; } __device__ float CalculateMeanResidualErrorPerFlow( int startFrame, const float* fg_buffers, const float* fval, const float* weight, // highest hp weighting emphasis vector const int num_beads, const int num_frames) { float wtScale = 0.0f; float residual = 0; float e; for (int i=0; i<num_frames; ++i) { wtScale += *weight * *weight; if (i < startFrame) e = *weight * *fg_buffers; else e = *weight * (*fg_buffers - *fval); residual += e*e; weight += (LAST_POISSON_TABLE_COL + 1); fg_buffers+=num_beads; #ifdef FVAL_L1 fval++; #else fval += num_beads; #endif } residual = sqrtf(residual/wtScale); return residual; } __device__ float dotProduct(float *ptr1, float * ptr2, int length, int stride) { float result = 0; for(int i = 0; i < length; i++) result += ptr1[i*stride] *ptr2[i*stride]; return result; } __device__ void dotProduct(float *result, float *ptr1, float * ptr2, int length, int stride) { for(int i = 0; i < length; i++) *result += ptr1[i*stride] *ptr2[i*stride]; } __device__ void dotProduct(float2 *result2, float *ptr1, float * ptr2, int length, int stride) { float2 tempA; float2 tempB; for(int i = 0; i < length; i++){ tempA = *((float2*)(&ptr1[i*stride])); tempB = *((float2*)(&ptr2[i*stride])); result2->x += tempA.x*tempB.x; result2->y += tempA.y*tempB.y; } } __device__ void dotProduct(float4 *result4, float *ptr1, float * ptr2, int length, int stride) { float4 tempA; float4 tempB; for(int i = 0; i < length; i++){ tempA = *((float4*)(&ptr1[i*stride])); tempB = *((float4*)(&ptr2[i*stride])); result4->x += tempA.x*tempB.x; result4->y += tempA.y*tempB.y; result4->z += tempA.z*tempB.z; result4->w += tempA.w*tempB.w; } } __device__ float CalculateJTJEntry( unsigned int mask, float* input, int idb, int num_beads, int num_frames, int flow_block_size ) { unsigned int stepIdx; float * basePtr1; float * 
basePtr2; float result = 0; if ((mask & 0xFFFFF) == 0) return 0; stepIdx = mask >> PARAM1_STEPIDX_SHIFT; // printf("%u/", stepIdx ); basePtr1 = input + stepIdx * num_beads*num_frames *flow_block_size + idb; stepIdx = (mask >> PARAM2_STEPIDX_SHIFT) & 63; // 63 == 0011 1111 // printf("%u: ", stepIdx ); basePtr2 = input + stepIdx * num_beads*num_frames *flow_block_size + idb; for(int flow_ndx = 0; flow_ndx<flow_block_size; flow_ndx++){ bool doDotProductForFlow = (mask >> flow_ndx) & 1; // printf("%d", doDotProductForFlow ); if(doDotProductForFlow){ float * ptr1 = basePtr1 + flow_ndx*num_frames*num_beads; float * ptr2 = basePtr2 + flow_ndx*num_frames*num_beads; result += dotProduct(ptr1,ptr2,num_frames,num_beads); //dotProduct(&result, ptr1,ptr2,num_frames,num_beads); } } //printf(" " ); return result; } __device__ float2 CalculateJTJEntryVec2( unsigned int mask, float* input, int idb, int num_beads, int num_frames, int flow_block_size ) { unsigned int stepIdx; float * basePtr1; float * basePtr2; float2 result2; result2.x = 0; result2.y = 0; if ((mask & 0xFFFFF) == 0) return result2; stepIdx = mask >> PARAM1_STEPIDX_SHIFT; basePtr1 = input + stepIdx * num_beads*num_frames *flow_block_size + idb; stepIdx = (mask >> PARAM2_STEPIDX_SHIFT) & 63; // 63 == 0011 1111 basePtr2 = input + stepIdx * num_beads*num_frames *flow_block_size + idb; for(int flow_ndx = 0; flow_ndx<flow_block_size; flow_ndx++){ bool doDotProductForFlow = (mask >> flow_ndx) & 1; if(doDotProductForFlow){ float * ptr1 = basePtr1 + flow_ndx*num_frames*num_beads; float * ptr2 = basePtr2 + flow_ndx*num_frames*num_beads; dotProduct(&result2, ptr1,ptr2,num_frames,num_beads); } } return result2; } __device__ float4 CalculateJTJEntryVec4( unsigned int mask, float* input, int idb, int num_beads, int num_frames, int flow_block_size ) { unsigned int stepIdx; float * basePtr1; float * basePtr2; float4 result4; result4.x = 0; result4.y = 0; result4.z = 0; result4.w = 0; stepIdx = mask >> PARAM1_STEPIDX_SHIFT; basePtr1 = input + stepIdx * num_beads*num_frames *flow_block_size + idb; stepIdx = (mask >> PARAM2_STEPIDX_SHIFT) & 63; // 63 == 0011 1111 basePtr2 = input + stepIdx * num_beads*num_frames *flow_block_size + idb; for(int flow_ndx = 0; flow_ndx<flow_block_size; flow_ndx++){ bool doDotProductForFlow = (mask >> flow_ndx) & 1; if(doDotProductForFlow){ float * ptr1 = basePtr1 + flow_ndx*num_frames*num_beads; float * ptr2 = basePtr2 + flow_ndx*num_frames*num_beads; dotProduct(&result4, ptr1,ptr2,num_frames,num_beads); } } return result4; } __device__ float CalculateRHSEntry( unsigned int mask, float* input, int idb, int num_steps, int num_beads, int num_frames, int flow_block_size ) { int stepIdx; float * basePtr1; float * basePtr2; float result = 0; stepIdx = mask >> PARAM1_STEPIDX_SHIFT; // printf("%d %d \n", stepIdx, num_steps); basePtr1 = input + stepIdx * num_beads*num_frames *flow_block_size + idb; basePtr2 = input + (num_steps-1) * num_beads*num_frames *flow_block_size + idb; for(int flow_ndx = 0; flow_ndx<flow_block_size; flow_ndx++){ bool doDotProductForFlow = (mask >> flow_ndx) & 1; if(doDotProductForFlow){ float * ptr1 = basePtr1 + flow_ndx*num_frames*num_beads; float * ptr2 = basePtr2 + flow_ndx*num_frames*num_beads; result += dotProduct(ptr1,ptr2,num_frames,num_beads); //dotProduct(&result, ptr1,ptr2,num_frames,num_beads); } } return result; } __device__ float2 CalculateRHSEntryVec2( unsigned int mask, float* input, int idb, int num_steps, int num_beads, int num_frames, int flow_block_size ) { int stepIdx; float * basePtr1; float 
* basePtr2;
  float2 result2;
  result2.x = 0;
  result2.y = 0;

  stepIdx = mask >> PARAM1_STEPIDX_SHIFT;
  // printf("%d %d \n", stepIdx, num_steps);
  basePtr1 = input + stepIdx * num_beads*num_frames *flow_block_size + idb;
  basePtr2 = input + (num_steps-1) * num_beads*num_frames *flow_block_size + idb;

  for(int flow_ndx = 0; flow_ndx<flow_block_size; flow_ndx++){
    bool doDotProductForFlow = (mask >> flow_ndx) & 1;
    if(doDotProductForFlow){
      float * ptr1 = basePtr1 + flow_ndx*num_frames*num_beads;
      float * ptr2 = basePtr2 + flow_ndx*num_frames*num_beads;
      dotProduct(&result2, ptr1,ptr2,num_frames,num_beads);
      //dotProduct(&result, ptr1,ptr2,num_frames,num_beads);
    }
  }
  return result2;
}

__device__ float4 CalculateRHSEntryVec4(
    unsigned int mask,
    float* input,
    int idb,
    int num_steps,
    int num_beads,
    int num_frames,
    int flow_block_size
)
{
  int stepIdx;
  float * basePtr1;
  float * basePtr2;
  float4 result4;
  result4.x = 0;
  result4.y = 0;
  result4.z = 0;
  result4.w = 0;

  stepIdx = mask >> PARAM1_STEPIDX_SHIFT;
  // printf("%d %d \n", stepIdx, num_steps);
  basePtr1 = input + stepIdx * num_beads*num_frames *flow_block_size + idb;
  basePtr2 = input + (num_steps-1) * num_beads*num_frames *flow_block_size + idb;

  for(int flow_ndx = 0; flow_ndx<flow_block_size; flow_ndx++){
    bool doDotProductForFlow = (mask >> flow_ndx) & 1;
    if(doDotProductForFlow){
      float * ptr1 = basePtr1 + flow_ndx*num_frames*num_beads;
      float * ptr2 = basePtr2 + flow_ndx*num_frames*num_beads;
      dotProduct(&result4, ptr1,ptr2,num_frames,num_beads);
      //dotProduct(&result, ptr1,ptr2,num_frames,num_beads);
    }
  }
  return result4;
}

__device__ float CalculateNonDiagLowerTriangularElements_dev(
    int bead_ndx,
    int row,
    float** curJtj,
    float* ltr,
    float** curLtr,
    int stride)
{
  //if (bead_ndx == 33) printf("Non Diag Ele Calculation\n");
  float dotP = 0;
  float runningSumNonDiagonalEntries = 0;
  float curRowElement = 0;
  for (int i=0; i<row; ++i)
  {
    curRowElement = ((*curJtj)[bead_ndx] - runningSumNonDiagonalEntries) / ltr[bead_ndx];
    //if (bead_ndx == 96) printf("r: %d, c: %d, curRowElement: %f\n", row, i, curRowElement);
    dotP += (curRowElement*curRowElement);
    (*curLtr)[i*stride + bead_ndx] = curRowElement;
    runningSumNonDiagonalEntries = 0;
    ltr += stride;
    for (int j=0; j<=i; ++j)
    {
      //if (bead_ndx == 33) printf("j: %d, ltr: %f, curltr: %f\n", j, ltr[bead_ndx], (*curLtr)[j*stride + bead_ndx]);
      runningSumNonDiagonalEntries += (ltr[bead_ndx]*((*curLtr)[j*stride + bead_ndx]));
      ltr += stride;
    }
    (*curJtj) += stride;
  }
  (*curLtr) += row*stride;
  return dotP;
}

// Solving for Ly = b
__device__ void SolveLowerTriangularMatrix_dev(
    float* y, // y solution vector
    float* ltr, // lower triangular matrix
    float* rhs, // b vector
    int bead_ndx,
    int num_params,
    int stride)
{
  //printf("Solve Lower Triangular Matrix\n");
  float sum;
  int i,j;
  for (i=0; i<num_params; ++i)
  {
    sum = 0;
    for (j=0; j<i; ++j)
    {
      sum += y[j*stride + bead_ndx] * ltr[bead_ndx];
      ltr += stride;
    }
    y[i*stride + bead_ndx] = (rhs[bead_ndx] - sum) / ltr[bead_ndx];
    //printf("sum: %f, param: %d rhs: %f, y: %f\n", sum, i, rhs[bead_ndx], y[i*stride + bead_ndx]);
    //if (bead_ndx == 96) printf("sum: %f, rhs: %f, y: %f\n", sum, rhs[bead_ndx], y[i*stride + bead_ndx]);
    ltr += stride;
    rhs += stride;
  }
}

// Solving for LTx = y where LT is upper triangular
__device__ void SolveUpperTriangularMatrix_dev(
    float* x, // x solution vector
    float* ltr, // lower triangular matrix, traversed as its transpose
    float* y, // y vector
    int bead_ndx,
    int num_params,
    int stride)
{
  //printf("Solve Upper Triangular Matrix\n");
  float sum;
  int i, j;
  int lastRowIdx = ((num_params * (num_params + 1)) / 2) - 1;
  int idx = lastRowIdx;
  for (i=(num_params - 1); i>=0; --i)
  {
    sum = 0;
    for (j=num_params; j>(i+1); --j)
    {
      sum += (ltr[idx*stride + bead_ndx] * x[(j-1)*stride + bead_ndx]);
      //printf("ltr: %f, x: %f, idx: %d\n", ltr[idx*stride + bead_ndx], x[(j-1)*stride + bead_ndx], idx);
      idx = idx - j + 1;
    }
    //if (bead_ndx == 96) printf("y: %f\n", y[i*stride + bead_ndx]);
    x[i*stride + bead_ndx] = (y[i*stride + bead_ndx] - sum)/ltr[idx*stride + bead_ndx];
    //if (bead_ndx == 96) printf("sum: %f, param: %d, y: %f, x: %f, idx: %d\n", sum, i, y[i*stride + bead_ndx], x[i*stride + bead_ndx], idx);
    lastRowIdx--;
    idx = lastRowIdx;
  }
}

// Zero out the JTJ matrix before building it. This could be a device
// function called from the kernel performing Lev-Mar fitting.
// Solve Ax = b
// Write A as A = L(LT) where LT is the transpose of L. Here L is a lower
// triangular matrix.
// L(LT)x = b
// Assume (LT)x = y
// Ly = b
// Solve for y and back substitute into (LT)x = y to solve for x.
// Here A is the JTJ matrix, x is the delta step for the params to fit, and
// b is (JT) times the weighted residual.
__device__ void CholeskySolve_dev(
    float lambda,
    float* jtj, // matrix from build matrix kernel
    float* scratch_mat,
    float* rhs,
    float* delta,
    int bead_ndx,
    int num_params,
    int num_beads
    // bit mask for beads we want to compute. Need to filter beads
    // whose JTJ matrix is not positive definite
)
{
  //printf("Cholesky Solve\n");
  int row;
  float dotProduct;
  // l_rr is the diagonal entry of the lower triangular matrix, where r and c
  // index row and column
  float* curJtjPtr = jtj;
  float* ltr = scratch_mat;
  float* curLtr = scratch_mat;
  //printf("lambda: %f\n", lambda);
  for (row=0; row<num_params; ++row)
  {
    // sum of squares of the non-diagonal entries in this row of the lower
    // triangular matrix
    dotProduct = CalculateNonDiagLowerTriangularElements_dev(bead_ndx, row,
        &curJtjPtr, ltr, &curLtr, num_beads);

    // diagonal entry calculation
    curLtr[bead_ndx] = sqrtf(curJtjPtr[bead_ndx]*(1.0f + lambda) - dotProduct);
    //if (bead_ndx == 96) printf("row: %d, arr: %f, dotP: %f, lrr: %f\n", row, curJtjPtr[bead_ndx], dotProduct, curLtr[bead_ndx]);
    curLtr += num_beads;
    curJtjPtr += num_beads;
  }
  SolveLowerTriangularMatrix_dev(delta, ltr, rhs, bead_ndx, num_params, num_beads);
  SolveUpperTriangularMatrix_dev(delta, ltr, delta, bead_ndx, num_params, num_beads);
}

__device__ void CalculateNewBeadParams_dev(
    float* orig_params,
    float* new_params,
    float* delta,
    unsigned int* paramIdxMap,
    int bead_ndx,
    int num_params,
    int num_beads,
    int sId,
    int flow_block_size
)
{
  unsigned int paramIdx;
  //printf("New Params\n");
  /* for (int i=0; i<num_params; ++i) {
    paramIdx = paramIdxMap[i];
    printf("old: %f new: %f pIdx: %d\n", params[paramIdx*num_beads + bead_ndx],
        params[paramIdx*num_beads + bead_ndx] + delta[i*num_beads + bead_ndx], paramIdx);
    params[paramIdx*num_beads + bead_ndx] += delta[i*num_beads + bead_ndx];
  }*/
  unsigned int AmplIdx = BEAD_OFFSET(Ampl[0]);
  unsigned int RIdx = BEAD_OFFSET(R);
  unsigned int CopiesIdx = BEAD_OFFSET(Copies);
  unsigned int DmultIdx = BEAD_OFFSET(dmult);
  float paramVal;
  for (int i=0; i<num_params; ++i)
  {
    paramIdx = paramIdxMap[i];
    if (paramIdx == RIdx)
    {
      paramVal = orig_params[paramIdx*num_beads + bead_ndx] + delta[i*num_beads + bead_ndx];
      clamp_streaming(paramVal, CP[sId].beadParamsMinConstraints.R,
          CP[sId].beadParamsMaxConstraints.R); //CP_MULTIFLOWFIT //CP_MULTIFLOWFIT
    }
    if (paramIdx == CopiesIdx)
    {
      paramVal = orig_params[paramIdx*num_beads + bead_ndx] + delta[i*num_beads + bead_ndx];
      clamp_streaming(paramVal, CP[sId].beadParamsMinConstraints.Copies,
CP[sId].beadParamsMaxConstraints.Copies); //CP_MULTIFLOWFIT //CP_MULTIFLOWFIT } if (paramIdx == DmultIdx) { paramVal = orig_params[paramIdx*num_beads + bead_ndx] + delta[i*num_beads + bead_ndx]; clamp_streaming(paramVal, CP[sId].beadParamsMinConstraints.dmult, CP[sId].beadParamsMaxConstraints.dmult); //CP_MULTIFLOWFIT //CP_MULTIFLOWFIT } if (paramIdx >= AmplIdx && paramIdx <= (AmplIdx + flow_block_size - 1)) { paramVal = orig_params[paramIdx*num_beads + bead_ndx] + delta[i*num_beads + bead_ndx]; clamp_streaming(paramVal, CP[sId].beadParamsMinConstraints.Ampl, CP[sId].beadParamsMaxConstraints.Ampl); //CP_MULTIFLOWFIT //CP_MULTIFLOWFIT } //printf("old: %f new: %f pIdx: %d\n", params[paramIdx*num_beads + bead_ndx], paramVal, paramIdx); new_params[paramIdx*num_beads + bead_ndx] = paramVal; } } __device__ void UpdateBeadParams_dev( float* orig_params, float* new_params, unsigned int* paramIdxMap, int bead_ndx, int num_params, int num_beads ) { unsigned int paramIdx; //printf("Updated Params in Lev Mar Iter\n"); for (int i=0; i<num_params; ++i) { paramIdx = paramIdxMap[i]; //printf("new: %f pIdx: %d\n", new_params[paramIdx*num_beads + bead_ndx], paramIdx); orig_params[paramIdx*num_beads + bead_ndx] = new_params[paramIdx*num_beads + bead_ndx]; } } __device__ void CalculateMultiFlowFitResidual_dev( float& residual, float* pObservedTrace, float* pModelTrace, float* pEmphasisVec, int flow_ndx, int num_beads, int num_frames, int nonZeroEmpFrames ) { float eval; pObservedTrace += flow_ndx*num_beads*num_frames; for (int j=0; j<nonZeroEmpFrames; ++j) { eval = (*pObservedTrace - *pModelTrace)*pEmphasisVec[j]; residual += eval*eval; pObservedTrace += num_beads; pModelTrace += num_beads; } } __device__ float DecideOnEmphasisVectorsForInterpolation( const int* nonZeroEmpFramesVec, const float** emLeft, const float** emRight, const float Ampl, const float* emphasis, const int num_frames, int &nonZeroEmpFrames ) { float frac; int left; if (Ampl < LAST_POISSON_TABLE_COL) { left = (int) Ampl; frac = (left + 1.0f - Ampl); if (left < 0) { left = 0; frac = 1.0f; } *emLeft = &emphasis[left]; *emRight = &emphasis[left + 1]; }else{ left = LAST_POISSON_TABLE_COL; *emLeft = &emphasis[left]; *emRight = NULL; frac = 1.0f; } nonZeroEmpFrames = (left == LAST_POISSON_TABLE_COL) ? nonZeroEmpFramesVec[left] : max(nonZeroEmpFramesVec[left], nonZeroEmpFramesVec[left + 1]); return frac; } __device__ void DynamicConstraintKrate( float copies, float Ampl, float& kmult, bool& twoParamFit) { float magic = 2.0f/copies; float thresh = Ampl > 0.0f ? 
Ampl : 0.0f; float lower_bound = 2.0f*magic/ (magic+thresh); float upper_bound = 1.0f/lower_bound; if (lower_bound > 1.0f) { kmult = 1.0f; twoParamFit = false; } else { if (kmult > upper_bound) kmult = upper_bound; if (kmult < lower_bound) kmult = lower_bound; twoParamFit = true; } } __device__ void ModelFunctionEvaluationForExponentialTailFit_dev( int start, int num_frames, int num_beads, float A, float taubInv, float dc_offset, float* fval, const ConstParams* pCP, float* tmp_fval) { fval += start*num_beads; if (tmp_fval) tmp_fval += start*num_beads; float val; for (int i=start; i<num_frames; ++i) { val = A * __expf(-(pCP->frameNumber[i] - pCP->frameNumber[start])*taubInv) + dc_offset; if (tmp_fval) { *tmp_fval = (val - *fval) / 0.001f; tmp_fval += num_beads; } else { *fval = val; } fval += num_beads; } } __device__ void CalculateResidualForExponentialTailFit_dev( float* obs, float* pred, int start, int end, float* err, int num_beads, float& residual) { residual = 0; float e; obs += start*num_beads; pred += start*num_beads; err += start*num_beads; for (int i=start; i<end; ++i) { e = *obs - *pred; *err = e; residual += e*e; obs += num_beads; pred += num_beads; err += num_beads; } } /***************************************************************************** SINGLE FLOW FIT KERNELS *****************************************************************************/ // Let number of beads be N and frames be F. The size for each input argument in // comments is in bytes. __global__ void PerFlowGaussNewtonFit_k( // inputs float* fg_buffers, // NxF float* emphasisVec, float* nucRise, float * pBeadParamsBase, //N bead_state* pState, // scratch space in global memory float* err, // NxF #ifndef FVAL_L1 float* fval, // NxF float* tmp_fval, // NxF #endif float* meanErr, // other inputs float minAmpl, float maxKmult, float minKmult, float adjKmult, bool fitKmult, int realFnum, int num_beads, // 4 int num_frames, // 4 bool useDynamicEmphasis, // int * pMonitor, int sId, int flow_block_size ) { //useDynamicEmphasis = false; #ifdef FVAL_L1 float fval[MAX_COMPRESSED_FRAMES_GPU]; float tmp_fval[MAX_COMPRESSED_FRAMES_GPU]; #endif extern __shared__ float emphasis[]; int numWarps = blockDim.x/32; int threadWarpIdx = threadIdx.x%32; int warpIdx = threadIdx.x/32; for(int i=warpIdx; i<num_frames; i += numWarps) { if (threadWarpIdx < MAX_POISSON_TABLE_COL) emphasis[(MAX_POISSON_TABLE_COL)*i + threadWarpIdx ] = emphasisVec[num_frames*threadWarpIdx + i ]; } __syncthreads(); int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+31)/32) * 32; pBeadParamsBase += bead_ndx; pState += bead_ndx; float *pCopies = &pBeadParamsBase[BEAD_OFFSET(Copies)*num_beads]; float *pAmpl = &pBeadParamsBase[BEAD_OFFSET(Ampl[0])*num_beads]; float *pKmult = &pBeadParamsBase[BEAD_OFFSET(kmult[0])*num_beads]; #ifdef FVAL_L1 // fval = fval_l1; // tmp_fval = tmp_fval_l1; #else fval += bead_ndx; tmp_fval += bead_ndx; #endif err += bead_ndx; meanErr += bead_ndx; fg_buffers += bead_ndx; if (pState->corrupt || !pState->clonal_read || pState->pinned) return; float avg_err; float* deltaFrames = CP[sId].deltaFrames; int* nonZeroEmpFramesVec = CP[sId].non_zero_fine_emphasis_frames; for(int flow_ndx=0; flow_ndx<flow_block_size; flow_ndx++){ int nucid = CP[sId].flowIdxMap[flow_ndx]; //CP_SINGLEFLOWFIT float sens = CP[sId].sens*SENSMULTIPLIER; //CP_SINGLEFLOWFIT float copies = *pCopies; float R = *(pCopies + num_beads); float d = *(pCopies + 2*num_beads); float gain = *(pCopies + 3 * num_beads) ; d 
*= CP[sId].d[nucid]; //CP_SINGLEFLOWFIT //offset for next value gets added to address at end of flow_ndx loop float krate = *pKmult; float Ampl = *pAmpl; float etbR; float tauB; float SP; ComputeEtbR_dev(etbR, &CP[sId], R, copies, pBeadParamsBase[BEAD_OFFSET(phi)*num_beads], sId, nucid, realFnum+flow_ndx); //CP_SINGLEFLOWFIT ComputeTauB_dev(tauB, &CP[sId], etbR, sId); //CP_SINGLEFLOWFIT ComputeSP_dev(SP, &CP[sId], copies, realFnum+flow_ndx, sId); //CP_SINGLEFLOWFIT bool twoParamFit = fitKmult || ( copies * Ampl > adjKmult ); float residual, newresidual; // lambdaThreshold; int i; // These values before start are always zero since there is no nucrise yet. Don't need to // zero it out. Have to change the residual calculation accordingly for the frames before the // start. for (i =0; i < CP[sId].fine_nuc_start[flow_ndx]; i++) { //CP_SINGLEFLOWFIT #ifdef FVAL_L1 //fval[i] = 0; //tmp_fval[i] = 0; #else //fval[num_beads*i] = 0; //tmp_fval[num_beads*i] = 0; #endif } // first step // Evaluate model function using input Ampl and Krate and get starting residual Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, Ampl, krate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, fval, deltaFrames, num_frames); const float *emLeft, *emRight; float frac; // calculating weighted sum of square residuals for the convergence test int nonZeroEmpFrames = 0; frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,Ampl,emphasis, num_frames, nonZeroEmpFrames); residual = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); // new Ampl and Krate generated from the Lev mar Fit float newAmpl, newKrate; // convergence test variables //int flowDone = 0; float delta0 = 0, delta1 = 0; // Lev Mar Fit Outer Loop int iter; for (iter = 0; iter < ITER; ++iter) { // new Ampl and kmult by adding delta to existing values newAmpl = Ampl + 0.001f; newKrate = (twoParamFit)?(krate + 0.001f):(krate); // Evaluate model function for new Ampl keeping Krate constant float aa = 0, akr= 0, krkr = 0, rhs0 = 0, rhs1 = 0; Fermi_ModelFuncEvaluationForSingleFlowFit(sId, flow_ndx, nucid, nucRise, newAmpl, Ampl, krate*CP[sId].krate[nucid], newKrate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx]*ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, twoParamFit ? TwoParams : OneParam, emLeft, emRight, frac, fval, err, &aa, &rhs0, &krkr, &rhs1, &akr, deltaFrames, nonZeroEmpFrames); // Now start the solving. 
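// Gauss-Newton step: solve the 2x2 normal equations
//   [ aa   akr  ] [delta0]   [rhs0]
//   [ akr  krkr ] [delta1] = [rhs1]
// by Cramer's rule, where aa = sum(jacA^2), krkr = sum(jacK^2),
// akr = sum(jacA*jacK), and rhs0/rhs1 are the Jacobian columns dotted with
// the weighted residual err.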
if(twoParamFit){ float det = 1.0f / (aa*krkr - akr*akr); delta1 = (-akr*rhs0 + aa*rhs1)*det; delta0 = (krkr*rhs0 - akr*rhs1)*det; }else delta0 = rhs0 / aa; if( !::isnan(delta0) && !::isnan(delta1)){ // add delta to params to obtain new params newAmpl = Ampl + delta0; if(twoParamFit)newKrate = krate + delta1; clamp_streaming(newAmpl, minAmpl, (float)LAST_POISSON_TABLE_COL); if(twoParamFit)clamp_streaming(newKrate, minKmult, maxKmult); // Evaluate using new params Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, newAmpl, newKrate*CP[sId].krate[nucid], tauB, gain, SP, //CP_SINGLEFLOWFIT d, sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, tmp_fval, deltaFrames, num_frames); // residual calculation using new parameters if (useDynamicEmphasis) { int newNonZeroEmpFrames; frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,newAmpl,emphasis, num_frames, newNonZeroEmpFrames); nonZeroEmpFrames = max(nonZeroEmpFrames, newNonZeroEmpFrames); } newresidual = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, tmp_fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); if (newresidual < residual) { Ampl = newAmpl; if(twoParamFit)krate = newKrate; // copy new function val to fval for (i=CP[sId].fine_nuc_start[flow_ndx]; i<num_frames; ++i){ //CP_SINGLEFLOWFIT #ifdef FVAL_L1 fval[i] = tmp_fval[i]; #else fval[num_beads*i] = tmp_fval[num_beads*i]; #endif } residual = newresidual; } else { if (useDynamicEmphasis) { frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,Ampl,emphasis, num_frames, nonZeroEmpFrames); } } } if ((delta0*delta0) < 0.0000025f){ iter++; break; } } // end ITER loop //atomicAdd(&pMonitor[iter-1], 1); if(flow_ndx==0) avg_err = pState->avg_err * realFnum; if(twoParamFit) *pKmult = krate; *pAmpl= Ampl; residual = CalculateMeanResidualErrorPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, fval, emphasis+LAST_POISSON_TABLE_COL, num_beads, num_frames); avg_err += residual; meanErr[num_beads * flow_ndx] = residual; pAmpl += num_beads; pKmult += num_beads; fg_buffers += num_frames*num_beads; } // end flow_ndx loop avg_err /= (realFnum + flow_block_size); pState->avg_err = avg_err; int high_err_cnt = 0; avg_err *= WASHOUT_THRESHOLD; for (int flow_ndx = flow_block_size - 1; flow_ndx >= 0 && (meanErr[num_beads* flow_ndx] > avg_err); flow_ndx--) high_err_cnt++; if (high_err_cnt > WASHOUT_FLOW_DETECTION) pState->corrupt = true; } __global__ void PerFlowHybridFit_k( // inputs float* fg_buffers, // NxF float* emphasisVec, float* nucRise, float * pBeadParamsBase, //N bead_state* pState, // scratch space in global memory float* err, // NxF #ifndef FVAL_L1 float* fval, // NxF float* tmp_fval, // NxF #endif float* meanErr, // other inputs float minAmpl, float maxKmult, float minKmult, float adjKmult, bool fitKmult, int realFnum, int num_beads, // 4 int num_frames, // 4 bool useDynamicEmphasis, // int * pMonitor, int sId, int switchToLevMar, int flow_block_size ) { #ifdef FVAL_L1 float fval[MAX_COMPRESSED_FRAMES_GPU]; float tmp_fval[MAX_COMPRESSED_FRAMES_GPU]; #endif extern __shared__ float emphasis[]; int numWarps = blockDim.x/32; int threadWarpIdx = threadIdx.x%32; int warpIdx = threadIdx.x/32; for(int i=warpIdx; i<num_frames; i += numWarps) { if (threadWarpIdx < MAX_POISSON_TABLE_COL) emphasis[(MAX_POISSON_TABLE_COL)*i + threadWarpIdx ] = emphasisVec[num_frames*threadWarpIdx + i ]; } __syncthreads(); int bead_ndx 
= blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+31)/32) * 32; pBeadParamsBase += bead_ndx; pState += bead_ndx; float *pCopies = &pBeadParamsBase[BEAD_OFFSET(Copies)*num_beads]; float *pAmpl = &pBeadParamsBase[BEAD_OFFSET(Ampl[0])*num_beads]; float *pKmult = &pBeadParamsBase[BEAD_OFFSET(kmult[0])*num_beads]; #ifdef FVAL_L1 // fval = fval_l1; // tmp_fval = tmp_fval_l1; #else fval += bead_ndx; tmp_fval += bead_ndx; #endif err += bead_ndx; meanErr += bead_ndx; fg_buffers += bead_ndx; if (pState->corrupt || !pState->clonal_read || pState->pinned) return; float avg_err; float* deltaFrames = CP[sId].deltaFrames; int* nonZeroEmpFramesVec = CP[sId].non_zero_fine_emphasis_frames; for(int flow_ndx=0; flow_ndx<flow_block_size; flow_ndx++){ int nucid = CP[sId].flowIdxMap[flow_ndx]; //CP_SINGLEFLOWFIT float sens = CP[sId].sens*SENSMULTIPLIER; //CP_SINGLEFLOWFIT float copies = *pCopies; float R = *(pCopies + num_beads); float d = *(pCopies + 2*num_beads); float gain = *(pCopies + 3 * num_beads) ; d *= CP[sId].d[nucid]; //CP_SINGLEFLOWFIT //offset for next value gets added to address at end of flow_ndx loop float krate = *pKmult; float Ampl = *pAmpl; float etbR; float tauB; // = tmp.x; // *ptauB; float SP; //= tmp.y; // *pSP; ComputeEtbR_dev(etbR, &CP[sId], R, copies, pBeadParamsBase[BEAD_OFFSET(phi)*num_beads], sId, nucid, realFnum+flow_ndx); //CP_SINGLEFLOWFIT ComputeTauB_dev(tauB, &CP[sId], etbR, sId); //CP_SINGLEFLOWFIT ComputeSP_dev(SP, &CP[sId], copies, realFnum+flow_ndx, sId); //CP_SINGLEFLOWFIT bool twoParamFit = fitKmult || ( copies * Ampl > adjKmult ); float residual, newresidual; // lambdaThreshold; int i; // These values before start are always zero since there is no nucrise yet. Don't need to // zero it out. Have to change the residual calculation accordingly for the frames before the // start. 
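// (The loop below is intentionally a no-op: all of its statements are
// commented out, and it is kept only to document which frames would
// otherwise need zeroing.)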
for (i =0; i < CP[sId].fine_nuc_start[flow_ndx]; i++) { //CP_SINGLEFLOWFIT #ifdef FVAL_L1 //fval[i] = 0; //tmp_fval[i] = 0; #else //fval[num_beads*i] = 0; //tmp_fval[num_beads*i] = 0; #endif } // first step // Evaluate model function using input Ampl and Krate and get starting residual Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, Ampl, krate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, fval, deltaFrames, num_frames); const float *emLeft, *emRight; float frac; // calculating weighted sum of square residuals for the convergence test int nonZeroEmpFrames = 0; frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,Ampl,emphasis, num_frames, nonZeroEmpFrames); residual = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); // new Ampl and Krate generated from the Lev mar Fit float newAmpl, newKrate; // convergence test variables float delta0 = 0, delta1 = 0; float det; // Indicates whether a flow has converged //int flowDone = 0; float lambda = 1E-20; // Lev Mar Fit Outer Loop int iter; for (iter = 0; iter < ITER; ++iter) { // new Ampl and krate by adding delta to existing values newAmpl = Ampl + 0.001f; newKrate = (twoParamFit)?(krate + 0.001f):(krate); // Evaluate model function for new Ampl keeping Krate constant float aa = 0, akr= 0, krkr = 0, rhs0 = 0, rhs1 = 0; Fermi_ModelFuncEvaluationForSingleFlowFit(sId, flow_ndx, nucid, nucRise, newAmpl, Ampl, krate*CP[sId].krate[nucid], newKrate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx]*ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, twoParamFit ? TwoParams : OneParam, emLeft, emRight, frac, fval, err, &aa, &rhs0, &krkr, &rhs1, &akr, deltaFrames, nonZeroEmpFrames); // Now start the solving. 
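// Hybrid solve: for the first switchToLevMar iterations take plain
// Gauss-Newton steps; afterwards fall back to Levenberg-Marquardt, which
// damps the normal equations by scaling the diagonal with (1 + lambda):
//   [ aa*(1+lambda)   akr            ] [delta0]   [rhs0]
//   [ akr             krkr*(1+lambda)] [delta1] = [rhs1]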
if(iter< switchToLevMar){ if(twoParamFit){ float det = 1.0f / (aa*krkr - akr*akr); delta1 = (-akr*rhs0 + aa*rhs1)*det; delta0 = (krkr*rhs0 - akr*rhs1)*det; }else delta0 = rhs0 / aa; if( !::isnan(delta0) && !::isnan(delta1)){ // add delta to params to obtain new params newAmpl = Ampl + delta0; if(twoParamFit)newKrate = krate + delta1; clamp_streaming(newAmpl, minAmpl, (float)LAST_POISSON_TABLE_COL); if(twoParamFit)clamp_streaming(newKrate, minKmult, maxKmult); // Evaluate using new params Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, newAmpl, newKrate*CP[sId].krate[nucid], tauB, gain, SP, //CP_SINGLEFLOWFIT d, sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, tmp_fval, deltaFrames, num_frames); // residual calculation using new parameters if (useDynamicEmphasis){ int newNonZeroEmpFrames; frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,newAmpl,emphasis, num_frames, newNonZeroEmpFrames); nonZeroEmpFrames = max(nonZeroEmpFrames, newNonZeroEmpFrames); } newresidual = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, tmp_fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); if (newresidual < residual) { Ampl = newAmpl; if(twoParamFit)krate = newKrate; // copy new function val to fval for (i=CP[sId].fine_nuc_start[flow_ndx]; i<num_frames; ++i){ //CP_SINGLEFLOWFIT #ifdef FVAL_L1 fval[i] = tmp_fval[i]; #else fval[num_beads*i] = tmp_fval[num_beads*i]; #endif } residual = newresidual; } else { if (useDynamicEmphasis) { frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,Ampl,emphasis, num_frames, nonZeroEmpFrames); } } } }else{ //LevMar Instead bool cont_proc = false; while (!cont_proc) { if(twoParamFit){ det = 1.0f / (aa*krkr*(1.0f + lambda)*(1.0f + lambda) - akr*akr); delta0 = (krkr*(1.0f + lambda)*rhs0 - akr*rhs1)*det; delta1 = (-akr*rhs0 + aa*(1.0f + lambda)*rhs1)*det; }else delta0 = rhs0 / (aa*(1.0f + lambda)); // NAN check bool nan_detected = false; if( !::isnan(delta0) && !::isnan(delta1)){ // add delta to params to obtain new params newAmpl = Ampl + delta0; if(twoParamFit)newKrate = krate + delta1; clamp_streaming(newAmpl, minAmpl, (float)LAST_POISSON_TABLE_COL); if(twoParamFit)clamp_streaming(newKrate, minKmult, maxKmult); // Evaluate using new params Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, newAmpl, newKrate*CP[sId].krate[nucid], tauB, gain, SP, //CP_SINGLEFLOWFIT d, sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, tmp_fval, deltaFrames, num_frames); // residual calculation using new parameters if (useDynamicEmphasis) { int newNonZeroEmpFrames; frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,newAmpl,emphasis, num_frames, newNonZeroEmpFrames); nonZeroEmpFrames = max(nonZeroEmpFrames, newNonZeroEmpFrames); } newresidual = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, tmp_fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); } else nan_detected = true; // this might be killing...Need to rethink for some alternative here // If new residual is less than the earlier recorded residual, accept the solution and // obtain new parameters and copy them to original parameters and copy the new model function // to the earlier recorded model function till this point if (newresidual < residual && !nan_detected) { lambda /= 10.0f; 
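// A successful step relaxes the damping; clamp at FLT_MIN so repeated
// successes cannot underflow lambda to zero.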
if (lambda < FLT_MIN) lambda = FLT_MIN; Ampl = newAmpl; if(twoParamFit)krate = newKrate; // copy new function val to fval for (i=CP[sId].fine_nuc_start[flow_ndx]; i<num_frames; ++i){ //CP_SINGLEFLOWFIT #ifdef FVAL_L1 fval[i] = tmp_fval[i]; #else fval[num_beads*i] = tmp_fval[num_beads*i]; #endif } residual = newresidual; cont_proc = true; } else { lambda *= 10.0f; } if (lambda > 1.0f) { cont_proc = true; if (useDynamicEmphasis) { frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,Ampl,emphasis, num_frames, nonZeroEmpFrames); } } } } if ((delta0*delta0) < 0.0000025f){ iter++; break; } } // end ITER loop // atomicAdd(&pMonitor[iter-1], 1); if(flow_ndx==0) avg_err = pState->avg_err * realFnum; if(twoParamFit) *pKmult = krate; *pAmpl= Ampl; residual = CalculateMeanResidualErrorPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, fval, emphasis+LAST_POISSON_TABLE_COL, num_beads, num_frames); avg_err += residual; meanErr[num_beads * flow_ndx] = residual; pAmpl += num_beads; pKmult += num_beads; fg_buffers += num_frames*num_beads; } // end flow_ndx loop avg_err /= (realFnum + flow_block_size); pState->avg_err = avg_err; int high_err_cnt = 0; avg_err *= WASHOUT_THRESHOLD; for (int flow_ndx = flow_block_size - 1; flow_ndx >= 0 && (meanErr[num_beads* flow_ndx] > avg_err); flow_ndx--) high_err_cnt++; if (high_err_cnt > WASHOUT_FLOW_DETECTION) pState->corrupt = true; } __global__ void PerFlowLevMarFit_k( // inputs float* fg_buffers, // NxF float* emphasisVec, float* nucRise, float * pBeadParamsBase, //N bead_state* pState, // scratch space in global memory float* err, // NxF #ifndef FVAL_L1 float* fval, // NxF float* tmp_fval, // NxF #endif float* meanErr, // other inputs float minAmpl, float maxKmult, float minKmult, float adjKmult, bool fitKmult, int realFnum, int num_beads, // 4 int num_frames, // 4 bool useDynamicEmphasis, // int * pMonitor, int sId, int flow_block_size ) { #ifdef FVAL_L1 float fval[MAX_COMPRESSED_FRAMES_GPU]; float tmp_fval[MAX_COMPRESSED_FRAMES_GPU]; #endif extern __shared__ float emphasis[]; int numWarps = blockDim.x/32; int threadWarpIdx = threadIdx.x%32; int warpIdx = threadIdx.x/32; for(int i=warpIdx; i<num_frames; i += numWarps) { if (threadWarpIdx < MAX_POISSON_TABLE_COL) emphasis[(MAX_POISSON_TABLE_COL)*i + threadWarpIdx ] = emphasisVec[num_frames*threadWarpIdx + i ]; } __syncthreads(); int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+31)/32) * 32; pBeadParamsBase += bead_ndx; pState += bead_ndx; float *pCopies = &pBeadParamsBase[BEAD_OFFSET(Copies)*num_beads]; float *pAmpl = &pBeadParamsBase[BEAD_OFFSET(Ampl[0])*num_beads]; float *pKmult = &pBeadParamsBase[BEAD_OFFSET(kmult[0])*num_beads]; #ifdef FVAL_L1 // fval = fval_l1; // tmp_fval = tmp_fval_l1; #else fval += bead_ndx; tmp_fval += bead_ndx; #endif err += bead_ndx; meanErr += bead_ndx; fg_buffers += bead_ndx; if (pState->corrupt || !pState->clonal_read || pState->pinned) return; float avg_err; float* deltaFrames = CP[sId].deltaFrames; int* nonZeroEmpFramesVec = CP[sId].non_zero_fine_emphasis_frames; for(int flow_ndx=0; flow_ndx<flow_block_size; flow_ndx++){ int nucid = CP[sId].flowIdxMap[flow_ndx]; //CP_SINGLEFLOWFIT float sens = CP[sId].sens*SENSMULTIPLIER; //CP_SINGLEFLOWFIT float copies = *pCopies; float R = *(pCopies + num_beads); float d = *(pCopies + 2*num_beads); float gain = *(pCopies + 3 * num_beads) ; d *= CP[sId].d[nucid]; //CP_SINGLEFLOWFIT //offset for next value gets added to address at end of flow_ndx loop 
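// Bead parameters are laid out as a structure-of-arrays with stride
// num_beads: from pCopies, offset 0 is Copies, +num_beads is R,
// +2*num_beads is d and +3*num_beads is gain, so consecutive threads read
// consecutive beads (coalesced) for each field.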
float krate = *pKmult; float Ampl = *pAmpl; float etbR; float tauB; // = tmp.x; // *ptauB; float SP; //= tmp.y; // *pSP; ComputeEtbR_dev(etbR, &CP[sId], R, copies, pBeadParamsBase[BEAD_OFFSET(phi)*num_beads], sId, nucid, realFnum+flow_ndx); //CP_SINGLEFLOWFIT ComputeTauB_dev(tauB, &CP[sId], etbR, sId); //CP_SINGLEFLOWFIT ComputeSP_dev(SP, &CP[sId], copies, realFnum+flow_ndx, sId); //CP_SINGLEFLOWFIT bool twoParamFit = fitKmult || ( copies * Ampl > adjKmult ); float residual, newresidual; // lambdaThreshold; int i, iter; // These values before start are always zero since there is no nucrise yet. Don't need to // zero it out. Have to change the residual calculation accordingly for the frames before the // start. for (i=0; i < CP[sId].fine_nuc_start[flow_ndx]; i++) { //CP_SINGLEFLOWFIT #ifdef FVAL_L1 //fval[i] = 0; //tmp_fval[i] = 0; #else //fval[num_beads*i] = 0; //tmp_fval[num_beads*i] = 0; #endif } // first step // Evaluate model function using input Ampl and Krate and get starting residual Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, Ampl, krate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, fval, deltaFrames, num_frames); const float *emLeft, *emRight; float frac; // calculating weighted sum of square residuals for the convergence test int nonZeroEmpFrames = 0; frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,Ampl,emphasis, num_frames, nonZeroEmpFrames); residual = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); // new Ampl and Krate generated from the Lev mar Fit float newAmpl, newKrate; // convergence test variables float delta0 = 0, delta1 = 0; // determinant for the JTJ matrix in Lev Mar Solve float det; // Indicates whether a flow has converged int flowDone = 0; float lambda = 1E-20; // Lev Mar Fit Outer Loop for (iter = 0; iter < 40; ++iter) { // convergence test...need to think of an alternate approach if ((delta0*delta0) < 0.0000025f) flowDone++; else flowDone = 0; // stop the loop for this bead here if (flowDone >= 2) { break; } // new Ampl and krate by adding delta to existing values newAmpl = Ampl + 0.001f; newKrate = (twoParamFit)?(krate + 0.001f):(krate); // Evaluate model function for new Ampl keeping Krate constant float aa = 0, akr= 0, krkr = 0, rhs0 = 0, rhs1 = 0; Fermi_ModelFuncEvaluationForSingleFlowFit(sId, flow_ndx, nucid, nucRise, newAmpl, Ampl, krate*CP[sId].krate[nucid], newKrate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx]*ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, twoParamFit ? TwoParams : OneParam, emLeft, emRight, frac, fval, err, &aa, &rhs0, &krkr, &rhs1, &akr, deltaFrames, nonZeroEmpFrames); // Now start the solving. 
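// Levenberg-Marquardt inner loop: retry the damped solve until a step is
// accepted or lambda grows past 1.0f. An improved residual accepts the step
// and divides lambda by 10; a worse residual (or NaN deltas) multiplies
// lambda by 10 and re-solves with stronger damping.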
bool cont_proc = false; while (!cont_proc) { if(twoParamFit){ det = 1.0f / (aa*krkr*(1.0f + lambda)*(1.0f + lambda) - akr*akr); delta0 = (krkr*(1.0f + lambda)*rhs0 - akr*rhs1)*det; delta1 = (-akr*rhs0 + aa*(1.0f + lambda)*rhs1)*det; }else delta0 = rhs0 / (aa*(1.0f + lambda)); // NAN check bool nan_detected = false; if( !::isnan(delta0) && !::isnan(delta1)){ // add delta to params to obtain new params newAmpl = Ampl + delta0; if(twoParamFit)newKrate = krate + delta1; clamp_streaming(newAmpl, minAmpl, (float)LAST_POISSON_TABLE_COL); if(twoParamFit)clamp_streaming(newKrate, minKmult, maxKmult); // Evaluate using new params Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, newAmpl, newKrate*CP[sId].krate[nucid], tauB, gain, SP, //CP_SINGLEFLOWFIT d, sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, tmp_fval, deltaFrames, num_frames); // residual calculation using new parameters if (useDynamicEmphasis) { int newNonZeroEmpFrames; frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,newAmpl,emphasis, num_frames, newNonZeroEmpFrames); nonZeroEmpFrames = max(nonZeroEmpFrames, newNonZeroEmpFrames); } newresidual = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, tmp_fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); } else nan_detected = true; // this might be killing...Need to rethink for some alternative here // If new residual is less than the earlier recorded residual, accept the solution and // obtain new parameters and copy them to original parameters and copy the new model function // to the earlier recorded model function till this point if (newresidual < residual && !nan_detected) { lambda /= 10.0f; if (lambda < FLT_MIN) lambda = FLT_MIN; Ampl = newAmpl; if(twoParamFit)krate = newKrate; // copy new function val to fval for (i=CP[sId].fine_nuc_start[flow_ndx]; i<num_frames; ++i){ //CP_SINGLEFLOWFIT #ifdef FVAL_L1 fval[i] = tmp_fval[i]; #else fval[num_beads*i] = tmp_fval[num_beads*i]; #endif } residual = newresidual; cont_proc = true; } else { lambda *= 10.0f; } if (lambda > 1.0f) { cont_proc = true; if (useDynamicEmphasis) { frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,Ampl,emphasis, num_frames, nonZeroEmpFrames); } } } } // end ITER loop // atomicAdd(&pMonitor[iter-1], 1); if(flow_ndx==0) avg_err = pState->avg_err * realFnum; if(twoParamFit) *pKmult = krate; *pAmpl= Ampl; residual = CalculateMeanResidualErrorPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, fval, emphasis+LAST_POISSON_TABLE_COL, num_beads, num_frames); avg_err += residual; meanErr[num_beads * flow_ndx] = residual; pAmpl += num_beads; pKmult += num_beads; fg_buffers += num_frames*num_beads; } // end flow_ndx loop avg_err /= (realFnum + flow_block_size); pState->avg_err = avg_err; int high_err_cnt = 0; avg_err *= WASHOUT_THRESHOLD; for (int flow_ndx = flow_block_size - 1; flow_ndx >= 0 && (meanErr[num_beads* flow_ndx] > avg_err); flow_ndx--) high_err_cnt++; if (high_err_cnt > WASHOUT_FLOW_DETECTION) pState->corrupt = true; } // Let number of beads be N and frames be F. The size for each input argument in // comments is in bytes. 
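//
/* PerFlowRelaxedKmultGaussNewtonFit_k below is the undamped
 * Gauss-Newton variant of the fit above. Per flow it may run the fit
 * up to twice (relax_kmult_pass): if the fitted kmult ends up pinned
 * at the lower bound with a large residual (sqrtf(residual) > 20.0f),
 * the kmult search range is moved toward slower incorporation rates
 * (localMinKmult = 0.3f, localMaxKmult = previous minimum) and the fit
 * is rerun. On __CUDA_ARCH__ >= 350 the Kepler model-function path is
 * used; otherwise the Fermi path.
 */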
__global__ void PerFlowRelaxedKmultGaussNewtonFit_k( // inputs const float* fg_buffers, // NxF const float* emphasisVec, const float* nucRise, float * pBeadParamsBase, //N bead_state* pState, // scratch space in global memory float* err, // NxF #ifndef FVAL_L1 float* fval, // NxF float* tmp_fval, // NxF #endif float* jac, // NxF float* meanErr, // other inputs const float minAmpl, float maxKmult, float minKmult, const float adjKmult, const bool fitKmult, const int realFnum, int num_beads, // 4 const int num_frames, // 4 const bool useDynamicEmphasis, const bool useSlowKmultInit, // int * pMonitor, const int sId, const int flow_block_size ) { //useDynamicEmphasis = false; #ifdef FVAL_L1 float fval[MAX_COMPRESSED_FRAMES_GPU]; float tmp_fval[MAX_COMPRESSED_FRAMES_GPU]; #endif // Preload the emphasis table. This is fairly quick. extern __shared__ float emphasis[]; const int numWarps = blockDim.x/32; const int threadWarpIdx = threadIdx.x%32; const int warpIdx = threadIdx.x/32; for(int i=warpIdx; i<num_frames; i += numWarps) { if (threadWarpIdx < MAX_POISSON_TABLE_COL) emphasis[(MAX_POISSON_TABLE_COL)*i + threadWarpIdx ] = emphasisVec[num_frames*threadWarpIdx + i ]; } __syncthreads(); const int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+31)/32) * 32; pBeadParamsBase += bead_ndx; pState += bead_ndx; const float * pCopies = &pBeadParamsBase[BEAD_OFFSET(Copies)*num_beads]; float * pAmpl = &pBeadParamsBase[BEAD_OFFSET(Ampl[0])*num_beads]; float *pKmult = &pBeadParamsBase[BEAD_OFFSET(kmult[0])*num_beads]; #ifndef FVAL_L1 fval += bead_ndx; tmp_fval += bead_ndx; #endif jac += bead_ndx; // For Keplar err += bead_ndx; meanErr += bead_ndx; fg_buffers += bead_ndx; if (pState->corrupt || !pState->clonal_read || pState->pinned) return; float avg_err; float* deltaFrames = CP[sId].deltaFrames; int* nonZeroEmpFramesVec = CP[sId].non_zero_fine_emphasis_frames; const float defKmult = 1.0f; for(int flow_ndx=0; flow_ndx<flow_block_size; flow_ndx++) { const int nucid = CP[sId].flowIdxMap[flow_ndx]; //CP_SINGLEFLOWFIT const float sens = CP[sId].sens*SENSMULTIPLIER; //CP_SINGLEFLOWFIT const float copies = *pCopies; const float R = *(pCopies + num_beads); float d = *(pCopies + 2*num_beads); const float gain = *(pCopies + 3 * num_beads) ; d *= CP[sId].d[nucid]; //CP_SINGLEFLOWFIT // force kmult to be 1 here float krate = defKmult; float localMinKmult = minKmult; float localMaxKmult= maxKmult; float Ampl = *pAmpl; /*if (bead_ndx == 0 && realFnum == 20) { printf("%f %d\n",Ampl, num_frames); const float* out = fg_buffers; printf("Corrected Trace:\n"); for (int fr=0; fr < num_frames; ++fr) { printf("%f ",*out); out += num_beads; } printf("\n"); printf("DeltaFrames:\n"); for (int fr=0; fr < num_frames; ++fr) { printf("%f ",deltaFrames[fr]); } printf("\n"); printf("FrameNum:\n"); for (int fr=0; fr < num_frames; ++fr) { printf("%f ",CP[sId].frameNumber[fr]); } printf("\n"); }*/ float etbR; float tauB; float SP; ComputeEtbR_dev(etbR, &CP[sId], R, copies, pBeadParamsBase[BEAD_OFFSET(phi)*num_beads], sId, nucid, realFnum+flow_ndx); //CP_SINGLEFLOWFIT ComputeTauB_dev(tauB, &CP[sId], etbR, sId); //CP_SINGLEFLOWFIT ComputeSP_dev(SP, &CP[sId], copies, realFnum+flow_ndx, sId); //CP_SINGLEFLOWFIT const bool twoParamFit = fitKmult || ( copies * Ampl > adjKmult ); const float *emLeft, *emRight; int nonZeroEmpFrames; // Decide on kmult start if (twoParamFit) { float frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft, &emRight,Ampl,emphasis, 
num_frames, nonZeroEmpFrames); // default kmult #if __CUDA_ARCH__ >= 350 Keplar_ModelFuncEvaluationForSingleFlowFit(twoParamFit,sId, flow_ndx, nucid, nucRise, Ampl, krate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, ISIG_SUB_STEPS_SINGLE_FLOW* CP[sId].fine_nuc_start[flow_ndx], //CP_SINGLEFLOWFIT num_frames, num_beads, fval, deltaFrames, nonZeroEmpFrames, NoOutput ); #else Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, Ampl, krate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, fval, deltaFrames, nonZeroEmpFrames); #endif float residual_default_kmult = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); // min kmult krate = minKmult; #if __CUDA_ARCH__ >= 350 Keplar_ModelFuncEvaluationForSingleFlowFit(twoParamFit,sId, flow_ndx, nucid, nucRise, Ampl, krate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, ISIG_SUB_STEPS_SINGLE_FLOW* CP[sId].fine_nuc_start[flow_ndx], //CP_SINGLEFLOWFIT num_frames, num_beads, fval, deltaFrames, nonZeroEmpFrames, NoOutput ); #else Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, Ampl, krate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, fval, deltaFrames, nonZeroEmpFrames); #endif float residual_min_kmult = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); if (residual_min_kmult < residual_default_kmult) krate = minKmult; else krate = useSlowKmultInit ? minKmult : defKmult; } float residual, newresidual; int relax_kmult_pass = 0; while (relax_kmult_pass < 2) { // first step // Evaluate model function using input Ampl and Krate and get starting residual #if __CUDA_ARCH__ >= 350 Keplar_ModelFuncEvaluationForSingleFlowFit(twoParamFit,sId, flow_ndx, nucid, nucRise, Ampl, krate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, ISIG_SUB_STEPS_SINGLE_FLOW* CP[sId].fine_nuc_start[flow_ndx], //CP_SINGLEFLOWFIT num_frames, num_beads, fval, deltaFrames, num_frames, NoOutput ); #else Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, Ampl, krate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, fval, deltaFrames, num_frames); #endif const float *emLeft, *emRight; // calculating weighted sum of square residuals for the convergence test //const float EmphSel = (relax_kmult_pass == 1) ? 
(Ampl + 2.0f) : Ampl; const float EmphSel = Ampl; int nonZeroEmpFrames; float frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,EmphSel,emphasis, num_frames, nonZeroEmpFrames); residual = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); // new Ampl and Krate generated from the Lev mar Fit float newAmpl, newKrate; float delta0 = 0, delta1 = 0; int iter; int done = 0; for (iter = 0; iter < ITER; ++iter) { if ((delta0*delta0) < 0.0000025f) done++; else done = 0; if (done > 1) break; // new Ampl and krate by adding delta to existing values newAmpl = Ampl + 0.001f; newKrate = (twoParamFit)?(krate + 0.001f):(krate); // Evaluate model function for new Ampl keeping Krate constant float aa = 0, akr= 0, krkr = 0, rhs0 = 0, rhs1 = 0; #if __CUDA_ARCH__ >= 350 Keplar_ModelFuncEvaluationForSingleFlowFit(twoParamFit,sId, flow_ndx, nucid, nucRise, newAmpl, krate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx]*ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, tmp_fval, deltaFrames, nonZeroEmpFrames, OneParam, jac, emLeft, emRight, frac, fval, err, &aa, &rhs0, &krkr, &rhs1, &akr); if (twoParamFit) Keplar_ModelFuncEvaluationForSingleFlowFit(twoParamFit,sId, flow_ndx, nucid, nucRise, Ampl, newKrate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx]*ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, tmp_fval, deltaFrames, nonZeroEmpFrames, TwoParams, jac, emLeft, emRight, frac, fval, err, &aa, &rhs0, &krkr, &rhs1, &akr); #else Fermi_ModelFuncEvaluationForSingleFlowFit(sId, flow_ndx, nucid, nucRise, newAmpl, Ampl, krate*CP[sId].krate[nucid], newKrate*CP[sId].krate[nucid], tauB, gain, SP, d, //CP_SINGLEFLOWFIT sens, CP[sId].fine_nuc_start[flow_ndx]*ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, twoParamFit ? TwoParams : OneParam, emLeft, emRight, frac, fval, err, &aa, &rhs0, &krkr, &rhs1, &akr, deltaFrames, nonZeroEmpFrames); #endif // Now start the solving. 
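//
/* Undamped Gauss-Newton step: unlike the Lev-Mar solve above, no
 * lambda term is added to the diagonal, so
 *
 *   |delta0|          1           |  krkr  -akr | |rhs0|
 *   |delta1| = ---------------- * | -akr    aa  | |rhs1|
 *              aa*krkr - akr^2
 *
 * (or delta0 = rhs0/aa for the one-parameter fit). A step is kept only
 * if it lowers the residual; NaN deltas are simply zeroed, which also
 * satisfies the convergence test and ends the iteration loop.
 */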
if(twoParamFit){ const float det = 1.0f / (aa*krkr - akr*akr); delta1 = (-akr*rhs0 + aa*rhs1)*det; delta0 = (krkr*rhs0 - akr*rhs1)*det; }else delta0 = rhs0 / aa; if( !::isnan(delta0) && !::isnan(delta1)){ // add delta to params to obtain new params newAmpl = Ampl + delta0; if(twoParamFit)newKrate = krate + delta1; clamp_streaming(newAmpl, minAmpl, (float)LAST_POISSON_TABLE_COL); if(twoParamFit)clamp_streaming(newKrate, localMinKmult, localMaxKmult); // Evaluate using new params if (useDynamicEmphasis) { int newNonZeroEmpFrames; frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,newAmpl,emphasis, num_frames, newNonZeroEmpFrames); nonZeroEmpFrames = max(nonZeroEmpFrames, newNonZeroEmpFrames); } #if __CUDA_ARCH__ >= 350 Keplar_ModelFuncEvaluationForSingleFlowFit(twoParamFit,sId, flow_ndx, nucid, nucRise, newAmpl, newKrate*CP[sId].krate[nucid], tauB, gain, SP, //CP_SINGLEFLOWFIT d, sens, CP[sId].fine_nuc_start[flow_ndx]*ISIG_SUB_STEPS_SINGLE_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, tmp_fval, deltaFrames, num_frames, NoOutput ); #else Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, newAmpl, newKrate*CP[sId].krate[nucid], tauB, gain, SP, //CP_SINGLEFLOWFIT d, sens, CP[sId].fine_nuc_start[flow_ndx], ISIG_SUB_STEPS_SINGLE_FLOW, num_frames, num_beads, tmp_fval, deltaFrames, num_frames); #endif // residual calculation using new parameters newresidual = ResidualCalculationPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, tmp_fval, emLeft, emRight, frac, err, num_beads, nonZeroEmpFrames); if (newresidual < residual) { Ampl = newAmpl; if(twoParamFit)krate = newKrate; // copy new function val to fval for (int i=CP[sId].fine_nuc_start[flow_ndx]; i<num_frames; ++i){ //CP_SINGLEFLOWFIT #ifdef FVAL_L1 fval[i] = tmp_fval[i]; #else fval[num_beads*i] = tmp_fval[num_beads*i]; #endif } residual = newresidual; } else { if (useDynamicEmphasis) { frac = DecideOnEmphasisVectorsForInterpolation(nonZeroEmpFramesVec, &emLeft,&emRight,Ampl,emphasis, num_frames, nonZeroEmpFrames); } } } else { delta0 = 0; delta1 = 0; } } // end ITER loop // probably slower incorporation if (fabs(krate - localMinKmult) < 0.01f) { if (sqrtf(residual) > 20.0f) { localMaxKmult = localMinKmult; //krate = 0.3f; localMinKmult = 0.3f; relax_kmult_pass++; continue; } } relax_kmult_pass = 2; } if(flow_ndx==0) avg_err = pState->avg_err * realFnum; if(twoParamFit) *pKmult = krate; *pAmpl= Ampl; residual = CalculateMeanResidualErrorPerFlow(CP[sId].fine_nuc_start[flow_ndx], fg_buffers, fval, emphasis+LAST_POISSON_TABLE_COL, num_beads, num_frames); avg_err += residual; meanErr[num_beads * flow_ndx] = residual; pAmpl += num_beads; pKmult += num_beads; fg_buffers += num_frames*num_beads; } // end flow_ndx loop avg_err /= (realFnum + flow_block_size); pState->avg_err = avg_err; int high_err_cnt = 0; avg_err *= WASHOUT_THRESHOLD; for (int flow_ndx = flow_block_size - 1; flow_ndx >= 0 && (meanErr[num_beads* flow_ndx] > avg_err); flow_ndx--) high_err_cnt++; if (high_err_cnt > WASHOUT_FLOW_DETECTION) pState->corrupt = true; } ///////// Pre-processing kernel (bkg correct and well params calculation) __global__ void PreSingleFitProcessing_k(// Here FL stands for flows // inputs from data reorganization float* pCopies, // N float* pR, // N float* pPhi, // N float* pgain, // N float* pAmpl, // FLxN float* sbg, // FLxF float* dark_matter, // FLxF float* pPCA_vals, // FxNUM_DM_PCA float* fgbuffers, // FLxFxN // other inputs int flowNum, // starting flow number to calculate absolute 
flow num int num_beads, // 4 int num_frames, // 4 bool alternatingFit, int sId, int flow_block_size ) { int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+32-1)/32) * 32; int NucId, i; float Rval, tau, SP; float gain = pgain[bead_ndx]; float *pca_vals = pPCA_vals + bead_ndx; float *fval, *sbgPtr; float *et = dark_matter; // set to dark matter base pointer for PCA for (int flow_ndx=0; flow_ndx < flow_block_size; ++flow_ndx) { sbgPtr = sbg + flow_ndx*num_frames; // may shift to constant memory NucId = CP[sId].flowIdxMap[flow_ndx]; //CP_SINGLEFLOWFIT ComputeEtbR_dev(Rval, &CP[sId], pR[bead_ndx], pCopies[bead_ndx], pPhi[bead_ndx], sId, NucId, flowNum + flow_ndx); //CP_SINGLEFLOWFIT ComputeTauB_dev(tau, &CP[sId], Rval, sId); //CP_SINGLEFLOWFIT ComputeSP_dev(SP, &CP[sId], pCopies[bead_ndx], flowNum + flow_ndx, sId); //CP_SINGLEFLOWFIT Rval -= 1.0f; float dv = 0.0f; float dv_rs = 0.0f; float dvn = 0.0f; float curSbgVal; float aval; // need to go in constant memory since same word access for each thread in the warp // if PCA vectors keep base pointer otherwise bend to nuv average if(! CP[sId].useDarkMatterPCA ) et = &dark_matter[NucId*num_frames]; fval = &fgbuffers[flow_ndx*num_beads*num_frames]; for (i=0; i<num_frames; i++) { aval = CP[sId].deltaFrames[i]/(2.0f * tau); //CP_SINGLEFLOWFIT // calculate new dv curSbgVal = sbgPtr[i]; dvn = (Rval*curSbgVal - dv_rs/tau - dv*aval) / (1.0f + aval); dv_rs += (dv+dvn) * CP[sId].deltaFrames[i] * 0.5f; //CP_SINGLEFLOWFIT dv = dvn; float ftmp = fval[i*num_beads + bead_ndx] - ((dv+curSbgVal)*gain + ApplyDarkMatterToFrame(et, pca_vals, i, num_frames, num_beads, sId)); fval[i*num_beads + bead_ndx] = ftmp; } } } // xtalk calculation from excess hydrogen by neighbours __global__ void NeighbourContributionToXtalk_k(// Here FL stands for flows // inputs from data reorganization float* pR, // N float* pCopies, // N float* pPhi, // N float* sbg, // FLxF float* fgbuffers, // FLxFxN bead_state *pState, // other inputs int startingFlowNum, // starting flow number to calculate absolute flow num int currentFlowIteration, int num_beads, // 4 int num_frames, // 4 // temporaries float* scratch_buf, // 3xFxN float* nei_xtalk, // neixNxF int sId ) { int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; if (pState[bead_ndx].pinned || pState[bead_ndx].corrupt) return; num_beads = ((num_beads+32-1)/32) * 32; int NucId; float Rval, tau; float* incorp_rise = scratch_buf; float* lost_hydrogen = incorp_rise + num_beads*num_frames; float* bulk_signal = lost_hydrogen + num_beads*num_frames; incorp_rise += bead_ndx; lost_hydrogen += bead_ndx; bulk_signal += bead_ndx; fgbuffers += bead_ndx; nei_xtalk += bead_ndx; NucId = CP[sId].flowIdxMap[currentFlowIteration]; //CP_SINGLEFLOWFIT ComputeEtbR_dev(Rval, &CP[sId], pR[bead_ndx], pCopies[bead_ndx], pPhi[bead_ndx], sId, NucId, startingFlowNum + currentFlowIteration); //CP_SINGLEFLOWFIT ComputeTauB_dev(tau, &CP[sId], Rval, sId); //CP_SINGLEFLOWFIT // Calculate approximate incorporation signal int f = 0; float one_over_two_taub = 1.0f / (2.0f*tau); float xt = CP[sId].deltaFrames[f]*one_over_two_taub; //CP_SINGLEFLOWFIT incorp_rise[f] = (1.0f+xt)*fgbuffers[f] - (Rval+xt)*sbg[f]; f++; for (;f<num_frames; ++f) { xt = CP[sId].deltaFrames[f]*one_over_two_taub; //CP_SINGLEFLOWFIT incorp_rise[f*num_beads] = (1.0+xt)*fgbuffers[f*num_beads] - (1.0f-xt)*fgbuffers[(f-1)*num_beads] - ((Rval+xt)*sbg[f]-(Rval-xt)*sbg[f-1]) + incorp_rise[(f-1)*num_beads]; } // 
calculate contribution to xtalk from this bead as a neighbour in the grid if (!CP_XTALKPARAMS[sId].simpleXtalk) { float old_tautop = 0, old_taufluid = 0; for (int i=0; i<CP_XTALKPARAMS[sId].neis; ++i) { bool changed = false; // Calculate lost hydrogen using tau_top if (old_tautop != CP_XTALKPARAMS[sId].tau_top[i]) { f = CP[sId].coarse_nuc_start[currentFlowIteration]; //CP_SINGLEFLOWFIT one_over_two_taub = 1.0f / (2.0f*CP_XTALKPARAMS[sId].tau_top[i]); xt = 1.0f/(1.0f + (CP[sId].deltaFrames[f]*one_over_two_taub)); //CP_SINGLEFLOWFIT lost_hydrogen[f*num_beads] = incorp_rise[f*num_beads]*xt; f++; for (;f<num_frames; ++f) { xt = 1.0f/(1.0f + (CP[sId].deltaFrames[f]*one_over_two_taub)); //CP_SINGLEFLOWFIT lost_hydrogen[f*num_beads] = (incorp_rise[f*num_beads] - incorp_rise[(f-1)*num_beads] + (1.0f-(CP[sId].deltaFrames[f]*one_over_two_taub))*lost_hydrogen[(f-1)*num_beads])*xt; //CP_SINGLEFLOWFIT } for (f = CP[sId].coarse_nuc_start[currentFlowIteration];f<num_frames; ++f) { //CP_SINGLEFLOWFIT lost_hydrogen[f*num_beads] = incorp_rise[f*num_beads] - lost_hydrogen[f*num_beads]; } changed = true; } // Calculate ions from bulk if (changed || ( !changed && (old_taufluid != CP_XTALKPARAMS[sId].tau_fluid[i]))) { f = CP[sId].coarse_nuc_start[currentFlowIteration]; //CP_SINGLEFLOWFIT one_over_two_taub = 1.0f / (2.0f*CP_XTALKPARAMS[sId].tau_fluid[i]); xt = 1.0f/(1.0f + (CP[sId].deltaFrames[f]*one_over_two_taub)); //CP_SINGLEFLOWFIT bulk_signal[f*num_beads] = lost_hydrogen[f*num_beads]*xt; f++; for (;f<num_frames; ++f) { xt = 1.0f/(1.0f + (CP[sId].deltaFrames[f]*one_over_two_taub)); //CP_SINGLEFLOWFIT bulk_signal[f*num_beads] = (lost_hydrogen[f*num_beads] - lost_hydrogen[(f-1)*num_beads] + (1.0f-(CP[sId].deltaFrames[f]*one_over_two_taub))*bulk_signal[(f-1)*num_beads])*xt; //CP_SINGLEFLOWFIT } } // Scale down the ion by neighbour multiplier for (f=0; f<CP[sId].coarse_nuc_start[currentFlowIteration]; ++f) { //CP_SINGLEFLOWFIT *nei_xtalk = 0; nei_xtalk += num_beads; } for (; f<num_frames; ++f) { *nei_xtalk = bulk_signal[f*num_beads] * CP_XTALKPARAMS[sId].multiplier[i]; nei_xtalk += num_beads; } old_tautop = CP_XTALKPARAMS[sId].tau_top[i]; old_taufluid = CP_XTALKPARAMS[sId].tau_fluid[i]; } } else { // Calculate lost hydrogen f = CP[sId].coarse_nuc_start[currentFlowIteration]; //CP_SINGLEFLOWFIT xt = 1.0f/(1.0f + (CP[sId].deltaFrames[f]*one_over_two_taub)); //CP_SINGLEFLOWFIT lost_hydrogen[f*num_beads] = incorp_rise[f*num_beads]*xt; f++; for (;f<num_frames; ++f) { xt = 1.0f/(1.0f + (CP[sId].deltaFrames[f]*one_over_two_taub)); //CP_SINGLEFLOWFIT lost_hydrogen[f*num_beads] = (incorp_rise[f*num_beads] - incorp_rise[(f-1)*num_beads] + (1.0f-(CP[sId].deltaFrames[f]*one_over_two_taub))*lost_hydrogen[(f-1)*num_beads])*xt; //CP_SINGLEFLOWFIT } for (f = CP[sId].coarse_nuc_start[currentFlowIteration];f<num_frames; ++f) { //CP_SINGLEFLOWFIT lost_hydrogen[f*num_beads] = incorp_rise[f*num_beads] - lost_hydrogen[f*num_beads]; } // Calculate ions from bulk float taue = Rval * tau; f = CP[sId].coarse_nuc_start[currentFlowIteration]; //CP_SINGLEFLOWFIT one_over_two_taub = 1.0f / (2.0f*taue); xt = 1.0f/(1.0f + (CP[sId].deltaFrames[f]*one_over_two_taub)); //CP_SINGLEFLOWFIT bulk_signal[f*num_beads] = lost_hydrogen[f*num_beads]*xt; f++; for (;f<num_frames; ++f) { xt = 1.0f/(1.0f + (CP[sId].deltaFrames[f]*one_over_two_taub)); //CP_SINGLEFLOWFIT bulk_signal[f*num_beads] = (lost_hydrogen[f*num_beads] - lost_hydrogen[(f-1)*num_beads] + (1.0f-(CP[sId].deltaFrames[f]*one_over_two_taub))*bulk_signal[(f-1)*num_beads])*xt; //CP_SINGLEFLOWFIT 
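//
/* The recurrences in this kernel (lost_hydrogen above, bulk_signal
 * here) appear to be trapezoidal (Crank-Nicolson) discretizations of a
 * one-compartment washout model, dS/dt = dH/dt - S/tau. With
 * xt = 1/(1 + deltaFrames[f]/(2*tau)), the update
 *
 *   S[f] = (H[f] - H[f-1] + (1 - deltaFrames[f]/(2*tau))*S[f-1]) * xt
 *
 * integrates the input H with clearance time constant tau (tau_top and
 * tau_fluid in the per-neighbour branch above, taue = Rval*tau in this
 * simple branch).
 */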
} // Scale down the ion by neighbour multiplier for (int i=0; i<CP_XTALKPARAMS[sId].neis; ++i) { for (f=0; f<num_frames; ++f) { if (f < CP[sId].coarse_nuc_start[currentFlowIteration]) *nei_xtalk = 0; else *nei_xtalk = bulk_signal[f*num_beads] * CP_XTALKPARAMS[sId].multiplier[i]; nei_xtalk += num_beads; } } } } __global__ void XtalkAccumulation_k( bead_state *pState, int num_beads, int num_frames, int* neiIdxMap, // MAX_XTALK_NEIGHBOURS x N float* nei_xtalk, // neixNxF float* xtalk, // NxF int sId ) { int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; int orig_beads = num_beads; num_beads = ((num_beads+32-1)/32) * 32; int beadFrameProduct = num_beads*num_frames; xtalk += bead_ndx; neiIdxMap += bead_ndx; // Accumulate crosstalk from neighbours int i,f; for (f=0; f<num_frames; ++f) { xtalk[f*num_beads] = 0; } for (i=0; i<CP_XTALKPARAMS[sId].neis; ++i) { int neiIdx = neiIdxMap[i*orig_beads]; if (neiIdx != -1) { if (pState[neiIdx].pinned || pState[neiIdx].corrupt) continue; for (int f=0; f<num_frames; ++f) { xtalk[f*num_beads] += nei_xtalk[i*beadFrameProduct + f*num_beads + neiIdx]; } } } } __global__ void CalculateGenericXtalkForSimpleModel_k( int num_beads, int num_frames, //int regW, //int regH, bead_state *pState, int *sampNeiIdxMap, float* nei_xtalk, float* xtalk, // FxN float* genericXtalk, int sId) { __shared__ float smBuffer[MAX_UNCOMPRESSED_FRAMES_GPU]; int sampNum = blockIdx.x * blockDim.x + threadIdx.x; if (sampNum >= (GENERIC_SIMPLE_XTALK_SAMPLE)) return; num_beads = ((num_beads+32-1)/32) * 32; if (CP_XTALKPARAMS[sId].simpleXtalk) { //Accumulate xtalk signal for the sample int i,f; for (f=0; f<num_frames; ++f) { xtalk[f*GENERIC_SIMPLE_XTALK_SAMPLE + sampNum] = 0; } sampNeiIdxMap += sampNum; int beadFrameProduct = num_beads * num_frames; for (i=0; i<CP_XTALKPARAMS[sId].neis; ++i) { int neiIdx = sampNeiIdxMap[i*GENERIC_SIMPLE_XTALK_SAMPLE]; if (neiIdx != -1) { if (pState[neiIdx].pinned || pState[neiIdx].corrupt) continue; for (int f=0; f<num_frames; ++f) { xtalk[f*GENERIC_SIMPLE_XTALK_SAMPLE + sampNum] += nei_xtalk[i*beadFrameProduct + f*num_beads + neiIdx]; } } } __syncthreads(); } if (sampNum > 0) return; // calculate xtalk for GENERIC_SIMPLE_XTALK_SAMPLE beads for (int i=0; i<(MAX_UNCOMPRESSED_FRAMES_GPU); ++i) { smBuffer[i] = 0; } if (CP_XTALKPARAMS[sId].simpleXtalk) { for (int f=0; f<num_frames; ++f) { for (int i=0; i<(GENERIC_SIMPLE_XTALK_SAMPLE); ++i) { smBuffer[f] += xtalk[i]; } xtalk += GENERIC_SIMPLE_XTALK_SAMPLE; } } float scaling = 1.0f / (GENERIC_SIMPLE_XTALK_SAMPLE); for (int f=0; f<num_frames; ++f) { genericXtalk[f] = smBuffer[f] * scaling; } } __global__ void ComputeXtalkAndZeromerCorrectedTrace_k(// Here FL stands for flows int currentFlowIteration, float* fgbuffers, // FLxFxN int num_beads, // 4 int num_frames, // 4 float* genericXtalk, // neixNxF float* xtalk, // FLxN float* pCopies, // N float* pR, // N float* pPhi, // N float* pgain, // N float* sbg, // FLxF float* dark_matter, // FLxF float* pPCA_vals, // FxNUM_DM_PCA int flowNum, // starting flow number to calculate absolute flow num int sId ) { int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+32-1)/32) * 32; xtalk += bead_ndx; fgbuffers += bead_ndx; pPCA_vals += bead_ndx; int i; float Rval, tau, SP; float gain = pgain[bead_ndx]; int NucId = CP[sId].flowIdxMap[currentFlowIteration]; //CP_SINGLEFLOWFIT ComputeEtbR_dev(Rval, &CP[sId], pR[bead_ndx], pCopies[bead_ndx], pPhi[bead_ndx], sId, NucId, flowNum + 
currentFlowIteration); //CP_SINGLEFLOWFIT
  ComputeTauB_dev(tau, &CP[sId], Rval, sId); //CP_SINGLEFLOWFIT
  ComputeSP_dev(SP, &CP[sId], pCopies[bead_ndx], flowNum + currentFlowIteration, sId); //CP_SINGLEFLOWFIT

  Rval -= 1.0f;
  float dv = 0.0f;
  float dv_rs = 0.0f;
  float dvn = 0.0f;
  float curSbgVal;
  float aval;

  // need to go in constant memory since same word access for each thread in the warp
  float* et;
  if (CP[sId].useDarkMatterPCA)
    et = dark_matter;
  else
    et = &dark_matter[NucId*num_frames];

  for (i=0; i<num_frames; i++) {
    aval = CP[sId].deltaFrames[i]/(2.0f * tau); //CP_SINGLEFLOWFIT

    // calculate new dv
    curSbgVal = sbg[i] + *xtalk - genericXtalk[i];
    dvn = (Rval*curSbgVal - dv_rs/tau - dv*aval) / (1.0f + aval);
    dv_rs += (dv+dvn) * CP[sId].deltaFrames[i] * 0.5f; //CP_SINGLEFLOWFIT
    dv = dvn;
    *fgbuffers = *fgbuffers - ((dv+curSbgVal)*gain +
        ApplyDarkMatterToFrame(et, pPCA_vals, i, num_frames, num_beads, sId));
    fgbuffers += num_beads;
    xtalk += num_beads;
  }
}

__global__ void ExponentialTailFitting_k(
  float bkg_scale_limit,
  float bkg_tail_dc_lower_bound,
  bead_state* pState,
  float* tauAdjust, // obtained from TaubAdjustForExponentialTailFitting()
  float* Ampl,
  float* pR,
  float* pCopies,
  float* pPhi,
  float* fg_buffers,
  float* bkg_trace, // sbg
  float* tmp_fval,
  int num_beads,
  int num_frames,
  int flowNum,
  int sId,
  int flow_block_size
)
{
  int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x;
  if (bead_ndx >= num_beads) return;

  num_beads = ((num_beads+32-1)/32) * 32;

  pState += bead_ndx;
  if (pState->pinned || !pState->clonal_read || pState->corrupt) return;

  tauAdjust += bead_ndx;
  Ampl += bead_ndx;
  fg_buffers += bead_ndx;
  tmp_fval += bead_ndx;

  float kern[7];

  for (int flow_ndx=0; flow_ndx < flow_block_size; ++flow_ndx) {
    float Rval, taub, tmid;
    int NucId = CP[sId].flowIdxMap[flow_ndx]; //CP_SINGLEFLOWFIT
    ComputeMidNucTime_dev(tmid, &CP[sId], NucId, flow_ndx); //CP_SINGLEFLOWFIT
    ComputeEtbR_dev(Rval, &CP[sId], pR[bead_ndx], pCopies[bead_ndx], pPhi[bead_ndx],
        sId, NucId, flowNum + flow_ndx); //CP_SINGLEFLOWFIT
    ComputeTauB_dev(taub, &CP[sId], Rval, sId); //CP_SINGLEFLOWFIT
    taub *= *tauAdjust; // adjust taub with the multiplier estimated using levmar

    if (taub > 0.0f) {
      // set up start and end points for the exponential tail
      float tail_start = tmid + 6.0f + 1.75f * (*Ampl);
      int tail_start_idx = -1, tail_end_idx = -1;
      for (int i=0; i<num_frames; ++i) {
        if ((tail_start_idx == -1) && CP[sId].frameNumber[i] >= tail_start) //CP_SINGLEFLOWFIT
          tail_start_idx = i;
        if ((tail_end_idx == -1) && CP[sId].frameNumber[i] >= (tail_start + 60.0f)) //CP_SINGLEFLOWFIT
          tail_end_idx = i;
      }

      if (tail_start_idx == -1) continue;
      if (tail_end_idx == -1) tail_end_idx = num_frames;

      int tailFrames = tail_end_idx - tail_start_idx;
      if (tailFrames >= 5) {
        // Generate smoothing kernel vector. Distance from the point is +/- 3 so need
        // 7 weights
        int exp_kern_start = tailFrames < 7 ? (tail_end_idx - 7) : tail_start_idx;
        float taubInv = 1.0f / taub;
        GenerateSmoothingKernelForExponentialTailFit_dev(7, taubInv, exp_kern_start,
            kern, &CP[sId]); //CP_SINGLEFLOWFIT
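//
/* Least-squares background for the loop below: fitting
 * y(t) ~ A*exp(-(t - t0)/taub) + C over the tail frames by setting the
 * derivatives of the squared residual w.r.t. A and C to zero gives the
 * 2x2 normal equations (n = tailFrames, e_i = exp(-(t_i - t0)/taub)):
 *
 *   | n         sum(e_i)   | |C|   | sum(y_i)     |
 *   | sum(e_i)  sum(e_i^2) | |A| = | sum(y_i*e_i) |
 *
 * lhs_01 and lhs_11 accumulate sum(e) and sum(e^2); rhs_0 and rhs_1
 * accumulate sum(y) and sum(y*e). The lhs_00 entry is simply n, so it
 * never needs to be accumulated, and the solve is Cramer's rule on
 * this system.
 */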
        // perform kernel smoothing on exponential tail
        // linear regression to calculate A and C in A*exp(-(t-t0)/taub) + C
        // First calculate lhs and rhs matrix entries which are obtained by taking
        // derivative of the squared residual (y - (A*exp(-(t-t0)/taub) + C))^2 w.r.t
        // A and C to 0 which gives two linear equations in A and C
        float avg_bkg_amp_tail = 0;
        float lhs_01=0, lhs_11=0, rhs_0=0, rhs_1=0;
        for (int i=tail_start_idx; i<tail_end_idx; ++i) {
          float sum=0, scale=0;
          float tmp_fval; // note: shadows the tmp_fval kernel argument
          for (int j=i-3, k=0; j <= (i+3); ++j, ++k) {
            if (j >= 0 && j < num_frames) {
              sum += (kern[k] * fg_buffers[j*num_beads]);
              scale += kern[k];
            }
          }
          tmp_fval = sum / scale;
          avg_bkg_amp_tail += bkg_trace[i];
          float expval = __expf(-(CP[sId].frameNumber[i] - CP[sId].frameNumber[tail_start_idx])*taubInv); //CP_SINGLEFLOWFIT

          lhs_01 += expval;
          lhs_11 += expval*expval;
          rhs_0 += tmp_fval;
          rhs_1 += tmp_fval*expval;
        }

        float A, C;
        float detInv = 1.0f / (tailFrames*lhs_11 - lhs_01*lhs_01);
        C = (lhs_11*rhs_0 - lhs_01*rhs_1) * detInv;
        A = (-lhs_01*rhs_0 + tailFrames*rhs_1) * detInv;

        // if A is strongly negative, assume no incorporation and fit the DC offset alone
        if (A < -20.0f) {
          C = rhs_0 / tailFrames;
        }

        avg_bkg_amp_tail /= tailFrames;
        if (avg_bkg_amp_tail > bkg_tail_dc_lower_bound) {
          C /= avg_bkg_amp_tail;
          clamp_streaming(C, -bkg_scale_limit, bkg_scale_limit);
        }
        else
          C = 0;

        // correct fg_buffers in place
        for (int i=0; i<num_frames; ++i) {
          fg_buffers[i*num_beads] -= C*bkg_trace[i];
        }
      }
    }
    Ampl += num_beads;
    fg_buffers += num_beads*num_frames;
    bkg_trace += num_frames;
  }
}

// only performed in first 20 flows. It will be called after presingleflowfit
__global__ void TaubAdjustForExponentialTailFitting_k(
  bead_state* pState,
  float* fg_buffers,
  float* Ampl,
  float* pR,
  float* pCopies,
  float* pPhi,
  float* avg_trc,
  float* fval,
  float* tmp_fval,
  float* err,
  float* jac,
  int num_beads,
  int num_frames,
  float* tauAdjust, // output; a per-bead parameter
  int sId,
  int flow_block_size
)
{
  int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x;
  if (bead_ndx >= num_beads) return;

  num_beads = ((num_beads+32-1)/32) * 32;

  pState += bead_ndx;
  if (pState->pinned || !pState->clonal_read || pState->corrupt) return;

  tauAdjust += bead_ndx;
  Ampl += bead_ndx;
  fg_buffers += bead_ndx;
  avg_trc += bead_ndx;

  int count = 0;
  for (int i=0; i<num_frames; ++i)
    avg_trc[i*num_beads] = 0.0f;

  // collect incorporation traces from 1mer to 3mers in this flow block and average them
  // to get a typical incorporation trace
  for (int flow_ndx=0; flow_ndx<flow_block_size; ++flow_ndx) {
    float A = *Ampl;
    if ((A > 0.5f) && (A < 3.0f)) {
      for (int i=0; i<num_frames; ++i) {
        avg_trc[i*num_beads] += *fg_buffers;
        fg_buffers += num_beads;
      }
      count++;
    }
    else {
      fg_buffers += num_frames*num_beads;
    }
    Ampl += num_beads;
  }

  if (count > 6) {
    float Rval, taub, tmid;
    int NucId = CP[sId].flowIdxMap[0]; //CP_SINGLEFLOWFIT
    ComputeMidNucTime_dev(tmid, &CP[sId], NucId, 0); //CP_SINGLEFLOWFIT
    ComputeEtbR_dev(Rval, &CP[sId], pR[bead_ndx], pCopies[bead_ndx], pPhi[bead_ndx],
        sId, NucId, 0); //CP_SINGLEFLOWFIT
    ComputeTauB_dev(taub, &CP[sId], Rval, sId); //CP_SINGLEFLOWFIT
    float orig_taub = taub;
    float exp_tail_start = tmid + 6.0f + 2.0f*1.5f;
    int tail_start = -1;

    // perform average as well as determine tail
    for (int j=0; j<num_frames; ++j) {
      avg_trc[j*num_beads] /= count;
      if ((tail_start == -1) && (CP[sId].frameNumber[j] >= exp_tail_start)) //CP_SINGLEFLOWFIT
        tail_start = j;
    }

    // now 
perform lev mar fitting for Ampl, taub and dc_offset // set starting values for estimated parameters float dc_offset = 0.0f; float A = 20.0f; float newA, newtaub, newdc; int done = 0; float lambda = 1E-20; float min_taub = orig_taub*0.9f; float max_taub = orig_taub*1.1f; float delta0=0, delta1=0, delta2=0, residual, newresidual; fval += bead_ndx; tmp_fval += bead_ndx; err += bead_ndx; jac += bead_ndx; // calculate model function value with starting params before starting lev mar ModelFunctionEvaluationForExponentialTailFit_dev(tail_start, num_frames, num_beads, A, 1.0f/taub, dc_offset, fval, &CP[sId]); //CP_SINGLEFLOWFIT // calculate squared residual between average incorporation trace and model // function CalculateResidualForExponentialTailFit_dev(avg_trc, fval, tail_start, num_frames, err, num_beads, residual); for (int iter=0; iter<200; ++iter) { if (delta0*delta0 < 0.0000025f) done++; else done = 0; if (done >=5) break; // calculate partial derivatives using pertubed parameters newA = A + 0.001f; newtaub = taub + 0.001f; newdc = dc_offset + 0.001f; // partial derivative w.r.t A ModelFunctionEvaluationForExponentialTailFit_dev(tail_start, num_frames, num_beads, newA, 1.0f/taub, dc_offset, fval, &CP[sId], jac); //CP_SINGLEFLOWFIT // partial derivative w.r.t taub ModelFunctionEvaluationForExponentialTailFit_dev(tail_start, num_frames, num_beads, A, 1.0f/newtaub, dc_offset, fval, &CP[sId], //CP_SINGLEFLOWFIT jac+num_frames*num_beads); // partial derivative w.r.t dc_offset ModelFunctionEvaluationForExponentialTailFit_dev(tail_start, num_frames, num_beads, A, 1.0f/taub, newdc, fval, &CP[sId], //CP_SINGLEFLOWFIT jac+2*num_frames*num_beads); // jacobian matrix members float lhs_00=0, lhs_01=0, lhs_02=0, lhs_11=0, lhs_12=0, lhs_22=0; float rhs_0=0, rhs_1=0, rhs_2=0, det; // calculate jtj matrix entries for (int i=tail_start; i<num_frames; ++i) { lhs_00 += jac[i*num_beads]*jac[i*num_beads]; lhs_01 += jac[i*num_beads]*jac[(num_frames + i)*num_beads]; lhs_02 += jac[i*num_beads]*jac[(2*num_frames + i)*num_beads]; lhs_22 += jac[(2*num_frames + i)*num_beads]*jac[(2*num_frames + i)*num_beads]; lhs_12 += jac[(2*num_frames + i)*num_beads]*jac[(num_frames + i)*num_beads]; lhs_11 += jac[(num_frames + i)*num_beads]*jac[(num_frames + i)*num_beads]; rhs_0 += jac[i*num_beads]*err[i*num_beads]; rhs_1 += jac[(num_frames + i)*num_beads]*err[i*num_beads]; rhs_2 += jac[(2*num_frames + i)*num_beads]*err[i*num_beads]; } // Solve bool cont_proc = false; while (!cont_proc) { float new_lhs00 = lhs_00 * (1.0f + lambda); float new_lhs11 = lhs_11 * (1.0f + lambda); float new_lhs22 = lhs_22 * (1.0f + lambda); // calculate determinant det = new_lhs00*(new_lhs11*new_lhs22 - lhs_12*lhs_12) - lhs_01*(lhs_01*new_lhs22 - lhs_12*lhs_02) + lhs_02*(lhs_01*lhs_12 - new_lhs11*lhs_02); det = 1.0f/det; //if (bead_ndx == 0) // printf("lhs00:%.2f lhs01: %.2f lhs02:%.2f lhs11:%.2f lhs12:%.2f lhs22:%.2f rhs0:%.2f rhs1:%.2f rhs2:%.2f, det:%.2f\n", lhs_00,lhs_01,lhs_02,lhs_11,lhs_12,lhs_22,rhs_0,rhs_1,rhs_2,det); delta0 = det*(rhs_0*(new_lhs11*new_lhs22 - lhs_12*lhs_12) + rhs_1*(lhs_02*lhs_12 - lhs_01*new_lhs22) + rhs_2*(lhs_01*lhs_12 - lhs_02*new_lhs11)); delta1 = det*(rhs_0*(lhs_12*lhs_02 - lhs_01*new_lhs22) + rhs_1*(new_lhs00*new_lhs22 - lhs_02*lhs_02) + rhs_2*(lhs_01*lhs_02 - new_lhs00*lhs_12)); delta2 = det*(rhs_0*(lhs_01*lhs_12 - lhs_02*new_lhs11) + rhs_1*(lhs_01*lhs_02 - new_lhs00*lhs_12) + rhs_2*(new_lhs00*new_lhs11 - lhs_01*lhs_01)); // NAN check bool nan_detected = true; //if (bead_ndx == 0) // printf("delta0: %.2f delta1: %.2f 
delta2: %.2f\n", delta0, delta1, delta2); if (!::isnan(delta0) && !::isnan(delta1) && !::isnan(delta2)) { newA = A + delta0; newtaub = taub + delta1; newdc = dc_offset + delta2; clamp_streaming(newA, 0.0f, 500.0f); clamp_streaming(newtaub, min_taub, max_taub); clamp_streaming(newdc, -50.0f, 50.0f); //if (bead_ndx == 0) // printf("A:%.2f tau:%.2f dc:%.2f\n", newA, newtaub, newdc); ModelFunctionEvaluationForExponentialTailFit_dev(tail_start, num_frames, num_beads, newA, 1.0f/newtaub, newdc, tmp_fval, &CP[sId]); //CP_SINGLEFLOWFIT CalculateResidualForExponentialTailFit_dev(avg_trc, tmp_fval, tail_start, num_frames, err, num_beads, newresidual); nan_detected = false; } if (!nan_detected && newresidual < residual) { lambda /= 10.0f; if (lambda < FLT_MIN) lambda = FLT_MIN; A = newA; taub = newtaub; dc_offset = newdc; //if (bead_ndx == 0) // printf("===> iter: %d Tau: %.2f residual: %.2f newresidual: %.2f\n", iter, taub, residual, newresidual); float* temp = fval; fval = tmp_fval; tmp_fval = temp; residual = newresidual; cont_proc = true; } else { lambda *= 10.0f; } if (lambda > 100.0f) cont_proc = true; } } *tauAdjust = taub / orig_taub; } } /***************************************************************************** MULTI FLOW FIT KERNELS *****************************************************************************/ //////// Computing Partial Derivatives __global__ void ComputePartialDerivativesForMultiFlowFitForWellsFlowByFlow_k ( // inputs int maxEmphasis, float restrict_clonal, float* pobservedTrace, float* pival, // FLxNxF //scratch float* pscratch_ival, // FLxNxF float* pnucRise, // FL x ISIG_SUB_STEPS_MULTI_FLOW x F float* psbg, // FLxF float* pemphasis, // MAX_POISSON_TABLE_COL xF float* pnon_integer_penalty, // MAX_HPLEN float* pdarkMatterComp, // NUMNUC * F float* pbeadParamsTranspose, // we will be indexing directly into it from the parameter indices provide by CpuStep CpuStep* psteps, // we need a specific struct describing this config for this well fit for GPU unsigned int* pDotProdMasks, float* pJTJ, float* pRHS, int num_params, int num_steps, int num_beads, int num_frames, // outputs float* residual, // N float* poutput, // total bead params x FL x N x F. 
Need to decide on its layout int sId, int flow_block_size ) { extern __shared__ float emphasisVec[]; for (int i=0; i<MAX_POISSON_TABLE_COL*num_frames; i+=num_frames) { if (threadIdx.x < num_frames) emphasisVec[i + threadIdx.x] = pemphasis[i + threadIdx.x]; } __syncthreads(); int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+32-1)/32) * 32; int i, j, flow_ndx; int stepOffset = num_beads*num_frames; float* ptemp, *pfval; float kmult, Ampl, tauB, etbR, SP; float gain = pbeadParamsTranspose[(BEAD_OFFSET(gain))*num_beads + bead_ndx]; float dmult = pbeadParamsTranspose[(BEAD_OFFSET(dmult))*num_beads + bead_ndx]; float R = pbeadParamsTranspose[(BEAD_OFFSET(R))*num_beads + bead_ndx]; float Copies = pbeadParamsTranspose[(BEAD_OFFSET(Copies))*num_beads + bead_ndx]; float Phi = pbeadParamsTranspose[(BEAD_OFFSET(phi))*num_beads + bead_ndx]; float *pPCA_vals =&pbeadParamsTranspose[(BEAD_OFFSET(pca_vals))*num_beads + bead_ndx]; pfval = poutput + bead_ndx; float tot_err = 0.0f; pobservedTrace += bead_ndx; pival += bead_ndx; pscratch_ival += bead_ndx; for (flow_ndx=0; flow_ndx<flow_block_size; ++flow_ndx) { // calculate emphasis vector index Ampl = pbeadParamsTranspose[(BEAD_OFFSET(Ampl[0]) + flow_ndx)*num_beads + bead_ndx]; kmult = pbeadParamsTranspose[(BEAD_OFFSET(kmult[0]) + flow_ndx)*num_beads + bead_ndx]; int emphasisIdx = (int)(Ampl) > maxEmphasis ? maxEmphasis : (int)Ampl; int nonZeroEmpFrames = CP[sId].non_zero_crude_emphasis_frames[emphasisIdx]; int nucid = CP[sId].flowIdxMap[flow_ndx]; //CP_MULTIFLOWFIT float * et; // if PCA use basebointer to dark Matter otherwise bend pointer to current nuc average if(CP[sId].useDarkMatterPCA) et = pdarkMatterComp; else et = &pdarkMatterComp[nucid*num_frames]; for (i=0; i<num_steps; ++i) { ptemp = poutput + i*stepOffset + bead_ndx; float step = psteps[i].diff; for (int k=nonZeroEmpFrames; k<num_frames; ++k) { ptemp[k*num_beads] = 0; } switch (psteps[i].PartialDerivMask) { case YERR: { float eval; for (j=0; j<nonZeroEmpFrames; ++j) { eval = (pobservedTrace[j*num_beads] - pfval[j*num_beads]) * emphasisVec[emphasisIdx*num_frames + j]; *ptemp = eval; tot_err += eval*eval; ptemp += num_beads; } } break; case FVAL: { ComputeEtbR_dev(etbR, &CP[sId], R, Copies, Phi, sId, nucid, flow_ndx); //CP_MULTIFLOWFIT ComputeTauB_dev(tauB, &CP[sId] ,etbR, sId); //CP_MULTIFLOWFIT ComputeSP_dev(SP, &CP[sId], Copies, flow_ndx, sId); //CP_MULTIFLOWFIT ComputeHydrogenForMultiFlowFit_dev(sId, flow_ndx, nucid, pnucRise, Ampl, kmult*CP[sId].krate[nucid], gain, SP, //CP_MULTIFLOWFIT dmult*CP[sId].d[nucid], //CP_MULTIFLOWFIT ISIG_SUB_STEPS_MULTI_FLOW*CP[sId].coarse_nuc_start[flow_ndx], num_frames, num_beads, pival, num_frames); ComputeSignalForMultiFlowFit_dev(false, num_frames, restrict_clonal, sId, flow_ndx, Ampl, tauB, etbR, gain, num_frames, num_beads, pnon_integer_penalty, et,pPCA_vals, psbg, pival, pfval); } break; default: { // perturb the parameters if (psteps[i].PartialDerivMask == DFDA) { Ampl += step; } else if (psteps[i].PartialDerivMask == DFDDKR) { kmult += step; } else if (psteps[i].PartialDerivMask == DFDR) { R += step; } else if (psteps[i].PartialDerivMask == DFDP) { Copies += step; } else if (psteps[i].PartialDerivMask == DFDPDM) { dmult += step; } float* pivtemp = pival; if (psteps[i].doBoth) { pivtemp = pscratch_ival; ComputeSP_dev(SP, &CP[sId], Copies, flow_ndx, sId); //CP_MULTIFLOWFIT ComputeHydrogenForMultiFlowFit_dev(sId, flow_ndx, nucid, pnucRise, Ampl, kmult*CP[sId].krate[nucid], gain, SP, 
//CP_MULTIFLOWFIT dmult*CP[sId].d[nucid], //CP_MULTIFLOWFIT ISIG_SUB_STEPS_MULTI_FLOW*CP[sId].coarse_nuc_start[flow_ndx], //CP_MULTIFLOWFIT num_frames, num_beads, pivtemp, nonZeroEmpFrames); } ComputeEtbR_dev(etbR, &CP[sId], R, Copies, Phi, sId, nucid, flow_ndx); //CP_MULTIFLOWFIT ComputeTauB_dev(tauB, &CP[sId], etbR, sId); //CP_MULTIFLOWFIT ComputeSignalForMultiFlowFit_dev(true, nonZeroEmpFrames, restrict_clonal, sId, flow_ndx, Ampl, tauB, etbR, gain, num_frames, num_beads, pnon_integer_penalty, et,pPCA_vals, psbg, pivtemp, ptemp, true, step, emphasisVec + emphasisIdx*num_frames, pfval); // restore the params back if (psteps[i].PartialDerivMask == DFDA) { Ampl -= step; } else if (psteps[i].PartialDerivMask == DFDDKR) { kmult -= step; } else if (psteps[i].PartialDerivMask == DFDR) { R -= step; } else if (psteps[i].PartialDerivMask == DFDP) { Copies -= step; } else if (psteps[i].PartialDerivMask == DFDPDM) { dmult -= step; } } } } pobservedTrace += stepOffset; // initialize jtj and rhs to 0 ptemp = pJTJ + bead_ndx; for(int row=0;row<num_params;row++) { for(int col = 0; col <= row; col++) { unsigned int mask = pDotProdMasks[row*num_params+col]; if ((mask >> flow_ndx) & 1) { unsigned int stepIdx1 = mask >> PARAM1_STEPIDX_SHIFT; unsigned int stepIdx2 = (mask >> PARAM2_STEPIDX_SHIFT) & 63; *ptemp += dotProduct(poutput + stepIdx1*stepOffset + bead_ndx, poutput + stepIdx2*stepOffset + bead_ndx, num_frames, num_beads); } ptemp += num_beads; } } ptemp = pRHS + bead_ndx; for(int row=0;row<num_params;row++){ unsigned int mask = pDotProdMasks[row*num_params+row]; unsigned int stepIdx1 = mask >> PARAM1_STEPIDX_SHIFT; if ((mask >> flow_ndx) & 1) { *ptemp += dotProduct(poutput + stepIdx1*stepOffset + bead_ndx, poutput + (num_steps - 1)*stepOffset + bead_ndx, num_frames, num_beads); } ptemp += num_beads; } } residual[bead_ndx] = sqrtf(tot_err / (flow_block_size*num_frames)); } // Kernel for lev mar fitting on first 20 flows __global__ void MultiFlowLevMarFit_k( // inputs int maxEmphasis, float restrict_clonal, float* pobservedTrace, float* pival, float* pfval, float* pnucRise, // FL x ISIG_SUB_STEPS_MULTI_FLOW x F float* psbg, // FLxF float* pemphasis, // MAX_POISSON_TABLE_COL xF // needs precomputation float* pnon_integer_penalty, // MAX_HPLEN float* pdarkMatterComp, // NUMNUC * F float* pbeadParamsTranspose, // we will be indexing directly into it from the parameter indices provide by CpuStep float* pevalBeadParams, float* plambda, float* pjtj, // jtj matrix generated from build matrix kernel float* pltr, // scratch space to write lower triangular matrix float* pb, // rhs vector float* pdelta, unsigned int* paramIdxMap, int num_params, int num_beads, int num_frames, // outputs float* presidual, // N int sId, int flow_block_size ) { extern __shared__ float emphasisVec[]; for (int i=0; i<MAX_POISSON_TABLE_COL*num_frames; i+=num_frames) { if (threadIdx.x < num_frames) emphasisVec[i + threadIdx.x] = pemphasis[i + threadIdx.x]; } __syncthreads(); int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+32-1)/32) * 32; float lambda = plambda[bead_ndx]; float oldResidual = presidual[bead_ndx]; bool done = false; pival += bead_ndx; pfval += bead_ndx; pobservedTrace += bead_ndx; while(!done) { // solve for delta in params CholeskySolve_dev(lambda, pjtj, pltr, pb, pdelta, bead_ndx, num_params, num_beads); // calculate new beadparams CalculateNewBeadParams_dev(pbeadParamsTranspose, pevalBeadParams, pdelta, paramIdxMap, bead_ndx, num_params, num_beads, sId, 
flow_block_size); // calculate residual and decide whether to perform further lamda tuning and run cholesky again float newResidual = 0; float kmult, Ampl, tauB, etbR, SP; float gain = pevalBeadParams[(BEAD_OFFSET(gain))*num_beads + bead_ndx]; float dmult = pevalBeadParams[(BEAD_OFFSET(dmult))*num_beads + bead_ndx]; float R = pevalBeadParams[(BEAD_OFFSET(R))*num_beads + bead_ndx]; float Copies = pevalBeadParams[(BEAD_OFFSET(Copies))*num_beads + bead_ndx]; float Phi = pevalBeadParams[(BEAD_OFFSET(phi))*num_beads + bead_ndx]; float *et = pdarkMatterComp; float *pPCA_vals = &pevalBeadParams[(BEAD_OFFSET(pca_vals))*num_beads + bead_ndx]; for (int flow_ndx=0; flow_ndx<flow_block_size; ++flow_ndx) { // calculate emphasis vector index Ampl = pevalBeadParams[(BEAD_OFFSET(Ampl[0]) + flow_ndx)*num_beads + bead_ndx]; kmult = pevalBeadParams[(BEAD_OFFSET(kmult[0]) + flow_ndx)*num_beads + bead_ndx]; int emphasisIdx = (int)(Ampl) > maxEmphasis ? maxEmphasis : (int)Ampl; int nonZeroEmpFrames = CP[sId].non_zero_crude_emphasis_frames[emphasisIdx]; int nucid = CP[sId].flowIdxMap[flow_ndx]; //CP_MULTIFLOWFIT if(!CP[sId].useDarkMatterPCA) et = pdarkMatterComp+num_frames*nucid; ComputeEtbR_dev(etbR, &CP[sId], R, Copies, Phi, sId, nucid, 0+flow_ndx); //CP_MULTIFLOWFIT ComputeTauB_dev(tauB, &CP[sId], etbR, sId); //CP_MULTIFLOWFIT ComputeSP_dev(SP, &CP[sId], Copies, flow_ndx, sId); //CP_MULTIFLOWFIT ComputeHydrogenForMultiFlowFit_dev(sId, flow_ndx, nucid, pnucRise, Ampl, kmult*CP[sId].krate[nucid], gain, SP, //CP_MULTIFLOWFIT dmult*CP[sId].d[nucid], //CP_MULTIFLOWFIT ISIG_SUB_STEPS_MULTI_FLOW*CP[sId].coarse_nuc_start[flow_ndx], //CP_MULTIFLOWFIT num_frames, num_beads, pival, nonZeroEmpFrames); ComputeSignalForMultiFlowFit_dev(true, nonZeroEmpFrames, restrict_clonal, sId, flow_ndx, Ampl, tauB, etbR, gain, num_frames, num_beads, pnon_integer_penalty, et,pPCA_vals, psbg, pival, pfval); CalculateMultiFlowFitResidual_dev(newResidual, pobservedTrace, pfval, emphasisVec + num_frames*emphasisIdx, flow_ndx, num_beads, num_frames, nonZeroEmpFrames); } newResidual = sqrtf(newResidual/(flow_block_size*num_frames)); if (newResidual < oldResidual) { // TODO change wrt to ampl*copies UpdateBeadParams_dev(pbeadParamsTranspose, pevalBeadParams, paramIdxMap, bead_ndx, num_params, num_beads); lambda /= 30.0f; // it is LAMBDA_STEP in LevMarState.cpp if (lambda < FLT_MIN) lambda = FLT_MIN; plambda[bead_ndx] = lambda; presidual[bead_ndx] = newResidual; done = true; } else { lambda *= 30.0f; } if (lambda >= 1E+10f) { done = true; plambda[bead_ndx] = lambda; } } } __global__ void BuildMatrix_k( float* pPartialDeriv, // S*FLxNxF //scatch unsigned int * pDotProdMasks, // pxp int num_steps, int num_params, int num_beads, int num_frames, // outputs float* pJTJ, // pxpxN float* pRHS, // pxN int flow_block_size ) { int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+32-1)/32) * 32; pJTJ += bead_ndx; for(int row=0;row<num_params;row++){ for(int col = 0; col <= row; col++){ *pJTJ = CalculateJTJEntry( pDotProdMasks[row*num_params+col], pPartialDeriv, bead_ndx, num_beads, num_frames, flow_block_size ); pJTJ += num_beads; } } pRHS += bead_ndx; for(int row=0;row<num_params;row++){ *pRHS = CalculateRHSEntry( pDotProdMasks[row*num_params+row], pPartialDeriv, bead_ndx, num_steps, num_beads, num_frames, flow_block_size ); pRHS += num_beads; } } __global__ void BuildMatrixVec2_k( float* pPartialDeriv, // S*FLxNxF //scatch unsigned int * pDotProdMasks, // pxp int num_steps, int num_params, int 
num_beads, int num_frames, // outputs float* pJTJ, // pxpxN float* pRHS, // pxN int flow_block_size ) { int bead_ndx = blockIdx.x * (blockDim.x*2) + threadIdx.x*2; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+32-1)/32) * 32; unsigned int * masks = pDotProdMasks; pJTJ += bead_ndx; for(int row=0;row<num_params;row++){ for(int col = 0; col <= row; col++){ *((float2*)pJTJ) = CalculateJTJEntryVec2( masks[row*num_params+col], pPartialDeriv, bead_ndx, num_beads, num_frames, flow_block_size); pJTJ += num_beads; } } pRHS += bead_ndx; for(int row=0;row<num_params;row++){ *((float2*)pRHS) = CalculateRHSEntryVec2( masks[row*num_params+row], pPartialDeriv, bead_ndx, num_steps, num_beads, num_frames, flow_block_size ); pRHS += num_beads; } } __global__ void BuildMatrixVec4_k( float* pPartialDeriv, // S*FLxNxF //scatch unsigned int * pDotProdMasks, // pxp int num_steps, int num_params, int num_beads, int num_frames, // outputs float* pJTJ, // pxpxN float* pRHS, // pxN int flow_block_size ) { int bead_ndx = blockIdx.x * (blockDim.x*4) + threadIdx.x*4; extern __shared__ unsigned int masks[]; // load dotproduct masks to shared memory int i=threadIdx.x; while(i < num_params*num_params) { masks[i] = pDotProdMasks[i]; i += blockDim.x; } __syncthreads(); if(bead_ndx >= num_beads) return; num_beads = ((num_beads+32-1)/32) * 32; //num_beads += 32 - (num_beads%32); // unsigned int * masks = pDotProdMasks; pJTJ += bead_ndx; for(int row=0;row<num_params;row++){ for(int col = 0; col <= row; col++){ *((float4*)pJTJ) = CalculateJTJEntryVec4( masks[row*num_params+col], pPartialDeriv, bead_ndx, num_beads, num_frames, flow_block_size); pJTJ += num_beads; } } pRHS += bead_ndx; for(int row=0;row<num_params;row++){ *((float4*)pRHS) = CalculateRHSEntryVec4( masks[row*num_params+row], pPartialDeriv, bead_ndx, num_steps, num_beads, num_frames, flow_block_size ); pRHS += num_beads; } } /**************************************************************************** Amplitude estimation ****************************************************************************/ __global__ void ProjectionSearch_k( bead_state* pState, float* fg_buffers, // FLxFxN (already background corrected but no xtalk correction)) float* emphasisVec, // FxLAST_POISSON_TABLE_COL float* nucRise, // ISIG_SUB_STEPS_MULTI_FLOW*F*FL float* pBeadParamsBase, float* fval, // NxF int realFnum, // starting flow number in block of 20 flows int num_beads, int num_frames, int sId, int flow_block_size ) { int bead_ndx = blockIdx.x * blockDim.x + threadIdx.x; if(bead_ndx >= num_beads) return; num_beads = ((num_beads+32-1)/32) * 32; #ifdef FVAL_L1 float fval_L1[MAX_COMPRESSED_FRAMES_GPU]; fval = &fval_L1[0]; #else fval += bead_ndx; #endif pState += bead_ndx; if (pState->pinned || pState->corrupt) return; fg_buffers += bead_ndx; pBeadParamsBase += bead_ndx; float *pCopies = &pBeadParamsBase[BEAD_OFFSET(Copies)*num_beads]; float *pAmpl = &pBeadParamsBase[BEAD_OFFSET(Ampl[0])*num_beads]; float *pKmult = &pBeadParamsBase[BEAD_OFFSET(kmult[0])*num_beads]; float R = *(pCopies + num_beads); float d = *(pCopies + 2*num_beads); float gain = *(pCopies + 3 * num_beads) ; float copies = *pCopies; float sens = CP[sId].sens*SENSMULTIPLIER; //CP_SINGLEFLOWFIT for(int flow_ndx=0; flow_ndx<flow_block_size; flow_ndx++){ int nucid = CP[sId].flowIdxMap[flow_ndx]; //CP_SINGLEFLOWFIT float dmult = d * CP[sId].d[nucid]; float krate = *pKmult; float Ampl = 1.0f; float etbR; float tauB; float SP; ComputeEtbR_dev(etbR, &CP[sId], R, copies, pBeadParamsBase[BEAD_OFFSET(phi)*num_beads], sId, 
nucid, realFnum+flow_ndx); //CP_SINGLEFLOWFIT ComputeTauB_dev(tauB, &CP[sId], etbR, sId); //CP_SINGLEFLOWFIT ComputeSP_dev(SP, &CP[sId], copies, realFnum+flow_ndx, sId); //CP_SINGLEFLOWFIT for (int i=0; i<2; ++i) { Fermi_ModelFuncEvaluationForSingleFlowFitNoOutput(&CP[sId], flow_ndx, nucid, nucRise, Ampl, krate*CP[sId].krate[nucid], tauB, gain, SP, dmult, //CP_SINGLEFLOWFIT sens, CP[sId].coarse_nuc_start[flow_ndx], ISIG_SUB_STEPS_MULTI_FLOW, //CP_SINGLEFLOWFIT num_frames, num_beads, fval, CP[sId].deltaFrames, CP[sId].non_zero_crude_emphasis_frames[0]); float num = 0, den = 0.0001f; for (int j=CP[sId].coarse_nuc_start[flow_ndx]; j<CP[sId].non_zero_crude_emphasis_frames[0]; ++j) { #ifdef FVAL_L1 num += fval[j]*fg_buffers[j*num_beads]*emphasisVec[j]*emphasisVec[j]; // multiply by emphasis vectors den += fval[j]*fval[j]*emphasisVec[j]*emphasisVec[j]; #else num += fval[j*num_beads]*fg_buffers[j*num_beads]*emphasisVec[j]*emphasisVec[j]; // multiply by emphasis vectors den += fval[j*num_beads]*fval[j*num_beads]*emphasisVec[j]*emphasisVec[j]; #endif } Ampl *= (num/den); if (::isnan(Ampl)) Ampl = 1.0f; else clamp_streaming(Ampl, 0.001f, (float)LAST_POISSON_TABLE_COL); } *pAmpl = Ampl; pAmpl += num_beads; pKmult += num_beads; fg_buffers += num_beads*num_frames; } } /***************************************************************************** UTILITY KERNELS *****************************************************************************/ __global__ void build_poiss_LUT_k( void ) // build LUT poisson tables on device from CDF { int offset = threadIdx.x; int event = blockIdx.x; //(maxEvent = MAX_HPLEN) int maxEvents = gridDim.x; float* ptrL = POISS_APPROX_TABLE_CUDA_BASE + MAX_POISSON_TABLE_ROW * ((event == 0)?(event):(event-1)) ; float* ptrR = POISS_APPROX_TABLE_CUDA_BASE + MAX_POISSON_TABLE_ROW * ((event < maxEvents-1)?(event):(event-1)) ; int offsetPlusOne = (offset < MAX_POISSON_TABLE_ROW-1)?(offset+1):(offset); float4 tmp; tmp.x = ptrL[offset]; tmp.y = ptrR[offset]; tmp.z = ptrL[offsetPlusOne]; tmp.w = ptrR[offsetPlusOne]; float4* ptrLUT = POISS_APPROX_LUT_CUDA_BASE + event * MAX_POISSON_TABLE_ROW + offset; *ptrLUT = tmp; } __global__ void transposeData_k(float *dest, float *source, int width, int height) { __shared__ float tile[32][32+1]; int xIndexIn = blockIdx.x * 32 + threadIdx.x; int yIndexIn = blockIdx.y * 32 + threadIdx.y; int Iindex = xIndexIn + (yIndexIn)*width; int xIndexOut = blockIdx.y * 32 + threadIdx.x; int yIndexOut = blockIdx.x * 32 + threadIdx.y; int Oindex = xIndexOut + (yIndexOut)*height; if(xIndexIn < width && yIndexIn < height) tile[threadIdx.y][threadIdx.x] = source[Iindex]; __syncthreads(); if(xIndexOut < height && yIndexOut < width) dest[Oindex] = tile[threadIdx.x][threadIdx.y]; } ///////// Transpose Kernel __global__ void transposeDataToFloat_k(float *dest, FG_BUFFER_TYPE *source, int width, int height) { __shared__ float tile[32][32+1]; int xIndexIn = blockIdx.x * 32 + threadIdx.x; int yIndexIn = blockIdx.y * 32 + threadIdx.y; int Iindex = xIndexIn + (yIndexIn)*width; int xIndexOut = blockIdx.y * 32 + threadIdx.x; int yIndexOut = blockIdx.x * 32 + threadIdx.y; int Oindex = xIndexOut + (yIndexOut)*height; if(xIndexIn < width && yIndexIn < height) tile[threadIdx.y][threadIdx.x] = (float)(source[Iindex]); __syncthreads(); if(xIndexOut < height && yIndexOut < width) dest[Oindex] = tile[threadIdx.x][threadIdx.y]; } ////////////////////////////////////////////////////////////////// ///////// EXTERN DECL. 
WRAPPER FUNCTIONS////////////////////////// void StreamingKernels::copyFittingConstParamAsync(ConstParams* ptr, int offset, cudaStream_t stream) { cudaMemcpyToSymbolAsync ( CP, ptr, sizeof(ConstParams), offset*sizeof(ConstParams),cudaMemcpyHostToDevice, stream); } void StreamingKernels::copyXtalkConstParamAsync(ConstXtalkParams* ptr, int offset, cudaStream_t stream) { cudaMemcpyToSymbolAsync ( CP_XTALKPARAMS, ptr, sizeof(ConstXtalkParams), offset*sizeof(ConstXtalkParams),cudaMemcpyHostToDevice, stream); } void StreamingKernels::PerFlowGaussNewtonFit(int l1type, dim3 grid, dim3 block, int smem, cudaStream_t stream, // inputs float* fg_buffers_base, // NxF float* emphasis, // F float* nucRise, float * pBeadParamsBase, //N bead_state* pState, // scratch space in global memory float* err, // NxF float* fval, // NxF float* tmp_fval, // NxF float* meanErr, // other inputs float minAmpl, float maxKmult, float minKmult, float adjKmult, bool fitKmult, int realFnum, int num_beads, // 4 int num_frames, // 4 bool useDynamicEmphasis, // int * pMonitor, int sId, int flow_block_size ) { switch(l1type){ case 1: cudaFuncSetCacheConfig(PerFlowGaussNewtonFit_k, cudaFuncCachePreferShared); break; case 2: cudaFuncSetCacheConfig(PerFlowGaussNewtonFit_k, cudaFuncCachePreferL1); break; default: cudaFuncSetCacheConfig(PerFlowGaussNewtonFit_k, cudaFuncCachePreferEqual); } PerFlowGaussNewtonFit_k<<< grid, block, smem, stream >>> ( fg_buffers_base, // NxF emphasis, nucRise, pBeadParamsBase, //N pState, err, // NxF #ifndef FVAL_L1 fval, // NxF tmp_fval, // NxF #endif meanErr, minAmpl, maxKmult, minKmult, adjKmult, fitKmult, realFnum, num_beads, // 4 num_frames, // 4 useDynamicEmphasis, // pMonitor, sId, flow_block_size); } void StreamingKernels::PerFlowHybridFit(int l1type, dim3 grid, dim3 block, int smem, cudaStream_t stream, // inputs float* fg_buffers_base, // NxF float* emphasis, // F float* nucRise, // bead params float * pBeadParamsBase, //N bead_state* pState, // scratch space in global memory float* err, // NxF float* fval, // NxF float* tmp_fval, // NxF float* meanErr, // other inputs float minAmpl, float maxKmult, float minKmult, float adjKmult, bool fitKmult, int realFnum, int num_beads, // 4 int num_frames, // 4 bool useDynamicEmphasis, // int * pMonitor, int sId, int switchToLevMar, int flow_block_size ) { switch(l1type){ case 1: cudaFuncSetCacheConfig(PerFlowHybridFit_k, cudaFuncCachePreferShared); break; case 2: cudaFuncSetCacheConfig(PerFlowHybridFit_k, cudaFuncCachePreferL1); break; default: cudaFuncSetCacheConfig(PerFlowHybridFit_k, cudaFuncCachePreferEqual); } PerFlowHybridFit_k<<< grid, block, smem, stream >>> ( fg_buffers_base, // NxF emphasis, nucRise, pBeadParamsBase, //N pState, err, // NxF #ifndef FVAL_L1 fval, // NxF tmp_fval, // NxF #endif meanErr, minAmpl, maxKmult, minKmult, adjKmult, fitKmult, realFnum, num_beads, // 4 num_frames, // 4 useDynamicEmphasis, // pMonitor, sId, switchToLevMar, flow_block_size ); } void StreamingKernels::PerFlowLevMarFit(int l1type, dim3 grid, dim3 block, int smem, cudaStream_t stream, // inputs float* fg_buffers_base, // NxF float* emphasis, // F float* nucRise, // bead params float * pBeadParamsBase, //N bead_state* pState, // scratch space in global memory float* err, // NxF float* fval, // NxF float* tmp_fval, // NxF float* meanErr, // other inputs float minAmpl, float maxKmult, float minKmult, float adjKmult, bool fitKmult, int realFnum, int num_beads, // 4 int num_frames, // 4 bool useDynamicEmphasis, // int * pMonitor, int sId, int flow_block_size ) { 
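//
/* This wrapper, like the others in this file, follows the same launch
 * pattern: l1type selects the per-kernel cache split before launch
 * (1 = cudaFuncCachePreferShared, 2 = cudaFuncCachePreferL1, anything
 * else = cudaFuncCachePreferEqual), and the fval/tmp_fval scratch
 * pointers are forwarded to the kernel only when FVAL_L1 is not
 * defined, matching the #ifdef plumbing inside the kernels, which
 * otherwise keep those buffers in per-thread local arrays.
 */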
switch(l1type){ case 1: cudaFuncSetCacheConfig(PerFlowLevMarFit_k, cudaFuncCachePreferShared); break; case 2: cudaFuncSetCacheConfig(PerFlowLevMarFit_k, cudaFuncCachePreferL1); break; default: cudaFuncSetCacheConfig(PerFlowLevMarFit_k, cudaFuncCachePreferEqual); } PerFlowLevMarFit_k<<< grid, block, smem, stream >>> ( fg_buffers_base, // NxF emphasis, nucRise, pBeadParamsBase, //N pState, err, // NxF #ifndef FVAL_L1 fval, // NxF tmp_fval, // NxF #endif meanErr, minAmpl, maxKmult, minKmult, adjKmult, fitKmult, realFnum, num_beads, // 4 num_frames, // 4 useDynamicEmphasis, // pMonitor, sId, flow_block_size); } void StreamingKernels::PerFlowRelaxKmultGaussNewtonFit(int l1type, dim3 grid, dim3 block, int smem, cudaStream_t stream, // inputs float* fg_buffers_base, // NxF float* emphasis, // F float* nucRise, float * pBeadParamsBase, //N bead_state* pState, // scratch space in global memory float* err, // NxF float* fval, // NxF float* tmp_fval, // NxF float* jac, // NxF float* meanErr, // other inputs float minAmpl, float maxKmult, float minKmult, float adjKmult, bool fitKmult, int realFnum, int num_beads, // 4 int num_frames, // 4 bool useDynamicEmphasis, bool useSlowKmultInit, // int * pMonitor, int sId, int flow_block_size ) { switch(l1type){ case 1: cudaFuncSetCacheConfig(PerFlowRelaxedKmultGaussNewtonFit_k, cudaFuncCachePreferShared); break; case 2: cudaFuncSetCacheConfig(PerFlowRelaxedKmultGaussNewtonFit_k, cudaFuncCachePreferL1); break; default: cudaFuncSetCacheConfig(PerFlowRelaxedKmultGaussNewtonFit_k, cudaFuncCachePreferEqual); } PerFlowRelaxedKmultGaussNewtonFit_k<<< grid, block, smem, stream >>> ( fg_buffers_base, // NxF emphasis, nucRise, pBeadParamsBase, //N pState, err, // NxF #ifndef FVAL_L1 fval, // NxF tmp_fval, // NxF #endif jac, // NxF meanErr, minAmpl, maxKmult, minKmult, adjKmult, fitKmult, realFnum, num_beads, // 4 num_frames, // 4 useDynamicEmphasis, useSlowKmultInit, sId, flow_block_size); } ///////// Pre-processing kernel (bkg correct and well params calculation); void StreamingKernels::PreSingleFitProcessing(dim3 grid, dim3 block, int smem, cudaStream_t stream,// Here FL stands for flows // inputs from data reorganization float* pCopies, // N float* pR, // N float* pPhi, // N float* pgain, // N float* pAmpl, // FLxN float* sbg, // FLxF float* dark_matter, // FLxF float* pPCA_vals, float* fgbuffers, // FLxFxN // other inputs int flowNum, // starting flow number to calculate absolute flow num int num_beads, // 4 int num_frames, // 4 bool alternatingFit, int sId, int flow_block_size) { PreSingleFitProcessing_k<<< grid, block, smem, stream >>>( pCopies, // N pR, // N pPhi, // N pgain, // N pAmpl, // FLxN sbg, // FLxF dark_matter, // FLxF pPCA_vals, fgbuffers, // FLxFxN flowNum, // starting flow number to calculate absolute flow num num_beads, // 4 num_frames, // 4 alternatingFit, sId, flow_block_size ); } //////// Computing Partial Derivatives void StreamingKernels::ComputePartialDerivativesForMultiFlowFitForWellsFlowByFlow( int l1type, dim3 grid, dim3 block, int smem, cudaStream_t stream, // inputs int maxEmphasis, float restrict_clonal, float* pobservedTrace, float* pival, // FLxNxF //scatch float* pscratch_ival, // FLxNxF float* pnucRise, // FL x ISIG_SUB_STEPS_MULTI_FLOW x F float* psbg, // FLxF float* pemphasis, // MAX_POISSON_TABLE_COL xF // needs precomputation float* pnon_integer_penalty, // MAX_HPLEN float* pdarkMatterComp, // NUMNUC * F float* pbeadParamsTranspose, // we will be indexing directly into it from the parameter indices provide by CpuStep CpuStep* 
psteps, // we need a specific struct describing this config for this well fit for GPU unsigned int* pDotProdMasks, float* pJTJ, float* pRHS, int num_params, int num_steps, int num_beads, int num_frames, // outputs float* presidual, float* poutput, // total bead params x FL x N x F. Need to decide on its layout int sId, int flow_block_size ) { switch(l1type){ case 1: cudaFuncSetCacheConfig(ComputePartialDerivativesForMultiFlowFitForWellsFlowByFlow_k, cudaFuncCachePreferShared); break; case 2: cudaFuncSetCacheConfig(ComputePartialDerivativesForMultiFlowFitForWellsFlowByFlow_k, cudaFuncCachePreferL1); break; default: cudaFuncSetCacheConfig(ComputePartialDerivativesForMultiFlowFitForWellsFlowByFlow_k, cudaFuncCachePreferEqual); } ComputePartialDerivativesForMultiFlowFitForWellsFlowByFlow_k<<<grid,block,smem,stream>>>( // inputs maxEmphasis, restrict_clonal, pobservedTrace, pival, // FLxNxF //scatch pscratch_ival, // FLxNxF pnucRise, // FL x ISIG_SUB_STEPS_MULTI_FLOW x F psbg, // FLxF pemphasis, // MAX_POISSON_TABLE_COL xF // needs precomputation pnon_integer_penalty, // MAX_HPLEN pdarkMatterComp, // NUMNUC * F pbeadParamsTranspose, // we will be indexing directly into it from the parameter indices provide by CpuStep psteps, // we need a specific struct describing this config for this well fit for GPU pDotProdMasks, pJTJ, pRHS, num_params, num_steps, num_beads, num_frames, // outputs presidual, poutput, // total bead params x FL x N x F. Need to decide on its layout sId, flow_block_size); } void StreamingKernels::BuildMatrix( dim3 grid, dim3 block, int smem, cudaStream_t stream, float* pPartialDeriv, // S*FLxNxF //scatch unsigned int * pDotProdMasks, // pxp int num_steps, int num_params, int num_beads, int num_frames, // outputs float* pJTJ, // pxpxN float* pRHS, // pxN int vec, int flow_block_size ) { switch(vec){ case 4: block.x = 256; grid.x = (num_beads + block.x*4-1)/(block.x*4); grid.y = 1; smem = num_params*num_params*sizeof(unsigned int); cudaFuncSetCacheConfig(BuildMatrixVec4_k, cudaFuncCachePreferL1); BuildMatrixVec4_k<<< grid,block, smem, stream >>>( pPartialDeriv, // S*FLxNxF //scatch pDotProdMasks, // pxp num_steps, num_params, num_beads, num_frames, pJTJ, // pxpxN pRHS, // pxN flow_block_size ); break; case 2: grid.x = (num_beads + block.x*2-1)/(block.x*2); grid.y = 1; cudaFuncSetCacheConfig(BuildMatrixVec2_k, cudaFuncCachePreferL1); BuildMatrixVec2_k<<< grid,block, smem, stream >>>( pPartialDeriv, // S*FLxNxF //scatch pDotProdMasks, // pxp num_steps, num_params, num_beads, num_frames, pJTJ, // pxpxN pRHS, // pxN flow_block_size ); break; default: cudaFuncSetCacheConfig(BuildMatrix_k, cudaFuncCachePreferL1); BuildMatrix_k<<< grid,block, smem, stream >>>( pPartialDeriv, // S*FLxNxF //scatch pDotProdMasks, // pxp num_steps, num_params, num_beads, num_frames, pJTJ, // pxpxN pRHS, // pxN flow_block_size ); } } void StreamingKernels::MultiFlowLevMarFit(int l1type, dim3 grid, dim3 block, int smem, cudaStream_t stream, // inputs int maxEmphasis, float restrict_clonal, float* pobservedTrace, float* pival, float* pfval, // FLxNxFx2 //scratch for both ival and fval float* pnucRise, // FL x ISIG_SUB_STEPS_MULTI_FLOW x F float* psbg, // FLxF float* pemphasis, // MAX_POISSON_TABLE_COL xF // needs precomputation float* pnon_integer_penalty, // MAX_HPLEN float* pdarkMatterComp, // NUMNUC * F float* pbeadParamsTranspose, // we will be indexing directly into it from the parameter indices provide by CpuStep float* pevalBeadParams, float* plambda, float* pjtj, // jtj matrix generated from build 
matrix kernel float* pltr, // scratch space to write lower triangular matrix float* pb, // rhs vector float* pdelta, unsigned int* paramIdxMap, int num_params, int num_beads, int num_frames, // outputs float* presidual, // N int sId, int flow_block_size ) { switch(l1type){ case 1: cudaFuncSetCacheConfig(MultiFlowLevMarFit_k, cudaFuncCachePreferShared); break; case 2: cudaFuncSetCacheConfig(MultiFlowLevMarFit_k, cudaFuncCachePreferL1); break; case 0: default: cudaFuncSetCacheConfig(MultiFlowLevMarFit_k, cudaFuncCachePreferEqual); } MultiFlowLevMarFit_k<<< grid ,block , smem, stream >>>( maxEmphasis, restrict_clonal, pobservedTrace, pival, pfval, // FLxNxFx2 //scratch for both ival and fval pnucRise, // FL x ISIG_SUB_STEPS_MULTI_FLOW x F psbg, // FLxF pemphasis, // MAX_POISSON_TABLE_COL xF // needs precomputation pnon_integer_penalty, // MAX_HPLEN pdarkMatterComp, // NUMNUC * F pbeadParamsTranspose, // we will be indexing directly into it from the parameter indices provide by CpuStep pevalBeadParams, plambda, pjtj, // jtj matrix generated from build matrix kernel pltr, // scratch space to write lower triangular matrix pb, // rhs vector pdelta, paramIdxMap, num_params, num_beads, num_frames, presidual, // N sId, flow_block_size); } ///////// Xtalk computation kernel wrapper void StreamingKernels::NeighbourContributionToXtalk( dim3 grid, dim3 block, int smem, cudaStream_t stream,// Here FL stands for flows // inputs from data reorganization float* pR, // N float* pCopies, // N float* pPhi, // N float* sbg, // FLxF float* fgbuffers, // FLxFxN // other inputs bead_state *pState, int startingFlowNum, // starting flow number to calculate absolute flow num int currentFlowIteration, int num_beads, // 4 int num_frames, // 4 float* scratch_buf, float* nei_xtalk, int sId ) { NeighbourContributionToXtalk_k<<< grid, block, smem, stream >>>( pR, // N pCopies, pPhi, sbg, // FLxF fgbuffers, // FLxFxN pState, startingFlowNum, // starting flow number to calculate absolute flow num currentFlowIteration, num_beads, // 4 num_frames, // 4 scratch_buf, nei_xtalk, sId ); } void StreamingKernels::XtalkAccumulation( dim3 grid, dim3 block, int smem, cudaStream_t stream, bead_state *pState, int num_beads, // 4 int num_frames, // 4 int* neiIdxMap, // MAX_XTALK_NEIGHBOURS x N float* nei_xtalk, // neixNxF float* xtalk, // NxF int sId ) { XtalkAccumulation_k<<< grid, block, smem, stream >>>( pState, num_beads, // 4 num_frames, // 4 neiIdxMap, nei_xtalk, xtalk, sId); } void StreamingKernels::ComputeXtalkAndZeromerCorrectedTrace(// Here FL stands for flows dim3 grid, dim3 block, int smem, cudaStream_t stream,// Here FL stands for flows int currentFlowIteration, float* fgbuffers, // FLxFxN int num_beads, // 4 int num_frames, // 4 float* genericXtalk, // neixNxF float* xtalk, // FLxN float* pCopies, // N float* pR, // N float* pPhi,// N float* pgain, // N float* sbg, // FLxF float* dark_matter, // FLxF float* pPCA_vals, int flowNum, // starting flow number to calculate absolute flow num int sId ) { ComputeXtalkAndZeromerCorrectedTrace_k<<< grid, block, smem, stream >>>( currentFlowIteration, fgbuffers, // FLxFxN num_beads, // 4 num_frames, // 4 genericXtalk, xtalk, pCopies, // N pR, // N pPhi, // N pgain, // N sbg, // FLxF dark_matter, // FLxF pPCA_vals, flowNum, // starting flow number to calculate absolute flow num sId ); } void StreamingKernels::CalculateGenericXtalkForSimpleModel( dim3 grid, dim3 block, int smem, cudaStream_t stream, int num_beads, // 4 int num_frames, // 4 // int regW, // int regH, bead_state *pState, 
int* sampNeiIdxMap, float* nei_xtalk, float* xtalk, // NxF float* genericXtalk, // GENERIC_SIMPLE_XTALK_SAMPLE x F int sId) { CalculateGenericXtalkForSimpleModel_k<<< grid, block, smem, stream >>>( num_beads, num_frames, //regW, //regH, pState, sampNeiIdxMap, nei_xtalk, xtalk, // FLxN genericXtalk, sId); } void StreamingKernels::TaubAdjustForExponentialTailFitting( dim3 grid, dim3 block, int smem, cudaStream_t stream, bead_state* pState, float* fg_buffers, float* Ampl, float* pR, float* pCopies, float* pPhi, float* avg_trc, float* fval, float* tmp_fval, float* err, float* jac, int num_beads, int num_frames, float* tauAdjust, int sId, int flow_block_size ) { TaubAdjustForExponentialTailFitting_k <<< grid, block, smem, stream >>>( pState, fg_buffers, // FLxFxN, Ampl, // FLxN pR, // N pCopies, pPhi, avg_trc, fval, tmp_fval, err, jac, num_beads, num_frames, tauAdjust, // output it is a per bead parameter sId, flow_block_size); } void StreamingKernels::ExponentialTailFitting( dim3 grid, dim3 block, int smem, cudaStream_t stream, float bkg_scale_limit, float bkg_tail_dc_lower_bound, bead_state* pState, float* tauAdjust, float* Ampl, float* pR, float* pCopies, float* pPhi, float* fg_buffers, float* bkg_trace, float* tmp_fval, int num_beads, int num_frames, int flowNum, int sId, int flow_block_size ) { ExponentialTailFitting_k <<< grid, block, smem, stream >>> ( bkg_scale_limit, bkg_tail_dc_lower_bound, pState, tauAdjust, Ampl, pR, pCopies, pPhi, fg_buffers, bkg_trace, tmp_fval, num_beads, num_frames, flowNum, sId, flow_block_size); } void StreamingKernels::ProjectionSearch( dim3 grid, dim3 block, int smem, cudaStream_t stream, bead_state* pState, float* fg_buffers, // FLxFxN (already background and xtalk corrected if applicable)) float* emphasisVec, // FxLAST_POISSON_TABLE_COL float* nucRise, // ISIG_SUB_STEPS_MULTI_FLOW*F*FL float* pBeadParamsBase, float* fval, // NxF int realFnum, // starting flow number in block of 20 flows int num_beads, int num_frames, int sId, int flow_block_size ) { ProjectionSearch_k<<< grid, block, smem, stream>>>( pState, fg_buffers, emphasisVec, nucRise, pBeadParamsBase, fval, realFnum, num_beads, num_frames, sId, flow_block_size); } void StreamingKernels::transposeData(dim3 grid, dim3 block, int smem, cudaStream_t stream,float *dest, float *source, int width, int height) { transposeData_k<<< grid, block, smem, stream >>>( dest, source, width, height); } ///////// Transpose Kernel void StreamingKernels::transposeDataToFloat(dim3 grid, dim3 block, int smem, cudaStream_t stream,float *dest, FG_BUFFER_TYPE *source, int width, int height) { transposeDataToFloat_k<<< grid, block, smem, stream >>>( dest,source,width,height); } void StreamingKernels::initPoissonTables(int device, float ** poiss_cdf) { cudaSetDevice(device); ///////// regular float version int poissTableSize = MAX_POISSON_TABLE_COL * MAX_POISSON_TABLE_ROW * sizeof(float); float * devPtr =NULL; cudaMalloc(&devPtr, poissTableSize); CUDA_ALLOC_CHECK(devPtr); cudaMemcpyToSymbol(POISS_APPROX_TABLE_CUDA_BASE , &devPtr , sizeof (float*)); CUDA_ERROR_CHECK(); for(int i = 0; i< (MAX_POISSON_TABLE_COL); i++) { cudaMemcpy(devPtr, poiss_cdf[i], sizeof(float)*MAX_POISSON_TABLE_ROW, cudaMemcpyHostToDevice ); CUDA_ERROR_CHECK(); devPtr += MAX_POISSON_TABLE_ROW; } #ifndef USE_CUDA_ERF cudaMemcpyToSymbol (ERF_APPROX_TABLE_CUDA, ERF_APPROX_TABLE, sizeof (ERF_APPROX_TABLE)); CUDA_ERROR_CHECK(); #endif } void StreamingKernels::initPoissonTablesLUT(int device, void ** poissLUT) { cudaSetDevice(device); ////////// float4/avx 
version // float4 ** pPoissLUT = (float4**) poissLUT; int poissTableSize = MAX_LUT_TABLE_COL * MAX_POISSON_TABLE_ROW * sizeof(float4); float4 * devPtrLUT = NULL; cudaMalloc(&devPtrLUT, poissTableSize); CUDA_ALLOC_CHECK(devPtrLUT); cudaMemset(devPtrLUT, 0, poissTableSize); CUDA_ERROR_CHECK(); cudaMemcpyToSymbol(POISS_APPROX_LUT_CUDA_BASE, &devPtrLUT , sizeof (float4*)); CUDA_ERROR_CHECK(); #ifdef CREATE_POISSON_LUT_ON_DEVICE // run kernel to create LUT table from CDF tables on device dim3 block(512,1); dim3 grid (MAX_POISSON_TABLE_COL, 1); build_poiss_LUT_k<<<grid, block >>>( ); CUDA_ERROR_CHECK(); #else // cast and copy host side __m128 SSE/AVX data to float4 float4** pPoissLUT =(float4**)poissLUT; for(int i = 0; i< MAX_LUT_TABLE_COL; i++) { cudaMemcpy(devPtrLUT, &pPoissLUT[i][0], sizeof(float4)*MAX_POISSON_TABLE_ROW, cudaMemcpyHostToDevice ); CUDA_ERROR_CHECK(); devPtrLUT += MAX_POISSON_TABLE_ROW; } #endif } void StreamingKernels::destroyPoissonTables(int device) { cudaSetDevice(device); float * basepointer; cudaMemcpyFromSymbol (&basepointer, POISS_APPROX_TABLE_CUDA_BASE , sizeof (float*)); CUDA_ERROR_CHECK(); if(basepointer != NULL){ cudaFree(basepointer); CUDA_ERROR_CHECK(); } }
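/*****************************************************************************
EXAMPLE USAGE (illustrative sketch, not part of the original build): the
expected call order for the Poisson-table setup above. It assumes the caller
owns MAX_POISSON_TABLE_COL host-side CDF arrays of MAX_POISSON_TABLE_ROW
floats each; the helper name examplePoissonTableLifecycle is hypothetical.
*****************************************************************************/

static void examplePoissonTableLifecycle(int device, float **host_poiss_cdf,
                                         void **host_poiss_lut)
{
  // Copy the per-homopolymer CDF tables into device global memory and publish
  // the base pointer through the POISS_APPROX_TABLE_CUDA_BASE symbol.
  StreamingKernels::initPoissonTables(device, host_poiss_cdf);

  // Build (or copy) the float4 interpolation LUT consumed by the fit kernels.
  StreamingKernels::initPoissonTablesLUT(device, host_poiss_lut);

  // ... launch PerFlowGaussNewtonFit / MultiFlowLevMarFit / etc. here ...

  // Release the device-side CDF table once all streams using it are idle.
  StreamingKernels::destroyPoissonTables(device);
}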
using namespace onnxruntime::cuda; namespace onnxruntime { namespace contrib { namespace cuda { template <typename T> __global__ void TransposeCtx(const int H, const bool reversed_bs, const T* input, T* output) { // Input: BxNxSxH // Output: BxSxNxH int n = threadIdx.y; int s = blockIdx.x; int b = blockIdx.y; int num_heads = blockDim.y; int sequence_length = gridDim.x; const int NH = num_heads * H; const int NHS = NH * sequence_length; const int in_offset = s * H + n * sequence_length * H + b * NHS; int out_offset = 0; if (reversed_bs) { const int batch_size = gridDim.y; const int BNH = NH * batch_size; out_offset = n * H + b * NH + s * BNH; } else { out_offset = n * H + s * NH + b * NHS; } const int i = threadIdx.x; if (i < H) { output[out_offset + i] = input[in_offset + i]; } } template <typename T> __global__ void TransposeCtxLarge(const int H, const bool reversed_bs, const T* input, T* output) { // Use when (H*)*num_heads > 1024 // Input: BxNxSxH // Output: BxSxNxH int n = threadIdx.y; int s = blockIdx.x; int b = blockIdx.y; int stride = blockDim.x; int num_heads = blockDim.y; int sequence_length = gridDim.x; const int NH = num_heads * H; const int NHS = NH * sequence_length; const int in_offset = s * H + n * sequence_length * H + b * NHS; int out_offset = 0; if (reversed_bs) { const int batch_size = gridDim.y; const int BNH = NH * batch_size; out_offset = n * H + b * NH + s * BNH; } else { out_offset = n * H + s * NH + b * NHS; } int i = threadIdx.x; while (i < H) { output[out_offset + i] = input[in_offset + i]; i += stride; } } bool LaunchTransCtx(cudaStream_t stream, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const int max_threads_per_block, const bool reversed_bs, const float* input, float* output) { const dim3 grid(sequence_length, batch_size, 1); if (0 == (head_size & 1)) { const int H = head_size / 2; const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); if (H * num_heads <= max_threads_per_block) { const dim3 block(H, num_heads, 1); TransposeCtx<float2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } else { const dim3 block(max_threads_per_block / num_heads, num_heads, 1); TransposeCtxLarge<float2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } } else { if (head_size * num_heads <= max_threads_per_block) { const dim3 block(head_size, num_heads, 1); TransposeCtx<float><<<grid, block, 0, stream>>>(head_size, reversed_bs, input, output); } else { const dim3 block(max_threads_per_block / num_heads, num_heads, 1); TransposeCtxLarge<float><<<grid, block, 0, stream>>>(head_size, reversed_bs, input, output); } } return CUDA_CALL(cudaPeekAtLastError()); } bool LaunchTransCtx(cudaStream_t stream, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const int max_threads_per_block, const bool reversed_bs, const half* input, half* output) { const dim3 grid(sequence_length, batch_size, 1); if (0 == (head_size % 4)) { const int H = head_size / 4; const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); if (H * num_heads <= max_threads_per_block) { const dim3 block(H, num_heads, 1); TransposeCtx<float2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } else { const dim3 block(max_threads_per_block / num_heads, num_heads, 1); TransposeCtxLarge<float2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } } else if (0 == (head_size & 
1)) { const int H = head_size / 2; const half2* input2 = reinterpret_cast<const half2*>(input); half2* output2 = reinterpret_cast<half2*>(output); if (H * num_heads <= max_threads_per_block) { const dim3 block(H, num_heads, 1); TransposeCtx<half2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } else { const dim3 block(max_threads_per_block / num_heads, num_heads, 1); TransposeCtxLarge<half2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } } else { // this should be an "odd" case. probably not worth catching it in the half2 kernel. if (head_size * num_heads <= max_threads_per_block) { const dim3 block(head_size, num_heads, 1); TransposeCtx<half><<<grid, block, 0, stream>>>(head_size, reversed_bs, input, output); } else { const dim3 block(max_threads_per_block / num_heads, num_heads, 1); TransposeCtxLarge<half><<<grid, block, 0, stream>>>(head_size, reversed_bs, input, output); } } return CUDA_CALL(cudaPeekAtLastError()); } template <typename T> __global__ void TransposeQKV(const int H, const bool reversed_bs, const T* input, T* output) { // Input: BxSxKxNxH or SxBxKxNxH // Output: KxBxNxSxH // K is the number of identical matrix int n = threadIdx.y; int s = blockIdx.x; int b = blockIdx.y; int m = blockIdx.z; // matrix id const int num_heads = blockDim.y; const int sequence_length = gridDim.x; const int batch_size = gridDim.y; const int chunk_num = gridDim.z; const int NH = num_heads * H; const int NHS = NH * sequence_length; int in_offset = 0; if (reversed_bs) { const int BNH = NH * batch_size; in_offset = n * H + (m + b * chunk_num) * NH + s * BNH * chunk_num; } else { in_offset = n * H + (m + s * chunk_num) * NH + b * NHS * chunk_num; } const int out_offset = s * H + n * sequence_length * H + b * NHS + m * NHS * batch_size; const int i = threadIdx.x; if (i < H) { output[out_offset + i] = input[in_offset + i]; } } template <typename T> __global__ void TransposeQKVLarge(const int H, const bool reversed_bs, const T* input, T* output) { // Use when (H*)*num_heads > 1024 // Input: BxSxKxNxH or SxBxKxNxH // Output: KxBxNxSxH // K is the number of identical matrix int n = threadIdx.y; int s = blockIdx.x; int b = blockIdx.y; int m = blockIdx.z; // matrix id const int stride = blockDim.x; const int num_heads = blockDim.y; const int sequence_length = gridDim.x; const int batch_size = gridDim.y; const int chunk_num = gridDim.z; const int NH = num_heads * H; const int NHS = NH * sequence_length; int in_offset = 0; if (reversed_bs) { const int BNH = NH * batch_size; in_offset = n * H + (m + b * chunk_num) * NH + s * BNH * chunk_num; } else { in_offset = n * H + (m + s * chunk_num) * NH + b * NHS * chunk_num; } const int out_offset = s * H + n * sequence_length * H + b * NHS + m * NHS * batch_size; int i = threadIdx.x; while (i < H) { output[out_offset + i] = input[in_offset + i]; i += stride; } } bool LaunchTransQkv(cudaStream_t stream, const int matrix_num, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const int max_threads_per_block, const bool reversed_bs, const float* input, float* output) { const dim3 grid(sequence_length, batch_size, matrix_num); if (0 == (head_size & 1)) { const int H = head_size / 2; const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); if (H * num_heads <= max_threads_per_block) { const dim3 block(H, num_heads, 1); TransposeQKV<float2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } else { const dim3 
block(max_threads_per_block / num_heads, num_heads, 1); TransposeQKVLarge<float2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } } else { if (head_size * num_heads <= max_threads_per_block) { const dim3 block(head_size, num_heads, 1); TransposeQKV<float><<<grid, block, 0, stream>>>(head_size, reversed_bs, input, output); } else { const dim3 block(max_threads_per_block / num_heads, num_heads, 1); TransposeQKVLarge<float><<<grid, block, 0, stream>>>(head_size, reversed_bs, input, output); } } return CUDA_CALL(cudaPeekAtLastError()); } bool LaunchTransQkv(cudaStream_t stream, const int matrix_num, const int sequence_length, const int batch_size, const int head_size, const int num_heads, const int max_threads_per_block, const bool reversed_bs, const half* input, half* output) { const dim3 grid(sequence_length, batch_size, matrix_num); if (0 == (head_size % 4)) { const int H = head_size / 4; const float2* input2 = reinterpret_cast<const float2*>(input); float2* output2 = reinterpret_cast<float2*>(output); if (H * num_heads <= max_threads_per_block) { const dim3 block(H, num_heads, 1); TransposeQKV<float2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } else { const dim3 block(max_threads_per_block / num_heads, num_heads, 1); TransposeQKVLarge<float2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } } else if (0 == (head_size & 1)) { const int H = head_size / 2; const half2* input2 = reinterpret_cast<const half2*>(input); half2* output2 = reinterpret_cast<half2*>(output); if (H * num_heads <= max_threads_per_block) { const dim3 block(H, num_heads, 1); TransposeQKV<half2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } else { const dim3 block(max_threads_per_block / num_heads, num_heads, 1); TransposeQKVLarge<half2><<<grid, block, 0, stream>>>(H, reversed_bs, input2, output2); } } else { // this should be an "odd" case. probably not worth catching it in the half2 kernel.. if (head_size * num_heads <= max_threads_per_block) { const dim3 block(head_size, num_heads, 1); TransposeQKV<half><<<grid, block, 0, stream>>>(head_size, reversed_bs, input, output); } else { const dim3 block(max_threads_per_block / num_heads, num_heads, 1); TransposeQKVLarge<half><<<grid, block, 0, stream>>>(head_size, reversed_bs, input, output); } } return CUDA_CALL(cudaPeekAtLastError()); } } // namespace cuda } // namespace contrib } // namespace onnxruntime
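// EXAMPLE USAGE (illustrative sketch, not part of onnxruntime): driving
// LaunchTransQkv for a packed QKV activation. All buffer names are
// hypothetical device pointers of the right size; max_threads_per_block
// normally comes from the device properties (maxThreadsPerBlock).
//
//   input : BxSx3xNxH (Q, K and V interleaved along the third axis)
//   output: 3xBxNxSxH (Q, K and V each contiguous as BxNxSxH)
static bool ExampleTransposePackedQkv(cudaStream_t stream,
                                      int batch_size, int sequence_length,
                                      int num_heads, int head_size,
                                      int max_threads_per_block,
                                      const float* packed_qkv,
                                      float* qkv_workspace) {
  // matrix_num == 3 selects the Q/K/V chunk count walked by TransposeQKV.
  return onnxruntime::contrib::cuda::LaunchTransQkv(
      stream, /*matrix_num=*/3, sequence_length, batch_size, head_size,
      num_heads, max_threads_per_block, /*reversed_bs=*/false,
      packed_qkv, qkv_workspace);
}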
#include "timer.h" // CUDA libraries #include <hip/hip_runtime.h> /** Problem size along one side; total number of cells is this squared */ #define NUM 256 // block size #define BLOCK_SIZE 128 #define Real float #define ZERO 0.0f #define ONE 1.0f #define TWO 2.0f /** SOR relaxation parameter */ const Real omega = 1.85f; /////////////////////////////////////////////////////////////////////////////// /** Function to evaluate coefficient matrix and right-hand side vector. * * \param[in] rowmax number of rows * \param[in] colmax number of columns * \param[in] th_cond thermal conductivity * \param[in] dx grid size in x dimension (uniform) * \param[in] dy grid size in y dimension (uniform) * \param[in] width width of plate (z dimension) * \param[in] TN temperature at top boundary * \param[out] aP array of self coefficients * \param[out] aW array of west neighbor coefficients * \param[out] aE array of east neighbor coefficients * \param[out] aS array of south neighbor coefficients * \param[out] aN array of north neighbor coefficients * \param[out] b right-hand side array */ void fill_coeffs (int rowmax, int colmax, Real th_cond, Real dx, Real dy, Real width, Real TN, Real * aP, Real * aW, Real * aE, Real * aS, Real * aN, Real * b) { int col, row; for (col = 0; col < colmax; ++col) { for (row = 0; row < rowmax; ++row) { int ind = col * rowmax + row; b[ind] = ZERO; Real SP = ZERO; if (col == 0) { // left BC: temp = 0 aW[ind] = ZERO; SP = -TWO * th_cond * width * dy / dx; } else { aW[ind] = th_cond * width * dy / dx; } if (col == colmax - 1) { // right BC: temp = 0 aE[ind] = ZERO; SP = -TWO * th_cond * width * dy / dx; } else { aE[ind] = th_cond * width * dy / dx; } if (row == 0) { // bottom BC: temp = 0 aS[ind] = ZERO; SP = -TWO * th_cond * width * dx / dy; } else { aS[ind] = th_cond * width * dx / dy; } if (row == rowmax - 1) { // top BC: temp = TN aN[ind] = ZERO; b[ind] = TWO * th_cond * width * dx * TN / dy; SP = -TWO * th_cond * width * dx / dy; } else { aN[ind] = th_cond * width * dx / dy; } aP[ind] = aW[ind] + aE[ind] + aS[ind] + aN[ind] - SP; } // end for row } // end for col } // end fill_coeffs /////////////////////////////////////////////////////////////////////////////// /** Function to update temperature for red cells * * \param[in] aP array of self coefficients * \param[in] aW array of west neighbor coefficients * \param[in] aE array of east neighbor coefficients * \param[in] aS array of south neighbor coefficients * \param[in] aN array of north neighbor coefficients * \param[in] b right-hand side array * \param[in] temp_black temperatures of black cells, constant in this function * \param[inout] temp_red temperatures of red cells * \param[out] bl_norm_L2 array with residual information for blocks */ __global__ void red_kernel (const Real * aP, const Real * aW, const Real * aE, const Real * aS, const Real * aN, const Real * b, const Real * temp_black, Real * temp_red, Real * norm_L2) { int row = 1 + (blockIdx.x * blockDim.x) + threadIdx.x; int col = 1 + (blockIdx.y * blockDim.y) + threadIdx.y; int ind_red = col * ((NUM >> 1) + 2) + row; // local (red) index int ind = 2 * row - (col & 1) - 1 + NUM * (col - 1); // global index Real temp_old = temp_red[ind_red]; Real res = b[ind] + (aW[ind] * temp_black[row + (col - 1) * ((NUM >> 1) + 2)] + aE[ind] * temp_black[row + (col + 1) * ((NUM >> 1) + 2)] + aS[ind] * temp_black[row - (col & 1) + col * ((NUM >> 1) + 2)] + aN[ind] * temp_black[row + ((col + 1) & 1) + col * ((NUM >> 1) + 2)]); Real temp_new = temp_old * (ONE - omega) + omega * (res / 
aP[ind]); temp_red[ind_red] = temp_new; res = temp_new - temp_old; norm_L2[ind_red] = res * res; } // end red_kernel /////////////////////////////////////////////////////////////////////////////// /** Function to update temperature for black cells * * \param[in] aP array of self coefficients * \param[in] aW array of west neighbor coefficients * \param[in] aE array of east neighbor coefficients * \param[in] aS array of south neighbor coefficients * \param[in] aN array of north neighbor coefficients * \param[in] b right-hand side array * \param[in] temp_red temperatures of red cells, constant in this function * \param[inout] temp_black temperatures of black cells * \param[out] bl_norm_L2 array with residual information for blocks */ __global__ void black_kernel (const Real * aP, const Real * aW, const Real * aE, const Real * aS, const Real * aN, const Real * b, const Real * temp_red, Real * temp_black, Real * norm_L2) { int row = 1 + (blockIdx.x * blockDim.x) + threadIdx.x; int col = 1 + (blockIdx.y * blockDim.y) + threadIdx.y; int ind_black = col * ((NUM >> 1) + 2) + row; // local (black) index int ind = 2 * row - ((col + 1) & 1) - 1 + NUM * (col - 1); // global index Real temp_old = temp_black[ind_black]; Real res = b[ind] + (aW[ind] * temp_red[row + (col - 1) * ((NUM >> 1) + 2)] + aE[ind] * temp_red[row + (col + 1) * ((NUM >> 1) + 2)] + aS[ind] * temp_red[row - ((col + 1) & 1) + col * ((NUM >> 1) + 2)] + aN[ind] * temp_red[row + (col & 1) + col * ((NUM >> 1) + 2)]); Real temp_new = temp_old * (ONE - omega) + omega * (res / aP[ind]); temp_black[ind_black] = temp_new; res = temp_new - temp_old; norm_L2[ind_black] = res * res; } // end black_kernel /////////////////////////////////////////////////////////////////////////////// /** Main function that solves Laplace's equation in 2D (heat conduction in plate) * * Contains iteration loop for red-black Gauss-Seidel with SOR GPU kernels */ int main (void) { // size of plate Real L = 1.0; Real H = 1.0; Real width = 0.01; // thermal conductivity Real th_cond = 1.0; // temperature at top boundary Real TN = 1.0; // SOR iteration tolerance Real tol = 1.e-6; // number of cells in x and y directions // including unused boundary cells int num_rows = (NUM / 2) + 2; int num_cols = NUM + 2; int size_temp = num_rows * num_cols; int size = NUM * NUM; // size of cells Real dx = L / NUM; Real dy = H / NUM; // iterations for Red-Black Gauss-Seidel with SOR int iter; int it_max = 1e6; // allocate memory Real *aP, *aW, *aE, *aS, *aN, *b; Real *temp_red, *temp_black; // arrays of coefficients aP = (Real *) calloc (size, sizeof(Real)); aW = (Real *) calloc (size, sizeof(Real)); aE = (Real *) calloc (size, sizeof(Real)); aS = (Real *) calloc (size, sizeof(Real)); aN = (Real *) calloc (size, sizeof(Real)); // RHS b = (Real *) calloc (size, sizeof(Real)); // temperature arrays temp_red = (Real *) calloc (size_temp, sizeof(Real)); temp_black = (Real *) calloc (size_temp, sizeof(Real)); // set coefficients fill_coeffs (NUM, NUM, th_cond, dx, dy, width, TN, aP, aW, aE, aS, aN, b); int i; for (i = 0; i < size_temp; ++i) { temp_red[i] = ZERO; temp_black[i] = ZERO; } // block and grid dimensions dim3 dimBlock (BLOCK_SIZE, 1); dim3 dimGrid (NUM / (2 * BLOCK_SIZE), NUM); // residual Real *bl_norm_L2; // one for each temperature value int size_norm = size_temp; bl_norm_L2 = (Real *) calloc (size_norm, sizeof(Real)); for (i = 0; i < size_norm; ++i) { bl_norm_L2[i] = ZERO; } // print problem info printf("Problem size: %d x %d \n", NUM, NUM); StartTimer(); // allocate device 
// memory
  Real *aP_d, *aW_d, *aE_d, *aS_d, *aN_d, *b_d;
  Real *temp_red_d;
  Real *temp_black_d;

  hipMalloc ((void**) &aP_d, size * sizeof(Real));
  hipMalloc ((void**) &aW_d, size * sizeof(Real));
  hipMalloc ((void**) &aE_d, size * sizeof(Real));
  hipMalloc ((void**) &aS_d, size * sizeof(Real));
  hipMalloc ((void**) &aN_d, size * sizeof(Real));
  hipMalloc ((void**) &b_d, size * sizeof(Real));
  hipMalloc ((void**) &temp_red_d, size_temp * sizeof(Real));
  hipMalloc ((void**) &temp_black_d, size_temp * sizeof(Real));

  // copy to device memory
  hipMemcpy (aP_d, aP, size * sizeof(Real), hipMemcpyHostToDevice);
  hipMemcpy (aW_d, aW, size * sizeof(Real), hipMemcpyHostToDevice);
  hipMemcpy (aE_d, aE, size * sizeof(Real), hipMemcpyHostToDevice);
  hipMemcpy (aS_d, aS, size * sizeof(Real), hipMemcpyHostToDevice);
  hipMemcpy (aN_d, aN, size * sizeof(Real), hipMemcpyHostToDevice);
  hipMemcpy (b_d, b, size * sizeof(Real), hipMemcpyHostToDevice);
  hipMemcpy (temp_red_d, temp_red, size_temp * sizeof(Real), hipMemcpyHostToDevice);
  hipMemcpy (temp_black_d, temp_black, size_temp * sizeof(Real), hipMemcpyHostToDevice);

  // residual
  Real *bl_norm_L2_d;
  hipMalloc ((void**) &bl_norm_L2_d, size_norm * sizeof(Real));
  hipMemcpy (bl_norm_L2_d, bl_norm_L2, size_norm * sizeof(Real), hipMemcpyHostToDevice);

  // iteration loop
  for (iter = 1; iter <= it_max; ++iter) {

    Real norm_L2 = ZERO;

    hipLaunchKernelGGL(red_kernel, dimGrid, dimBlock, 0, 0, aP_d, aW_d, aE_d, aS_d, aN_d,
                       b_d, temp_black_d, temp_red_d, bl_norm_L2_d);

    // transfer residual value(s) back to CPU
    hipMemcpy (bl_norm_L2, bl_norm_L2_d, size_norm * sizeof(Real), hipMemcpyDeviceToHost);

    // add red cell contributions to residual
    for (int i = 0; i < size_norm; ++i) {
      norm_L2 += bl_norm_L2[i];
    }

    hipLaunchKernelGGL(black_kernel, dimGrid, dimBlock, 0, 0, aP_d, aW_d, aE_d, aS_d, aN_d,
                       b_d, temp_red_d, temp_black_d, bl_norm_L2_d);

    // transfer residual value(s) back to CPU and
    // add black cell contributions to residual
    hipMemcpy (bl_norm_L2, bl_norm_L2_d, size_norm * sizeof(Real), hipMemcpyDeviceToHost);
    for (int i = 0; i < size_norm; ++i) {
      norm_L2 += bl_norm_L2[i];
    }

    // calculate residual
    norm_L2 = sqrt(norm_L2 / ((Real)size));

    if (iter % 1000 == 0) printf("%5d, %0.6f\n", iter, norm_L2);

    // if tolerance has been reached, end SOR iterations
    if (norm_L2 < tol) {
      break;
    }
  }

  // transfer final temperature values back
  hipMemcpy (temp_red, temp_red_d, size_temp * sizeof(Real), hipMemcpyDeviceToHost);
  hipMemcpy (temp_black, temp_black_d, size_temp * sizeof(Real), hipMemcpyDeviceToHost);

  double runtime = GetTimer();

  printf("GPU\n");
  printf("Iterations: %i\n", iter);
  printf("Total time: %f s\n", runtime / 1000.0);

  // print temperature data to file
  FILE * pfile;
  pfile = fopen("temperature.dat", "w");

  if (pfile != NULL) {
    fprintf(pfile, "#x\ty\ttemp(K)\n");
    int row, col;
    for (row = 1; row < NUM + 1; ++row) {
      for (col = 1; col < NUM + 1; ++col) {
        Real x_pos = (col - 1) * dx + (dx / 2);
        Real y_pos = (row - 1) * dy + (dy / 2);

        if ((row + col) % 2 == 0) {
          // even, so red cell
          int ind = col * num_rows + (row + (col % 2)) / 2;
          fprintf(pfile, "%f\t%f\t%f\n", x_pos, y_pos, temp_red[ind]);
        } else {
          // odd, so black cell
          int ind = col * num_rows + (row + ((col + 1) % 2)) / 2;
          fprintf(pfile, "%f\t%f\t%f\n", x_pos, y_pos, temp_black[ind]);
        }
      }
      fprintf(pfile, "\n");
    }
    fclose(pfile);
  }

  // free device memory
  hipFree(aP_d);
  hipFree(aW_d);
  hipFree(aE_d);
  hipFree(aS_d);
  hipFree(aN_d);
  hipFree(b_d);
  hipFree(temp_red_d);
  hipFree(temp_black_d);
  hipFree(bl_norm_L2_d);

  free(aP);
  free(aW);
  free(aE);
  free(aS);
  free(aN);
  free(b);
  free(temp_red);
  free(temp_black);
  free(bl_norm_L2);

  return 0;
}
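/* EXAMPLE CHECK (illustrative, not part of the original program): host-side
 * sanity check of the red-cell index mapping used in red_kernel. Every
 * interior (row, col) of the half-width red grid must map to a global cell
 * index inside [0, NUM*NUM) whose 1-based (row + column) sum is even, which
 * is the parity this program treats as "red". check_red_index_map is a
 * hypothetical helper; call it from main() before the solve if desired.
 */
#include <assert.h>

static void check_red_index_map (void)
{
  for (int col = 1; col <= NUM; ++col) {
    for (int row = 1; row <= NUM / 2; ++row) {
      /* same formula as red_kernel */
      int ind = 2 * row - (col & 1) - 1 + NUM * (col - 1);
      assert(ind >= 0 && ind < NUM * NUM);

      /* recover the 1-based global row; the global column is just col */
      int global_row = ind % NUM + 1;
      assert((global_row + col) % 2 == 0);
    }
  }
}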
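// NOTE: the kernels below rely on CUDA's `half` type (cuda_fp16.h) and on a
// DynamicSharedMem<T> helper assumed to be provided by the surrounding
// project's headers.
#include <cuda_fp16.h>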
constexpr int NUM_PER_THREAD_REDUCE = 4; constexpr int WARP_SIZE = 32; constexpr int NUM_SHARED_SUM_INPUT = 6; constexpr int NUM_SHARED_SUM_GAMMA = 3; template <typename T> inline __device__ T my_pow(T a, double b) { return pow(a, static_cast<float>(b)); } template <> inline __device__ half my_pow(half a, double b) { return __float2half(pow(__half2float(a), static_cast<float>(b))); } template <typename T> inline __device__ void GammaAndBetaThreadReduce(const int &col, const int &row_dim, const int &col_dim, const int &mean_dim, const T &epsilon, const T *dy, const T *x, const T *mean, const T *var, const T *grad_dx, T *part1, T *part2, T *part3, const T *global_sum1, const T *global_sum2) { int loop_num = (row_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE; for (int i = threadIdx.x; i < loop_num; i += blockDim.x) { for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) { int row = NUM_PER_THREAD_REDUCE * i + j; if (row >= row_dim) { return; } int pos = row * col_dim + col; int mean_offset = pos / mean_dim; T v1 = my_pow(var[mean_offset] + epsilon, -0.5); part1[0] += dy[pos] * v1 * (x[pos] - mean[mean_offset]) * global_sum2[pos]; part2[0] += dy[pos] * global_sum1[pos]; part3[0] += dy[pos] * grad_dx[pos] * v1; } } } template <typename T> inline __device__ void GammaAndBetaWarpReduce(T *part1, T *part2, T *part3) { for (int delta = (WARP_SIZE >> 1); delta > 0; delta >>= 1) { part1[0] += __shfl_down_sync(0xffffffff, part1[0], delta); part2[0] += __shfl_down_sync(0xffffffff, part2[0], delta); part3[0] += __shfl_down_sync(0xffffffff, part3[0], delta); } } template <typename T> inline __device__ void GammaAndBetaBlockReduce(const int &col, const int &row_dim, T *part1, T *part2, T *part3, T *d_gamma) { // load data to share memory // thread(0, 32, 64, 96, ...) 
keep the data DynamicSharedMem<T> share_mem; if (threadIdx.x % WARP_SIZE == 0) { int offset = threadIdx.x / WARP_SIZE * 3; share_mem.addr()[offset] = part1[0]; share_mem.addr()[offset + 1] = part2[0]; share_mem.addr()[offset + 2] = part3[0]; } __syncthreads(); for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { int offset = (threadIdx.x + stride) * 3; share_mem.addr()[threadIdx.x * 3] += share_mem.addr()[offset]; share_mem.addr()[threadIdx.x * 3 + 1] += share_mem.addr()[offset + 1]; share_mem.addr()[threadIdx.x * 3 + 2] += share_mem.addr()[offset + 2]; } } __syncthreads(); if (threadIdx.x == 0) { d_gamma[col] = share_mem.addr()[0] + share_mem.addr()[1] + share_mem.addr()[2]; } } template <typename T> __global__ void GammaAndBetaPropKernel(const int row_dim, const int col_dim, const int mean_dim, const T epsilon, const T *dy, const T *x, const T *mean, const T *var, const T *grad_dx, T *d_gamma, T *global_sum1, T *global_sum2) { for (int col = blockIdx.x; col < col_dim; col += gridDim.x) { T part1 = 0; T part2 = 0; T part3 = 0; GammaAndBetaThreadReduce(col, row_dim, col_dim, mean_dim, epsilon, dy, x, mean, var, grad_dx, &part1, &part2, &part3, global_sum1, global_sum2); GammaAndBetaWarpReduce(&part1, &part2, &part3); GammaAndBetaBlockReduce(col, row_dim, &part1, &part2, &part3, d_gamma); } } template <typename T> inline __device__ void InputThreadReduceInnerMean(const int &row, const int &col_dim, const int &param_dim, const T &epsilon, T *sum1, T *sum2, T *sum3, T *sum4, const T *dy, const T *x, const T *mean, const T *var, const T *gamma, const T *grad_dx) { int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE; for (int i = threadIdx.x; i < loop_num; i += blockDim.x) { for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) { int col = NUM_PER_THREAD_REDUCE * i + j; if (col >= col_dim) { return; } int pos = row * col_dim + col; int gamma_offset = pos % param_dim; T v1 = x[pos] - mean[row]; T v2 = my_pow(var[row] + epsilon, -0.5); T v3 = v1 * v2; T v4 = dy[pos] * gamma[gamma_offset]; sum1[0] -= v2 * grad_dx[pos]; sum2[0] -= v3 * v2 * grad_dx[pos]; sum3[0] += v4; sum4[0] += v4 * v3; } } } template <typename T> inline __device__ void InputWarpReduceInnerMean(T *sum1, T *sum2, T *sum3, T *sum4) { for (int delta = (WARP_SIZE >> 1); delta > 0; delta >>= 1) { sum1[0] += __shfl_down_sync(0xffffffff, sum1[0], delta); sum2[0] += __shfl_down_sync(0xffffffff, sum2[0], delta); sum3[0] += __shfl_down_sync(0xffffffff, sum3[0], delta); sum4[0] += __shfl_down_sync(0xffffffff, sum4[0], delta); } } template <typename T> inline __device__ void InputBlockReduceInnerMean(const int &col_dim, T *sum1, T *sum2, T *sum3, T *sum4, T *share_mem) { // load data to share memory // thread(0, 32, 64, 96, ...) 
// keep the data
  if (threadIdx.x % WARP_SIZE == 0) {
    int offset = threadIdx.x / WARP_SIZE * 6;
    share_mem[offset] = sum1[0];
    share_mem[offset + 1] = sum2[0];
    share_mem[offset + 2] = sum3[0];
    share_mem[offset + 3] = sum4[0];
  }
  __syncthreads();

  for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride) {
      int offset = (threadIdx.x + stride) * 6;
      // The per-warp partial sums are laid out with a stride of 6
      // (NUM_SHARED_SUM_INPUT), so the destination index must use the same
      // stride as the source.
      share_mem[threadIdx.x * 6] += share_mem[offset];
      share_mem[threadIdx.x * 6 + 1] += share_mem[offset + 1];
      share_mem[threadIdx.x * 6 + 2] += share_mem[offset + 2];
      share_mem[threadIdx.x * 6 + 3] += share_mem[offset + 3];
    }
  }
  __syncthreads();
}

template <typename T>
inline __device__ void InputThreadReduceOuterMean(const int &row, const int &col_dim, const int &param_dim,
                                                  const T &epsilon, T *sum5, T *sum6, T *share_mem, const T *dy,
                                                  const T *x, const T *mean, const T *var, const T *gamma,
                                                  const T *grad_dx, const T *grad_dg, T *d_x) {
  int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;
  for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
    for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
      int col = NUM_PER_THREAD_REDUCE * i + j;
      if (col >= col_dim) {
        return;
      }

      int pos = row * col_dim + col;
      int gamma_offset = pos % param_dim;
      T v1 = x[pos] - mean[row];
      T v2 = my_pow(var[row] + epsilon, -0.5);
      T v3 = dy[pos] * gamma[gamma_offset];
      T v4 = v3 * share_mem[1] * (1.0 / col_dim);
      T v5 = grad_dx[pos] * v2 * share_mem[3] * (-1.0 / col_dim);
      T v6 = dy[pos] * grad_dg[gamma_offset];
      T v7 = v4 + v5 + v6;
      T part1 = v1 * v7;
      T part2 = v2 * v7;
      d_x[pos] = part2;
      sum5[0] += part1;
      sum6[0] -= part2;
    }
  }
}

template <>
inline __device__ void InputThreadReduceOuterMean(const int &row, const int &col_dim, const int &param_dim,
                                                  const half &epsilon, half *sum5, half *sum6, half *share_mem,
                                                  const half *dy, const half *x, const half *mean, const half *var,
                                                  const half *gamma, const half *grad_dx, const half *grad_dg,
                                                  half *d_x) {
  int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;
  for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
    for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
      int col = NUM_PER_THREAD_REDUCE * i + j;
      if (col >= col_dim) {
        return;
      }

      int pos = row * col_dim + col;
      int gamma_offset = pos % param_dim;
      half v1 = x[pos] - mean[row];
      half v2 = my_pow(var[row] + epsilon, -0.5);
      half v3 = dy[pos] * gamma[gamma_offset];
      half v4 = v3 * share_mem[1] * __float2half(1.0 / col_dim);
      half v5 = grad_dx[pos] * v2 * share_mem[3] * __float2half(-1.0 / col_dim);
      half v6 = dy[pos] * grad_dg[gamma_offset];
      half v7 = v4 + v5 + v6;
      half part1 = v1 * v7;
      half part2 = v2 * v7;
      d_x[pos] = part2;
      sum5[0] += part1;
      sum6[0] -= part2;
    }
  }
}

template <typename T>
inline __device__ void InputWarpReduceOuterMean(T *sum5, T *sum6) {
  for (int delta = (WARP_SIZE >> 1); delta > 0; delta >>= 1) {
    sum5[0] += __shfl_down_sync(0xffffffff, sum5[0], delta);
    sum6[0] += __shfl_down_sync(0xffffffff, sum6[0], delta);
  }
}

template <typename T>
inline __device__ void InputBlockReduceOuterMean(const int &col_dim, T *sum5, T *sum6, T *share_mem) {
  // load data to share memory
  // thread(0, 32, 64, 96, ...)
keep the data if (threadIdx.x % WARP_SIZE == 0) { int offset = threadIdx.x / WARP_SIZE * 6; share_mem[offset + 4] = sum5[0]; share_mem[offset + 5] = sum6[0]; } __syncthreads(); for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { int offset = (threadIdx.x + stride) * 6; share_mem[threadIdx.x * 6 + 4] += share_mem[offset + 4]; share_mem[threadIdx.x * 6 + 5] += share_mem[offset + 5]; } } __syncthreads(); } template <typename T> inline __device__ void InputProp(const int &row, const int &col_dim, const int &param_dim, const T &epsilon, const T *dy, const T *x, const T *mean, const T *var, const T *gamma, const T *grad_dx, const T *grad_dg, const T *grad_db, T *d_dy, T *d_x, const T *share_mem, T *global_sum1, T *global_sum2) { for (int col = threadIdx.x; col < col_dim; col += blockDim.x) { int pos = (row * col_dim + col); int gamma_offset = pos % param_dim; T v1 = x[pos] - mean[row]; T v2 = my_pow(var[row] + epsilon, -0.5); T v3 = v1 * v2; T part1 = gamma[gamma_offset] * grad_dx[pos] * v2; T part2 = gamma[gamma_offset] * share_mem[0] * (1.0 / col_dim); T part3 = gamma[gamma_offset] * v3 * share_mem[1] * (1.0 / col_dim); T part4 = v3 * grad_dg[gamma_offset]; d_dy[pos] = part1 + part2 + part3 + part4 + grad_db[gamma_offset]; T part5 = v1 * (my_pow(var[row] + epsilon, -1.5) * (share_mem[4] * (-1.0 / col_dim))); d_x[pos] += part5 + share_mem[5] * (1.0 / col_dim); global_sum1[pos] = share_mem[0] * (1.0 / col_dim); global_sum2[pos] = share_mem[1] * (1.0 / col_dim); } } template <> inline __device__ void InputProp(const int &row, const int &col_dim, const int &param_dim, const half &epsilon, const half *dy, const half *x, const half *mean, const half *var, const half *gamma, const half *grad_dx, const half *grad_dg, const half *grad_db, half *d_dy, half *d_x, const half *share_mem, half *global_sum1, half *global_sum2) { for (int col = threadIdx.x; col < col_dim; col += blockDim.x) { int pos = (row * col_dim + col); int gamma_offset = pos % param_dim; half v1 = x[pos] - mean[row]; half v2 = my_pow(var[row] + epsilon, -0.5); half v3 = v1 * v2; half part1 = gamma[gamma_offset] * grad_dx[pos] * v2; half part2 = gamma[gamma_offset] * share_mem[0] * __float2half(1.0 / col_dim); half part3 = gamma[gamma_offset] * v3 * share_mem[1] * __float2half(1.0 / col_dim); half part4 = v3 * grad_dg[gamma_offset]; d_dy[pos] = part1 + part2 + part3 + part4 + grad_db[gamma_offset]; half part5 = v1 * (my_pow(var[row] + epsilon, -1.5) * (share_mem[4] * __float2half(-1.0 / col_dim))); d_x[pos] += part5 + share_mem[5] * __float2half(1.0 / col_dim); global_sum1[pos] = share_mem[0] * __float2half(1.0 / col_dim); global_sum2[pos] = share_mem[1] * __float2half(1.0 / col_dim); } } template <typename T> __global__ void InputPropKernel(const int row_dim, const int col_dim, const int param_dim, const T epsilon, const T *dy, const T *x, const T *mean, const T *var, const T *gamma, const T *grad_dx, const T *grad_dg, const T *grad_db, T *d_dy, T *d_x, T *global_sum1, T *global_sum2) { for (int row = blockIdx.x; row < row_dim; row += gridDim.x) { T sum1 = 0; T sum2 = 0; T sum3 = 0; T sum4 = 0; T sum5 = 0; T sum6 = 0; DynamicSharedMem<T> share_mem; InputThreadReduceInnerMean(row, col_dim, param_dim, epsilon, &sum1, &sum2, &sum3, &sum4, dy, x, mean, var, gamma, grad_dx); InputWarpReduceInnerMean(&sum1, &sum2, &sum3, &sum4); InputBlockReduceInnerMean(col_dim, &sum1, &sum2, &sum3, &sum4, share_mem.addr()); InputThreadReduceOuterMean(row, col_dim, param_dim, epsilon, &sum5, &sum6, 
share_mem.addr(), dy, x, mean, var, gamma, grad_dx, grad_dg, d_x); InputWarpReduceOuterMean(&sum5, &sum6); InputBlockReduceOuterMean(col_dim, &sum5, &sum6, share_mem.addr()); InputProp(row, col_dim, param_dim, epsilon, dy, x, mean, var, gamma, grad_dx, grad_dg, grad_db, d_dy, d_x, share_mem.addr(), global_sum1, global_sum2); } } template <typename T> void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int &param_dim, T *global_sum1, T *global_sum2, const T &epsilon, const T *dy, const T *x, const T *mean, const T *var, const T *gamma, const T* grad_dx, const T* grad_dg, const T* grad_db, T *d_dy, T *d_x, T *d_gamma, cudaStream_t stream) { const int thread_per_block = 256; int share_mem_size = thread_per_block / WARP_SIZE * NUM_SHARED_SUM_INPUT * sizeof(T); InputPropKernel<<<row_dim, thread_per_block, share_mem_size, stream>>>(row_dim, col_dim, param_dim, epsilon, dy, x, mean, var, gamma, grad_dx, grad_dg, grad_db, d_dy, d_x, global_sum1, global_sum2); share_mem_size = thread_per_block / WARP_SIZE * NUM_SHARED_SUM_GAMMA * sizeof(T); int param_reduce_dim = row_dim * col_dim / param_dim; GammaAndBetaPropKernel<<<param_dim, thread_per_block, share_mem_size, stream>>>(param_reduce_dim, param_dim, col_dim, epsilon, dy, x, mean, var, grad_dx, d_gamma, global_sum1, global_sum2); } template void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int &param_dim, float *global_sum1, float *global_sum2, const float &epsilon, const float *dy, const float *x, const float *mean, const float *var, const float *gamma, const float *grad_dx, const float *grad_dg, const float *grad_db, float *d_dy, float *d_x, float *d_gamma, cudaStream_t stream); template void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int &param_dim, half *global_sum1, half *global_sum2, const half &epsilon, const half *dy, const half *x, const half *mean, const half *var, const half *gamma, const half *grad_dx, const half *grad_dg, const half *grad_db, half *d_dy, half *d_x, half *d_gamma, cudaStream_t stream);
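// EXAMPLE USAGE (illustrative sketch, not part of the original file): driving
// LayerNormGradGrad<float>. The input is viewed as (row_dim x col_dim),
// gamma has param_dim elements, and mean/var hold one value per row. All
// pointer arguments are hypothetical device buffers already filled by the
// forward pass and the first-order backward pass; the epsilon value here is
// an assumed typical default, not one mandated by this file.
void ExampleLayerNormGradGrad(int row_dim, int col_dim, int param_dim,
                              const float *dy, const float *x,
                              const float *mean, const float *var,
                              const float *gamma,
                              const float *grad_dx, const float *grad_dg,
                              const float *grad_db,
                              float *d_dy, float *d_x, float *d_gamma,
                              float *global_sum1, float *global_sum2,
                              cudaStream_t stream) {
  const float epsilon = 1e-12f;  // assumed default
  // Launches InputPropKernel and GammaAndBetaPropKernel in sequence on the
  // given stream, as implemented above.
  LayerNormGradGrad(row_dim, col_dim, param_dim, global_sum1, global_sum2,
                    epsilon, dy, x, mean, var, gamma, grad_dx, grad_dg,
                    grad_db, d_dy, d_x, d_gamma, stream);
}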
#include "LabelIslandSortArea.h" #include <iostream> using namespace std; // 宏:MAX_PAIRS_NUM //(面积值-标记值)键值对的个数。 #ifndef MAX_PAIRS_NUM #define MAX_PAIRS_NUM 256 #endif // 宏:SORT_ARRAY_TYPE_ASC // 排序标识,升序排序。 #ifndef SORT_ARRAY_TYPE_ASC #define SORT_ARRAY_TYPE_ASC 2 #endif // 宏:SORT_ARRAY_TYPE_DESC // 排序标识,降序排序。 #ifndef SORT_ARRAY_TYPE_DESC #define SORT_ARRAY_TYPE_DESC 1 #endif // Kernel 函数: _findAreasByMinMaxKer(筛选面积) // 筛选出在最大最小面积范围之间的标记区域。 static __global__ void _findAreasByMinMaxKer( unsigned int *histogram, // 直方图面积。 unsigned int minArea, // 最小面积。 unsigned int maxArea // 最大面积。 ); // Kernel 函数: _bitonicSortPairsByAscendKer(按照升序排序区域面积) // 实现并行双调排序,按照升序排序区域面积。 static __global__ void _bitonicSortPairsByAscendKer( unsigned int *devarray, // 面积数组。 unsigned int *devareaRank // 输出的(面积值-标记值)键值对。 ); // Kernel 函数: _bitonicSortPairsByDescendKer(按照降序排序区域面积) // 实现并行双调排序,按照降序排序区域面积。 static __global__ void _bitonicSortPairsByDescendKer( unsigned int *devarray, // 面积数组。 unsigned int *devareaRank // 输出的(面积值-标记值)键值对。 ); // Kernel 函数: _bitonicSortPairsByDescendKer(按照降序排序区域面积) static __global__ void _bitonicSortPairsByDescendKer( unsigned int *devarray, unsigned int *devareaRank) { // 读取线程号。 int tid = threadIdx.x; int k, ixj, j; unsigned int tempArea, tempIndex; // 声明共享内存,加快数据存取速度。 __shared__ unsigned int area[MAX_PAIRS_NUM]; __shared__ unsigned int index[MAX_PAIRS_NUM]; // 将面积值拷贝到共享内存中。 area[tid] = devarray[tid]; // 将标记值拷贝到共享内存了。 index[tid] = tid; __syncthreads(); // 并行双调排序,降序排序。 for (k = 2; k <= MAX_PAIRS_NUM; k = k << 1) { // 双调合并。 for (j = k >> 1; j > 0; j = j >> 1) { // ixj 是与当前位置 i 进行比较交换的位置。 ixj = tid ^ j; if (ixj > tid) { // 如果 (tid & k) == 0,按照降序交换两项。 if ((tid & k) == 0 && (area[tid] < area[ixj])) { // 交换面积值。 tempArea = area[tid]; area[tid] = area[ixj]; area[ixj] = tempArea; // 交换下标值。 tempIndex = index[tid]; index[tid] = index[ixj]; index[ixj] = tempIndex; // 如果 (tid & k) == 0,按照升序交换两项。 } else if ((tid & k) != 0 && area[tid] > area[ixj]) { // 交换面积值。 tempArea = area[tid]; area[tid] = area[ixj]; area[ixj] = tempArea; // 交换下标值。 tempIndex = index[tid]; index[tid] = index[ixj]; index[ixj] = tempIndex; } } __syncthreads(); } } // 将共享内存中的面积值拷贝到全局内存中。 devareaRank[2 * tid] = area[tid]; // 将共享内存中的下标值拷贝到全局内存中。 devareaRank[2 * tid + 1] = index[tid]; } // Kernel 函数: _bitonicSortPairsByAscendKer(按照升序排序区域面积) static __global__ void _bitonicSortPairsByAscendKer( unsigned int *devarray, unsigned int *devareaRank) { // 读取线程号。 int tid = threadIdx.x; int k, ixj, j; unsigned int tempArea, tempIndex; // 声明共享内存,加快数据存取速度。 __shared__ unsigned int area[MAX_PAIRS_NUM]; __shared__ unsigned int index[MAX_PAIRS_NUM]; // 将面积值拷贝到共享内存中。 area[tid] = devarray[tid]; // 将标记值拷贝到共享内存了。 index[tid] = tid; __syncthreads(); // 并行双调排序,升序排序。 for (k = 2; k <= MAX_PAIRS_NUM; k = k << 1) { // 双调合并。 for (j = k >> 1; j > 0; j = j >> 1) { // ixj 是与当前位置 i 进行比较交换的位置。 ixj = tid ^ j; if (ixj > tid) { // 如果 (tid & k) == 0,按照升序交换两项。 if ((tid & k) == 0 && (area[tid] > area[ixj])) { // 交换面积值。 tempArea = area[tid]; area[tid] = area[ixj]; area[ixj] = tempArea; // 交换下标值。 tempIndex = index[tid]; index[tid] = index[ixj]; index[ixj] = tempIndex; // 如果 (tid & k) == 0,按照降序交换两项。 } else if ((tid & k) != 0 && area[tid] < area[ixj]) { // 交换面积值。 tempArea = area[tid]; area[tid] = area[ixj]; area[ixj] = tempArea; // 交换下标值。 tempIndex = index[tid]; index[tid] = index[ixj]; index[ixj] = tempIndex; } } __syncthreads(); } } // 将共享内存中的面积值拷贝到全局内存中。 devareaRank[2 * tid] = area[tid]; // 将共享内存中的下标值拷贝到全局内存中。 devareaRank[2 * tid + 1] = index[tid]; } // Host 
__host__ int LabelIslandSortArea::bitonicSortPairs(
        unsigned int *inarray, unsigned int *areaRank)
{
    // Check whether inarray is null.
    if (inarray == NULL)
        return NULL_POINTER;

    // Check whether areaRank is null.
    if (areaRank == NULL)
        return NULL_POINTER;

    if (this->sortflag == SORT_ARRAY_TYPE_ASC)
        // Sort the region areas in ascending order.
        _bitonicSortPairsByAscendKer<<<1, MAX_PAIRS_NUM>>>(inarray, areaRank);
    else if (this->sortflag == SORT_ARRAY_TYPE_DESC)
        // Sort the region areas in descending order.
        _bitonicSortPairsByDescendKer<<<1, MAX_PAIRS_NUM>>>(inarray, areaRank);

    // If the CUDA call failed, return an error code.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    return NO_ERROR;
}

// Kernel: _findAreasByMinMaxKer (filter areas)
static __global__ void _findAreasByMinMaxKer(
        unsigned int *histogram, unsigned int minArea, unsigned int maxArea)
{
    // Get the thread index.
    int tid = threadIdx.x;

    // Clear the background bin (label 0) so it never survives the filter.
    if (tid == 0)
        histogram[0] = 0;

    // If the histogram area lies outside the min/max range, clear the
    // corresponding entry.
    if (histogram[tid] < minArea || histogram[tid] > maxArea)
        histogram[tid] = 0;
}

// Host member method: labelIslandSortArea (sort all labeled regions by area)
__host__ int LabelIslandSortArea::labelIslandSortArea(
        Image *inimg, unsigned int *areaRank)
{
    // Check whether the image is NULL.
    if (inimg == NULL)
        return NULL_POINTER;

    // Check whether areaRank is null.
    if (areaRank == NULL)
        return NULL_POINTER;

    // Check that the parameters are valid.
    if (minarea < 0 || maxarea < 0 ||
        (sortflag != SORT_ARRAY_TYPE_ASC && sortflag != SORT_ARRAY_TYPE_DESC))
        return INVALID_DATA;

    // This block performs the image preprocessing: it mainly prepares memory
    // space on the Device for the input and output images to hold the data.
    int errcode;  // local variable for error codes

    // Copy the input image into Device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Allocate temporary space on the Device. All space is requested at once,
    // and the individual arrays are then indexed through offsets.
    cudaError_t cudaerrcode;
    unsigned int *alldevicedata;
    unsigned int *devhistogram, *devareaRank;
    cudaerrcode = cudaMalloc((void**)&alldevicedata,
                             3 * MAX_PAIRS_NUM * sizeof (unsigned int));
    if (cudaerrcode != cudaSuccess)
        return cudaerrcode;

    // Initialize the memory space on the Device.
    cudaerrcode = cudaMemset(alldevicedata, 0,
                             3 * MAX_PAIRS_NUM * sizeof (unsigned int));
    if (cudaerrcode != cudaSuccess) {
        cudaFree(alldevicedata);
        return cudaerrcode;
    }

    // Access the devhistogram space through its offset.
    devhistogram = alldevicedata;

    // Compute the region areas via the histogram.
    Histogram hist;
    errcode = hist.histogram(inimg, devhistogram, 0);
    if (errcode != NO_ERROR) {
        cudaFree(alldevicedata);
        return errcode;
    }

    // Keep only the labeled regions whose area lies between the minimum and
    // maximum bounds.
    _findAreasByMinMaxKer<<<1, MAX_PAIRS_NUM>>>(devhistogram, minarea, maxarea);

    // If the CUDA call failed, return an error code.
    if (cudaGetLastError() != cudaSuccess) {
        cudaFree(alldevicedata);
        return CUDA_ERROR;
    }

    // areaRank lives on the Host side.
    if (this->ishost == 1) {
        // Access the devareaRank space through its offset.
        devareaRank = alldevicedata + MAX_PAIRS_NUM;

        // Call the parallel bitonic sort to order the selected areas.
        bitonicSortPairs(devhistogram, devareaRank);

        // Copy devareaRank from the Device to the Host.
        cudaerrcode = cudaMemcpy(areaRank, devareaRank,
                                 MAX_PAIRS_NUM * 2 * sizeof(unsigned int),
                                 cudaMemcpyDeviceToHost);
        if (cudaerrcode != cudaSuccess) {
            cudaFree(alldevicedata);
            return cudaerrcode;
        }

        // Count the number of distinct regions that pass the filter.
        int k;
        this->length = 0;
        for (k = 0; k < MAX_PAIRS_NUM; k++) {
            if (areaRank[2 * k] > 0)
                this->length++;
        }

        // When areaRank is sorted in ascending order, the front of the array
        // holds many invalid zero entries, because the number of regions may
        // be smaller than MAX_PAIRS_NUM. The array therefore has to be
        // compacted so that the valid non-zero pairs sit at the front; the
        // code below does exactly that. For example, if before processing
        // areaRank = [0, 0, 0, 0, ......, 50000, 8, 60000, 3, 70000, 6],
        // then afterwards areaRank = [50000, 8, 60000, 3, 70000, 6, 0, 0, 0,
        // 0, ......].
        int i, j;
        if (sortflag == SORT_ARRAY_TYPE_ASC) {
            if (areaRank[0] == 0) {
                j = 0;
                for (i = 0; i < MAX_PAIRS_NUM; i++) {
                    // Move the pair forward if its area is greater than 0.
                    if (areaRank[2 * i] > 0) {
                        areaRank[2 * j] = areaRank[2 * i];
                        areaRank[2 * j + 1] = areaRank[2 * i + 1];
                        areaRank[2 * i] = 0;
                        areaRank[2 * i + 1] = 0;
                        j++;
                    }
                }
            }
        }

    // areaRank lives on the Device side.
    } else if (this->ishost == 0) {
        // Declare a Host-side array for the post-processing below.
        unsigned int hostareaRank[MAX_PAIRS_NUM * 2];

        // Call the parallel bitonic sort to order the selected areas, writing
        // the pairs directly into the caller's Device buffer.
        bitonicSortPairs(devhistogram, areaRank);

        // Copy areaRank from the Device to the Host.
        cudaerrcode = cudaMemcpy(hostareaRank, areaRank,
                                 MAX_PAIRS_NUM * 2 * sizeof(unsigned int),
                                 cudaMemcpyDeviceToHost);
        if (cudaerrcode != cudaSuccess) {
            cudaFree(alldevicedata);
            return cudaerrcode;
        }

        // Count the number of distinct regions that pass the filter.
        int k;
        this->length = 0;
        for (k = 0; k < MAX_PAIRS_NUM; k++) {
            if (hostareaRank[2 * k] > 0)
                this->length++;
        }

        // Same compaction as in the Host branch: when hostareaRank is sorted
        // in ascending order, the front of the array holds many invalid zero
        // entries, because the number of regions may be smaller than
        // MAX_PAIRS_NUM, so the valid non-zero pairs are moved to the front.
        // For example, if before processing hostareaRank = [0, 0, 0, 0,
        // ......, 50000, 8, 60000, 3, 70000, 6], then afterwards
        // hostareaRank = [50000, 8, 60000, 3, 70000, 6, 0, 0, 0, 0, ......].
        int i, j;
        if (sortflag == SORT_ARRAY_TYPE_ASC) {
            if (hostareaRank[0] == 0) {
                j = 0;
                for (i = 0; i < MAX_PAIRS_NUM; i++) {
                    // Move the pair forward if its area is greater than 0.
                    if (hostareaRank[2 * i] > 0) {
                        hostareaRank[2 * j] = hostareaRank[2 * i];
                        hostareaRank[2 * j + 1] = hostareaRank[2 * i + 1];
                        hostareaRank[2 * i] = 0;
                        hostareaRank[2 * i + 1] = 0;
                        j++;
                    }
                }

                // Write the compacted pairs back to the caller's Device
                // buffer; without this copy the reordering above would be
                // lost.
                cudaerrcode = cudaMemcpy(areaRank, hostareaRank,
                                         MAX_PAIRS_NUM * 2 * sizeof(unsigned int),
                                         cudaMemcpyHostToDevice);
                if (cudaerrcode != cudaSuccess) {
                    cudaFree(alldevicedata);
                    return cudaerrcode;
                }
            }
        }
    }

    // Release the temporary space on the device.
    cudaFree(alldevicedata);

    return NO_ERROR;
}
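// EXAMPLE USAGE (illustrative sketch, not part of the original class): how
// labelIslandSortArea is typically driven with a host-side result buffer
// (ishost == 1). Image and the error codes come from the surrounding
// framework; ExampleSortIslandAreas is a hypothetical helper.
static int ExampleSortIslandAreas(LabelIslandSortArea &sorter, Image *labeledImg)
{
    // Room for MAX_PAIRS_NUM (area value, label value) pairs.
    unsigned int areaRank[MAX_PAIRS_NUM * 2] = { 0 };

    // Sort every labeled region whose area passes the min/max filter.
    int errcode = sorter.labelIslandSortArea(labeledImg, areaRank);
    if (errcode != NO_ERROR)
        return errcode;

    // areaRank[2*k] is the k-th area, areaRank[2*k+1] the matching label.
    return NO_ERROR;
}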
#include <GL/glew.h>

#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif

// CUDA standard includes
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

#include "bodysystem.h"

__constant__ float softeningSquared;
__constant__ double softeningSquared_fp64;

cudaError_t setSofteningSquared(float softeningSq)
{
    return cudaMemcpyToSymbol(softeningSquared,
                              &softeningSq,
                              sizeof(float), 0,
                              cudaMemcpyHostToDevice);
}

cudaError_t setSofteningSquared(double softeningSq)
{
    return cudaMemcpyToSymbol(softeningSquared_fp64,
                              &softeningSq,
                              sizeof(double), 0,
                              cudaMemcpyHostToDevice);
}

template<class T>
struct SharedMemory
{
    __device__ inline operator T *()
    {
        extern __shared__ int __smem[];
        return (T *)__smem;
    }

    __device__ inline operator const T *() const
    {
        extern __shared__ int __smem[];
        return (T *)__smem;
    }
};

template<typename T>
__device__ T rsqrt_T(T x)
{
    return rsqrt(x);
}

template<>
__device__ float rsqrt_T<float>(float x)
{
    return rsqrtf(x);
}

// Macros to simplify shared memory addressing
#define SX(i) sharedPos[i+blockDim.x*threadIdx.y]
// This macro is only used when multithreadBodies is true (below)
#define SX_SUM(i,j) sharedPos[i+blockDim.x*j]

template <typename T>
__device__ T getSofteningSquared()
{
    return softeningSquared;
}

template <>
__device__ double getSofteningSquared<double>()
{
    return softeningSquared_fp64;
}

template <typename T>
struct DeviceData
{
    T *dPos[2]; // mapped host pointers
    T *dVel;
    cudaEvent_t event;
    unsigned int offset;
    unsigned int numBodies;
};

template <typename T>
__device__ typename vec3<T>::Type
bodyBodyInteraction(typename vec3<T>::Type ai,
                    typename vec4<T>::Type bi,
                    typename vec4<T>::Type bj)
{
    typename vec3<T>::Type r;

    // r_ij  [3 FLOPS]
    r.x = bj.x - bi.x;
    r.y = bj.y - bi.y;
    r.z = bj.z - bi.z;

    // distSqr = dot(r_ij, r_ij) + EPS^2  [6 FLOPS]
    T distSqr = r.x * r.x + r.y * r.y + r.z * r.z;
    distSqr += getSofteningSquared<T>();

    // invDistCube = 1/distSqr^(3/2)  [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
    T invDist = rsqrt_T(distSqr);
    T invDistCube = invDist * invDist * invDist;

    // s = m_j * invDistCube  [1 FLOP]
    T s = bj.w * invDistCube;

    // a_i = a_i + s * r_ij  [6 FLOPS]
    ai.x += r.x * s;
    ai.y += r.y * s;
    ai.z += r.z * s;

    return ai;
}

// This is the "tile_calculation" function from the GPUG3 article.
template <typename T>
__device__ typename vec3<T>::Type
gravitation(typename vec4<T>::Type iPos, typename vec3<T>::Type accel)
{
    typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();

    // The CUDA 1.1 compiler cannot determine that i is not going to
    // overflow in the loop below. Therefore if int is used on 64-bit linux
    // or windows (or long instead of long long on win64), the compiler
    // generates suboptimal code. Therefore we use long long on win64 and
    // long on everything else. (Workaround for Bug ID 347697)
#ifdef _WIN64
    unsigned long long j = 0;
#else
    unsigned long j = 0;
#endif

    // Here we unroll the loop to reduce bookkeeping instruction overhead
    // 32x unrolling seems to provide best performance

    // Note that having an unsigned int loop counter and an unsigned
    // long index helps the compiler generate efficient code on 64-bit
    // OSes. The compiler can't assume the 64-bit index won't overflow
    // so it incurs extra integer operations. This is a standard issue
    // in porting 32-bit code to 64-bit OSes.
#pragma unroll 32 for (unsigned int counter = 0; counter < blockDim.x; counter++) { accel = bodyBodyInteraction<T>(accel, iPos, SX(j++)); } return accel; } // WRAP is used to force each block to start working on a different // chunk (and wrap around back to the beginning of the array) so that // not all multiprocessors try to read the same memory locations at // once. #define WRAP(x,m) (((x)<(m))?(x):((x)-(m))) // Mod without divide, works on values from 0 up to 2m #if 0 template <typename T, bool multithreadBodies> __device__ typename vec3<T>::Type computeBodyAccel(typename vec4<T>::Type bodyPos, typename vec4<T>::Type *positions, int numBodies) { typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>(); typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f}; int p = blockDim.x; int q = blockDim.y; int n = numBodies; int numTiles = (n + p*q - 1) / (p * q); for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++) { int index = multithreadBodies ? WRAP(blockIdx.x + q * tile + threadIdx.y, gridDim.x) : WRAP(blockIdx.x + tile, gridDim.x-1); index = index * p + threadIdx.x; if (index < numBodies) sharedPos[threadIdx.x+blockDim.x*threadIdx.y] = positions[index]; else sharedPos[threadIdx.x+blockDim.x*threadIdx.y].w = 0; __syncthreads(); // This is the "tile_calculation" function from the GPUG3 article. acc = gravitation<T>(bodyPos, acc); __syncthreads(); } // When the numBodies / thread block size is < # multiprocessors (16 on G80), the GPU is // underutilized. For example, with a 256 threads per block and 1024 bodies, there will only // be 4 thread blocks, so the GPU will only be 25% utilized. To improve this, we use multiple // threads per body. We still can use blocks of 256 threads, but they are arranged in q rows // of p threads each. Each thread processes 1/q of the forces that affect each body, and then // 1/q of the threads (those with threadIdx.y==0) add up the partial sums from the other // threads for that body. To enable this, use the "--p=" and "--q=" command line options to // this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 threads per body and 256 // threads per block. There will be n/p = 16 blocks, so a G80 GPU will be 100% utilized. // We use a bool template parameter to specify when the number of threads per body is greater // than one, so that when it is not we don't have to execute the more complex code required! if (multithreadBodies) { SX_SUM(threadIdx.x, threadIdx.y).x = acc.x; SX_SUM(threadIdx.x, threadIdx.y).y = acc.y; SX_SUM(threadIdx.x, threadIdx.y).z = acc.z; __syncthreads(); // Save the result in global memory for the integration step if (threadIdx.y == 0) { for (int i = 1; i < blockDim.y; i++) { acc.x += SX_SUM(threadIdx.x,i).x; acc.y += SX_SUM(threadIdx.x,i).y; acc.z += SX_SUM(threadIdx.x,i).z; } } } return acc; } #endif template <typename T, bool multithreadBodies> __device__ typename vec3<T>::Type computeBodyAccel(typename vec4<T>::Type bodyPos, typename vec4<T>::Type *positions, int numBodies) { typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>(); typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f}; int p = blockDim.x; int q = blockDim.y; int n = numBodies; int numTiles = n / (p * q); for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++) { sharedPos[threadIdx.x+blockDim.x*threadIdx.y] = multithreadBodies ? 
positions[WRAP(blockIdx.x + q * tile + threadIdx.y, gridDim.x) * p + threadIdx.x] : positions[WRAP(blockIdx.x + tile, gridDim.x) * p + threadIdx.x]; __syncthreads(); // This is the "tile_calculation" function from the GPUG3 article. acc = gravitation<T>(bodyPos, acc); __syncthreads(); } // When the numBodies / thread block size is < # multiprocessors (16 on G80), the GPU is // underutilized. For example, with a 256 threads per block and 1024 bodies, there will only // be 4 thread blocks, so the GPU will only be 25% utilized. To improve this, we use multiple // threads per body. We still can use blocks of 256 threads, but they are arranged in q rows // of p threads each. Each thread processes 1/q of the forces that affect each body, and then // 1/q of the threads (those with threadIdx.y==0) add up the partial sums from the other // threads for that body. To enable this, use the "--p=" and "--q=" command line options to // this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 threads per body and 256 // threads per block. There will be n/p = 16 blocks, so a G80 GPU will be 100% utilized. // We use a bool template parameter to specify when the number of threads per body is greater // than one, so that when it is not we don't have to execute the more complex code required! if (multithreadBodies) { SX_SUM(threadIdx.x, threadIdx.y).x = acc.x; SX_SUM(threadIdx.x, threadIdx.y).y = acc.y; SX_SUM(threadIdx.x, threadIdx.y).z = acc.z; __syncthreads(); // Save the result in global memory for the integration step if (threadIdx.y == 0) { for (int i = 1; i < blockDim.y; i++) { acc.x += SX_SUM(threadIdx.x,i).x; acc.y += SX_SUM(threadIdx.x,i).y; acc.z += SX_SUM(threadIdx.x,i).z; } } } return acc; } template<typename T, bool multithreadBodies> __global__ void integrateBodies(typename vec4<T>::Type *newPos, typename vec4<T>::Type *oldPos, typename vec4<T>::Type *vel, unsigned int deviceOffset, unsigned int deviceNumBodies, float deltaTime, float damping, int totalNumBodies) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= deviceNumBodies) { return; } typename vec4<T>::Type position = oldPos[deviceOffset + index]; typename vec3<T>::Type accel = computeBodyAccel<T, multithreadBodies>(position, oldPos, totalNumBodies); if (!multithreadBodies || (threadIdx.y == 0)) { // acceleration = force \ mass; // new velocity = old velocity + acceleration * deltaTime // note we factor out the body's mass from the equation, here and in bodyBodyInteraction // (because they cancel out). 
Thus here force == acceleration typename vec4<T>::Type velocity = vel[deviceOffset + index]; velocity.x += accel.x * deltaTime; velocity.y += accel.y * deltaTime; velocity.z += accel.z * deltaTime; velocity.x *= damping; velocity.y *= damping; velocity.z *= damping; // new position = old position + velocity * deltaTime position.x += velocity.x * deltaTime; position.y += velocity.y * deltaTime; position.z += velocity.z * deltaTime; // store new position and velocity newPos[deviceOffset + index] = position; vel[deviceOffset + index] = velocity; } } template <typename T> void integrateNbodySystem(DeviceData<T> *deviceData, cudaGraphicsResource **pgres, unsigned int currentRead, float deltaTime, float damping, unsigned int numBodies, unsigned int numDevices, int p, int q, bool bUsePBO) { if (bUsePBO) { checkCudaErrors(cudaGraphicsResourceSetMapFlags(pgres[currentRead], cudaGraphicsMapFlagsReadOnly)); checkCudaErrors(cudaGraphicsResourceSetMapFlags(pgres[1-currentRead], cudaGraphicsMapFlagsWriteDiscard)); checkCudaErrors(cudaGraphicsMapResources(2, pgres, 0)); size_t bytes; checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&(deviceData[0].dPos[currentRead]), &bytes, pgres[currentRead])); checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&(deviceData[0].dPos[1-currentRead]), &bytes, pgres[1-currentRead])); } cudaDeviceProp props; for (unsigned int dev = 0; dev != numDevices; dev++) { if (numDevices > 1) { cudaSetDevice(dev); } checkCudaErrors(cudaGetDeviceProperties(&props, dev)); while ((deviceData[dev].numBodies > 0) && p > 1 && (deviceData[dev].numBodies / p < (unsigned)props.multiProcessorCount)) { p /= 2; q *= 2; } dim3 threads(p,q,1); dim3 grid((deviceData[dev].numBodies + (p-1))/p, 1, 1); // execute the kernel: // When the numBodies / thread block size is < # multiprocessors // (16 on G80), the GPU is underutilized. For example, with 256 threads per // block and 1024 bodies, there will only be 4 thread blocks, so the // GPU will only be 25% utilized. To improve this, we use multiple threads // per body. We still can use blocks of 256 threads, but they are arranged // in q rows of p threads each. Each thread processes 1/q of the forces // that affect each body, and then 1/q of the threads (those with // threadIdx.y==0) add up the partial sums from the other threads for that // body. To enable this, use the "--p=" and "--q=" command line options to // this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 // threads per body and 256 threads per block. There will be n/p = 16 // blocks, so a G80 GPU will be 100% utilized. // We use a bool template parameter to specify when the number of threads // per body is greater than one, so that when it is not we don't have to // execute the more complex code required! 
int sharedMemSize = p * q * 4 * sizeof(T); // 4 floats for pos if (grid.x > 0 && threads.y == 1) { integrateBodies<T, false><<< grid, threads, sharedMemSize >>> ((typename vec4<T>::Type *)deviceData[dev].dPos[1-currentRead], (typename vec4<T>::Type *)deviceData[dev].dPos[currentRead], (typename vec4<T>::Type *)deviceData[dev].dVel, deviceData[dev].offset, deviceData[dev].numBodies, deltaTime, damping, numBodies); } else if (grid.x > 0) { integrateBodies<T, true><<< grid, threads, sharedMemSize >>> ((typename vec4<T>::Type *)deviceData[dev].dPos[1-currentRead], (typename vec4<T>::Type *)deviceData[dev].dPos[currentRead], (typename vec4<T>::Type *)deviceData[dev].dVel, deviceData[dev].offset, deviceData[dev].numBodies, deltaTime, damping, numBodies); } if (numDevices > 1) { checkCudaErrors(cudaEventRecord(deviceData[dev].event)); // MJH: Hack on older driver versions to force kernel launches to flush! cudaStreamQuery(0); } // check if kernel invocation generated an error getLastCudaError("Kernel execution failed"); } if (numDevices > 1) { for (unsigned int dev = 0; dev < numDevices; dev++) { checkCudaErrors(cudaEventSynchronize(deviceData[dev].event)); } } if (bUsePBO) { checkCudaErrors(cudaGraphicsUnmapResources(2, pgres, 0)); } } // Explicit specializations needed to generate code template void integrateNbodySystem<float>(DeviceData<float> *deviceData, cudaGraphicsResource **pgres, unsigned int currentRead, float deltaTime, float damping, unsigned int numBodies, unsigned int numDevices, int p, int q, bool bUsePBO); template void integrateNbodySystem<double>(DeviceData<double> *deviceData, cudaGraphicsResource **pgres, unsigned int currentRead, float deltaTime, float damping, unsigned int numBodies, unsigned int numDevices, int p, int q, bool bUsePBO);
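// A minimal host-side sketch of the softened pairwise interaction that
// bodyBodyInteraction computes above, stripped of the CUDA vector types so
// it can run anywhere. eps2 stands in for softeningSquared; the bodies and
// masses are illustrative values, not taken from the sample.
#include <cmath>
#include <cstdio>

struct F3 { float x, y, z; };
struct F4 { float x, y, z, w; };  // w carries the mass, as in the kernel

static F3 bodyBody(F3 ai, F4 bi, F4 bj, float eps2) {
    // r_ij, softened squared distance, then the m_j / |r|^3 scaling.
    float rx = bj.x - bi.x, ry = bj.y - bi.y, rz = bj.z - bi.z;
    float distSqr = rx * rx + ry * ry + rz * rz + eps2;
    float invDist = 1.0f / std::sqrt(distSqr);
    float s = bj.w * invDist * invDist * invDist;
    ai.x += rx * s; ai.y += ry * s; ai.z += rz * s;
    return ai;
}

int main() {
    F4 bi{0.f, 0.f, 0.f, 1.f};   // body i at the origin
    F4 bj{1.f, 0.f, 0.f, 2.f};   // body j one unit away, mass 2
    F3 acc = bodyBody(F3{0.f, 0.f, 0.f}, bi, bj, 1e-4f);
    printf("acc = (%f, %f, %f)\n", acc.x, acc.y, acc.z);  // ~ (2, 0, 0)
    return 0;
}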
#ifndef _BLURKERNEL_H_ #define _BLURKERNEL_H_ #include "gpu/image/blur.hpp" #include "backend/common/imageOps.hpp" #include "backend/common/image/blurdef.h" namespace VideoStitch { namespace Image { /** * Accumulator values for several types. */ template <typename U> struct AccumT {}; /** * Accumulator values for float. */ template <> struct AccumT<float> { typedef float Type; typedef float DividerType; static __device__ float init(const float s) { return s; } }; /** * Accumulator values for float2. */ template <> struct AccumT<float2> { typedef float2 Type; typedef float2 DividerType; static __device__ float2 init(const float s) { return make_float2(s, s); } }; /** * Accumulator values for uchar. */ template <> struct AccumT<unsigned char> { typedef unsigned Type; typedef unsigned DividerType; static __device__ unsigned init(const unsigned s) { return s; } }; /** * A class that accumulates values of type T. * The default implementation is for scalar values only. */ template <typename T> class Accumulator { public: __device__ Accumulator(int radius) : acc(AccumT<T>::init(0)), divider(AccumT<T>::init(2 * radius + 1)) {} /** * Accumulates a value. * @param v */ __device__ void accumulate(const T v) { acc += v; } /** * Unaccumulates a value. * @param v */ __device__ void unaccumulate(const T v) { acc -= v; } /** * Returns the divided accumulated (blurred) pixel value. * Parameters are unused but are here to provide the same API as Accumulator<uint32_t>. */ __device__ T get(const T* /*src*/, size_t /*i*/) const { return (divider == AccumT<T>::init(0)) ? AccumT<T>::init(0) : (acc / divider); } private: typename AccumT<T>::Type acc; const typename AccumT<T>::DividerType divider; }; /** Maybe I'm overlooking something, but I don't understand why nvcc gives warning about all 4 private fields being * unused. They are clearly used, or this Accumulator would not accumulate anything. */ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-private-field" #endif /** * Accumulator for RGBA210 values. Pixels with 0 alpha contribute 0. * So the divider will always be between 0 and 2 * radius + 1. */ template <> class Accumulator<uint32_t> { public: __device__ Accumulator(int /*radius*/) : accR(0), accG(0), accB(0), divider(0) {} /** * Accumulates a value. * @param v */ __device__ void accumulate(const uint32_t v) { const int32_t isSolid = RGB210::a(v); if (isSolid != 0) { accR += RGB210::r(v); accG += RGB210::g(v); accB += RGB210::b(v); ++divider; } } /** * Unaccumulates a value. * @param v */ __device__ void unaccumulate(const uint32_t v) { const int32_t isSolid = RGB210::a(v); if (isSolid != 0) { accR -= RGB210::r(v); accG -= RGB210::g(v); accB -= RGB210::b(v); --divider; } } /** * Returns the divided accumulated (blurred) pixel value. */ __device__ uint32_t get(const uint32_t* src, size_t i) const { return (divider == 0) ? 0 : RGB210::pack(accR / divider, accG / divider, accB / divider, RGB210::a(src[i])); } private: int32_t accR; int32_t accG; int32_t accB; int32_t divider; }; #ifdef __clang__ #pragma clang diagnostic pop #endif /** * Gaussian blur in 1D. @a r is the filter radius, which must be such that 2 * r < h. 
* NOTE: STRONG rounding artifacts with T==integer type */ template <typename T> __global__ void blur1DKernelNoWrap(T* __restrict__ dst, const T* __restrict__ src, int w, int h, const int r) { int columnId = blockIdx.x * blockDim.x + threadIdx.x; if (columnId < w) { dst += columnId; src += columnId; Accumulator<T> accumulator(r); // Boundary condition: extend. const T v0 = src[0]; accumulator.accumulate(v0); for (int y = 1; y < (r + 1); ++y) { accumulator.accumulate(v0); accumulator.accumulate(src[y * w]); } dst[0] = accumulator.get(src, 0); for (int y = 1; y < (r + 1); ++y) { accumulator.accumulate(src[(y + r) * w]); accumulator.unaccumulate(v0); dst[y * w] = accumulator.get(src, y * w); } // Main loop for (int y = (r + 1); y < (h - r); ++y) { accumulator.accumulate(src[(y + r) * w]); accumulator.unaccumulate(src[((y - r) * w) - w]); dst[y * w] = accumulator.get(src, y * w); } // Boundary condition: extend. const T vEnd = src[(h - 1) * w]; for (int y = h - r; y < h; ++y) { accumulator.accumulate(vEnd); accumulator.unaccumulate(src[((y - r) * w) - w]); dst[y * w] = accumulator.get(src, y * w); } } } /** * Gaussian blur in 1D for cases where 2 * r >= h. r must be such that: r < h */ template <typename T> __global__ void blur1DKernelNoWrapLargeRadius(T* __restrict__ dst, const T* __restrict__ src, int w, int h, const int r) { int columnId = blockIdx.x * blockDim.x + threadIdx.x; if (columnId < w) { dst += columnId; src += columnId; Accumulator<T> accumulator(r); // Boundary condition: extend. const T v0 = src[0]; accumulator.accumulate(v0); for (int y = 1; y < (r + 1); ++y) { accumulator.accumulate(v0); accumulator.accumulate(src[y * w]); } dst[0] = accumulator.get(src, 0); // Stops at (h - r - 1) instead of (r + 1). for (int y = 1; y < (h - r); ++y) { accumulator.accumulate(src[(y + r) * w]); accumulator.unaccumulate(v0); dst[y * w] = accumulator.get(src, y * w); } const T vEnd = src[(h - 1) * w]; // Middle loop for (int y = h - r; y < (r + 1); ++y) { accumulator.accumulate(vEnd); accumulator.unaccumulate(v0); dst[y * w] = accumulator.get(src, y * w); } // Boundary condition: extend. for (int y = r + 1; y < h; ++y) { accumulator.accumulate(vEnd); accumulator.unaccumulate(src[((y - r) * w) - w]); dst[y * w] = accumulator.get(src, y * w); } } } /** * Gaussian blur in 1D for cases where r >= h. Here only the boundary conditions apply since all the buffer values are * in the stencil, always. */ template <typename T> __global__ void blur1DKernelNoWrapHugeRadius(T* __restrict__ dst, const T* __restrict__ src, int w, int h, const int r) { int columnId = blockIdx.x * blockDim.x + threadIdx.x; if (columnId < w) { dst += columnId; src += columnId; Accumulator<T> accumulator(r); // Boundary condition: extend. const T v0 = src[0]; const T vEnd = src[(h - 1) * w]; for (int y = 0; y < r; ++y) { accumulator.accumulate(v0); } // Accumulate all buffer values. for (int y = 0; y < h; ++y) { accumulator.accumulate(src[y * w]); } // Fill up with past-end-of-buffer values. for (int y = h; y < r + 1; ++y) { accumulator.accumulate(vEnd); } // Then everything is simple. 
dst[0] = accumulator.get(src, 0); for (int y = 1; y < h; ++y) { accumulator.accumulate(vEnd); accumulator.unaccumulate(v0); dst[y * w] = accumulator.get(src, y * w); } } } /** * Same, but wraps */ template <typename T> __global__ void blur1DKernelWrap(T* __restrict__ dst, const T* __restrict__ src, int w, int h, const int r) { int columnId = blockIdx.x * blockDim.x + threadIdx.x; if (columnId < w) { dst += columnId; src += columnId; Accumulator<T> accumulator(r); // Boundary condition: wrap. for (int y = h - r; y < h; ++y) { accumulator.accumulate(src[y * w]); } for (int y = 0; y < (r + 1); ++y) { accumulator.accumulate(src[y * w]); } dst[0] = accumulator.get(src, 0); for (int y = 1; y < (r + 1); ++y) { accumulator.accumulate(src[(y + r) * w]); accumulator.unaccumulate(src[(h + y - r) * w - w]); dst[y * w] = accumulator.get(src, y * w); } // Main loop for (int y = (r + 1); y < (h - r); ++y) { accumulator.accumulate(src[(y + r) * w]); accumulator.unaccumulate(src[(y - r) * w - w]); dst[y * w] = accumulator.get(src, y * w); } // Boundary condition: wrap. for (int y = h - r; y < h; ++y) { accumulator.accumulate(src[(y + r - h) * w]); accumulator.unaccumulate(src[((y - r) * w) - w]); dst[y * w] = accumulator.get(src, y * w); } } } /* * bluColumnsKernel is a modification of convolution separable kernel from Nvidia samples. * It adds to the use of shared memory the accumulation algorithm. * Each thread blurs COLUMNS_RESULT_STEPS consecutive pixels on the Y dimension. */ #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) template <typename T> __global__ void blurColumnsKernelNoWrap(T* dst, const T* src, int width, int height, int pitch, int radius) { const int idx = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; // thread id on x dimension const int idy = blockIdx.y * COLUMNS_BLOCKDIM_Y + threadIdx.y; // thread id on y dimension if (idx < width) { // check if thread is not out of the image // Shared buffer is a 2D array represented as a 1D array. Each thread blurs COLUMNS_RESULT_STEPS pixels on the Y // dimension. 
__shared__ T s_Data[COLUMNS_BLOCKDIM_X * SIZE_Y]; // Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; const T* InputWithHaloOffset = src + baseY * pitch + baseX; // move for reading T* OutputWithOffset = dst + (threadIdx.y * COLUMNS_RESULT_STEPS + (blockIdx.y * COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)) * pitch + baseX; // move for writing // load data needed by the block into shared memory #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; ++i) { if ((baseY >= (-i * COLUMNS_BLOCKDIM_Y)) && (height - baseY > i * COLUMNS_BLOCKDIM_Y)) { // inside image s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[i * COLUMNS_BLOCKDIM_Y * pitch]; } else { if (baseY < -i * COLUMNS_BLOCKDIM_Y) { // out of image (upper) s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[-baseY * pitch]; } else { // out of image (lower) s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[(height - 1 - baseY) * pitch]; } } } __syncthreads(); Accumulator<T> acc(radius); // every thread blurs COLUMNS_RESULT_STEPS pixels starting from this offset (skipping the halo) const int offset = threadIdx.x * SIZE_Y + COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS + threadIdx.y * COLUMNS_RESULT_STEPS; if (idy * COLUMNS_RESULT_STEPS < height) { // check if thread is not out of the image #pragma unroll for (int j = -radius; j <= radius; ++j) { T v = s_Data[offset + j]; acc.accumulate(v); } OutputWithOffset[0] = acc.get(s_Data, offset); // every thread blurs COLUMNS_RESULT_STEPS pixels, unless it is in the very low part of the image for (int i = 1; i < MIN(COLUMNS_RESULT_STEPS, height - idy * COLUMNS_RESULT_STEPS); ++i) { T v0 = s_Data[offset + i + radius]; acc.accumulate(v0); T v1 = s_Data[offset + i - radius - 1]; acc.unaccumulate(v1); OutputWithOffset[i * pitch] = acc.get(s_Data, offset + i); } } } } template <> __global__ void blurColumnsKernelNoWrap<uint32_t>(uint32_t* dst, const uint32_t* src, int width, int height, int pitch, int radius) { const int idx = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; // thread id on x dimension const int idy = blockIdx.y * COLUMNS_BLOCKDIM_Y + threadIdx.y; // thread id on y dimension if (idx < width) { // check if thread is not out of the image // Shared buffer is a 2D array represented as a 1D array. Each thread blurs COLUMNS_RESULT_STEPS pixels on the Y // dimension. 
__shared__ uint32_t s_Data[COLUMNS_BLOCKDIM_X * SIZE_Y]; // Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; const uint32_t* InputWithHaloOffset = src + baseY * pitch + baseX; // move for reading uint32_t* OutputWithOffset = dst + (threadIdx.y * COLUMNS_RESULT_STEPS + (blockIdx.y * COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)) * pitch + baseX; // move for writing // load data needed by the block into shared memory #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; ++i) { if ((baseY >= (-i * COLUMNS_BLOCKDIM_Y)) && (height - baseY > i * COLUMNS_BLOCKDIM_Y)) { // inside image s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[i * COLUMNS_BLOCKDIM_Y * pitch]; } else { if (baseY < -i * COLUMNS_BLOCKDIM_Y) { // out of image (upper) s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[-baseY * pitch]; } else { // out of image (lower) s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[(height - 1 - baseY) * pitch]; } } } __syncthreads(); // the use of accumlator class leads sometimes to a bug on linux which is unexplained. That's why we avoid it int32_t accR(0); int32_t accG(0); int32_t accB(0); int32_t divider(0); // every thread blurs COLUMNS_RESULT_STEPS pixels starting from this offset (skipping the halo) const int offset = threadIdx.x * SIZE_Y + COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS + threadIdx.y * COLUMNS_RESULT_STEPS; if (idy * COLUMNS_RESULT_STEPS < height) { for (int j = -radius; j <= radius; ++j) { uint32_t v = s_Data[offset + j]; if (RGB210::a(v) != 0) { accR += RGB210::r(v); accG += RGB210::g(v); accB += RGB210::b(v); divider++; } } if (divider != 0) { OutputWithOffset[0] = RGB210::pack(accR / divider, accG / divider, accB / divider, RGB210::a(s_Data[offset])); } else { OutputWithOffset[0] = 0; } // every thread blurs COLUMNS_RESULT_STEPS pixels, unless it is in the very low part of the image for (int i = 1; i < MIN(COLUMNS_RESULT_STEPS, height - idy * COLUMNS_RESULT_STEPS); ++i) { uint32_t v0 = s_Data[offset + i + radius]; if (RGB210::a(v0) != 0) { accR += RGB210::r(v0); accG += RGB210::g(v0); accB += RGB210::b(v0); ++divider; } uint32_t v1 = s_Data[offset + i - radius - 1]; if (RGB210::a(v1) != 0) { accR -= RGB210::r(v1); accG -= RGB210::g(v1); accB -= RGB210::b(v1); --divider; } if (divider != 0) { OutputWithOffset[i * pitch] = RGB210::pack(accR / divider, accG / divider, accB / divider, RGB210::a(s_Data[offset + i])); } else { OutputWithOffset[i * pitch] = 0; } } } } } template <typename T> __global__ void blurColumnsKernelWrap(T* dst, const T* src, int width, int height, int pitch, int radius) { const int idx = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; // thread id on x dimension const int idy = blockIdx.y * COLUMNS_BLOCKDIM_Y + threadIdx.y; // thread id on y dimension if (idx < width) { // check if thread is not out of the image // Shared buffer is a 2D array represented as a 1D array. Each thread blurs COLUMNS_RESULT_STEPS pixels on the Y // dimension. 
__shared__ T s_Data[COLUMNS_BLOCKDIM_X * SIZE_Y]; // Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; const T* InputWithHaloOffset = src + baseY * pitch + baseX; T* OutputWithOffset = dst + (threadIdx.y * COLUMNS_RESULT_STEPS + (blockIdx.y * COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)) * pitch + baseX; // move for writing // load data needed by the block into shared memory #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { if ((baseY >= (-i * COLUMNS_BLOCKDIM_Y)) && (height - baseY > i * COLUMNS_BLOCKDIM_Y)) { s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[i * COLUMNS_BLOCKDIM_Y * pitch]; } else { if (baseY < -i * COLUMNS_BLOCKDIM_Y) { // out of image (upper) s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[(height + i * COLUMNS_BLOCKDIM_Y) * pitch]; } else { // out of image (lower) s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[(i * COLUMNS_BLOCKDIM_Y - height) * pitch]; } } } __syncthreads(); Accumulator<T> acc(radius); // every thread blurs COLUMNS_RESULT_STEPS pixels starting from this offset (skipping the halo) const int offset = threadIdx.x * SIZE_Y + COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS + threadIdx.y * COLUMNS_RESULT_STEPS; if (idy * COLUMNS_RESULT_STEPS < height) { // check if thread is not out of the image #pragma unroll for (int j = -radius; j <= radius; j++) { T v = s_Data[offset + j]; acc.accumulate(v); } OutputWithOffset[0] = acc.get(s_Data, offset); // every thread blurs COLUMNS_RESULT_STEPS pixels, unless it is in the very low part of the image for (int i = 1; i < MIN(COLUMNS_RESULT_STEPS, height - idy * COLUMNS_RESULT_STEPS); i++) { T v0 = s_Data[offset + i + radius]; acc.accumulate(v0); T v1 = s_Data[offset + i - radius - 1]; acc.unaccumulate(v1); OutputWithOffset[i * pitch] = acc.get(s_Data, offset + i); } } } } template <> __global__ void blurColumnsKernelWrap<uint32_t>(uint32_t* dst, const uint32_t* src, int width, int height, int pitch, int radius) { const int idx = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; // thread id on x dimension const int idy = blockIdx.y * COLUMNS_BLOCKDIM_Y + threadIdx.y; // thread id on y dimension if (idx < width) { // check if thread is not out of the image // Shared buffer is a 2D array represented as a 1D array. Each thread blurs COLUMNS_RESULT_STEPS pixels on the Y // dimension. 
__shared__ uint32_t s_Data[COLUMNS_BLOCKDIM_X * SIZE_Y]; // Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; const uint32_t* InputWithHaloOffset = src + baseY * pitch + baseX; uint32_t* OutputWithOffset = dst + (threadIdx.y * COLUMNS_RESULT_STEPS + (blockIdx.y * COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)) * pitch + baseX; // moving for writing // load data needed by the block into shared memory #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { if ((baseY >= (-i * COLUMNS_BLOCKDIM_Y)) && (height - baseY > i * COLUMNS_BLOCKDIM_Y)) { s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[i * COLUMNS_BLOCKDIM_Y * pitch]; } else { if (baseY < -i * COLUMNS_BLOCKDIM_Y) { // out of image (upper) s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[(height + i * COLUMNS_BLOCKDIM_Y) * pitch]; } else { // out of image (lower) s_Data[threadIdx.x * SIZE_Y + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = InputWithHaloOffset[(i * COLUMNS_BLOCKDIM_Y - height) * pitch]; } } } __syncthreads(); // the use of accumlator class leads sometimes to a bug on linux which is unexplained. That's why we avoid it int32_t accR(0); int32_t accG(0); int32_t accB(0); int32_t divider(0); // every thread blurs COLUMNS_RESULT_STEPS pixels starting from this offset (skipping the halo) const int offset = threadIdx.x * SIZE_Y + COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS + threadIdx.y * COLUMNS_RESULT_STEPS; if (idy * COLUMNS_RESULT_STEPS < height) { for (int j = -radius; j <= radius; j++) { uint32_t v = s_Data[offset + j]; if (RGB210::a(v) != 0) { accR += RGB210::r(v); accG += RGB210::g(v); accB += RGB210::b(v); divider++; } } if (divider != 0) { OutputWithOffset[0] = RGB210::pack(accR / divider, accG / divider, accB / divider, RGB210::a(s_Data[offset])); } else { OutputWithOffset[0] = 0; } // every thread blurs COLUMNS_RESULT_STEPS pixels, unless it is in the very low part of the image for (int i = 1; i < MIN(COLUMNS_RESULT_STEPS, height - idy * COLUMNS_RESULT_STEPS); i++) { uint32_t v0 = s_Data[offset + i + radius]; if (RGB210::a(v0) != 0) { accR += RGB210::r(v0); accG += RGB210::g(v0); accB += RGB210::b(v0); ++divider; } uint32_t v1 = s_Data[offset + i - radius - 1]; if (RGB210::a(v1) != 0) { accR -= RGB210::r(v1); accG -= RGB210::g(v1); accB -= RGB210::b(v1); --divider; } if (divider != 0) { OutputWithOffset[i * pitch] = RGB210::pack(accR / divider, accG / divider, accB / divider, RGB210::a(s_Data[offset + i])); } else { OutputWithOffset[i * pitch] = 0; } } } } } __global__ void blurRowsKernelNoWrap(uint32_t* dst, const uint32_t* src, std::size_t width, std::size_t height, std::size_t pitch, int radius) { __shared__ uint32_t s_Data_Input[ROWS_BLOCKDIM_Y][SIZE_X]; __shared__ uint32_t s_Data_Output[ROWS_BLOCKDIM_Y][SIZE_X]; // Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const uint32_t* InputWithHaloOffset = src + baseY * pitch + baseX; uint32_t* OutputWithHaloOffset = dst + baseY * pitch + baseX; const int idy = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const int idx = blockIdx.x * ROWS_BLOCKDIM_X + threadIdx.x; if (idy < height) { #pragma unroll // load data needed by the block into shared memory for (int i 
= 0; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { if (((width - baseX) > (i * ROWS_BLOCKDIM_X)) && (baseX >= -i * ROWS_BLOCKDIM_X)) { s_Data_Input[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = InputWithHaloOffset[i * ROWS_BLOCKDIM_X]; } else { if (baseX < -i * ROWS_BLOCKDIM_X) { // out of image (left) s_Data_Input[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = InputWithHaloOffset[-baseX]; } else { // out of image (right) s_Data_Input[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = InputWithHaloOffset[width - baseX - 1]; } } } // Compute and store results __syncthreads(); int32_t accR(0); int32_t accG(0); int32_t accB(0); int32_t divider(0); // every thread blurs ROWS_RESULT_STEPS pixels starting from this offset const int offset = ROWS_HALO_STEPS * ROWS_BLOCKDIM_X + threadIdx.x * ROWS_RESULT_STEPS; if (idx * ROWS_RESULT_STEPS < width) { for (int j = -radius; j <= radius; j++) { uint32_t v = s_Data_Input[threadIdx.y][offset + j]; if (RGB210::a(v) != 0) { accR += RGB210::r(v); accG += RGB210::g(v); accB += RGB210::b(v); ++divider; } } if (divider != 0) { s_Data_Output[threadIdx.y][offset] = RGB210::pack(accR / divider, accG / divider, accB / divider, RGB210::a(s_Data_Input[threadIdx.y][offset])); } else { s_Data_Output[threadIdx.y][offset] = 0; } for (int i = 1; i < MIN(ROWS_RESULT_STEPS, width - idx * ROWS_RESULT_STEPS); i++) { uint32_t v0 = s_Data_Input[threadIdx.y][offset + i + radius]; if (RGB210::a(v0) != 0) { accR += RGB210::r(v0); accG += RGB210::g(v0); accB += RGB210::b(v0); ++divider; } uint32_t v1 = s_Data_Input[threadIdx.y][offset + i - radius - 1]; if (RGB210::a(v1) != 0) { accR -= RGB210::r(v1); accG -= RGB210::g(v1); accB -= RGB210::b(v1); --divider; } if (divider != 0) { s_Data_Output[threadIdx.y][offset + i] = RGB210::pack(accR / divider, accG / divider, accB / divider, RGB210::a(s_Data_Input[threadIdx.y][offset + i])); } else { s_Data_Output[threadIdx.y][offset + i] = 0; } } } __syncthreads(); // write to global memory (coalesced access) #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { if ((i * ROWS_BLOCKDIM_X + baseX) < width) { OutputWithHaloOffset[i * ROWS_BLOCKDIM_X] = s_Data_Output[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X]; } } } } __global__ void blurRowsKernelWrap(uint32_t* dst, const uint32_t* src, std::size_t width, std::size_t height, std::size_t pitch, int radius) { __shared__ uint32_t s_Data_Input[ROWS_BLOCKDIM_Y][SIZE_X]; __shared__ uint32_t s_Data_Output[ROWS_BLOCKDIM_Y][SIZE_X]; // Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const uint32_t* InputWithHaloOffset = src + baseY * pitch + baseX; uint32_t* OutputWithHaloOffset = dst + baseY * pitch + baseX; const int idy = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const int idx = blockIdx.x * ROWS_BLOCKDIM_X + threadIdx.x; if (idy < height) { #pragma unroll // load data needed by the block into shared memory for (int i = 0; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { if (((width - baseX) > (i * ROWS_BLOCKDIM_X)) && (baseX >= -i * ROWS_BLOCKDIM_X)) { s_Data_Input[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = InputWithHaloOffset[i * ROWS_BLOCKDIM_X]; } else { if (baseX < -i * ROWS_BLOCKDIM_X) { // out of image (left) s_Data_Input[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = InputWithHaloOffset[width + i * ROWS_BLOCKDIM_X]; } else { // out of image (right) 
s_Data_Input[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = InputWithHaloOffset[i * ROWS_BLOCKDIM_X - width]; } } } __syncthreads(); // Compute and store results int32_t accR(0); int32_t accG(0); int32_t accB(0); int32_t divider(0); // every thread blurs ROWS_RESULT_STEPS consecutive pixels starting from this offset const int offset = ROWS_HALO_STEPS * ROWS_BLOCKDIM_X + threadIdx.x * ROWS_RESULT_STEPS; if (idx * ROWS_RESULT_STEPS < width) { for (int j = -radius; j <= radius; j++) { uint32_t v = s_Data_Input[threadIdx.y][offset + j]; if (RGB210::a(v) != 0) { accR += RGB210::r(v); accG += RGB210::g(v); accB += RGB210::b(v); ++divider; } } if (divider != 0) { s_Data_Output[threadIdx.y][offset] = RGB210::pack(accR / divider, accG / divider, accB / divider, RGB210::a(s_Data_Input[threadIdx.y][offset])); } else { s_Data_Output[threadIdx.y][offset] = 0; } for (int i = 1; i < MIN(ROWS_RESULT_STEPS, width - idx * ROWS_RESULT_STEPS); i++) { uint32_t v0 = s_Data_Input[threadIdx.y][offset + i + radius]; if (RGB210::a(v0) != 0) { accR += RGB210::r(v0); accG += RGB210::g(v0); accB += RGB210::b(v0); ++divider; } uint32_t v1 = s_Data_Input[threadIdx.y][offset + i - radius - 1]; if (RGB210::a(v1) != 0) { accR -= RGB210::r(v1); accG -= RGB210::g(v1); accB -= RGB210::b(v1); --divider; } if (divider != 0) { s_Data_Output[threadIdx.y][offset + i] = RGB210::pack(accR / divider, accG / divider, accB / divider, RGB210::a(s_Data_Input[threadIdx.y][offset + i])); } else { s_Data_Output[threadIdx.y][offset + i] = 0; } } } __syncthreads(); // write to global memory (coalesced access) #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { if ((i * ROWS_BLOCKDIM_X + baseX) < width) { OutputWithHaloOffset[i * ROWS_BLOCKDIM_X] = s_Data_Output[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X]; } } } } //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ uint32_t c_Kernel[KERNEL_LENGTH]; extern "C" void setConvolutionKernel(uint32_t* h_Kernel) { cudaMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(uint32_t)); } template <bool wrap> __global__ void convolutionRowsKernel(uint32_t* __restrict__ dst, const uint32_t* __restrict__ src, int width, int height, int pitch) { __shared__ uint32_t s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; // Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; src += baseY * pitch + baseX; dst += baseY * pitch + baseX; // Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (width - baseX > i * ROWS_BLOCKDIM_X) ? src[i * ROWS_BLOCKDIM_X] : (wrap ? src[i * ROWS_BLOCKDIM_X - baseX] : src[width - 1 - baseX]); } // Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? src[i * ROWS_BLOCKDIM_X] : (wrap ? src[width - baseX - i * ROWS_BLOCKDIM_X] : src[-baseX]); } // Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (width - baseX > i * ROWS_BLOCKDIM_X) ? 
src[i * ROWS_BLOCKDIM_X] : (wrap ? src[i * ROWS_BLOCKDIM_X - baseX] : src[width - 1 - baseX]);
  }

  // Compute and store results
  __syncthreads();

#pragma unroll
  for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) {
    uint32_t accR = 0;
    uint32_t accG = 0;
    uint32_t accB = 0;
    uint32_t divider = 0;

#pragma unroll
    for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) {
      uint32_t v = s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
      const int32_t isSolid = !!RGBA::a(v);
      accR += isSolid * c_Kernel[KERNEL_RADIUS - j] * RGBA::r(v);
      accG += isSolid * c_Kernel[KERNEL_RADIUS - j] * RGBA::g(v);
      accB += isSolid * c_Kernel[KERNEL_RADIUS - j] * RGBA::b(v);
      divider += isSolid * c_Kernel[KERNEL_RADIUS - j];
    }
    // Guard with the rows-kernel tile size, matching the store index below.
    if (width - baseX > i * ROWS_BLOCKDIM_X) {
      dst[i * ROWS_BLOCKDIM_X] =
          (divider == 0)
              ? 0
              : RGBA::pack(accR / divider, accG / divider, accB / divider,
                           RGBA::a(s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X]));
    }
  }
}

__global__ void convolutionColumnsKernel(uint32_t* __restrict__ dst, const uint32_t* __restrict__ src,
                                         int width, int height, int pitch) {
  __shared__ uint32_t
      s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];

  // Offset to the upper halo edge
  const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
  const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
  src += baseY * pitch + baseX;
  dst += baseY * pitch + baseX;

  // Main data
#pragma unroll
  for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) {
    s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
        (height - baseY > i * COLUMNS_BLOCKDIM_Y) ? src[i * COLUMNS_BLOCKDIM_Y * pitch]
                                                  : src[(height - 1 - baseY) * pitch];
  }

  // Upper halo
#pragma unroll
  for (int i = 0; i < COLUMNS_HALO_STEPS; i++) {
    s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
        (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? src[i * COLUMNS_BLOCKDIM_Y * pitch] : src[-baseY * pitch];
  }

  // Lower halo
#pragma unroll
  for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS;
       i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) {
    s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
        (height - baseY > i * COLUMNS_BLOCKDIM_Y) ? src[i * COLUMNS_BLOCKDIM_Y * pitch]
                                                  : src[(height - 1 - baseY) * pitch];
  }

  // Compute and store results
  __syncthreads();

#pragma unroll
  for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) {
    uint32_t accR = 0;
    uint32_t accG = 0;
    uint32_t accB = 0;
    uint32_t divider = 0;

#pragma unroll
    for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) {
      uint32_t v = s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
      const int32_t isSolid = !!RGBA::a(v);
      accR += isSolid * c_Kernel[KERNEL_RADIUS - j] * RGBA::r(v);
      accG += isSolid * c_Kernel[KERNEL_RADIUS - j] * RGBA::g(v);
      accB += isSolid * c_Kernel[KERNEL_RADIUS - j] * RGBA::b(v);
      divider += isSolid * c_Kernel[KERNEL_RADIUS - j];
    }
    if (height - baseY > i * COLUMNS_BLOCKDIM_Y) {
      dst[i * COLUMNS_BLOCKDIM_Y * pitch] =
          (divider == 0)
              ? 0
              : RGBA::pack(accR / divider, accG / divider, accB / divider,
                           RGBA::a(s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]));
    }
  }
}

}  // namespace Image
}  // namespace VideoStitch

#endif
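// A minimal host-side sketch of the running-sum ("accumulate one sample,
// unaccumulate one sample") pattern the blur kernels above rely on: each
// output costs one add and one subtract instead of a full (2r+1)-tap sum.
// The clamping mirrors the extend boundary condition of the NoWrap kernels;
// the data and radius are illustrative.
#include <cstdio>

static void boxBlur1D(const float* src, float* dst, int n, int r) {
    float acc = 0.0f;
    const float div = 2.0f * r + 1.0f;
    // Prime the window centered on index 0, extending the left boundary.
    for (int k = -r; k <= r; ++k)
        acc += src[k < 0 ? 0 : (k >= n ? n - 1 : k)];
    dst[0] = acc / div;
    for (int i = 1; i < n; ++i) {
        int in = i + r;       // sample entering the window
        int out = i - r - 1;  // sample leaving the window
        acc += src[in >= n ? n - 1 : in];
        acc -= src[out < 0 ? 0 : out];
        dst[i] = acc / div;
    }
}

int main() {
    float x[8] = {1, 2, 3, 4, 5, 6, 7, 8}, y[8];
    boxBlur1D(x, y, 8, 2);
    for (int i = 0; i < 8; ++i) printf("%g ", y[i]);
    printf("\n");
    return 0;
}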
#include <ATen/div_rtn.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/Resize.h> #include <ATen/native/cuda/im2col.cuh> namespace at { namespace native { namespace { void slow_conv2d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW, bool weight_nullable) { TORCH_CHECK(kW > 0 && kH > 0, "kernel size should be greater than zero, but got kH: ", kH, " kW: ", kW); TORCH_CHECK(dW > 0 && dH > 0, "stride should be greater than zero, but got dH: ", dH, " dW: ", dW); TORCH_CHECK(weight_nullable || weight.defined(), "weight tensor is expected to be non-nullable"); TORCH_CHECK(!weight.defined() || ((weight.numel() > 0) && (weight.dim() == 2)), "non-empty 2D weight tensor expected, but got: ", weight.sizes()); TORCH_CHECK(!bias.defined() || (bias.dim() == 1 && bias.sizes()[0] == weight.sizes()[0]), "Expected bias to have shape [", weight.sizes()[0], "] but got ", bias.sizes()); const auto in_sizes = input.sizes(); constexpr int ndim = 4; constexpr int dimf = 1; constexpr int dimh = 2; constexpr int dimw = 3; TORCH_CHECK(in_sizes.size() == ndim, "Expected 4D input tensor, but got ", in_sizes); // Allow for empty batch size but not other dimensions const bool valid_empty = c10::multiply_integers(in_sizes.slice(1)) != 0; TORCH_CHECK(valid_empty, "non-empty input tensor expected but got: ", in_sizes); int64_t inputHeight = in_sizes[dimh]; int64_t inputWidth = in_sizes[dimw]; int64_t exactInputHeight = inputHeight + 2 * padH; int64_t exactInputWidth = inputWidth + 2 * padW; TORCH_CHECK(exactInputHeight >= kH && exactInputWidth >= kW, "Calculated padded input size per channel: ", IntArrayRef{exactInputHeight, exactInputWidth}, ". Kernel size: ", IntArrayRef{kH, kW}, ". Kernel size can't be greater than actual input size"); // NOTE: can't use conv_output_size if the weight isn't defined auto outputHeight = div_rtn<int64_t>(exactInputHeight - kH, dH) + 1; auto outputWidth = div_rtn<int64_t>(exactInputWidth - kW, dW) + 1; TORCH_CHECK(outputWidth >= 1 && outputHeight >= 1, "Given input size per channel: ", IntArrayRef{inputHeight, inputWidth}, ". Calculated output size per channel: ", IntArrayRef{outputHeight, outputWidth}, ". Output size is too small"); if (weight.defined()) { const auto w_sizes = weight.sizes(); int64_t nInputPlane = w_sizes[1]; if (w_sizes.size() == 2) { nInputPlane /= (kH * kW); } TORCH_CHECK(in_sizes[dimf] == nInputPlane, "Expected input dim ", dimf, " to have size ", nInputPlane, " but got ", in_sizes[dimf]); } if (grad_output.defined()) { const auto gO_sizes = grad_output.sizes(); TORCH_CHECK(gO_sizes.size() == ndim, "Expected grad_output to have ", ndim, " dimensions but got shape", gO_sizes); if (weight.defined()) { const auto w_sizes = weight.sizes(); TORCH_CHECK(gO_sizes[dimf] == w_sizes[0], "Expected dim ", dimf, " to have size ", w_sizes[0], " but got ", gO_sizes[dimf]); } else if (bias.defined()) { const auto b_sizes = bias.sizes(); int64_t nOutputPlane = b_sizes.size() == 0 ? 
1 : b_sizes[0]; TORCH_CHECK(gO_sizes[dimf] == nOutputPlane, "Expected grad_output dim ", dimf, " to have size ", nOutputPlane, " but got ", gO_sizes[dimf]); } TORCH_CHECK(gO_sizes[dimh] == outputHeight, "Expected grad_output dim ", dimh, " to have size ", outputHeight, " but got ", gO_sizes[dimh]); TORCH_CHECK(gO_sizes[dimw] == outputWidth, "Expected grad_output dim ", dimw, " to have size ", outputWidth, " but got ", gO_sizes[dimw]); } } Tensor new_view_weight_MM2d(const Tensor& weight_) { auto weight = weight_.expect_contiguous(); const auto w_sizes = weight->sizes(); TORCH_CHECK(w_sizes.size() == 4); int64_t s1 = w_sizes[0]; int64_t s2 = c10::multiply_integers(w_sizes.slice(1)); return weight->view({s1, s2}); } void slow_conv2d_forward( const Tensor &input, const Tensor &output, const Tensor &weight_, const Tensor &bias, const Tensor &columns, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW) { auto weight = new_view_weight_MM2d(weight_); slow_conv2d_shape_check( input, {}, weight, bias, kH, kW, dH, dW, padH, padW, /*weight_nullable*/false); constexpr int ndim = 4; constexpr int dimf = 1; constexpr int dimh = 2; constexpr int dimw = 3; auto in_sizes = input.sizes(); int64_t batchSize = in_sizes[0]; int64_t nInputPlane = in_sizes[dimf]; int64_t inputHeight = in_sizes[dimh]; int64_t inputWidth = in_sizes[dimw]; int64_t nOutputPlane = weight.sizes()[0]; int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1; int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1; // Resize output resize_output(output, {batchSize, nOutputPlane, outputHeight, outputWidth}); // Resize temporary columns resize_output(columns, {nInputPlane * kW * kH, outputHeight * outputWidth}); const bool requires_columns = ( kW != 1 || kH != 1 || dW != 1 || dH != 1 || padH != 0 || padW != 0); if (bias.defined()) { TORCH_CHECK(bias.scalar_type() == input.scalar_type(), "Expected bias to have type ", input.scalar_type(), " but got ", bias.scalar_type()); output.copy_(bias.view({-1, 1, 1})); } else { output.zero_(); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv2d_cuda", [&] { // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: auto input_n = input.select(0, elt); auto output_n = output.select(0, elt); if (requires_columns) { // Extract columns: at::native::im2col( c10::cuda::getCurrentCUDAStream(), input_n.data_ptr<scalar_t>(), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, 1, 1, columns.data_ptr<scalar_t>() ); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = nOutputPlane; int64_t n = columns.size(1); int64_t k = nInputPlane*kH*kW; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) auto gemm_in_ptr = requires_columns ? 
columns.data_ptr<scalar_t>() : input_n.data_ptr<scalar_t>(); at::cuda::blas::gemm( 'n', 'n', n, m, k, scalar_t(1), gemm_in_ptr, n, weight.data_ptr<scalar_t>(), k, scalar_t(1), output_n.data_ptr<scalar_t>(), n ); } }); } void slow_conv2d_backward( const Tensor &input, const Tensor &grad_output, const Tensor &grad_input, const Tensor &weight_, const Tensor &grad_columns, int kH, int kW, int dH, int dW, int padH, int padW) { Tensor weight = new_view_weight_MM2d(weight_); slow_conv2d_shape_check(input, grad_output, weight, {}, kH, kW, dH, dW, padH, padW, /*weight_nullable=*/false); // Params auto weight_sizes = weight.sizes(); int nInputPlane = weight_sizes[1]/(kW*kH); int nOutputPlane = weight_sizes[0]; TORCH_INTERNAL_ASSERT(grad_output.is_contiguous()); auto input_sizes = input.sizes(); int64_t inputWidth = input_sizes[3]; int64_t inputHeight = input_sizes[2]; auto output_sizes = grad_output.sizes(); int64_t outputWidth = output_sizes[3]; int64_t outputHeight = output_sizes[2]; // Batch size + input planes int64_t batchSize = input_sizes[0]; // Resize output resize_output(grad_input, input_sizes); TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); // Resize temporary columns resize_output(grad_columns, {nInputPlane*kW*kH, outputHeight*outputWidth}); TORCH_CHECK(grad_columns.is_contiguous(), "grad_columns must be contiguous"); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv2d_backward_cuda", [&] { // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per sample: auto grad_input_n = grad_input.select(0, elt); auto grad_output_n = grad_output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = nInputPlane*kW*kH; int64_t n = grad_columns.sizes()[1]; int64_t k = nOutputPlane; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) at::cuda::blas::gemm<scalar_t>( 'n', 't', n, m, k, scalar_t(1), grad_output_n.data_ptr<scalar_t>(), n, weight.data_ptr<scalar_t>(), m, scalar_t(0), grad_columns.data_ptr<scalar_t>(), n ); // Unpack columns back into input: using acc_t = at::acc_type<scalar_t, true>; at::native::col2im<scalar_t, acc_t>( c10::cuda::getCurrentCUDAStream(), grad_columns.data_ptr<scalar_t>(), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, 1, 1, grad_input_n.data_ptr<scalar_t>() ); } }); } void slow_conv2d_grad_weight( const Tensor &input, const Tensor &grad_output, const Tensor &grad_weight_, const Tensor &columns, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW) { TORCH_CHECK(grad_weight_.is_contiguous(), "grad_weight needs to be contiguous"); auto grad_weight = new_view_weight_MM2d(grad_weight_); slow_conv2d_shape_check(input, grad_output, grad_weight, {}, kH, kW, dH, dW, padH, padW, /*weight_nullable=*/true); // Params TORCH_INTERNAL_ASSERT(input.is_contiguous()); TORCH_INTERNAL_ASSERT(grad_output.is_contiguous()); auto input_sizes = input.sizes(); int64_t nInputPlane = input_sizes[1]; int64_t nOutputPlane = grad_output.sizes()[1]; int64_t inputWidth = input_sizes[3]; int64_t inputHeight = input_sizes[2]; int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1; int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes int64_t batchSize = input_sizes[0]; // Resize temporary columns resize_output(columns, {nInputPlane * kH * kW, outputHeight * outputWidth}); const bool requires_columns 
      = ( kW != 1 || kH != 1 || dW != 1 || dH != 1 || padH != 0 || padW != 0);
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
      input.scalar_type(), "slow_conv2d_grad_weight_cuda", [&] {
    // For each elt in batch, do:
    for (int elt = 0; elt < batchSize; elt++) {
      // Matrix multiply per output:
      auto grad_output_n = grad_output.select(0, elt);

      // Matrix multiply per output:
      auto input_n = input.select(0, elt);

      if (requires_columns) {
        // Extract columns:
        at::native::im2col<scalar_t>(
          c10::cuda::getCurrentCUDAStream(),
          input_n.data_ptr<scalar_t>(),
          nInputPlane, inputHeight, inputWidth,
          outputHeight, outputWidth,
          kH, kW, padH, padW, dH, dW,
          1, 1,
          columns.data_ptr<scalar_t>()
        );
      }

      // M,N,K are dims of matrix A and B
      // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
      int64_t m = nOutputPlane;
      int64_t n = nInputPlane*kW*kH;
      int64_t k = columns.sizes()[1];

      // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
      auto gemm_in_ptr = requires_columns ?
          columns.data_ptr<scalar_t>() :
          input_n.data_ptr<scalar_t>();
      at::cuda::blas::gemm(
          't', 'n',
          n, m, k,
          scalar_t(1),
          gemm_in_ptr, k,
          grad_output_n.data_ptr<scalar_t>(), k,
          scalar_t(1),
          grad_weight.data_ptr<scalar_t>(), n
      );
    }
  });
}

} // namespace (anonymous)

std::tuple<Tensor&, Tensor&> slow_conv2d_forward_out_cuda(
    const Tensor &self_,
    const Tensor &weight_,
    IntArrayRef kernel_size,
    const c10::optional<Tensor> &bias_,
    IntArrayRef stride,
    IntArrayRef padding,
    Tensor &output,
    Tensor &finput) {
  TORCH_CHECK(kernel_size.size() == 2);
  TORCH_CHECK(stride.size() == 2);
  TORCH_CHECK(padding.size() == 2);

  auto self = self_.expect_contiguous();
  auto weight = weight_.expect_contiguous();
  auto bias = [&] {
    if (bias_.has_value() && bias_->defined()) {
      return bias_->expect_contiguous();
    }
    return MaybeOwned<Tensor>::owned(c10::in_place);
  }();

  slow_conv2d_forward(
      *self, output, *weight, *bias, finput,
      kernel_size[0], kernel_size[1],
      stride[0], stride[1],
      padding[0], padding[1]
  );
  return std::tuple<Tensor&, Tensor&>{output, finput};
}

std::tuple<Tensor, Tensor> slow_conv2d_forward_cuda(
    const Tensor &self,
    const Tensor &weight,
    IntArrayRef kernel_size,
    const c10::optional<Tensor> &bias,
    IntArrayRef stride,
    IntArrayRef padding) {
  auto output = at::empty({0}, self.options());
  auto finput = at::empty({0}, self.options());
  return slow_conv2d_forward_out_cuda(
      self, weight, kernel_size, bias, stride, padding, output, finput);
}

std::tuple<Tensor&, Tensor&, Tensor&> slow_conv2d_backward_out_cuda(
    const Tensor& grad_output_,
    const Tensor& self_,
    const Tensor& weight_,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    const Tensor& finput,
    Tensor& grad_input,
    Tensor& grad_weight,
    Tensor& grad_bias) {
  auto grad_output = grad_output_.expect_contiguous();

  if (grad_input.defined()) {
    resize_output(grad_input, self_.sizes());
    auto weight = weight_.expect_contiguous();
    slow_conv2d_backward(
        self_, *grad_output, grad_input, *weight, finput,
        kernel_size[0], kernel_size[1],
        stride[0], stride[1],
        padding[0], padding[1]);
  }

  if (grad_bias.defined()) {
    at::sum_out(grad_bias, *grad_output, IntArrayRef{0, 2, 3});
  }

  if (grad_weight.defined()) {
    resize_output(grad_weight, weight_.sizes());
    grad_weight.zero_();
    auto self = self_.expect_contiguous();
    slow_conv2d_grad_weight(
        *self, *grad_output, grad_weight, finput,
        kernel_size[0], kernel_size[1],
        stride[0], stride[1],
        padding[0], padding[1]
    );
  }
  return std::tuple<Tensor&, Tensor&, Tensor&>{
      grad_input, grad_weight, grad_bias};
}

std::tuple<Tensor, Tensor, Tensor> slow_conv2d_backward_cuda(
    const Tensor& grad_output,
    const Tensor& self,
    const Tensor& weight,
    IntArrayRef kernel_size,
    IntArrayRef stride,
    IntArrayRef padding,
    const Tensor& finput,
    std::array<bool, 3> output_mask) {
  Tensor grad_input;
  Tensor grad_weight;
  Tensor grad_bias;

  if (output_mask[0]) {
    grad_input = at::empty({0}, grad_output.options());
  }

  if (output_mask[1]) {
    grad_weight = at::empty({0}, grad_output.options());
  }

  if (output_mask[2]) {
    grad_bias = at::empty({0}, grad_output.options());
  }

  return native::slow_conv2d_backward_out_cuda(
      grad_output, self, weight, kernel_size, stride, padding,
      finput, grad_input, grad_weight, grad_bias);
}

} // namespace native
} // namespace at
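// ----------------------------------------------------------------------------
// Illustration: a minimal, standalone host-side sketch of the im2col + GEMM
// formulation that slow_conv2d_grad_weight relies on above. With columns of
// shape [nInputPlane*kH*kW, outputHeight*outputWidth], a convolution becomes a
// single matrix multiply over the unrolled patches. The helper below
// (im2col_cpu) is a hypothetical toy, not the ATen API; it assumes a
// single-channel input, stride 1, and no padding.
// ----------------------------------------------------------------------------
#include <cstdio>
#include <vector>

static void im2col_cpu(const std::vector<float>& in, int H, int W,
                       int kH, int kW, std::vector<float>& cols) {
  const int outH = H - kH + 1, outW = W - kW + 1;
  cols.assign(static_cast<size_t>(kH) * kW * outH * outW, 0.f);
  for (int ki = 0; ki < kH; ++ki)
    for (int kj = 0; kj < kW; ++kj)
      for (int oi = 0; oi < outH; ++oi)
        for (int oj = 0; oj < outW; ++oj)
          // Row (ki,kj) of the column matrix holds the input value each
          // output pixel sees at that kernel offset.
          cols[((ki * kW + kj) * outH + oi) * outW + oj] =
              in[(oi + ki) * W + (oj + kj)];
}

int main() {
  const int H = 4, W = 4, kH = 2, kW = 2;
  const int outH = H - kH + 1, outW = W - kW + 1;  // 3 x 3 output
  std::vector<float> x(H * W);
  for (int i = 0; i < H * W; ++i) x[i] = static_cast<float>(i);
  std::vector<float> cols;
  im2col_cpu(x, H, W, kH, kW, cols);
  const float w[kH * kW] = {1.f, 0.f, 0.f, -1.f};  // toy 2x2 kernel
  // Convolution as GEMM: y (1 x outH*outW) = w (1 x kH*kW) * cols.
  for (int p = 0; p < outH * outW; ++p) {
    float acc = 0.f;
    for (int k = 0; k < kH * kW; ++k) acc += w[k] * cols[k * outH * outW + p];
    std::printf("%s%6.1f", p % outW ? " " : "\n", acc);  // -5.0 everywhere
  }
  std::printf("\n");
  return 0;
}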
#include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #define FORCE_CPU_VERSION #include "BufferEntryUtils.h" #undef FORCE_CPU_VERSION namespace { template <class K, class V, class I> std::vector<uint32_t> do_radix_sort(const ExecutorDeviceType device_type, ThrustAllocator& thrust_allocator, const int8_t* groupby_buffer, V dev_oe_col_buffer_begin, V dev_oe_col_buffer_end, I dev_idx_buff_begin, const size_t dev_idx_buff_size, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n) { if (dev_idx_buff_size == 0) { return {}; } if (oe.is_desc) { if (device_type == ExecutorDeviceType::GPU) { thrust::sort_by_key(thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>()); } else { thrust::sort_by_key(dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>()); } } else { if (device_type == ExecutorDeviceType::GPU) { thrust::sort_by_key(thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); } else { thrust::sort_by_key( dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); } } // Speculatively transfer only the top_n first, most of the time it'll be enough. thrust::host_vector<uint32_t> host_vector_result( dev_idx_buff_begin, dev_idx_buff_begin + std::min(top_n, dev_idx_buff_size)); // Sometimes, radix sort can bring to the front entries which are empty. // For example, ascending sort on COUNT(*) will bring non-existent groups // to the front of dev_idx_buff since they're 0 in our system. Re-do the // transfer in that case to bring the entire dev_idx_buff; existing logic // in row iteration will take care of skipping the empty rows. for (size_t i = 0; i < host_vector_result.size(); ++i) { const auto entry_idx = host_vector_result[i]; if (is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) { host_vector_result = thrust::host_vector<uint32_t>( dev_idx_buff_begin, dev_idx_buff_begin + dev_idx_buff_size); break; } } std::vector<uint32_t> result; result.reserve(std::min(top_n, host_vector_result.size())); for (size_t i = 0; i < host_vector_result.size(); ++i) { const auto entry_idx = host_vector_result[i]; if (!is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) { result.push_back(entry_idx); if (result.size() >= top_n) { break; } } } return result; } void add_nulls(std::vector<uint32_t>& idx_buff, const std::vector<uint32_t>& null_idx_buff, const PodOrderEntry& oe) { if (null_idx_buff.empty()) { return; } const auto insertion_point = oe.nulls_first ? 
      idx_buff.begin() : idx_buff.end();
  idx_buff.insert(insertion_point, null_idx_buff.begin(), null_idx_buff.end());
}

template <typename T>
thrust::device_ptr<T> get_device_copy_ptr(const thrust::host_vector<T>& host_vec,
                                          ThrustAllocator& thrust_allocator) {
  if (host_vec.empty()) {
    return thrust::device_ptr<T>(static_cast<T*>(nullptr));
  }
  const auto host_vec_bytes = host_vec.size() * sizeof(T);
  T* dev_ptr = reinterpret_cast<T*>(
      thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes)));
  copy_to_nvidia_gpu(thrust_allocator.getDataMgr(),
                     reinterpret_cast<CUdeviceptr>(dev_ptr),
                     &host_vec[0],
                     host_vec_bytes,
                     thrust_allocator.getDeviceId());
  return thrust::device_ptr<T>(dev_ptr);
}

template <class K>
std::vector<uint32_t> baseline_sort_fp(const ExecutorDeviceType device_type,
                                       const int device_id,
                                       Data_Namespace::DataMgr* data_mgr,
                                       const int8_t* groupby_buffer,
                                       const thrust::host_vector<int64_t>& oe_col_buffer,
                                       const PodOrderEntry& oe,
                                       const GroupByBufferLayoutInfo& layout,
                                       const size_t top_n,
                                       const size_t start,
                                       const size_t step) {
  thrust::host_vector<uint32_t> neg_idx_buff;
  thrust::host_vector<uint32_t> pos_idx_buff;
  std::vector<uint32_t> null_idx_buff;
  thrust::host_vector<int64_t> neg_oe_col_buffer;
  thrust::host_vector<int64_t> pos_oe_col_buffer;
  const auto slice_entry_count =
      layout.entry_count / step + (layout.entry_count % step ? 1 : 0);
  neg_idx_buff.reserve(slice_entry_count);
  pos_idx_buff.reserve(slice_entry_count);
  null_idx_buff.reserve(slice_entry_count);
  neg_oe_col_buffer.reserve(slice_entry_count);
  pos_oe_col_buffer.reserve(slice_entry_count);
  size_t oe_col_buffer_idx = 0;
  const auto& oe_info = layout.oe_target_info;
  const auto col_ti =
      oe_info.agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false) : oe_info.sql_type;
  // Exclude AVG b/c collect_order_entry_column already makes its pair collapse
  // into a double
  const bool float_argument_input =
      takes_float_argument(oe_info) && oe_info.agg_kind != kAVG;
  auto is_negative = float_argument_input ?
[](const int64_t v) -> bool { return (v & (1 << 31)) != 0; } : [](const int64_t v) -> bool { return v < 0; }; for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) && oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(col_ti, float_argument_input)) { null_idx_buff.push_back(i); continue; } if (is_negative(oe_col_buffer[oe_col_buffer_idx])) { // sign bit works the same for // integer and floating point neg_idx_buff.push_back(i); neg_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } else { pos_idx_buff.push_back(i); pos_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } } std::vector<uint32_t> pos_result; ThrustAllocator thrust_allocator(data_mgr, device_id); if (device_type == ExecutorDeviceType::GPU) { const auto dev_pos_idx_buff = get_device_copy_ptr(pos_idx_buff, thrust_allocator); const auto dev_pos_oe_col_buffer = get_device_copy_ptr(pos_oe_col_buffer, thrust_allocator); pos_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_pos_oe_col_buffer, dev_pos_oe_col_buffer + pos_oe_col_buffer.size(), dev_pos_idx_buff, pos_idx_buff.size(), oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); pos_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, pos_oe_col_buffer.begin(), pos_oe_col_buffer.end(), pos_idx_buff.begin(), pos_idx_buff.size(), oe, layout, top_n); } std::vector<uint32_t> neg_result; PodOrderEntry reverse_oe{oe.tle_no, !oe.is_desc, oe.nulls_first}; if (device_type == ExecutorDeviceType::GPU) { const auto dev_neg_idx_buff = get_device_copy_ptr(neg_idx_buff, thrust_allocator); const auto dev_neg_oe_col_buffer = get_device_copy_ptr(neg_oe_col_buffer, thrust_allocator); neg_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_neg_oe_col_buffer, dev_neg_oe_col_buffer + neg_oe_col_buffer.size(), dev_neg_idx_buff, neg_idx_buff.size(), reverse_oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); neg_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, neg_oe_col_buffer.begin(), neg_oe_col_buffer.end(), neg_idx_buff.begin(), neg_idx_buff.size(), reverse_oe, layout, top_n); } if (oe.is_desc) { pos_result.insert(pos_result.end(), neg_result.begin(), neg_result.end()); add_nulls(pos_result, null_idx_buff, oe); return pos_result; } neg_result.insert(neg_result.end(), pos_result.begin(), pos_result.end()); add_nulls(neg_result, null_idx_buff, oe); return neg_result; } template <class K> std::vector<uint32_t> baseline_sort_int(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const thrust::host_vector<int64_t>& oe_col_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { const auto& entry_ti = get_compact_type(layout.oe_target_info); std::vector<uint32_t> null_idx_buff; thrust::host_vector<uint32_t> notnull_idx_buff; const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 
1 : 0); null_idx_buff.reserve(slice_entry_count); notnull_idx_buff.reserve(slice_entry_count); thrust::host_vector<int64_t> notnull_oe_col_buffer; notnull_oe_col_buffer.reserve(slice_entry_count); size_t oe_col_buffer_idx = 0; for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) && oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(entry_ti, false)) { null_idx_buff.push_back(i); } else { notnull_idx_buff.push_back(i); notnull_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } } std::vector<uint32_t> notnull_result; ThrustAllocator thrust_allocator(data_mgr, device_id); if (device_type == ExecutorDeviceType::GPU) { const auto dev_notnull_idx_buff = get_device_copy_ptr(notnull_idx_buff, thrust_allocator); const auto dev_notnull_oe_col_buffer = get_device_copy_ptr(notnull_oe_col_buffer, thrust_allocator); notnull_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_notnull_oe_col_buffer, dev_notnull_oe_col_buffer + notnull_oe_col_buffer.size(), dev_notnull_idx_buff, notnull_idx_buff.size(), oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); notnull_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, notnull_oe_col_buffer.begin(), notnull_oe_col_buffer.end(), notnull_idx_buff.begin(), notnull_idx_buff.size(), oe, layout, top_n); } add_nulls(notnull_result, null_idx_buff, oe); return notnull_result; } template <class K> thrust::host_vector<int64_t> collect_order_entry_column( const int8_t* groupby_buffer, const GroupByBufferLayoutInfo& layout, const size_t start, const size_t step) { thrust::host_vector<int64_t> oe_col_buffer; const auto row_ptr = groupby_buffer + start * layout.row_bytes; auto crt_group_ptr1 = layout.target_groupby_index >= 0 ? row_ptr + layout.target_groupby_index * sizeof(K) : row_ptr + layout.col_off; const int8_t* crt_group_ptr2{nullptr}; if (layout.oe_target_info.agg_kind == kAVG) { crt_group_ptr2 = crt_group_ptr1 + layout.col_bytes; } const auto entry_ti = get_compact_type(layout.oe_target_info); const bool float_argument_input = takes_float_argument(layout.oe_target_info); const auto step_bytes = layout.row_bytes * step; const auto col_bytes = float_argument_input ? entry_ti.get_size() : layout.col_bytes; for (size_t i = start; i < layout.entry_count; i += step) { auto val1 = read_int_from_buff(crt_group_ptr1, col_bytes > 0 ? 
col_bytes : sizeof(K)); if (crt_group_ptr2) { const auto val2 = read_int_from_buff(crt_group_ptr2, 8); const auto avg_val = pair_to_double({val1, val2}, entry_ti, float_argument_input); val1 = *reinterpret_cast<const int64_t*>(&avg_val); } oe_col_buffer.push_back(val1); crt_group_ptr1 += step_bytes; if (crt_group_ptr2) { crt_group_ptr2 += step_bytes; } } return oe_col_buffer; } } // namespace template <class K> std::vector<uint32_t> baseline_sort(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { auto oe_col_buffer = collect_order_entry_column<K>(groupby_buffer, layout, start, step); const auto& entry_ti = get_compact_type(layout.oe_target_info); CHECK(entry_ti.is_number()); if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) { return baseline_sort_fp<K>(device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); } // Because of how we represent nulls for integral types, they'd be at the // wrong position in these two cases. Separate them into a different buffer. if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) { return baseline_sort_int<K>(device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); } ThrustAllocator thrust_allocator(data_mgr, device_id); // Fastest path, no need to separate nulls away since they'll end up at the // right place as a side effect of how we're representing nulls. if (device_type == ExecutorDeviceType::GPU) { if (oe_col_buffer.empty()) { return {}; } const auto dev_idx_buff = get_device_ptr<uint32_t>(oe_col_buffer.size(), thrust_allocator); thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step); const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator); return do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_oe_col_buffer, dev_oe_col_buffer + oe_col_buffer.size(), dev_idx_buff, oe_col_buffer.size(), oe, layout, top_n); } CHECK(device_type == ExecutorDeviceType::CPU); thrust::host_vector<uint32_t> host_idx_buff(oe_col_buffer.size()); thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step); return do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, oe_col_buffer.begin(), oe_col_buffer.end(), host_idx_buff.begin(), host_idx_buff.size(), oe, layout, top_n); } template std::vector<uint32_t> baseline_sort<int32_t>( const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step); template std::vector<uint32_t> baseline_sort<int64_t>( const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step);
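/* ----------------------------------------------------------------------------
 * Illustration: why baseline_sort_fp above splits values into positive and
 * negative buffers and sorts the negatives with reverse_oe. The order-by
 * column is read into int64 buffers, so thrust compares raw bit patterns as
 * integers. For IEEE-754 floats the bit patterns of non-negative values order
 * like the values themselves, while the bit patterns of negative values order
 * in reverse. A minimal standalone check (hypothetical as_i32 helper):
 * ------------------------------------------------------------------------- */
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  auto as_i32 = [](float f) {
    int32_t i;
    std::memcpy(&i, &f, sizeof i);  // well-defined type pun
    return i;
  };
  // Non-negative range: integer order on the bit patterns == value order.
  assert(as_i32(0.5f) < as_i32(1.0f) && as_i32(1.0f) < as_i32(2.0f));
  // Negative range: -2.0f < -1.0f, yet its bit pattern compares greater, so a
  // plain ascending integer sort would emit -1.0 before -2.0.
  assert(-2.0f < -1.0f && as_i32(-2.0f) > as_i32(-1.0f));
  // Hence the negative buffer is radix-sorted with the flipped direction and
  // concatenated with the positive result; bit 31 (checked by is_negative)
  // tells the two ranges apart for float inputs.
  return 0;
}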
namespace tl = treelite; /* All functions and classes defined in this anonymous namespace are strictly * for internal use by GPUTreeSHAP. */ namespace { template <typename ThresholdType> struct SplitCondition { SplitCondition() = default; SplitCondition(ThresholdType feature_lower_bound, ThresholdType feature_upper_bound, tl::Operator comparison_op) : feature_lower_bound(feature_lower_bound), feature_upper_bound(feature_upper_bound), comparison_op(comparison_op) { if (feature_lower_bound > feature_upper_bound) { RAFT_FAIL("Lower bound cannot exceed upper bound"); } if (comparison_op != tl::Operator::kLT && comparison_op != tl::Operator::kLE && comparison_op != tl::Operator::kNone) { RAFT_FAIL("Unsupported comparison operator"); } } // Lower and upper bounds on feature values flowing down this path ThresholdType feature_lower_bound; ThresholdType feature_upper_bound; // Comparison operator used in the test. For now only < (kLT) and <= (kLE) // are supported. tl::Operator comparison_op; // Does this instance flow down this path? __host__ __device__ bool EvaluateSplit(ThresholdType x) const { if (comparison_op == tl::Operator::kLE) { return x > feature_lower_bound && x <= feature_upper_bound; } return x >= feature_lower_bound && x < feature_upper_bound; } // Combine two split conditions on the same feature __host__ __device__ void Merge(const SplitCondition& other) { // Combine duplicate features feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound); feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound); } static_assert(std::is_same<ThresholdType, float>::value || std::is_same<ThresholdType, double>::value, "ThresholdType must be a float or double"); }; template <typename ThresholdType> class TreePathInfoImpl : public ML::Explainer::TreePathInfo { public: ThresholdTypeEnum threshold_type; int num_tree; float global_bias; tl::TaskType task_type; tl::TaskParam task_param; bool average_tree_output; std::vector<gpu_treeshap::PathElement<SplitCondition<ThresholdType>>> paths; static_assert(std::is_same<ThresholdType, float>::value || std::is_same<ThresholdType, double>::value, "ThresholdType must be a float or double"); TreePathInfoImpl() { if constexpr (std::is_same<ThresholdType, double>::value) { threshold_type = ThresholdTypeEnum::kDouble; } else { threshold_type = ThresholdTypeEnum::kFloat; } } virtual ~TreePathInfoImpl() = default; ThresholdTypeEnum GetThresholdType() const override { return threshold_type; } }; class DenseDatasetWrapper { const float* data; std::size_t num_rows; std::size_t num_cols; public: DenseDatasetWrapper() = default; DenseDatasetWrapper(const float* data, int num_rows, int num_cols) : data(data), num_rows(num_rows), num_cols(num_cols) { } __device__ float GetElement(std::size_t row_idx, std::size_t col_idx) const { return data[row_idx * num_cols + col_idx]; } __host__ __device__ std::size_t NumRows() const { return num_rows; } __host__ __device__ std::size_t NumCols() const { return num_cols; } }; template <typename ThresholdType> void gpu_treeshap_impl(const TreePathInfoImpl<ThresholdType>* path_info, const float* data, std::size_t n_rows, std::size_t n_cols, float* out_preds) { DenseDatasetWrapper X(data, n_rows, n_cols); std::size_t num_groups = 1; if (path_info->task_type == tl::TaskType::kMultiClfGrovePerClass && path_info->task_param.num_class > 1) { num_groups = static_cast<std::size_t>(path_info->task_param.num_class); } std::size_t pred_size = n_rows * num_groups * (n_cols + 1); thrust::device_ptr<float> out_preds_ptr 
= thrust::device_pointer_cast(out_preds); gpu_treeshap::GPUTreeShap(X, path_info->paths.begin(), path_info->paths.end(), num_groups, out_preds_ptr, out_preds_ptr + pred_size); // Post-processing auto count_iter = thrust::make_counting_iterator(0); auto num_tree = path_info->num_tree; auto global_bias = path_info->global_bias; if (path_info->average_tree_output) { thrust::for_each( thrust::device, count_iter, count_iter + pred_size, [=] __device__(std::size_t idx) { out_preds[idx] /= num_tree; }); } thrust::for_each( thrust::device, count_iter, count_iter + (n_rows * num_groups), [=] __device__(std::size_t idx) { out_preds[(idx + 1) * (n_cols + 1) - 1] += global_bias; }); } } // anonymous namespace namespace ML { namespace Explainer { template <typename ThresholdType, typename LeafType> std::unique_ptr<TreePathInfo> extract_path_info_impl( const tl::ModelImpl<ThresholdType, LeafType>& model) { if (!std::is_same<ThresholdType, LeafType>::value) { RAFT_FAIL("ThresholdType and LeafType must be identical"); } if (model.task_type != tl::TaskType::kBinaryClfRegr && model.task_type != tl::TaskType::kMultiClfGrovePerClass) { RAFT_FAIL("cuML RF / scikit-learn classifiers are not yet supported"); } std::unique_ptr<TreePathInfo> path_info_ptr = std::make_unique<TreePathInfoImpl<ThresholdType>>(); auto* path_info = dynamic_cast<TreePathInfoImpl<ThresholdType>*>(path_info_ptr.get()); std::size_t path_idx = 0; int tree_idx = 0; int num_groups = 1; if (model.task_type == tl::TaskType::kMultiClfGrovePerClass && model.task_param.num_class > 1) { num_groups = model.task_param.num_class; } for (const tl::Tree<ThresholdType, LeafType>& tree : model.trees) { int group_id = tree_idx % num_groups; std::vector<int> parent_id(tree.num_nodes, -1); // Compute parent ID of each node for (int i = 0; i < tree.num_nodes; i++) { if (!tree.IsLeaf(i)) { parent_id[tree.LeftChild(i)] = i; parent_id[tree.RightChild(i)] = i; } } // Find leaf nodes // Work backwards from leaf to root, order does not matter // It's also possible to work from root to leaf for (int i = 0; i < tree.num_nodes; i++) { if (tree.IsLeaf(i)) { auto v = static_cast<float>(tree.LeafValue(i)); int child_idx = i; int parent_idx = parent_id[child_idx]; constexpr auto inf = std::numeric_limits<ThresholdType>::infinity(); tl::Operator comparison_op = tl::Operator::kNone; while (parent_idx != -1) { double zero_fraction = 1.0; bool has_count_info = false; if (tree.HasSumHess(parent_idx) && tree.HasSumHess(child_idx)) { zero_fraction = static_cast<double>(tree.SumHess(child_idx) / tree.SumHess(parent_idx)); has_count_info = true; } if (tree.HasDataCount(parent_idx) && tree.HasDataCount(child_idx)) { zero_fraction = static_cast<double>(tree.DataCount(child_idx)) / tree.DataCount(parent_idx); has_count_info = true; } if (!has_count_info) { RAFT_FAIL("Tree model doesn't have data count information"); } // Encode the range of feature values that flow down this path bool is_left_path = tree.LeftChild(parent_idx) == child_idx; if (tree.SplitType(parent_idx) == tl::SplitFeatureType::kCategorical) { RAFT_FAIL( "Only trees with numerical splits are supported. " "Trees with categorical splits are not supported yet."); } ThresholdType lower_bound = is_left_path ? -inf : tree.Threshold(parent_idx); ThresholdType upper_bound = is_left_path ? 
tree.Threshold(parent_idx) : inf; comparison_op = tree.ComparisonOp(parent_idx); path_info->paths.push_back(gpu_treeshap::PathElement<SplitCondition<ThresholdType>>{ path_idx, tree.SplitIndex(parent_idx), group_id, SplitCondition{lower_bound, upper_bound, comparison_op}, zero_fraction, v}); child_idx = parent_idx; parent_idx = parent_id[child_idx]; } // Root node has feature -1 comparison_op = tree.ComparisonOp(child_idx); path_info->paths.push_back(gpu_treeshap::PathElement<SplitCondition<ThresholdType>>{ path_idx, -1, group_id, SplitCondition{-inf, inf, comparison_op}, 1.0, v}); path_idx++; } } tree_idx++; } path_info->global_bias = model.param.global_bias; path_info->task_type = model.task_type; path_info->task_param = model.task_param; path_info->average_tree_output = model.average_tree_output; path_info->num_tree = static_cast<int>(model.trees.size()); return path_info_ptr; } std::unique_ptr<TreePathInfo> extract_path_info(ModelHandle model) { const tl::Model& model_ref = *static_cast<tl::Model*>(model); return model_ref.Dispatch([&](const auto& model_inner) { // model_inner is of the concrete type tl::ModelImpl<threshold_t, leaf_t> return extract_path_info_impl(model_inner); }); } void gpu_treeshap(const TreePathInfo* path_info, const float* data, std::size_t n_rows, std::size_t n_cols, float* out_preds) { switch (path_info->GetThresholdType()) { case TreePathInfo::ThresholdTypeEnum::kDouble: { const auto* path_info_casted = dynamic_cast<const TreePathInfoImpl<double>*>(path_info); gpu_treeshap_impl(path_info_casted, data, n_rows, n_cols, out_preds); } break; case TreePathInfo::ThresholdTypeEnum::kFloat: default: { const auto* path_info_casted = dynamic_cast<const TreePathInfoImpl<float>*>(path_info); gpu_treeshap_impl(path_info_casted, data, n_rows, n_cols, out_preds); } break; } } } // namespace Explainer } // namespace ML
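/* ----------------------------------------------------------------------------
 * Illustration: SplitCondition::Merge above intersects the feature-value
 * intervals of two splits on the same feature, so a path that tests x < 5 and
 * later x < 3 collapses to (-inf, 3). A minimal standalone sketch of that
 * interval algebra (Interval is a hypothetical stand-in, kLT semantics):
 * ------------------------------------------------------------------------- */
#include <algorithm>
#include <cassert>
#include <limits>

struct Interval {
  float lo, hi;  // values in [lo, hi) flow down this path
  void merge(const Interval& o) {  // mirrors SplitCondition::Merge
    lo = std::max(lo, o.lo);
    hi = std::min(hi, o.hi);
  }
  bool contains(float x) const { return x >= lo && x < hi; }
};

int main() {
  const float inf = std::numeric_limits<float>::infinity();
  Interval a{-inf, 5.f};  // left child of a "x < 5" split
  Interval b{-inf, 3.f};  // left child of a later "x < 3" split, same feature
  a.merge(b);             // duplicate features collapse to the intersection
  assert(a.contains(2.f) && !a.contains(4.f));
  return 0;
}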
using namespace facebook::cuda; namespace facebook { namespace deeplearning { namespace torch { namespace detail { extern __shared__ float pShared[]; template <int BatchSize, typename T> __launch_bounds__(256, 6) __global__ void updateOutputBatch(DeviceTensor<T, 4> input, DeviceTensor<T, 6> weight, DeviceTensor<float, 3> bias, DeviceTensor<float, 4> output, int dH, int dW) { int outputRow = blockIdx.z; int outputCol = blockIdx.y * blockDim.y + threadIdx.y; int outputPlane = threadIdx.x / input.getSize(kPlaneDim); int inputRow = outputRow * dH; int inputCol = outputCol * dW; int inputPlane = threadIdx.x % input.getSize(kPlaneDim); int inputSizeSMEM[3] = {blockDim.y, BatchSize, input.getSize(kPlaneDim)}; DeviceTensor<T, 3> inputSMEM(reinterpret_cast<T*>(pShared), inputSizeSMEM); // compute offset to end of inputSMEM int offsetSMEM = inputSizeSMEM[0] * inputSizeSMEM[1] * inputSizeSMEM[2] * sizeof(T) / sizeof(float); // create shared memory bias tensor int biasSizeSMEM[2] = {blockDim.y, output.getSize(kPlaneDim)}; DeviceTensor<float, 2> biasSMEM(pShared + offsetSMEM, biasSizeSMEM); float vSum[BatchSize]; if (outputCol < output.getSize(kWidthDim)) { // guard right-edge // stage biases into shared if (threadIdx.x < output.getSize(kPlaneDim)) { biasSMEM[threadIdx.y][threadIdx.x] = bias[outputRow][outputCol] [threadIdx.x]; } for (int batch = 0; batch < BatchSize; ++batch) { vSum[batch] = 0.0f; } for (int kernelRow = 0; kernelRow < weight.getSize(kKernelHeightDim); ++kernelRow) { for (int kernelCol = 0; kernelCol < weight.getSize(kKernelWidthDim); ++kernelCol) { T w = weight[outputRow][outputCol][kernelRow][kernelCol] [outputPlane][inputPlane]; // use output-plane tiling to iterate images for (int image = outputPlane; image < BatchSize; image += output.getSize(kPlaneDim)) { inputSMEM[threadIdx.y][image][inputPlane] = input[image] [inputRow + kernelRow][inputCol + kernelCol][inputPlane].ldg(); } __syncthreads(); for (int image = 0; image < BatchSize; ++image) { T in = inputSMEM[threadIdx.y][image][inputPlane]; vSum[image] += dot(in, w); } __syncthreads(); } } for (int delta = 1; delta < input.getSize(kPlaneDim); delta *= 2) { for (int batch = 0; batch < BatchSize; ++batch) { vSum[batch] += __shfl_down(vSum[batch], delta); } } if (inputPlane == 0) { for (int batch = 0; batch < BatchSize; ++batch) { output[batch][outputRow][outputCol][outputPlane] = vSum[batch] + biasSMEM[threadIdx.y][outputPlane]; } } } // right-edge guard } template <int BatchSize, typename T> __global__ void updateOutputBatch(DeviceTensor<T, 4> input, DeviceTensor<T, 6> weight, DeviceTensor<float, 3> bias, DeviceTensor<float, 4> output, int dH, int dW, int inputPlaneThreads) { int outputRow = blockIdx.z; int outputCol = blockIdx.y * blockDim.y + threadIdx.y; int outputPlane = threadIdx.x / inputPlaneThreads; int inputRow = outputRow * dH; int inputCol = outputCol * dW; int inputThread = threadIdx.x % inputPlaneThreads; float vSum[BatchSize]; if (outputCol < output.getSize(kWidthDim)) { // guard right-edge for (int batch = 0; batch < BatchSize; ++batch) { vSum[batch] = 0.0f; } for (int kernelRow = 0; kernelRow < weight.getSize(kKernelHeightDim); ++kernelRow) { for (int kernelCol = 0; kernelCol < weight.getSize(kKernelWidthDim); ++kernelCol) { for (int inputPlane = inputThread; inputPlane < input.getSize(kPlaneDim); inputPlane += inputPlaneThreads) { T w = weight[outputRow][outputCol][kernelRow][kernelCol] [outputPlane][inputPlane]; for (int batch = 0; batch < BatchSize; ++batch) { T in = input[batch][inputRow + kernelRow] [inputCol 
+ kernelCol][inputPlane]; vSum[batch] += dot(in, w); } } } } for (int delta = 1; delta < inputPlaneThreads; delta *= 2) { for (int batch = 0; batch < BatchSize; ++batch) { vSum[batch] += __shfl_down(vSum[batch], delta); } } if (inputThread == 0) { for (int batch = 0; batch < BatchSize; ++batch) { output[batch][outputRow][outputCol][outputPlane] = vSum[batch] + bias[outputRow][outputCol][outputPlane]; } } } // right-edge guard } // Dispatch based on input- and output-planes being powers of two // in which case an optimized version of the kernel can be used. // template <int BatchSize, typename T> void dispatchUpdateOutputPlanePOT(cudaStream_t stream, DeviceTensor<T, 4> input, DeviceTensor<T, 6> weight, DeviceTensor<float, 3> bias, DeviceTensor<float, 4> output, int dH, int dW) { const int kBlockSize = 256; // threads int inputPlaneThreads = kBlockSize / output.getSize(kPlaneDim); // The following conditions force the catch-all slow path: // // inputPlaneThreads < inputPlanes: This condition indicates that the // total number of threads per pixel column (i.e. // outputPlanes * inputPlanes) is greater than kBlockSize. In this case // the iterating kernel is necessary to cover all inputs by looping. // // outputPlanes and inputPlanes must be powers of two: inputPlanes must // be a power of two for the in-warp shuffle reductions to work. In order // for the input plane threads to properly align (when multiple columns // are processed by a single CTA), outputPlanes must also be a power of two. // // inputPlanes > 32: All input-plane threads must belong to the same warp // in order for in-warp shuffle reductions to work. if (inputPlaneThreads < input.getSize(kPlaneDim) || !isPowerOfTwo(output.getSize(kPlaneDim)) || !isPowerOfTwo(input.getSize(kPlaneDim)) || input.getSize(kPlaneDim) > 32) { inputPlaneThreads = std::min(32, inputPlaneThreads); inputPlaneThreads = greatestPowerOfTwoLessEq(inputPlaneThreads); dim3 block(output.getSize(kPlaneDim) * inputPlaneThreads); dim3 grid(1, output.getSize(kWidthDim), output.getSize(kHeightDim)); updateOutputBatch<BatchSize, T><<<grid, block, 0, stream>>>( input, weight, bias, output, dH, dW, inputPlaneThreads); } else { const int totalPlanes = input.getSize(kPlaneDim) * output.getSize(kPlaneDim); dim3 block(totalPlanes, kBlockSize / totalPlanes); dim3 grid(1, cuda::ceil(output.getSize(kWidthDim), static_cast<int>(block.y)), output.getSize(kHeightDim)); // smem for input caching int smem = block.y * BatchSize * input.getSize(kPlaneDim) * sizeof(T); // smem for bias caching smem += block.y * output.getSize(kPlaneDim) * sizeof(float); updateOutputBatch<BatchSize, T><<<grid, block, smem, stream>>>( input, weight, bias, output, dH, dW); } } // Dispatch updateOutput implementations depending on the possible degree // of in-thread-parallelism. 
template <int BatchSize>
void dispatchUpdateOutputITP(cudaStream_t stream,
                             DeviceTensor<float, 4> input,
                             DeviceTensor<float, 6> weight,
                             DeviceTensor<float, 3> bias,
                             DeviceTensor<float, 4> output,
                             int dH, int dW) {
  // determine if float4 based (16-byte) data reading is possible
  if (input.getSize(kPlaneDim) % 4 == 0 &&
      isAligned(input.data(), sizeof(float4)) &&
      kFloat4Optimization) {
    // create float4 based input tensor
    DeviceTensor<float4, 4> input4 = convertImageBatch<float4>(input);
    // create float4 based weight tensor
    DeviceTensor<float4, 6> weight4 = convertWeight<float4>(weight);
    dispatchUpdateOutputPlanePOT<BatchSize, float4>(
      stream, input4, weight4, bias, output, dH, dW);
    // determine if float2 based (8-byte) data reading is possible
  } else if (input.getSize(kPlaneDim) % 2 == 0 &&
             isAligned(input.data(), sizeof(float2)) &&
             kFloat2Optimization) {
    // create float2 based input tensor
    DeviceTensor<float2, 4> input2 = convertImageBatch<float2>(input);
    // create float2 based weight tensor
    DeviceTensor<float2, 6> weight2 = convertWeight<float2>(weight);
    dispatchUpdateOutputPlanePOT<BatchSize, float2>(
      stream, input2, weight2, bias, output, dH, dW);
  } else {
    dispatchUpdateOutputPlanePOT<BatchSize, float>(
      stream, input, weight, bias, output, dH, dW);
  }
}

#define UPDATE_OUTPUT_CASE(BatchSize) case BatchSize:                       \
  dispatchUpdateOutputITP<BatchSize>(stream, input, weight, bias, output,   \
                                     dH, dW);                               \
  break

// Dispatcher function that binds the batchSize, which must be a power-of-two
// (POT) to a function template with the batch size baked in.
void updateOutputBatchPOT(cudaStream_t stream,
                          DeviceTensor<float, 4> input,
                          DeviceTensor<float, 6> weight,
                          DeviceTensor<float, 3> bias,
                          DeviceTensor<float, 4> output,
                          int batchSize, int dH, int dW) {
  switch (batchSize) {
    UPDATE_OUTPUT_CASE(128);
    UPDATE_OUTPUT_CASE(64);
    UPDATE_OUTPUT_CASE(32);
    UPDATE_OUTPUT_CASE(16);
    UPDATE_OUTPUT_CASE(8);
    UPDATE_OUTPUT_CASE(4);
    UPDATE_OUTPUT_CASE(2);
    UPDATE_OUTPUT_CASE(1);
    default:
      assert(false); // input validation, for debugging only
  }
}

void locallyConnectedUpdateOutput(cudaStream_t stream,
                                  const float* input,
                                  const float* weight,
                                  const float* bias,
                                  float* output,
                                  LocallyConnectedParam& params) {
  int weightSize[6] = {params.outputHeight, params.outputWidth,
                       params.kernelHeight, params.kernelWidth,
                       params.outputPlanes, params.inputPlanes};
  DeviceTensor<float, 6> cudaWeight(const_cast<float*>(weight), weightSize);
  int biasSize[3] = {params.outputHeight, params.outputWidth,
                     params.outputPlanes};
  DeviceTensor<float, 3> cudaBias(const_cast<float*>(bias), biasSize);
  long batchIdx = 0;
  // Iterate images in the batch, processing successively smaller
  // sub-batches.
  // maxBatchSize is the biggest sub-batch size. This should be picked
  // based on the performance of the underlying kernels for each batch
  // size. Performance of the kernels increases with batch size up to a
  // point. Sub-batch sizes must be powers of two.
int maxBatchSize = 16; int inputSize[4] = {maxBatchSize, params.inputHeight, params.inputWidth, params.inputPlanes}; int outputSize[4] = {maxBatchSize, params.outputHeight, params.outputWidth, params.outputPlanes}; while (maxBatchSize > 0) { while (batchIdx < (params.batchSize / maxBatchSize) * maxBatchSize) { DeviceTensor<float, 4> cudaInput(const_cast<float*>(input), inputSize); DeviceTensor<float, 4> cudaOutput(output, outputSize); updateOutputBatchPOT(stream, cudaInput, cudaWeight, cudaBias, cudaOutput, maxBatchSize, params.dH, params.dW); batchIdx += maxBatchSize; input += cudaInput.numElements(); output += cudaOutput.numElements(); } maxBatchSize /= 2; inputSize[0] = maxBatchSize; outputSize[0] = maxBatchSize; } } } // detail namespace }}} // namespaces
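// ----------------------------------------------------------------------------
// Illustration: the __shfl_down loops above implement an in-warp tree
// reduction, which is why the dispatch logic insists on power-of-two plane
// counts no wider than a warp. A minimal standalone CUDA sketch of the same
// pattern, using the CUDA 9+ __shfl_down_sync variant rather than the older
// __shfl_down used in the kernels above:
// ----------------------------------------------------------------------------
#include <cstdio>

__global__ void warpReduceDemo(float* out) {
  float v = static_cast<float>(threadIdx.x);  // one value per lane
  for (int delta = 1; delta < 32; delta *= 2) {
    // After the step with shift `delta`, lane i holds the sum of the
    // 2*delta consecutive lanes starting at i.
    v += __shfl_down_sync(0xffffffffu, v, delta);
  }
  if (threadIdx.x == 0) {
    *out = v;  // lane 0 ends up with the warp-wide sum
  }
}

int main() {
  float* dOut = nullptr;
  float hOut = 0.f;
  cudaMalloc(&dOut, sizeof(float));
  warpReduceDemo<<<1, 32>>>(dOut);
  cudaMemcpy(&hOut, dOut, sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("sum(0..31) = %.0f\n", hOut);  // expected: 496
  cudaFree(dOut);
  return 0;
}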
#include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/max_pooling_backward.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/cuda/utils/nd_index.cuh> #include <nbla/variable.hpp> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> namespace nbla { namespace max_pooling_backward { template <typename T> __global__ void kernel_zeroing(int size, T *x) { NBLA_CUDA_KERNEL_LOOP(idx, size) { x[idx] = T(0); } } template <typename T, bool channel_last = false> __global__ void kernel_max_pooling_2d_forward( int y_isize, int x_isize, T *dx, const T *dy, const T *x, int Cx, int Hx, int Wx, int2 xstride, int By, int Cy, int Hy, int Wy, int2 ystride, int wkernel, int hkernel, int wstride, int hstride, int wpad, int hpad) { NBLA_CUDA_KERNEL_LOOP(yidx, y_isize) { auto ynd_index = device_flat_to_3d(yidx, ystride); auto c = channel_last ? ynd_index.z : ynd_index.x; auto h = channel_last ? ynd_index.x : ynd_index.y; auto w = channel_last ? ynd_index.y : ynd_index.z; for (auto b = 0; b < By; ++b) { auto dx_b = dx + b * x_isize; auto x_b = x + b * x_isize; auto dy_b = dy + b * y_isize; // region auto hi_pool_start = h * hstride - hpad; auto wi_pool_start = w * wstride - wpad; auto hi_pool_end = min(hi_pool_start + hkernel, Hx); auto wi_pool_end = min(wi_pool_start + wkernel, Wx); hi_pool_start = max(hi_pool_start, 0); wi_pool_start = max(wi_pool_start, 0); // pool auto xnd_idx = channel_last ? make_int3(hi_pool_start, wi_pool_start, c) : make_int3(c, hi_pool_start, wi_pool_start); auto max_idx = device_3d_to_flat(xnd_idx, xstride); auto max_val = x_b[max_idx]; for (auto rh = hi_pool_start; rh < hi_pool_end; ++rh) { for (auto rw = wi_pool_start; rw < wi_pool_end; ++rw) { xnd_idx = channel_last ? make_int3(rh, rw, c) : make_int3(c, rh, rw); auto xidx = device_3d_to_flat(xnd_idx, xstride); auto x_bidx = x_b[xidx]; if (max_val < x_bidx) { max_val = x_bidx; max_idx = xidx; } } } atomic_add(dx_b + max_idx, dy_b[yidx]); } } } template <typename T, bool channel_last = false> __global__ void kernel_max_pooling_3d_forward( int y_isize, int x_isize, T *dx, const T *dy, const T *x, int Cx, int Dx, int Hx, int Wx, int3 xstride, int By, int Cy, int Dy, int Hy, int Wy, int3 ystride, int wkernel, int hkernel, int dkernel, int wstride, int hstride, int dstride, int wpad, int hpad, int dpad) { NBLA_CUDA_KERNEL_LOOP(yidx, y_isize) { auto ynd_index = device_flat_to_4d(yidx, ystride); auto c = channel_last ? ynd_index.w : ynd_index.x; auto d = channel_last ? ynd_index.x : ynd_index.y; auto h = channel_last ? ynd_index.y : ynd_index.z; auto w = channel_last ? ynd_index.z : ynd_index.w; for (auto b = 0; b < By; ++b) { auto dx_b = dx + b * x_isize; auto x_b = x + b * x_isize; auto dy_b = dy + b * y_isize; // region auto di_pool_start = d * dstride - dpad; auto hi_pool_start = h * hstride - hpad; auto wi_pool_start = w * wstride - wpad; auto di_pool_end = min(di_pool_start + dkernel, Dx); auto hi_pool_end = min(hi_pool_start + hkernel, Hx); auto wi_pool_end = min(wi_pool_start + wkernel, Wx); di_pool_start = max(di_pool_start, 0); hi_pool_start = max(hi_pool_start, 0); wi_pool_start = max(wi_pool_start, 0); // pool auto xnd_idx = channel_last ? 
make_int4(di_pool_start, hi_pool_start, wi_pool_start, c) : make_int4(c, di_pool_start, hi_pool_start, wi_pool_start); auto max_idx = device_4d_to_flat(xnd_idx, xstride); auto max_val = x_b[max_idx]; for (auto rd = di_pool_start; rd < di_pool_end; ++rd) { for (auto rh = hi_pool_start; rh < hi_pool_end; ++rh) { for (auto rw = wi_pool_start; rw < wi_pool_end; ++rw) { xnd_idx = channel_last ? make_int4(rd, rh, rw, c) : make_int4(c, rd, rh, rw); auto xidx = device_4d_to_flat(xnd_idx, xstride); auto x_bidx = x_b[xidx]; if (max_val < x_bidx) { max_val = x_bidx; max_idx = xidx; } } } } atomic_add(dx_b + max_idx, dy_b[yidx]); } } } template <typename T, bool accum = false, bool channel_last = false> __global__ void kernel_max_pooling_2d_backward( int y_isize, int x_isize, T *gdy, const T *gdx, const T *x, int Cx, int Hx, int Wx, int2 xstride, int By, int Cy, int Hy, int Wy, int2 ystride, int wkernel, int hkernel, int wstride, int hstride, int wpad, int hpad) { NBLA_CUDA_KERNEL_LOOP(yidx, y_isize) { auto ynd_index = device_flat_to_3d(yidx, ystride); auto c = channel_last ? ynd_index.z : ynd_index.x; auto h = channel_last ? ynd_index.x : ynd_index.y; auto w = channel_last ? ynd_index.y : ynd_index.z; for (auto b = 0; b < By; ++b) { auto gdx_b = gdx + b * x_isize; auto x_b = x + b * x_isize; auto gdy_b = gdy + b * y_isize; // region auto hi_pool_start = h * hstride - hpad; auto wi_pool_start = w * wstride - wpad; auto hi_pool_end = min(hi_pool_start + hkernel, Hx); auto wi_pool_end = min(wi_pool_start + wkernel, Wx); hi_pool_start = max(hi_pool_start, 0); wi_pool_start = max(wi_pool_start, 0); // pool auto xnd_idx = channel_last ? make_int3(hi_pool_start, wi_pool_start, c) : make_int3(c, hi_pool_start, wi_pool_start); auto max_idx = device_3d_to_flat(xnd_idx, xstride); auto max_val = x_b[max_idx]; for (auto rh = hi_pool_start; rh < hi_pool_end; ++rh) { for (auto rw = wi_pool_start; rw < wi_pool_end; ++rw) { xnd_idx = channel_last ? make_int3(rh, rw, c) : make_int3(c, rh, rw); auto xidx = device_3d_to_flat(xnd_idx, xstride); auto x_bidx = x_b[xidx]; if (max_val < x_bidx) { max_val = x_bidx; max_idx = xidx; } } } gdy_b[yidx] = accum ? gdy_b[yidx] + gdx_b[max_idx] : gdx_b[max_idx]; } } } template <typename T, bool accum = false, bool channel_last = false> __global__ void kernel_max_pooling_3d_backward( int y_isize, int x_isize, T *gdy, const T *gdx, const T *x, int Cx, int Dx, int Hx, int Wx, int3 xstride, int By, int Cy, int Dy, int Hy, int Wy, int3 ystride, int wkernel, int hkernel, int dkernel, int wstride, int hstride, int dstride, int wpad, int hpad, int dpad) { NBLA_CUDA_KERNEL_LOOP(yidx, y_isize) { auto ynd_index = device_flat_to_4d(yidx, ystride); auto c = channel_last ? ynd_index.w : ynd_index.x; auto d = channel_last ? ynd_index.x : ynd_index.y; auto h = channel_last ? ynd_index.y : ynd_index.z; auto w = channel_last ? ynd_index.z : ynd_index.w; for (auto b = 0; b < By; ++b) { auto gdx_b = gdx + b * x_isize; auto x_b = x + b * x_isize; auto gdy_b = gdy + b * y_isize; // region auto di_pool_start = d * dstride - dpad; auto hi_pool_start = h * hstride - hpad; auto wi_pool_start = w * wstride - wpad; auto di_pool_end = min(di_pool_start + dkernel, Dx); auto hi_pool_end = min(hi_pool_start + hkernel, Hx); auto wi_pool_end = min(wi_pool_start + wkernel, Wx); di_pool_start = max(di_pool_start, 0); hi_pool_start = max(hi_pool_start, 0); wi_pool_start = max(wi_pool_start, 0); // pool auto xnd_idx = channel_last ? 
make_int4(di_pool_start, hi_pool_start, wi_pool_start, c) : make_int4(c, di_pool_start, hi_pool_start, wi_pool_start); auto max_idx = device_4d_to_flat(xnd_idx, xstride); auto max_val = x_b[max_idx]; for (auto rd = di_pool_start; rd < di_pool_end; ++rd) { for (auto rh = hi_pool_start; rh < hi_pool_end; ++rh) { for (auto rw = wi_pool_start; rw < wi_pool_end; ++rw) { xnd_idx = channel_last ? make_int4(rd, rh, rw, c) : make_int4(c, rd, rh, rw); auto xidx = device_4d_to_flat(xnd_idx, xstride); auto x_bidx = x_b[xidx]; if (max_val < x_bidx) { max_val = x_bidx; max_idx = xidx; } } } } gdy_b[yidx] = accum ? gdy_b[yidx] + gdx_b[max_idx] : gdx_b[max_idx]; } } } } template <typename T> void MaxPoolingBackwardCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { MaxPoolingBackward<T>::setup_impl(inputs, outputs); cuda_set_device(this->device_); } template <typename T> void MaxPoolingBackwardCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); // inputs[0] : dy // inputs[1] : x // outputs[0] : dx // dx = df(dy, x) auto sdim = this->kernel_.size(); auto yshape = inputs[0]->shape(); auto xshape = inputs[1]->shape(); int ndim = xshape.size(); // data auto dy = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto x = inputs[1]->get_data_pointer<Tcu>(this->ctx_); auto dx = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, false); // zeroing NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(max_pooling_backward::kernel_zeroing, outputs[0]->size(), dx); if (sdim == 2) { // pool params int hstride = this->stride_[0]; int wstride = this->stride_[1]; int hpad = this->pad_[0]; int wpad = this->pad_[1]; int hkernel = this->kernel_[0]; int wkernel = this->kernel_[1]; int Cx = this->channel_last_ ? xshape[ndim - 1] : xshape[ndim - 3]; int Hx = this->channel_last_ ? xshape[ndim - 3] : xshape[ndim - 2]; int Wx = this->channel_last_ ? xshape[ndim - 2] : xshape[ndim - 1]; int Cy = this->channel_last_ ? yshape[ndim - 1] : yshape[ndim - 3]; int Hy = this->channel_last_ ? yshape[ndim - 3] : yshape[ndim - 2]; int Wy = this->channel_last_ ? yshape[ndim - 2] : yshape[ndim - 1]; int By = inputs[0]->size() / (Cy * Hy * Wy); auto y_isize = Cy * Hy * Wy; auto x_isize = Cx * Hx * Wx; auto ystride = this->channel_last_ ? make_int2(Wy * Cy, Cy) : make_int2(Hy * Wy, Wy); auto xstride = this->channel_last_ ? make_int2(Wx * Cx, Cx) : make_int2(Hx * Wx, Wx); // pool auto forward = this->channel_last_ ? max_pooling_backward::kernel_max_pooling_2d_forward<Tcu, true> : max_pooling_backward::kernel_max_pooling_2d_forward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( forward, y_isize, x_isize, dx, dy, x, Cx, Hx, Wx, xstride, By, Cy, Hy, Wy, ystride, wkernel, hkernel, wstride, hstride, wpad, hpad); } else if (sdim == 3) { // pool params int dstride = this->stride_[0]; int hstride = this->stride_[1]; int wstride = this->stride_[2]; int dpad = this->pad_[0]; int hpad = this->pad_[1]; int wpad = this->pad_[2]; int dkernel = this->kernel_[0]; int hkernel = this->kernel_[1]; int wkernel = this->kernel_[2]; int Cx = this->channel_last_ ? xshape[ndim - 1] : xshape[ndim - 4]; int Dx = this->channel_last_ ? xshape[ndim - 4] : xshape[ndim - 3]; int Hx = this->channel_last_ ? xshape[ndim - 3] : xshape[ndim - 2]; int Wx = this->channel_last_ ? xshape[ndim - 2] : xshape[ndim - 1]; int Cy = this->channel_last_ ? yshape[ndim - 1] : yshape[ndim - 4]; int Dy = this->channel_last_ ? yshape[ndim - 4] : yshape[ndim - 3]; int Hy = this->channel_last_ ? 
yshape[ndim - 3] : yshape[ndim - 2]; int Wy = this->channel_last_ ? yshape[ndim - 2] : yshape[ndim - 1]; int By = inputs[0]->size() / (Cy * Dy * Hy * Wy); auto y_isize = Cy * Dy * Hy * Wy; auto x_isize = Cx * Dx * Hx * Wx; auto ystride = this->channel_last_ ? make_int3(Hy * Wy * Cy, Wy * Cy, Cy) : make_int3(Dy * Hy * Wy, Hy * Wy, Wy); auto xstride = this->channel_last_ ? make_int3(Hx * Wx * Cx, Wx * Cx, Cx) : make_int3(Dx * Hx * Wx, Hx * Wx, Wx); // pool auto forward = this->channel_last_ ? max_pooling_backward::kernel_max_pooling_3d_forward<Tcu, true> : max_pooling_backward::kernel_max_pooling_3d_forward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(forward, y_isize, x_isize, dx, dy, x, Cx, Dx, Hx, Wx, xstride, By, Cy, Dy, Hy, Wy, ystride, wkernel, hkernel, dkernel, wstride, hstride, dstride, wpad, hpad, dpad); } } template <typename T> void MaxPoolingBackwardCuda<T>::backward_impl( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0] || propagate_down[1])) { return; } cuda_set_device(this->device_); auto sdim = this->kernel_.size(); auto yshape = inputs[0]->shape(); auto xshape = inputs[1]->shape(); int ndim = xshape.size(); // data auto gdy = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]); auto x = inputs[1]->get_data_pointer<Tcu>(this->ctx_); auto gdx = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); if (sdim == 2) { // pool params int hstride = this->stride_[0]; int wstride = this->stride_[1]; int hpad = this->pad_[0]; int wpad = this->pad_[1]; int hkernel = this->kernel_[0]; int wkernel = this->kernel_[1]; int Cx = this->channel_last_ ? xshape[ndim - 1] : xshape[ndim - 3]; int Hx = this->channel_last_ ? xshape[ndim - 3] : xshape[ndim - 2]; int Wx = this->channel_last_ ? xshape[ndim - 2] : xshape[ndim - 1]; int Cy = this->channel_last_ ? yshape[ndim - 1] : yshape[ndim - 3]; int Hy = this->channel_last_ ? yshape[ndim - 3] : yshape[ndim - 2]; int Wy = this->channel_last_ ? yshape[ndim - 2] : yshape[ndim - 1]; int By = inputs[0]->size() / (Cy * Hy * Wy); auto y_isize = Cy * Hy * Wy; auto x_isize = Cx * Hx * Wx; auto ystride = this->channel_last_ ? make_int2(Wy * Cy, Cy) : make_int2(Hy * Wy, Wy); auto xstride = this->channel_last_ ? make_int2(Wx * Cx, Cx) : make_int2(Hx * Wx, Wx); // pool auto backward = accum[0] ? this->channel_last_ ? max_pooling_backward::kernel_max_pooling_2d_backward< Tcu, true, true> : max_pooling_backward::kernel_max_pooling_2d_backward< Tcu, true, false> : this->channel_last_ ? max_pooling_backward::kernel_max_pooling_2d_backward< Tcu, false, true> : max_pooling_backward::kernel_max_pooling_2d_backward< Tcu, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( backward, y_isize, x_isize, gdy, gdx, x, Cx, Hx, Wx, xstride, By, Cy, Hy, Wy, ystride, wkernel, hkernel, wstride, hstride, wpad, hpad); } else if (sdim == 3) { // pool params int dstride = this->stride_[0]; int hstride = this->stride_[1]; int wstride = this->stride_[2]; int dpad = this->pad_[0]; int hpad = this->pad_[1]; int wpad = this->pad_[2]; int dkernel = this->kernel_[0]; int hkernel = this->kernel_[1]; int wkernel = this->kernel_[2]; int Cx = this->channel_last_ ? xshape[ndim - 1] : xshape[ndim - 4]; int Dx = this->channel_last_ ? xshape[ndim - 4] : xshape[ndim - 3]; int Hx = this->channel_last_ ? xshape[ndim - 3] : xshape[ndim - 2]; int Wx = this->channel_last_ ? xshape[ndim - 2] : xshape[ndim - 1]; int Cy = this->channel_last_ ? yshape[ndim - 1] : yshape[ndim - 4]; int Dy = this->channel_last_ ? 
yshape[ndim - 4] : yshape[ndim - 3]; int Hy = this->channel_last_ ? yshape[ndim - 3] : yshape[ndim - 2]; int Wy = this->channel_last_ ? yshape[ndim - 2] : yshape[ndim - 1]; int By = inputs[0]->size() / (Cy * Dy * Hy * Wy); auto y_isize = Cy * Dy * Hy * Wy; auto x_isize = Cx * Dx * Hx * Wx; auto ystride = this->channel_last_ ? make_int3(Hy * Wy * Cy, Wy * Cy, Cy) : make_int3(Dy * Hy * Wy, Hy * Wy, Wy); auto xstride = this->channel_last_ ? make_int3(Hx * Wx * Cx, Wx * Cx, Cx) : make_int3(Dx * Hx * Wx, Hx * Wx, Wx); // pool auto backward = accum[0] ? this->channel_last_ ? max_pooling_backward::kernel_max_pooling_3d_backward< Tcu, true, true> : max_pooling_backward::kernel_max_pooling_3d_backward< Tcu, true, false> : this->channel_last_ ? max_pooling_backward::kernel_max_pooling_3d_backward< Tcu, false, true> : max_pooling_backward::kernel_max_pooling_3d_backward< Tcu, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(backward, y_isize, x_isize, gdy, gdx, x, Cx, Dx, Hx, Wx, xstride, By, Cy, Dy, Hy, Wy, ystride, wkernel, hkernel, dkernel, wstride, hstride, dstride, wpad, hpad, dpad); } } }
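/* ----------------------------------------------------------------------------
 * Illustration: the kernels above convert between flat indices and n-d
 * coordinates with precomputed strides (e.g. ystride = {Hy*Wy, Wy} for NCHW,
 * built in forward_impl). A minimal host-side sketch of that arithmetic; the
 * helper names are hypothetical stand-ins for device_flat_to_3d /
 * device_3d_to_flat from nd_index.cuh, whose exact signatures may differ:
 * ------------------------------------------------------------------------- */
#include <cassert>

struct Idx3 { int x, y, z; };

static Idx3 flat_to_3d(int idx, int stride0, int stride1) {
  // For NCHW with strides {H*W, W}: x = channel, y = row, z = column.
  return {idx / stride0, (idx % stride0) / stride1, idx % stride1};
}

static int to_flat(const Idx3& n, int stride0, int stride1) {
  return n.x * stride0 + n.y * stride1 + n.z;
}

int main() {
  const int C = 3, H = 4, W = 5;  // NCHW strides: {H*W, W}
  for (int idx = 0; idx < C * H * W; ++idx) {
    Idx3 n = flat_to_3d(idx, H * W, W);
    assert(to_flat(n, H * W, W) == idx);  // round trip is lossless
  }
  return 0;
}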
#include <cuda.h> #include <cuda_runtime.h> #include <math.h> #include <float.h> #include <vector> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) // CUDA: grid stride looping #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) __device__ float bilinear_interpolate(const float *bottom_data, const int height, const int width, float y, float x) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (float)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (float)x_low; } else { x_high = x_low + 1; } float ly = y - y_low; float lx = x - x_low; float hy = 1. - ly; float hx = 1. - lx; // do bilinear interpolation float lt = bottom_data[y_low * width + x_low]; float rt = bottom_data[y_low * width + x_high]; float lb = bottom_data[y_high * width + x_low]; float rb = bottom_data[y_high * width + x_high]; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; float val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb); return val; } __device__ void bilinear_interpolate_gradient(const int height, const int width, float y, float x, float &w1, float &w2, float &w3, float &w4, int &x_low, int &x_high, int &y_low, int &y_high) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (float)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (float)x_low; } else { x_high = x_low + 1; } float ly = y - y_low; float lx = x - x_low; float hy = 1. - ly; float hx = 1. 
- lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } __global__ void PSROIAlignForward( const float* __restrict__ bottom_data, const float* __restrict__ bottom_rois, size_t total_size, float spatial_scale, int channels, int height, int width, int pooled_dim, int pooled_height, int pooled_width, int group_size, int sampling_ratio, float* __restrict__ top_data, int* __restrict__ argmax_data) { CUDA_KERNEL_LOOP(index, total_size) { int ph = (index / pooled_width) % pooled_height; int pw = index % pooled_width; int ctop = (index / pooled_width / pooled_height) % pooled_dim; int n = index / pooled_width / pooled_height / pooled_dim; float roi_start_w = static_cast<float>(round(bottom_rois[n * 4 + 0])) * spatial_scale; float roi_start_h = static_cast<float>(round(bottom_rois[n * 4 + 1])) * spatial_scale; float roi_end_w = static_cast<float>(round(bottom_rois[n * 4 + 2])) * spatial_scale; float roi_end_h = static_cast<float>(round(bottom_rois[n * 4 + 3])) * spatial_scale; // Force too small ROIs to be 1x1 float roi_height = max(roi_end_h - roi_start_h, 0.1); float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 // Compute w and h at bottom float bin_size_h = roi_height / static_cast<float>(pooled_height); float bin_size_w = roi_width / static_cast<float>(pooled_width); // Compute c at bottom int gh = floor(static_cast<float>(ph) * group_size / pooled_height); int gw = floor(static_cast<float>(pw) * group_size / pooled_width); gh = min(max(gh, 0), group_size - 1); gw = min(max(gw, 0), group_size - 1); int c = (ctop * group_size + gh) * group_size + gw; // int bottom_data_offset = c * height * width; const float *offset_bottom_data = bottom_data + c * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0)? sampling_ratio : ceil(roi_height / pooled_height); // e.g. = 2 int roi_bin_grid_w = (sampling_ratio > 0)? sampling_ratio : ceil(roi_width / pooled_width); float maxval = -1E+20; int maxidx = -1; for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1 { float y = roi_start_h + ph * bin_size_h + static_cast<float>(iy + .5f) * bin_size_h / static_cast<float>(roi_bin_grid_h); // e.g. 
      // 0.5, 1.5
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        float x = roi_start_w + pw * bin_size_w +
            static_cast<float>(ix + .5f) * bin_size_w / static_cast<float>(roi_bin_grid_w);
        float tmpval = bilinear_interpolate(offset_bottom_data, height, width, y, x);
        int bottom_index = iy * roi_bin_grid_w + ix;
        if (tmpval > maxval) {
          maxval = tmpval;
          maxidx = bottom_index;
        }
      }
    }
    top_data[index] = maxval;
    argmax_data[index] = maxidx;
  }
}

__global__ void PSROIAlignBackward(
    const float* __restrict__ top_diff,
    const int* __restrict__ argmax_data,
    const float* __restrict__ bottom_rois,
    size_t total_size, float spatial_scale,
    int channels, int height, int width,
    int pooled_dim, int pooled_height, int pooled_width,
    int group_size, int sampling_ratio,
    float* __restrict__ bottom_diff) {
  CUDA_KERNEL_LOOP(index, total_size) {
    int ph = (index / pooled_width) % pooled_height;
    int pw = index % pooled_width;
    int ctop = (index / pooled_width / pooled_height) % pooled_dim;
    int n = index / pooled_width / pooled_height / pooled_dim;

    // Rounding of the ROI coordinates must match the forward pass exactly;
    // this implementation detail is critical
    float roi_start_w = static_cast<float>(round(bottom_rois[n * 4 + 0])) * spatial_scale;
    float roi_start_h = static_cast<float>(round(bottom_rois[n * 4 + 1])) * spatial_scale;
    float roi_end_w = static_cast<float>(round(bottom_rois[n * 4 + 2])) * spatial_scale;
    float roi_end_h = static_cast<float>(round(bottom_rois[n * 4 + 3])) * spatial_scale;

    // Force too small ROIs to be 1x1
    float roi_height = max(roi_end_h - roi_start_h, 0.1);
    float roi_width = max(roi_end_w - roi_start_w, 0.1);  // avoid 0

    // Compute w and h at bottom
    float bin_size_h = roi_height / static_cast<float>(pooled_height);
    float bin_size_w = roi_width / static_cast<float>(pooled_width);

    // Compute c at bottom
    int gh = floor(static_cast<float>(ph) * group_size / pooled_height);
    int gw = floor(static_cast<float>(pw) * group_size / pooled_width);
    gh = min(max(gh, 0), group_size - 1);
    gw = min(max(gw, 0), group_size - 1);
    int c = (ctop * group_size + gh) * group_size + gw;

    int bottom_diff_offset = c * height * width;
    int top_offset = (n * pooled_dim + ctop) * pooled_height * pooled_width;
    float top_diff_this_bin = top_diff[top_offset + ph * pooled_width + pw];

    // We use roi_bin_grid to sample the grid and mimic integral
    int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio
                                              : ceil(roi_height / pooled_height);  // e.g. = 2
    int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio
                                              : ceil(roi_width / pooled_width);

    int maxidx = argmax_data[top_offset + ph * pooled_width + pw];
    int iy = maxidx / roi_bin_grid_w;
    int ix = maxidx % roi_bin_grid_w;
    float y = roi_start_h + ph * bin_size_h +
        static_cast<float>(iy + .5f) * bin_size_h / static_cast<float>(roi_bin_grid_h);  // e.g.
0.5, 1.5 float x = roi_start_w + pw * bin_size_w + static_cast<float>(ix + .5f) * bin_size_w / static_cast<float>(roi_bin_grid_w); float w1, w2, w3, w4; int x_low, x_high, y_low, y_high; // bilinear_interpolation_gradient bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); float g1 = top_diff_this_bin * w1; float g2 = top_diff_this_bin * w2; float g3 = top_diff_this_bin * w3; float g4 = top_diff_this_bin * w4; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(&bottom_diff[bottom_diff_offset + y_low * width + x_low], g1); atomicAdd(&bottom_diff[bottom_diff_offset + y_low * width + x_high], g2); atomicAdd(&bottom_diff[bottom_diff_offset + y_high * width + x_low], g3); atomicAdd(&bottom_diff[bottom_diff_offset + y_high * width + x_high], g4); } // }} } } // namespace int PSROIAlignForwardLaucher( at::Tensor bottom_data, at::Tensor bottom_rois, at::Tensor top_data, at::Tensor argmax_data, float spatial_scale, int group_size, int sampling_ratio) { const auto channels = bottom_data.size(1); const auto height = bottom_data.size(2); const auto width = bottom_data.size(3); const auto batch_size = top_data.size(0); const auto pooled_dim = top_data.size(1); const auto pooled_height = top_data.size(2); const auto pooled_width = top_data.size(3); const auto total_size = batch_size * pooled_dim * pooled_height * pooled_width; const int threads = 1024; const int blocks = (total_size + threads - 1) / threads; PSROIAlignForward<<<blocks, threads>>>( bottom_data.data<float>(), bottom_rois.data<float>(), total_size, spatial_scale, channels, height, width, pooled_dim, pooled_height, pooled_width, group_size, sampling_ratio, top_data.data<float>(), argmax_data.data<int>()); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } int PSROIAlignBackwardLaucher( at::Tensor top_diff, at::Tensor argmax_data, at::Tensor bottom_rois, at::Tensor bottom_diff, float spatial_scale, int group_size, int sampling_ratio) { const auto channels = bottom_diff.size(1); const auto height = bottom_diff.size(2); const auto width = bottom_diff.size(3); const auto batch_size = top_diff.size(0); const auto pooled_dim = top_diff.size(1); const auto pooled_height = top_diff.size(2); const auto pooled_width = top_diff.size(3); const auto total_size = batch_size * pooled_dim * pooled_height * pooled_width; const int threads = 1024; const int blocks = (total_size + threads - 1) / threads; PSROIAlignBackward<<<blocks, threads>>>( top_diff.data<float>(), argmax_data.data<int>(), bottom_rois.data<float>(), total_size, spatial_scale, channels, height, width, pooled_dim, pooled_height, pooled_width, group_size, sampling_ratio, bottom_diff.data<float>()); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; }
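/* ----------------------------------------------------------------------------
 * Illustration: the four bilinear weights computed above (w1 = hy*hx,
 * w2 = hy*lx, w3 = ly*hx, w4 = ly*lx) always sum to 1, which is why the
 * forward interpolation and the scattered gradients g1..g4 conserve mass,
 * and why linear functions are interpolated exactly. A minimal standalone
 * host-side check at an interior sample point:
 * ------------------------------------------------------------------------- */
#include <cassert>
#include <cmath>

int main() {
  float y = 1.25f, x = 2.75f;
  int y_low = (int)y, x_low = (int)x;        // 1, 2
  int y_high = y_low + 1, x_high = x_low + 1;
  float ly = y - y_low, lx = x - x_low;      // 0.25, 0.75
  float hy = 1.f - ly, hx = 1.f - lx;
  float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  // Partition of unity: the weights sum to 1.
  assert(std::fabs((w1 + w2 + w3 + w4) - 1.f) < 1e-6f);
  // Exactness on linear functions: interpolating f(y, x) = y + x from the
  // four corners reproduces y + x at the sample point.
  float val = w1 * (y_low + x_low) + w2 * (y_low + x_high) +
              w3 * (y_high + x_low) + w4 * (y_high + x_high);
  assert(std::fabs(val - (y + x)) < 1e-5f);
  return 0;
}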
using namespace std;

#define DEBUG_SYNC 1        // forces a sync after each kernel/async call to guarantee correct error catching
#define DEBUG_OUTPUT 0      // fully verbose
#define DEBUG_REGION 999999 // region to print for debugging; ignored if DEBUG_REGION_ALL is set
#define DEBUG_REGION_ALL 0  // if set to 1, all regions are printed for debugging; if 0, only DEBUG_REGION is printed
#define RESULTS_CHECK 0
#define RESULT_DUMP 0       // 1 = dump, 0 = compare
#define INIT_CONTROL_OUPUT 1
#define SAMPLE_CONTROL 0

// use dump for the following values
#define READ_EMPTY_FROM_FILE 0
#define READ_EMPHASIS_FROM_FILE 0
#define READ_FGBUFFER_FROM_FILE 0
#define COMPARE_FG_BUFFER_FROM_FILE 0

//////////////////////////////////////////////
// Todo: remove all flowIdx references and any flow-index-related calls;
// we only work with the real flow number from now on, and a flowIdx of 0.

BkgGpuPipeline::BkgGpuPipeline(BkgModelWorkInfo* pbkinfo, int deviceId,
                               HistoryCollection* HistCol)
{
  this->bkinfo = pbkinfo;
  // The flow at which we create this class is also the starting flow, so all
  // the correct initialisations are triggered. Could be removed since it is
  // only used in the first-flow update to prevent double copying.
  startFlowNum = pbkinfo->flow;
  SpDev = NULL;
  HostTLXTalkData = NULL;
  DevTLXTalkData = NULL;
  pTLXTalkConstP = NULL;
  pConstXTP = NULL;
  Dev = NULL;
  Host = NULL;
  pHistCol = HistCol;
  devId = deviceId;

  cudaSetDevice(devId);
  cudaDeviceProp cuda_props;
  cudaGetDeviceProperties(&cuda_props, devId);
  cout << "CUDA: BkgGpuPipeline: Initiating Flow by Flow Pipeline on Device: "
       << devId << " (" << cuda_props.name << " v" << cuda_props.major << "."
       << cuda_props.minor << ")" << endl;

  setSpatialParams();
  InitPipeline();
}

void BkgGpuPipeline::setSpatialParams()
{
  const RawImage* rpt = bkinfo->img->GetImage();
  const SpatialContext* loc = &bkinfo[0].inception_state->loc_context;
  cout << "CUDA: Chip offset x:" << loc->chip_offset_x << " y:" << loc->chip_offset_y << endl;
  ImgP.init(rpt->cols, rpt->rows, loc->regionXSize, loc->regionYSize);
  ImgP.print();
}

void BkgGpuPipeline::InitPipeline()
{
  // check memory and set context/device
  checkAvailableDevMem(); // Todo: multi-device support

  CreatePoissonApproxOnDevice(devId);

  ConstanSymbolCopier::PopulateSymbolConstantImgageParams(ImgP, ConstFrmP, bkinfo);
  ConstanSymbolCopier::PopulateSymbolConstantGlobal(ConstGP, bkinfo);
  ConstanSymbolCopier::PopulateSymbolConfigParams(ConfP, bkinfo);
  ConstanSymbolCopier::PopulateSymbolPerFlowGlobal(GpFP, bkinfo);
  copySymbolsToDevice(ImgP);

  // ToDo: exception handling for an unsuccessful allocation
  try {
    Dev = new DeviceData(ImgP, ConstFrmP);
  } catch (cudaException& e) {
    e.Print();
    throw cudaAllocationError(e.getCudaError(), __FILE__, __LINE__);
  }
  if (Host == NULL) Host = new HostData(ImgP, ConstFrmP);

  PrepareSampleCollection();
  PrepareInputsForSetupKernel();
  ExecuteT0AvgNumLBeadKernel();
  InitPersistentData();
  InitXTalk();
}

bool BkgGpuPipeline::firstFlow()
{
  return (GpFP.getRealFnum() == startFlowNum); //return startFlowNum;
}

size_t BkgGpuPipeline::checkAvailableDevMem()
{
  size_t free_byte;
  size_t total_byte;
  double divMB = 1024.0 * 1024.0;
  cudaMemGetInfo(&free_byte, &total_byte);
  cout << "CUDA " << devId << ": GPU memory usage: used = "
       << (total_byte - free_byte) / divMB << ", free = " << free_byte / divMB
       << " MB, total = " << total_byte / divMB << " MB" << endl;
  return free_byte;
}

void BkgGpuPipeline::PrepareInputsForSetupKernel()
{
  Host->BfMask.init(bkinfo);
  Dev->BfMask.copy(Host->BfMask);
  Dev->T0.init(bkinfo);
  Host->BeadStateMask.init(bkinfo);
  Dev->BeadStateMask.copy(Host->BeadStateMask);
}

void BkgGpuPipeline::PrepareSampleCollection()
{
  if (pHistCol == NULL) {
    // no history provided; use only the current flow
    cout << "CUDA WARNING: BkgGpuPipeline: No HistoryCollection found! Creating new HistoryCollection initialized with latest available Regional Params!" << endl;
    pHistCol = new HistoryCollection(ImgP, 1, ConstFrmP.getMaxCompFrames(), ConstFrmP.getUncompFrames());
    for (size_t i = 0; i < ImgP.getNumRegions(); i++) {
      WorkSet myJob(&bkinfo[i]);
      size_t regId = ImgP.getRegId(myJob.getRegCol(), myJob.getRegRow());
      pHistCol->initRegionalParametersOneRegion(&bkinfo[i], regId);
    }
  }
  pHistCol->InitDeviceBuffersAndSymbol(ConstFrmP);
}

void BkgGpuPipeline::InitPersistentData()
{
  // temporary host buffers, only needed once for setup
  LayoutCubeWithRegions<float> HostRegionFrameCube(ImgP.getGridParam(ConstFrmP.getMaxCompFrames()), Rf_NUM_PARAMS, HostMem);
  LayoutCubeWithRegions<int> HostRegionFramesPerPoint(ImgP.getGridParam(ConstFrmP.getMaxCompFrames()), 1, HostMem);
  //LayoutCubeWithRegions<float> HostBeadParamCube(ImgP, Bp_NUM_PARAMS, HostMem);
  LayoutCubeWithRegions<int> HostNonZeroEmphasisFrames(ImgP.getGridParam(MAX_POISSON_TABLE_COL), Nz_NUM_PARAMS, HostMem);
  LayoutCubeWithRegions<PerNucParamsRegion> HostPerNucRegP(ImgP.getGridParam(), NUMNUC, HostMem);
  //perBeadParamCubeClass HostBeadParamCube(ImgP, HostMem);
  //perBeadPolyClonalCubeClass HostPolyClonalCube(ImgP, HostMem);

  for (size_t i = 0; i < ImgP.getNumRegions(); i++) {
    // setup step to guarantee all host-side buffers are generated and available
    bkinfo[i].bkgObj->SetFittersIfNeeded();
    WorkSet myJob(&bkinfo[i]);
    if (myJob.DataAvailalbe()) {
      // determine region id based on the start coordinates of the region
      size_t regId = ImgP.getRegId(myJob.getRegCol(), myJob.getRegRow());
      int numf = myJob.getNumFrames();
      Host->NumFrames.putAtReg(numf, regId);
      TranslatorsFlowByFlow::TranslateConstantRegionParams_RegionToCube(Host->ConstRegP, &bkinfo[i], regId);
      TranslatorsFlowByFlow::TranslateRegionFrameCube_RegionToCube(HostRegionFrameCube, &bkinfo[i], regId);
      TranslatorsFlowByFlow::TranslateRegionFramesPerPoint_RegionToCube(HostRegionFramesPerPoint, &bkinfo[i], regId);
      TranslatorsFlowByFlow::TranslateNonZeroEmphasisFrames_RegionToCube(HostNonZeroEmphasisFrames, &bkinfo[i], regId);
      TranslatorsFlowByFlow::TranslatePerNucRegionParams_RegionToCube(HostPerNucRegP, &bkinfo[i], regId);
      //HostBeadParamCube.initHostRegion(&bkinfo[i], regId);
      //HostPolyClonalCube.initHostRegion(&bkinfo[i], regId);
    }
  }

  //Dev->BeadParamCube.copy(HostBeadParamCube);
  //Dev->PolyClonalCube.copy(HostPolyClonalCube);
  Dev->BeadParamCube.init(bkinfo);
  Dev->PolyClonalCube.init(bkinfo);

  // copy temp buffers to the device
  Dev->NumFrames.copy(Host->NumFrames);
  Dev->ConstRegP.copy(Host->ConstRegP);
  Dev->RegionFrameCube.copy(HostRegionFrameCube);
  Dev->RegionFramesPerPoint.copy(HostRegionFramesPerPoint);
  Dev->fineNonZeroEmphasisFrames.copy(HostNonZeroEmphasisFrames);
  Dev->PerNucRegP.copy(HostPerNucRegP);

#if INIT_CONTROL_OUPUT
  cout << "CUDA: BkgGpuPipeline: InitPersistentData: num Time-Compressed-Frames Per Region:" << endl;
  Host->NumFrames.printRegionTable<size_t>();
#endif
}

void BkgGpuPipeline::InitXTalk()
{
  // Oh you beautiful XTalk stuff... (cannot be done before persistent data
  // init since it needs the old pipeline buffers to be initialized.)
if(ConfP.PerformTraceLevelXTalk()){ const TraceCrossTalkSpecification & tXTPec = bkinfo->bkgObj->getTraceXTalkSpecs(); pTLXTalkConstP = new XTalkNeighbourStatsHost( tXTPec.cx, tXTPec.cy, tXTPec.multiplier ); pTLXTalkConstP->setHexPacked(tXTPec.hex_packed); pTLXTalkConstP->setInitialPhase(tXTPec.initial_phase); pTLXTalkConstP->setThreeSeries(tXTPec.three_series); copySymbolsToDevice(*pTLXTalkConstP); pTLXTalkConstP->print(); HostTLXTalkData= new HostTracelevelXTalkData(ImgP,ConstFrmP); DevTLXTalkData= new DeviceTracelevelXTalkData(ImgP,ConstFrmP); HostTLXTalkData->createSampleMask(); DevTLXTalkData->TestingGenericXTalkSampleMask.copy(HostTLXTalkData->TestingGenericXTalkSampleMask); } if(ConfP.PerformWellsLevelXTalk()){ const WellXtalk & wellXT = bkinfo->bkgObj->getWellXTalk(); const int xtalkSpanX = wellXT.nn_span_x; const int xtalkSpanY = wellXT.nn_span_y; const float * evenphasemap = &wellXT.nn_even_phase_map[0]; const float * oddphasemap = &wellXT.nn_odd_phase_map[0]; pConstXTP = new WellsLevelXTalkParamsHost( oddphasemap, evenphasemap, xtalkSpanX, xtalkSpanY ); copySymbolsToDevice(*pConstXTP); pConstXTP->print(); } } BkgGpuPipeline::~BkgGpuPipeline() { cout << "CUDA: Starting cleanup flow by flow GPU pipeline" << endl; checkAvailableDevMem(); if(SpDev != NULL) delete SpDev; if(Dev != NULL) delete Dev; if(Host != NULL) delete Host; if(pConstXTP != NULL) delete pConstXTP; if(pTLXTalkConstP != NULL) delete pTLXTalkConstP; cout << "CUDA: Cleanup flow by flow GPU pipeline completed" << endl; checkAvailableDevMem(); } void BkgGpuPipeline::PerFlowDataUpdate(BkgModelWorkInfo* pbkinfo) { //Per FLow Inputs: this->bkinfo = pbkinfo; WorkSet myJob(&bkinfo[0]); if (!(myJob.performPostFitHandshake())) { GlobalDefaultsForBkgModel tmp = bkinfo->bkgObj->getGlobalDefaultsForBkgModel(); for(size_t i=0; i < getParams().getNumRegions(); i++) { SignalProcessingMasterFitter * Obj = bkinfo[i].bkgObj; Obj->region_data->AddOneFlowToBuffer ( tmp,*(Obj->region_data_extras.my_flow), bkinfo[i].flow); Obj->region_data_extras.my_flow->Increment(); Obj->region_data->my_beads.ZeroOutPins( Obj->region_data->region, Obj->GetGlobalStage().bfmask, *Obj->GetGlobalStage().pinnedInFlow, Obj->region_data_extras.my_flow->flow_ndx_map[0], 0); } } //updated per flow device Symbols and buffers ConstanSymbolCopier::PopulateSymbolPerFlowGlobal(GpFP, bkinfo); if(!firstFlow()){ // copied in constructor for first flow //Host->BfMask.copyIn(&bkinfo->bkgObj->GetGlobalStage().bfmask->mask[0]); // debugging only Host->BfMask.init(bkinfo); Dev->BfMask.copy(Host->BfMask); } //raw image Dev->RawTraces.init(bkinfo); //Host->RawTraces.wrappPtr(bkinfo->img->raw->image); //Dev->RawTraces.copy(Host->RawTraces); } dim3 BkgGpuPipeline::matchThreadBlocksToRegionSize(int bx, int by) { int rH = ImgP.getRegH(); int correctBy = by; while(rH%correctBy != 0) --correctBy; if(correctBy!=by) cout << "CUDA WARNING: requested region height of " << ImgP.getRegH() << " does not allow optimal GPU threadblock height of 4 warps! Threadblock height corrected to " << correctBy << ". For optimal performance please choose a region height of a multiple of " << by << "." 
<< endl; dim3 block(bx,correctBy); return block; } ///////////////////////////////////////////////////////////////////////////////// // Kernels void BkgGpuPipeline::ExecuteT0AvgNumLBeadKernel() { //one block per region execution model, no y-dim check needed dim3 block(32, 4); dim3 grid(ImgP.getGridDimX(),ImgP.getGridDimY()); size_t smem = 2*(block.x * block.y *sizeof(int)); Dev->SampleRowPtr.memSet(0); Dev->NumSamples.memSet(0); cout << "CUDA: BkgGpuPipeline: ExecuteT0AvgNumLBeadKernel: executing GenerateT0AvgAndNumLBeads Kernel grid(" << grid.x << "," << grid.y << "), block(" << block.x << "," << block.y <<"), smem("<< smem <<")" << endl; GenerateT0AvgAndNumLBeads_New<<<grid, block, smem>>>( Dev->RegionStateMask.getPtr(), Dev->BfMask.getPtr(), Dev->BeadStateMask.getPtr(), Dev->T0.getPtr(), Dev->SampleRowPtr.getPtr(), Dev->NumSamples.getPtr(), Dev->NumLBeads.getPtr(), //numLbeads of whole region Dev->T0Avg.getPtr() // T0Avg per REgion //ToDo check if this is really needed or if updating the T0Est would be better ); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteT0AvgNumLBeadKernel: GenerateT0AvgAndNumLBeads_New finalize" << endl; #endif #if INIT_CONTROL_OUPUT || DEBUG_OUTPUT LayoutCubeWithRegions<float>HostT0Avg(Dev->T0Avg,HostMem); LayoutCubeWithRegions<int>HostNumLBeads(Dev->NumLBeads, HostMem); cout << "CUDA: BkgGpuPipeline: ExecuteT0AvgNumLBeadKernel: num live beads per region: " << endl; HostNumLBeads.printRegionTable<int>(); cout << "CUDA: BkgGpuPipeline: ExecuteT0AvgNumLBeadKernel: T0 avg per region: " << endl; HostT0Avg.printRegionTable<float>(); // perBeadT0CubeClass HostT0(Dev->T0, HostMem); //LayoutCubeWithRegions<float> RegionStdDev(ImgP.getGridDimX(), ImgP.getGridDimY()); //for(size_t i=0; i<ImgP.getNumRegions(); i++){ //RegionStdDev[i] = HostT0.getStdDevReg<float>(i,0,HostT0Avg[i],0,&Host->BfMask,(unsigned short)MaskLive); //} //cout << "CUDA: BkgGpuPipeline: BkgGpuPipeline: std deviation per region: "<< endl; //RegionStdDev.printRegionTable<float>(); #endif #if INIT_CONTROL_OUPUT || DEBUG_OUTPUT || SAMPLE_CONTROL LayoutCubeWithRegions<int>HostSampleCount(Dev->SampleRowPtr, HostMem); LayoutCubeWithRegions<int>HostNumSamples(Dev->NumSamples, HostMem); cout << "CUDA: Number of samples for regional fitting per Region:" << endl; HostNumSamples.printRegionTable<int>(); #endif #if SAMPLE_CONTROL || DEBUG_OUTPUT HostSampleCount.setRWStrideX(); cout << "CUDA: starting offset for samples per Row (last entry is num samples)" << endl; for(size_t rg = 0; rg < ImgP.getNumRegions(); rg++){ if(rg == DEBUG_REGION || DEBUG_REGION_ALL) cout << "regId " << rg << "," << HostSampleCount.getCSVatReg<int>(rg,0,0,0,ImgP.getRegH()) << endl; } #endif } void BkgGpuPipeline::ExecuteGenerateBeadTrace() { dim3 block = matchThreadBlocksToRegionSize(32,4); dim3 grid(ImgP.getGridDimX(),(ImgP.getImgH()+block.y-1)/block.y); int cacheSetting = 0; size_t numTBlocksPerReg = (ImgP.getRegH()+block.y-1)/block.y; //Special Buffers for this kernel dependent on launch configuration if(SpDev == NULL) SpDev = new SpecialDeviceData(ImgP,ConstFrmP,numTBlocksPerReg); //ToDo: exception Handling if alloc fails Dev->EmptyTraceComplete.memSet(0); //Dev->EmptyTraceAvg.memSet(0); //DoTo: Stuff! 
SpDev->EmptyTraceSumRegionTBlock.memSet(0); SpDev->EmptyTraceCountRegionTBlock.memSet(0); Dev->SampleRowCounter.memSet(0); //Dev->SampleCompressedTraces.memSet(0); pHistCol->RezeroWriteBuffer(); Dev->SampleParamCube.memSet(0); Dev->SampleStateMask.memSet(0); size_t smem = block.x * block.y *sizeof(int); switch(cacheSetting){ case 0: cudaFuncSetCacheConfig(GenerateAllBeadTraceEmptyFromMeta_k, cudaFuncCachePreferEqual); #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteGenerateBeadTrace: CacheSetting: GenerateAllBeadTraceEmptyFromMeta_k cudaFuncCachePreferEqual" << endl; #endif break; case 2: cudaFuncSetCacheConfig(GenerateAllBeadTraceEmptyFromMeta_k, cudaFuncCachePreferL1); #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteGenerateBeadTrace: CacheSetting: GenerateAllBeadTraceEmptyFromMeta_k cudaFuncCachePreferL1" << endl; #endif break; case 1: default: cudaFuncSetCacheConfig(GenerateAllBeadTraceEmptyFromMeta_k, cudaFuncCachePreferShared); #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteGenerateBeadTrace: CacheSetting: GenerateAllBeadTraceEmptyFromMeta_k cudaFuncCachePreferShared" << endl; #endif } // print bead traces /*LayoutCubeWithRegions<short> HostRawTraces(Dev->RawTraces, HostMem); HostRawTraces.print(); HostRawTraces.setRWStrideZ(); cout << "Gen Trace From Host" << endl; cout << HostRawTraces.getCSVatReg<short>(0,0,0,0,105) << endl; */ cout << "CUDA: BkgGpuPipeline: ExecuteGenerateBeadTrace: executing GenerateAllBeadTraceFromMeta_k Kernel grid(" << grid.x << "," << grid.y << "), block(" << block.x << "," << block.y <<"), smem("<< smem <<")" << endl; GenerateAllBeadTraceEmptyFromMeta_k<<<grid, block, smem >>> ( Dev->RegionStateMask.getPtr(), Dev->RawTraces.getPtr(), //perwell input and output Dev->BfMask.getPtr(), //per well Dev->T0.getPtr(), //per well Dev->RegionFrameCube.getPtrToPlane(RfFrameNumber), Dev->RegionFramesPerPoint.getPtr(), Dev->NumFrames.getPtr(), //frames per region Dev->NumLBeads.getPtr(), Dev->T0Avg.getPtr(), // ToDo: try already subtract T0 after calculating the average so this would not be needed here anymore! Dev->ConstRegP.getPtr(), //Dev->PerFlowRegionParams.getPtr(), pHistCol->getDevPerFlowRegParams().getPtr(), SpDev->EmptyTraceSumRegionTBlock.getPtr(), // has to be initialized to 0!! will contain avg of all empty trace frames for each region SpDev->EmptyTraceCountRegionTBlock.getPtr(), // has to be initialized to 0!! will contain number of empty traces summed up for each region Dev->EmptyTraceComplete.getPtr(), //has to be initialized to 0!! 
completion counter per region for final sum ToDo: figure out if we can do without it // for regional fit sample extraction: //inputs Dev->BeadParamCube.getPtr(), Dev->BeadStateMask.getPtr(), //meta data Dev->SampleRowPtr.getPtr(), Dev->SampleRowCounter.getPtr(), //outputs Dev->SampleStateMask.getPtr(), NULL,//Dev->SampleCompressedTraces.getPtr(), Dev->SampleParamCube.getPtr(), Dev->SampleCoord.getPtr() ); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif // print bead traces post GPU GenBeadTracesKernel /*LayoutCubeWithRegions<short> HostBeadTraces(Dev->RawTraces, HostMem); HostBeadTraces.print(); HostBeadTraces.setRWStrideZ(); cout << "Gen Trace From Device" << endl; cout << HostBeadTraces.getCSVatReg<short>(0,0,0,0,29) << endl; */ /*LayoutCubeWithRegions<float> HostFrameNum(Dev->RegionFrameCube, HostMem); cout << "Frame Num" << endl; HostFrameNum.print(); cout << HostFrameNum.getCSVRegionPlane<float>(0,0,0,RfFrameNumber) << endl; cout << HostFrameNum.getCSVRegionPlane<float>(0,0,0,RfDeltaFrames) << endl; */ #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteGenerateBeadTrace: GenerateAllBeadTraceEmptyFromMeta_k finalize" << endl; #endif //one block per region execution model, no y-dim check needed dim3 blockER(32,1); dim3 gridER(ImgP.getGridDimX(), ImgP.getGridDimY()); smem = (blockER.y * ConstFrmP.getUncompFrames() + blockER.y )* sizeof(float); //LayoutCubeWithRegions<float> DevDcOffsetDebug(ImgP.getGridParam(),1,DeviceGlobal); cout << "CUDA: BkgGpuPipeline: ExecuteGenerateBeadTrace: executing ReduceEmptyAverage_k Kernel grid(" << gridER.x << "," << gridER.y << "), block(" << blockER.x << "," << blockER.y <<"), smem("<< smem <<")" << endl; ReduceEmptyAverage_k<<<gridER, blockER, smem>>>( Dev->RegionStateMask.getPtr(), Dev->ConstRegP.getPtr(), pHistCol->getDevPerFlowRegParams().getPtr(), Dev->RegionFrameCube.getPtrToPlane(RfFrameNumber), Dev->RegionFramesPerPoint.getPtr(), Dev->NumFrames.getPtr(), //frames p SpDev->EmptyTraceSumRegionTBlock.getPtr(), // has to be initialized to 0!! will contain avg of all empty trace frames for each region SpDev->EmptyTraceCountRegionTBlock.getPtr(), // has to be initialized to 0!! 
will contain number of empty traces summed up for each region numTBlocksPerReg //DevDcOffsetDebug.getPtr() ); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteGenerateBeadTrace: ReduceEmptyAverage_k finalize" << endl; #endif pHistCol->UpdateHistoryCollection(GpFP); if(GpFP.getRealFnum() == 20){ cout << "CUDA: Region State: " << endl; LayoutCubeWithRegions<unsigned short>HostRegionMask(Dev->RegionStateMask,HostMem); HostRegionMask.printRegionTable<unsigned short>(); #if DEBUG_OUTPUT printRegionStateMask(); #endif } #if DEBUG_OUTPUT //static LayoutCubeWithRegions<int>HostEmptyTraceComplete(ImgP.getGridParam(),1,HostMem);HostEmptyTraceComplete.trackMe(muT); //static LayoutCubeWithRegions<float>HostEmptyTraceAvg(Dev->EmptyTraceAvg,HostMem); LayoutCubeWithRegions<float> * HostEmptyTraceAvg = pHistCol->getLatestEmptyTraceAvgs(); //HostEmptyTraceAvg->copy(Dev->EmptyTraceAvg); HostEmptyTraceAvg->setRWStrideX(); cout << "CUDA: BkgGpuPipeline: ExecuteGenerateBeadTrace: Average Empty Traces:" << endl; for(size_t regId = 0; regId < ImgP.getNumRegions(); regId++){ if ( regId == DEBUG_REGION || DEBUG_REGION_ALL ){ int nf = Host->NumFrames.getAtReg(regId); if(nf <= 0 || nf > ConstFrmP.getMaxCompFrames()) nf = ConstFrmP.getMaxCompFrames(); cout <<"DEBUG GPU EmptytraceAvg Current," << regId <<"," << GpFP.getRealFnum() << "," << HostEmptyTraceAvg->getCSVatReg<float>(regId,0,0,0,nf) << endl; } } #endif #if SAMPLE_CONTROL LayoutCubeWithRegions<int> HostNumSample(Dev->NumSamples,HostMem); LayoutCubeWithRegions<short> * SampleHostCompressedTraces = pHistCol->getLatestSampleTraces(); LayoutCubeWithRegions<SampleCoordPair> HostSamplesCoords(Dev->SampleCoord, HostMem); SampleHostCompressedTraces->setRWStrideZ(); HostSamplesCoords.setRWStrideX(); for(size_t rgid =0 ; rgid < ImgP.getNumRegions(); rgid++){ if( rgid == DEBUG_REGION || DEBUG_REGION_ALL){ HostSamplesCoords.setRWPtrRegion(rgid); for(int i = 0; i < HostNumSample.getAtReg(rgid); i++){ int nf = Host->NumFrames.getAtReg(rgid); if(nf <= 0 || nf > ConstFrmP.getMaxCompFrames()) nf = ConstFrmP.getMaxCompFrames(); SampleCoordPair loc = HostSamplesCoords.read(); cout << "regId," << rgid <<",x,"<< loc.x << ",y,"<< loc.y << "," << SampleHostCompressedTraces->getCSVatReg<short>(rgid,i,0,0,nf) << endl;; } } } #endif } void BkgGpuPipeline::ExecuteTraceLevelXTalk() { if(ConfP.PerformTraceLevelXTalk()){ DevTLXTalkData->BaseXTalkContribution.memSet(0); DevTLXTalkData->xTalkContribution.memSet(0); DevTLXTalkData->genericXTalkTracesRegion.memSet(0); DevTLXTalkData->numGenericXTalkTracesRegion.memSet(0); dim3 block = matchThreadBlocksToRegionSize(32,4); dim3 grid(ImgP.getGridDimX(),(ImgP.getImgH()+block.y-1)/block.y); size_t smem = 0; //smem needed: one float per thread //one trace with ConstFrmP.getMaxCompFrames() frames of type float per warp //num warps == block.y cout << "CUDA: BkgGpuPipeline: ExecuteTraceLevelXTalk: executing SimpleXTalkNeighbourContribution grid(" << grid.x << "," << grid.y << "), block(" << block.x << "," << block.y <<"), smem("<< smem <<")" << endl; SimpleXTalkNeighbourContribution<<<grid, block, smem >>>(// Here FL stands for flows Dev->RegionStateMask.getPtr(), Dev->BfMask.getPtr(), Dev->BeadStateMask.getPtr(), DevTLXTalkData->BaseXTalkContribution.getPtr(), Dev->RawTraces.getPtr(), Dev->BeadParamCube.getPtr(), //NxP Dev->RegionFrameCube.getPtr(), //FxRxT bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber Dev->ConstRegP.getPtr(), // R 
//Dev->PerFlowRegionParams.getPtr(), // R pHistCol->getDevPerFlowRegParams().getPtr(), Dev->PerNucRegP.getPtr(), //RxNuc Dev->NumFrames.getPtr() // R ); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteTraceLevelXTalk: SimpleXTalkNeighbourContribution finalize" << endl; #endif /* if (GpFP.getRealFnum() == 20 ){ cout << "CUDA: Per Bead XTalk Contribution " <<endl; LayoutCubeWithRegions<float>HostBeadXtalkContri(DevTLXTalkData->BaseXTalkContribution, HostMem); HostBeadXtalkContri.setRWStrideZ(); Host->BfMask.copy(Dev->BfMask); for(size_t idx=0; idx < ImgP.getImgSize(); idx++ ){ if(Host->BfMask[idx] & (unsigned short)MaskLive){ size_t x = ImgP.getXFromIdx(idx) ; size_t y = ImgP.getYFromIdx(idx) ; cout << x << ", " << y << ", "; //float sumF = 0; for(size_t f = 0; f < ConstFrmP.getMaxCompFrames(); f++) { cout << HostBeadXtalkContri.getAt(x,y,f) << ", "; //sumF += HostBeadXtalk.getAt(x,y,f); } //cout << sumF << endl; cout << endl; //HostBeadXtalk.getCSVatReg<float>(0,x,y,0,ConstFrmP.getMaxCompFrames()) << endl; } } } */ smem = ( (block.x*block.y) + (block.y * ConstFrmP.getMaxCompFrames()) ) * sizeof(float) ; //allocate or rezero int threadBlocksPerRegion = (ImgP.getRegH()+block.y-1)/block.y; DevTLXTalkData->allocateRezeroDynamicBuffer(ImgP,ConstFrmP,threadBlocksPerRegion); cout << "CUDA: BkgGpuPipeline: ExecuteTraceLevelXTalk: executing GenericXTalkAndNeighbourAccumulation grid(" << grid.x << "," << grid.y << "), block(" << block.x << "," << block.y <<"), smem("<< smem <<")"<< endl; GenericXTalkAndNeighbourAccumulation<<<grid, block, smem >>>(// Here FL stands for flows Dev->RegionStateMask.getPtr(), Dev->BfMask.getPtr(), Dev->BeadStateMask.getPtr(), DevTLXTalkData->BaseXTalkContribution.getPtr(), DevTLXTalkData->xTalkContribution.getPtr(), // buffer XTalk contribution to this well NxF DevTLXTalkData->pDyncmaicPerBLockGenericXTalk->getPtr(), // one trace of max compressed frames per thread block DevTLXTalkData->numGenericXTalkTracesRegion.getPtr(), //one int per region to average after accumulation //Dev->PerFlowRegionParams.getPtr(), // R pHistCol->getDevPerFlowRegParams().getPtr(), Dev->NumFrames.getPtr(), // R DevTLXTalkData->TestingGenericXTalkSampleMask.getPtr() //ToDo: remove when testing done ); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteTraceLevelXTalk: GenericXTalkAndNeighbourAccumulation finalize" << endl; #endif dim3 accumBlock(128,1); dim3 accumGrid(ImgP.getGridDimX(),ImgP.getGridDimY()); cout << "CUDA: BkgGpuPipeline: ExecuteTraceLevelXTalk: executing GenericXTalkAccumulation grid(" << accumGrid.x << "," << accumGrid.y << "), block(" << accumBlock.x << "," << accumBlock.y <<"), smem(0)" << endl; GenericXTalkAccumulation<<<accumGrid,accumBlock>>>(// Here FL stands for flows DevTLXTalkData->genericXTalkTracesRegion.getPtr(), // one trace of max compressed frames per region DevTLXTalkData->pDyncmaicPerBLockGenericXTalk->getPtr(), // one trace of max compressed frames per thread block DevTLXTalkData->numGenericXTalkTracesRegion.getPtr(), //one int per region to average after accumulation Dev->NumFrames.getPtr(), // R threadBlocksPerRegion ); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteTraceLevelXTalk: GenericXTalkAccumulation finalize" << endl; #endif /* smem = ( (block.x*block.y) + (block.y * 
ConstFrmP.getMaxCompFrames()) ) * sizeof(float) ; #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteTraceLevelXTalk: executing SimpleXTalkNeighbourContributionAndAccumulation with: block(" << block.x << "," << block.y <<"), grid(" << grid.x << "," << grid.y << ") and smem: "<< smem << endl; #endif SimpleXTalkNeighbourContributionAndAccumulation_LocalMem<<<grid, block, smem >>>( Dev->RegionStateMask.getPtr(), Dev->BfMask.getPtr(), Dev->BeadStateMask.getPtr(), DevTLXTalkData->xTalkContribution.getPtr(), // buffer XTalk contribution to this well NxF DevTLXTalkData->genericXTalkTracesRegion.getPtr(), // one trace of max compressed frames per thread block or per region (atomicAdd) DevTLXTalkData->numGenericXTalkTracesRegion.getPtr(), //one int per region to average after accumulation Dev->RawTraces.getPtr(), Dev->BeadParamCube.getPtr(), //NxP Dev->RegionFrameCube.getPtr(), //FxRxT bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber Dev->ConstRegP.getPtr(), // R //Dev->PerFlowRegionParams.getPtr(), // R pHistCol->getDevPerFlowRegParams().getPtr(), Dev->PerNucRegP.getPtr(), //RxNuc Dev->NumFrames.getPtr(), // R DevTLXTalkData->TestingGenericXTalkSampleMask.getPtr() //ToDo: remove when testing done ); */ /* HostTLXTalkData->genericXTalkTracesRegion.copy(DevTLXTalkData->genericXTalkTracesRegion); HostTLXTalkData->numGenericXTalkTracesRegion.copy(DevTLXTalkData->numGenericXTalkTracesRegion); HostTLXTalkData->xTalkContribution.copy(DevTLXTalkData->xTalkContribution); cout << "CUDA: num GenericXTalkTraces per Region: " << endl; HostTLXTalkData->numGenericXTalkTracesRegion.printRegionTable<int>(); LayoutCubeWithRegions<unsigned short>HostRegState(Dev->RegionStateMask, HostMem); for(size_t regId=0; regId < ImgP.getNumRegions(); regId++ ){ if(HostRegState[regId] == RegionMaskLive){ HostTLXTalkData->genericXTalkTracesRegion.setRWStrideX(); cout << "regId " << regId << "," << HostTLXTalkData->genericXTalkTracesRegion.getCSVatReg<float>(regId,0,0,0,ConstFrmP.getMaxCompFrames()) << endl; } } */ /* if (GpFP.getRealFnum() == 39 ){ cout << "CUDA: Per Bead XTalk " <<endl; LayoutCubeWithRegions<float>HostBeadXtalk(DevTLXTalkData->xTalkContribution, HostMem); HostBeadXtalk.setRWStrideZ(); Host->BfMask.copy(Dev->BfMask); for(size_t idx=0; idx < ImgP.getImgW()*4; idx++ ){ if(Host->BfMask[idx] & (unsigned short)MaskLive){ size_t x = ImgP.getXFromIdx(idx) ; size_t y = ImgP.getYFromIdx(idx) ; cout << x << ", " << y << ", "; //float sumF = 0; for(size_t f = 0; f < ConstFrmP.getMaxCompFrames(); f++) { cout << HostBeadXtalk.getAt(x,y,f) << ", "; //sumF += HostBeadXtalk.getAt(x,y,f); } //cout << sumF << endl; cout << endl; //HostBeadXtalk.getCSVatReg<float>(0,x,y,0,ConstFrmP.getMaxCompFrames()) << endl; } } } */ } } void BkgGpuPipeline::ExecuteSingleFlowFit() { int cacheSetting = 0; dim3 block=matchThreadBlocksToRegionSize(32,4); dim3 grid(ImgP.getGridDimX(),(ImgP.getImgH()+block.y-1)/block.y); #if EMPTY_IN_SHARED size_t smem = (MAX_POISSON_TABLE_COL * ConstFrmP.getMaxCompFrames() + ConstFrmP.getUncompFrames()) * sizeof(float); #else size_t smem = (MAX_POISSON_TABLE_COL * ConstFrmP.getMaxCompFrames()) * sizeof(float); #endif switch(cacheSetting){ case 0: cudaFuncSetCacheConfig(ExecuteThreadBlockPerRegion2DBlocksDense, cudaFuncCachePreferEqual); #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteSingleFlowFit: CacheSetting: ExecuteThreadBlockPerRegion2DBlocks cudaFuncCachePreferEqual" << endl; #endif break; case 2: cudaFuncSetCacheConfig(ExecuteThreadBlockPerRegion2DBlocksDense, cudaFuncCachePreferL1); 
#if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteSingleFlowFit: CacheSetting: ExecuteThreadBlockPerRegion2DBlocks cudaFuncCachePreferL1" << endl; #endif break; case 1: default: cudaFuncSetCacheConfig(ExecuteThreadBlockPerRegion2DBlocksDense, cudaFuncCachePreferShared); #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteSingleFlowFit: CacheSetting: ExecuteThreadBlockPerRegion2DBlocks cudaFuncCachePreferShared" << endl; #endif } Dev->ResultCube.memSet(0); //TMemSegPairAlloc<int> numLBeads(sizeof(int)*36,HostPageLocked,DeviceGlobal); //numLBeads.memSet(0); // Do we need this here ? //Dev->ResultCube.memSetPlane(0,ResultAmpl); // Dev->ResultCube.memSetPlane(0,ResultAmplXTalk); cout << "CUDA: BkgGpuPipeline: ExecuteSingleFlowFit: executing ExecuteThreadBlockPerRegion2DBlocks Kernel (SingleFlowFit) grid(" << grid.x << "," << grid.y << "), block(" << block.x << "," << block.y <<"), smem("<< smem <<")" << endl; ExecuteThreadBlockPerRegion2DBlocksDense<<<grid, block, smem >>>( Dev->RegionStateMask.getPtr(), Dev->BfMask.getPtr(), Dev->BeadStateMask.getPtr(), Dev->RawTraces.getPtr(), Dev->BeadParamCube.getPtr(), Dev->crudeEmphasisVec.getPtr(), Dev->crudeNonZeroEmphasisFrames.getPtr(), Dev->fineEmphasisVec.getPtr(), Dev->fineNonZeroEmphasisFrames.getPtr(), Dev->fineNucRise.getPtr(), Dev->coarseNucRise.getPtr(), Dev->ResultCube.getPtr(), Dev->NumFrames.getPtr(), // moce to constant per region Dev->NumLBeads.getPtr(), Dev->ConstRegP.getPtr(), //Dev->PerFlowRegionParams.getPtr(), pHistCol->getDevPerFlowRegParams().getPtr(), Dev->PerNucRegP.getPtr(), Dev->RegionFrameCube.getPtr(), (DevTLXTalkData)?(DevTLXTalkData->xTalkContribution.getPtr()):(NULL), // buffer XTalk contribution to this well NxF (DevTLXTalkData)?(DevTLXTalkData->genericXTalkTracesRegion.getPtr()):(NULL) // one trace of max compressed frames per thread block or per region (atomicAdd) ); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteSingleFlowFit: ExecuteThreadBlockPerRegion2DBlocksDense finalize" << endl; #endif /* LayoutCubeWithRegions<short>HostIterCount(ImgP,2,HostMem); HostIterCount.copyPlanes(Dev->RawTraces,ConstFrmP.getRawFrames()-2,0,2); HostIterCount.setRWStrideX(); size_t iterCounter[9] = {0}; cout << "CUDA: Iteration counter: " << endl; for(size_t i= 0; i<2; i++){ HostIterCount.setRWPtr(0,0,i); for(size_t x = 0; x < ImgP.getImgSize(); x++){ int val = HostIterCount.read(); if( val < 0 || val > 8 ) val = 0; iterCounter[val]++; } cout << "iterPass: " << i << ","; for(int c=0; c<9; c++){ cout << iterCounter[c] << ","; iterCounter[c] = 0; } cout << endl; } int numWarpsPerRegRow = (ImgP.getRegW()+31)/32; LayoutCubeWithRegions<short>HostIterPerWarp(ImgP.getGridParam(numWarpsPerRegRow,ImgP.getRegH()),1,HostMem); HostIterCount.copySubSet( Dev->RawTraces, //src (ConstFrmP.getRawFrames()-3)*sizeof(short)*ImgP.getImgSize(), //srcOffset in bytes 0, //dstOffset in bytes HostIterPerWarp.getParams().getImgSize()*sizeof(short) //copy size in bytes ); size_t numwarps = 0; size_t iterCounterAll[9] = {0}; HostIterCount.setRWStrideX(); for(size_t reg = 0; reg < ImgP.getNumRegions(); reg++){ size_t iterCounterReg[9] = {0}; for(size_t row=0; row < ImgP.getRegH(reg); row++){ HostIterCount.setRWPtrRegion(reg,0,row); for(int w=0; w < numWarpsPerRegRow && w*32 < ImgP.getRegW(reg); w++) { int val = HostIterCount.read(); if( val < 0 || val > 8 ) val = 0; iterCounterReg[val]++; iterCounterAll[val]++; numwarps++; } } cout << "CUDA: Region: " << reg << " 
numWarps: " << numWarpsPerRegRow*ImgP.getRegH(reg) << " max iter: "; for(int c=0; c<9; c++){ cout << iterCounterReg[c] << ","; } cout << endl; } cout << "CUDA: Max iterations within all " << numwarps << " warps: "; for(int c=0; c<9; c++){ cout << iterCounterAll[c] << ","; iterCounterAll[c] = 0; } cout << endl; */ } void BkgGpuPipeline::HandleResults(RingBuffer<float> * ringbuffer) { WorkSet myJob(&bkinfo[0]); //if (myJob.performPostFitHandshake()) { getDataForRawWells(ringbuffer); ApplyClonalFilter(); /*} else { getDataForPostFitStepsOnHost(); // No transaltion is required if background thread writing to raw wells. // It can take care of translation if required for(size_t i=0; i< ImgP.getNumRegions(); i++){ WorkSet myJob(&bkinfo[i]); size_t regId = ImgP.getRegId(myJob.getRegCol(),myJob.getRegRow()); if(myJob.DataAvailalbe()){ TranslatorsFlowByFlow::TranslateResults_CubeToRegion(Host->ResultCube,&bkinfo[i],GpFP.getFlowIdx(),regId); TranslatorsFlowByFlow::TranslateBeadStateMask_CubeToRegion(Host->BeadStateMask,&bkinfo[i],regId); TranslatorsFlowByFlow::TranslatePerFlowRegionParams_CubeToRegion(Host->PerFlowRegionParams, &bkinfo[i], regId); } myJob.setJobToPostFitStep(); WorkerInfoQueueItem item; item.private_data = (void*)&bkinfo[i]; myJob.putJobToCPU(item); } cout << "CUDA: BkgGpuPipeline: Reinjecting results for flowblock containing flows "<< getFlowP().getRealFnum() - flowBlockSize << " to " << getFlowP().getRealFnum() << endl; cout << "CUDA: waiting on CPU Q ... "; bkinfo->pq->GetCpuQueue()->WaitTillDone(); cout <<" continue" << endl; }*/ } ///////////////////////////////////////// // Temporary Functions needed to simulate region fitting //TODO: can be removed since Regional Parameters will get initalized in RE /* void BkgGpuPipeline::InitRegionalParamsAtFirstFlow() { if(GpFP.getRealFnum() == startFlowNum){ if(!isRestart){ std::cout << "CUDA: BkgGpuPipeline: Starting Flow: " << startFlowNum << std::endl; //Host->PerFlowRegionParams.memSet(0); pHistCol->getHostPerFlowRegParams().memSet(0); for(size_t i=0; i < ImgP.getNumRegions(); i++) { WorkSet myJob(&bkinfo[i]); size_t regId = ImgP.getRegId(myJob.getRegCol(), myJob.getRegRow()); if(myJob.DataAvailalbe()){ //translate current reg params inot new layout //TranslatorsFlowByFlow::TranslatePerFlowRegionParams_RegionToCube(Host->PerFlowRegionParams, &bkinfo[i], 0, regId); TranslatorsFlowByFlow::TranslatePerFlowRegionParams_RegionToCube(pHistCol->getHostPerFlowRegParams(), &bkinfo[i], 0, regId); #if DEBUG_OUTPUT if(regId == DEBUG_REGION || DEBUG_REGION_ALL) cout << "CUDA: BkgGpuPipeline: InitOldRegionalParamsAtFirstFlow: DEBUG regId " << regId << " PerFlowRegionParams,"; //Host->PerFlowRegionParams.getAtReg(regId).print(); pHistCol->getHostPerFlowRegParams().getAtReg(regId).print(); #endif } } } //Dev->PerFlowRegionParams.copy(Host->PerFlowRegionParams); // can get moved here, see comment below pHistCol->getDevPerFlowRegParams().copy(pHistCol->getHostPerFlowRegParams()); } } */ /* void BkgGpuPipeline::ReadRegionDataFromFileForBlockOf20() { if (GpFP.getFlowIdx() == 0 ){ //only for first flow in block Host->RegionDump.setFilePathPrefix("RegionParams"); Host->EmphasisVec.memSet(0); for(size_t i=0; i< ImgP.getNumRegions(); i++){ WorkSet myJob(&bkinfo[i]); size_t regId = ImgP.getRegId(myJob.getRegCol(), myJob.getRegRow()); if(myJob.DataAvailalbe()){ //overwrite current bkg-model reg params with the ones read in from file 
TranslatorsFlowByFlow::TranslateRegionParams_CubeToRegion(Host->RegionDump.getFlowCube(myJob.getAbsoluteFlowNum()),myJob.getRegionParams(),regId); #if DEBUG_OUTPUT if(i==0) cout << "CUDA: BkgGpuPipeline: ReadRegionDataFromFileForBlockOf20: updating GPU emphasis and nucRise" << endl; #endif myJob.setUpFineEmphasisVectors(); if (myJob.performExpTailFitting() && myJob.performRecompressionTailRawTrace()) myJob.setUpFineEmphasisVectorsForStdCompression(); TranslatorsFlowByFlow::TranslateEmphasis_RegionToCube(Host->EmphasisVec, &bkinfo[i], regId); } } #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ReadRegionDataFromFileForBlockOf20: updating Emphasis and NucRise on device for next block of " << flowBlockSize << " flows." << endl; #endif Dev->EmphasisVec.copy(Host->EmphasisVec); } } */ void BkgGpuPipeline::ExecuteRegionalFitting() { // Input needed // reg_params // bead params and bead state // Estimated amplitude // Emphasis // Nucrise...need to be done on the device for regional fitting // num of lev mar iterations // number of flows and starting flow // bead traces and shifted bkg traces // Nuc Id in case of multi flow regional fitting dim3 block(NUM_SAMPLES_RF); dim3 grid(ImgP.getNumRegions()); //size_t numFlows = 1; cout << "CUDA: BkgGpuPipeline: ExecuteRegionalFitting: executing PerformMultiFlowRegionalFitting Kernel grid(" << grid.x << "," << grid.y << "), block(" << block.x << "," << block.y <<"), smem(0)" << endl; PerformMultiFlowRegionalFitting<<<grid, block>>>( Dev->RegionStateMask.getPtr(), Dev->SampleParamCube.getPtr(), Dev->SampleStateMask.getPtr(), Dev->crudeEmphasisVec.getPtr(), Dev->crudeNonZeroEmphasisFrames.getPtr(), Dev->fineNucRise.getPtr(), Dev->coarseNucRise.getPtr(), Dev->ResultCube.getPtr(), Dev->NumFrames.getPtr(), // move to constant per region Dev->ConstRegP.getPtr(), pHistCol->getDevPerFlowRegParams().getPtr(), //Dev->PerFlowRegionParams.getPtr(), Dev->PerNucRegP.getPtr(), Dev->RegionFrameCube.getPtr(), Dev->NumSamples.getPtr() ); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteRegionalFitting: PerformMultiFlowRegionalFitting finalized" << endl; #endif } void BkgGpuPipeline::PrepareForRegionalFitting() { Host->EmphasisVec.memSet(0); LayoutCubeWithRegions<int>HostNonZeroEmphasisFrames(ImgP.getGridParam(MAX_POISSON_TABLE_COL),Nz_NUM_PARAMS, HostMem); for(size_t i=0; i< ImgP.getNumRegions(); i++){ WorkSet myJob(&bkinfo[i]); size_t regId = ImgP.getRegId(myJob.getRegCol(), myJob.getRegRow()); if(myJob.DataAvailalbe()){ #if DEBUG_OUTPUT if(i==0) cout << "CUDA: BkgGpuPipeline: PrepareForRegionalFitting: updating GPU crude emphasis" << endl; #endif myJob.setUpCrudeEmphasisVectors(); // TODO if still going ahead with recompression //if (myJob.performExpTailFitting() && myJob.performRecompressionTailRawTrace()) // myJob.setUpFineEmphasisVectorsForStdCompression(); TranslatorsFlowByFlow::TranslateEmphasis_RegionToCube(Host->EmphasisVec, &bkinfo[i], regId); TranslatorsFlowByFlow::TranslateNonZeroEmphasisFrames_RegionToCube(HostNonZeroEmphasisFrames, &bkinfo[i], regId); } } Dev->crudeEmphasisVec.copy(Host->EmphasisVec); Dev->crudeNonZeroEmphasisFrames.copy(HostNonZeroEmphasisFrames); //Dev->NewPerFlowRegionParams.copy(Dev->PerFlowRegionParams); } void BkgGpuPipeline::PrepareForSingleFlowFit() { Host->EmphasisVec.memSet(0); LayoutCubeWithRegions<int>HostNonZeroEmphasisFrames(ImgP.getGridParam(MAX_POISSON_TABLE_COL),Nz_NUM_PARAMS, HostMem); for(size_t i=0; i< ImgP.getNumRegions(); i++){ 
WorkSet myJob(&bkinfo[i]); size_t regId = ImgP.getRegId(myJob.getRegCol(), myJob.getRegRow()); if(myJob.DataAvailalbe()){ #if DEBUG_OUTPUT if(i==0) cout << "CUDA: BkgGpuPipeline: PrepareForSingleFlowFit: updating GPU fine emphasis" << endl; #endif myJob.setUpFineEmphasisVectors(); TranslatorsFlowByFlow::TranslateEmphasis_RegionToCube(Host->EmphasisVec, &bkinfo[i], regId); TranslatorsFlowByFlow::TranslateNonZeroEmphasisFrames_RegionToCube(HostNonZeroEmphasisFrames, &bkinfo[i], regId); } } Dev->fineEmphasisVec.copy(Host->EmphasisVec); Dev->fineNonZeroEmphasisFrames.copy(HostNonZeroEmphasisFrames); } void BkgGpuPipeline::HandleRegionalFittingResults() { pHistCol->getHostPerFlowRegParams().copy(pHistCol->getDevPerFlowRegParams()); //Host->PerFlowRegionParams.copy(Dev->PerFlowRegionParams); for (size_t i=0; i<ImgP.getNumRegions(); ++i) { WorkSet myJob(&bkinfo[i]); size_t regId = ImgP.getRegId(myJob.getRegCol(), myJob.getRegRow()); if(myJob.DataAvailalbe()){ #if DEBUG_OUTPUT if(i==0) cout << "CUDA: BkgGpuPipeline: HandleRegionalFittingResults: updating reg params on host" << endl; #endif //TranslatorsFlowByFlow::TranslatePerFlowRegionParams_CubeToRegion(Host->PerFlowRegionParams, &bkinfo[i], regId); TranslatorsFlowByFlow::TranslatePerFlowRegionParams_CubeToRegion(pHistCol->getHostPerFlowRegParams(), &bkinfo[i], regId); } } /*for (size_t i=0; i<ImgP.getNumRegions(); ++i) { WorkSet myJob(&bkinfo[i]); std::cout << "regCol:" << myJob.getRegCol() << ","; std::cout << "regRow:" << myJob.getRegRow() << ","; std::cout << "RegId:" << i << ","; std::cout << "tmidNuc:" << *(myJob.getRegionParams()->AccessTMidNuc()) << ","; std::cout << "rdr:" << *(myJob.getRegionParams()->AccessRatioDrift()) << ","; std::cout << "pdr:" << *(myJob.getRegionParams()->AccessCopyDrift()) << ","; std::cout << std::endl; }*/ //Dev->PerFlowRegionParams.copy(Dev->NewPerFlowRegionParams); } void BkgGpuPipeline::ExecuteCrudeEmphasisGeneration() { dim3 block(512); dim3 grid(ImgP.getNumRegions()); int smem = (MAX_POISSON_TABLE_COL)*ConstFrmP.getMaxCompFrames()*sizeof(float); cout << "CUDA: BkgGpuPipeline: ExecuteCrudeEmphasisGeneration: executing emphasis generation Kernel grid(" << grid.x << "," << grid.y << "), block(" << block.x << "," << block.y <<"), smem("<< smem <<")" << endl; GenerateEmphasis<<<grid, block, smem>>>( Dev->RegionStateMask.getPtr(), MAX_POISSON_TABLE_COL, CRUDEXEMPHASIS, //Dev->PerFlowRegionParams.getPtr(), pHistCol->getDevPerFlowRegParams().getPtr(), Dev->RegionFramesPerPoint.getPtr(), Dev->RegionFrameCube.getPtr(), Dev->NumFrames.getPtr(), Dev->crudeEmphasisVec.getPtr(), Dev->crudeNonZeroEmphasisFrames.getPtr()); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteCrudeEmphasisGeneration: GenerateEmphasis finalized" << endl; #endif } void BkgGpuPipeline::ExecuteFineEmphasisGeneration() { dim3 block(512); dim3 grid(ImgP.getNumRegions()); int smem = (MAX_POISSON_TABLE_COL)*ConstFrmP.getMaxCompFrames()*sizeof(float); cout << "CUDA: BkgGpuPipeline: ExecuteFineEmphasisGeneration: executing emphasis generation Kernel grid(" << grid.x << "," << grid.y << "), block(" << block.x << "," << block.y <<"), smem("<< smem <<")" << endl; GenerateEmphasis<<<grid, block, smem>>>( Dev->RegionStateMask.getPtr(), MAX_POISSON_TABLE_COL, FINEXEMPHASIS, //Dev->PerFlowRegionParams.getPtr(), pHistCol->getDevPerFlowRegParams().getPtr(), Dev->RegionFramesPerPoint.getPtr(), Dev->RegionFrameCube.getPtr(), Dev->NumFrames.getPtr(), 
Dev->fineEmphasisVec.getPtr(), Dev->fineNonZeroEmphasisFrames.getPtr()); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecuteFineEmphasisGeneration: GenerateEmphasis finalized" << endl; #endif } void BkgGpuPipeline::ExecutePostFitSteps() { WorkSet myJob(&bkinfo[0]); //if (!(myJob.performPostFitHandshake())) // return; dim3 block = matchThreadBlocksToRegionSize(32,4); dim3 gridBlockPerRegion(ImgP.getGridDimX(),ImgP.getGridDimY()); dim3 gridWarpPerRow(ImgP.getGridDimX(),(ImgP.getImgH()+block.y-1)/block.y); size_t smem = 0; if(ConfP.PerformWellsLevelXTalk()){ smem = block.x * block.y *sizeof(float); cout << "CUDA: BkgGpuPipeline: ExecutePostFitSteps: executing Wells XTalk Update Signal Map Kernel grid(" << gridBlockPerRegion.x << "," << gridBlockPerRegion.y << "), block(" << block.x << "," << block.y <<"), smem("<< smem <<")" << endl; UpdateSignalMap_k<<<gridBlockPerRegion, block, smem>>>( Dev->RegionStateMask.getPtr(), Dev->BfMask.getPtr(), Dev->BeadParamCube.getPtr(), Dev->ResultCube.getPtr(), Dev->AverageSignalRegion.getPtr() ); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecutePostFitSteps: UpdateSignalMap_k finalized" << endl; #endif } if(ConfP.PerformWellsLevelXTalk() || ConfP.PerformPolyClonalFilter()){ cout << "CUDA: BkgGpuPipeline: ExecutePostFitSteps: executing post processing and corrections kernel grid(" << gridWarpPerRow.x << "," << gridWarpPerRow.y << "), block(" << block.x << "," << block.y <<"), smem(0)" << endl; cudaFuncSetCacheConfig(PostProcessingCorrections_k, cudaFuncCachePreferL1); PostProcessingCorrections_k<<<gridWarpPerRow, block, 0>>>( Dev->RegionStateMask.getPtr(), //Dev->PerFlowRegionParams.getPtr(), pHistCol->getDevPerFlowRegParams().getPtr(), Dev->PerNucRegP.getPtr(), Dev->BfMask.getPtr(), Dev->BeadParamCube.getPtr(), Dev->BeadStateMask.getPtr(), Dev->PolyClonalCube.getPtr(), Dev->ResultCube.getPtr(), Dev->AverageSignalRegion.getPtr() ); #if DEBUG_SYNC || DEBUG_OUTPUT cudaDeviceSynchronize(); CUDA_ERROR_CHECK(); #endif #if DEBUG_OUTPUT cout << "CUDA: BkgGpuPipeline: ExecutePostFitSteps: ProtonXTalk_k finalized" << endl; #endif } } void BkgGpuPipeline::ApplyClonalFilter() { //control opts //if clonal filter enabled if(ConfP.PerformPolyClonalFilter()) { //if last clonal filter update complete, execute filter if( ConstGP.isApplyClonalFilterFlow( GpFP.getRealFnum() ) ) { try{ //copy back from device cout << "CUDA: Applying PolyClonal Filter after Flow: " << GpFP.getRealFnum() << endl; //already copied in handle results //LayoutCubeWithRegions<unsigned short> HostBeadStateMaskTMP(Dev->BeadStateMask, HostMem ); Host->BeadStateMask.copy(Dev->BeadStateMask); Host->BfMask.copy(Dev->BfMask); LayoutCubeWithRegions<float> HostPolyClonalCube(Dev->PolyClonalCube, HostMem); //ClonalFilterWrapper clonalFilter(&bkinfo->bkgObj->GetGlobalStage().bfmask->mask[0],Host->BeadStateMask,HostPolyClonalCube); ClonalFilterWrapper clonalFilter(Host->BfMask.getPtr(),Host->BeadStateMask,HostPolyClonalCube); clonalFilter.DumpPPFSSQ(bkinfo->inception_state->sys_context.GetResultsFolder()); clonalFilter.DumpPPFSSQtoH5(bkinfo->inception_state->sys_context.GetResultsFolder()); clonalFilter.ApplyClonalFilter(bkinfo->inception_state->bkg_control.polyclonal_filter); clonalFilter.UpdateMask(); //host bf mask updated so update original bfmask being written out //WHY IS THIS NEEDED?? 
//Host->BfMask.copyPlanesOut(&bkinfo->bkgObj->GetGlobalStage().bfmask->mask[0],0,1); Dev->BeadStateMask.copy(Host->BeadStateMask); Dev->BfMask.copy(Host->BfMask); }catch(exception& e){ cerr << "NOTE: clonal filter failed." << e.what() << endl; }catch(...){ cerr << "NOTE: clonal filter failed." << endl; } } } } void BkgGpuPipeline::CopySerializationDataFromDeviceToHost() { Dev->PolyClonalCube.reinjectHostStructures(bkinfo); } void BkgGpuPipeline::DebugOutputDeviceBuffers(){ //#if DEBUG_OUTPUT LayoutCubeWithRegions<int> tmpHostNumSamples(Dev->NumSamples,HostMem); cout << "CUDA DEBUG: "; ImgP.print(); cout << "CUDA DEBUG:" << GpFP << endl; cout << "CUDA DEBUG:" << ConfP << endl; cout << "CUDA DEBUG:" << ConstFrmP << endl; cout << "CUDA DEBUG:" << ConstGP << endl; pHistCol->getHostPerFlowRegParams().copy(pHistCol->getDevPerFlowRegParams()); for(size_t regId = 0; regId < ImgP.getNumRegions(); regId++){ cout << "CUDA DEBUG:" << regId << "," << Host->ConstRegP.refAtReg(regId) << endl; cout << "CUDA DEBUG:" << regId << "," << pHistCol->getHostPerFlowRegParams().refAtReg(regId) << endl; cout << "CUDA DEBUG Sample traces:" << endl; cout << pHistCol->getLatestSampleTraces(GpFP.getRealFnum())->getCSVRegionCube<short>(regId,tmpHostNumSamples[regId],0,Host->NumFrames[regId]); cout << "CUDA DEBUG Empty Trace Avg:"; cout << pHistCol->getLatestEmptyTraceAvgs(GpFP.getRealFnum())->getCSVRegionPlane<float>(regId,Host->NumFrames[regId]); } //#endif } void BkgGpuPipeline::getDataForRawWells(RingBuffer<float> * ringbuffer) { float *ampBuf = ringbuffer->writeOneBuffer(); // need copies and copydrift too for new 1.wells format Dev->ResultCube.copyPlanesOut(ampBuf,ResultAmpl,1); ringbuffer->updateWritePos(); } void BkgGpuPipeline::getDataForPostFitStepsOnHost() { Host->ResultCube.copy(Dev->ResultCube); Host->BeadStateMask.copy(Dev->BeadStateMask); //Host->PerFlowRegionParams.copy(Dev->PerFlowRegionParams); pHistCol->getHostPerFlowRegParams().copy(pHistCol->getDevPerFlowRegParams()); } //debug helper void BkgGpuPipeline::printBkgModelMaskEnum(){ std::cout << "CUDA: BkgModelMask flags: "<< std::endl <<" BkgMaskBadRead " << BkgMaskBadRead << std::endl <<" BkgMaskPolyClonal " << BkgMaskPolyClonal << std::endl <<" BkgMaskCorrupt " << BkgMaskCorrupt << std::endl <<" BkgMaskRandomSample " << BkgMaskRandomSample << std::endl <<" BkgMaskHighQaulity " << BkgMaskHighQaulity << std::endl <<" BkgMaskRegionalSampled " << BkgMaskRegionalSampled << std::endl <<" BkgMaskPinned " << BkgMaskPinned << std::endl; } void BkgGpuPipeline::printRegionStateMask(){ std::cout << "CUDA: BkgModelMask flags: "<< std::endl << " RegionMaskNoLiveBeads " << RegionMaskNoLiveBeads << std::endl << " RegionMaskNoT0Average " << RegionMaskNoT0Average << std::endl << " RegionMaskT0AverageBelowThreshold ("<<THRESHOLD_T0_AVERAGE<<") " << RegionMaskT0AverageBelowThreshold << std::endl << " RegionMaskNoEmpties " << RegionMaskNoEmpties << std::endl << " RegionMaskNumEmptiesBelowThreshold ("<<THRESHOLD_NUM_EMPTIES<<") " << RegionMaskNumEmptiesBelowThreshold << std::endl << " RegionMaskNoRegionSamples " << RegionMaskNoRegionSamples << std::endl << " RegionMaskNumRegionSamplesBelowThreshold ("<<THRESHOLD_NUM_REGION_SAMPLE<<") " << RegionMaskNumRegionSamplesBelowThreshold << std::endl; }
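// ---------------------------------------------------------------------------
// Illustrative sketch (an addition, not part of the original file): the
// DEBUG_SYNC pattern used after every kernel launch in BkgGpuPipeline expands
// to roughly the check below. CUDA_ERROR_CHECK_SKETCH is a hypothetical name;
// the project's actual CUDA_ERROR_CHECK() macro may carry more context, but
// the cudaGetLastError()/cudaGetErrorString() calls are standard CUDA runtime
// API.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_ERROR_CHECK_SKETCH()                                   \
  do {                                                              \
    cudaError_t err = cudaGetLastError();                           \
    if (err != cudaSuccess) {                                       \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                   \
              cudaGetErrorString(err), __FILE__, __LINE__);         \
      exit(EXIT_FAILURE);                                           \
    }                                                               \
  } while (0)

// Usage after a kernel launch: synchronizing first makes asynchronous launch
// failures surface at the launch site, which is exactly what the DEBUG_SYNC
// switch is for.
//   my_kernel<<<grid, block, smem>>>(...);
//   cudaDeviceSynchronize();
//   CUDA_ERROR_CHECK_SKETCH();
// ---------------------------------------------------------------------------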
#pragma once

#include <gunrock/oprtr/compacted_cull_filter/kernel_policy.cuh>
#include <gunrock/oprtr/cull_filter/cta.cuh>

namespace gunrock {
namespace oprtr {
namespace compacted_cull_filter {

template <typename KernelPolicy, typename Problem, typename Functor>
struct ThreadWork {
  typedef typename Problem::VertexId VertexId;
  typedef typename Problem::SizeT SizeT;
  typedef typename Problem::Value Value;
  typedef typename Problem::DataSlice DataSlice;
  typedef typename Functor::LabelT LabelT;
  typedef typename KernelPolicy::SmemStorage SmemStorageT;

  VertexId vertices[KernelPolicy::GLOBAL_LOAD_SIZE];
  int num_elements;
  int block_num_elements;
  int block_input_start;
  int warp_id;
  int lane_id;
  VertexId *warp_hash;
  VertexId *d_keys_in;
  VertexId *d_keys_out;
  SizeT input_queue_length;
  SizeT *d_output_counter;
  unsigned char *d_visited_mask;
  VertexId *d_labels;
  DataSlice *d_data_slice;
  LabelT label;
  SmemStorageT &smem;

  __device__ __forceinline__ ThreadWork(
      // VertexId **block_warp_hash,
      SmemStorageT &_smem, VertexId *_d_keys_in, VertexId *_d_keys_out,
      SizeT _input_queue_length, SizeT *_d_output_counter,
      SizeT _block_input_start, unsigned char *_d_visited_mask,
      DataSlice *_d_data_slice, LabelT _label)
      : smem(_smem),
        num_elements(0),
        block_num_elements(0),
        block_input_start(_block_input_start),
        warp_id(threadIdx.x >> KernelPolicy::LOG_WARP_SIZE),
        lane_id(threadIdx.x & KernelPolicy::WARP_SIZE_MASK),
        d_keys_in(_d_keys_in),
        d_keys_out(_d_keys_out),
        input_queue_length(_input_queue_length),
        d_output_counter(_d_output_counter),
        d_visited_mask(_d_visited_mask),
        d_data_slice(_d_data_slice),
        label(_label) {
    // warp_hash = smem.warp_hash[warp_id];
    d_labels = d_data_slice->labels.GetPointer(util::DEVICE);
  }
};

template <typename KernelPolicy, typename Problem, typename Functor>
struct Cta {
  typedef typename Problem::VertexId VertexId;
  typedef typename Problem::SizeT SizeT;
  typedef typename Problem::Value Value;
  typedef typename Functor::LabelT LabelT;
  typedef Cta<KernelPolicy, Problem, Functor> CtaT;
  typedef ThreadWork<KernelPolicy, Problem, Functor> ThreadWorkT;
  typedef typename KernelPolicy::SmemStorage SmemStorageT;
  typedef typename KernelPolicy::BlockScanT BlockScanT;
  typedef typename KernelPolicy::BlockLoadT BlockLoadT;

  SmemStorageT smem;

  __device__ __forceinline__ void Init(ThreadWorkT &thread_work)
  // SmemStorageT &smem)
  {
    /*for (int i = threadIdx.x; i < KernelPolicy::BLOCK_HASH_LENGTH;
         i += KernelPolicy::THREADS)
      smem.block_hash[i] = util::InvalidValue<VertexId>();
    for (int i = thread_work.lane_id; i < KernelPolicy::WARP_HASH_LENGTH;
         i += KernelPolicy::WARP_SIZE)
      smem.warp_hash[thread_work.warp_id][i] = util::InvalidValue<VertexId>();*/
  }

  __device__ __forceinline__ void Load_from_Global(ThreadWorkT &thread_work)
  // SmemStorageT &smem)
  {
    // typedef typename util::VectorType<VertexId,
    //     KernelPolicy::NUM_ELEMENT_PER_GLOBAL_LOAD>::Type LoadT;
    if (thread_work.block_input_start +
            (KernelPolicy::GLOBAL_LOAD_SIZE << KernelPolicy::LOG_THREADS) <=
        thread_work.input_queue_length) {
      BlockLoadT(smem.cub_storage.load_space)
          .Load(thread_work.d_keys_in + thread_work.block_input_start,
                thread_work.vertices);
      thread_work.num_elements = KernelPolicy::GLOBAL_LOAD_SIZE;
    } else {
      SizeT thread_input_pos =
          thread_work.block_input_start +
          thread_work.warp_id *
              (KernelPolicy::GLOBAL_LOAD_SIZE << KernelPolicy::LOG_WARP_SIZE) +
          thread_work.lane_id;
      thread_work.num_elements = 0;
#pragma unroll
      for (int i = 0; i < KernelPolicy::GLOBAL_LOAD_SIZE; i++) {
        if (thread_input_pos >= thread_work.input_queue_length) {
          thread_work.vertices[i]
= util::InvalidValue<VertexId>(); } else { thread_work.vertices[i] = thread_work.d_keys_in[thread_input_pos]; thread_work.num_elements++; } thread_input_pos += KernelPolicy::WARP_SIZE; } } /*LoadT *keys_in = (LoadT*)(d_keys_in + thread_input_pos); *((LoadT*)thread_data.vertices) = *keys_in; if (thread_input_start + KernelPolicy::NUM_ELEMENT_PER_GLOBAL_LOAD >= input_queue_length) { thread_data.num_elements = input_queue_length - thread_input_start; for (int i=thread_data.num_elements; i < KernelPolicy::NUM_ELEMENT_PER_GLOBAL_LOAD; i++) thread_data.vertices[i] = util::InvalidValue<VertexId>(); } else thread_data.num_elements = KernelPolicy::NUM_ELEMENT_PER_GLOBAL_LOAD;*/ } __device__ __forceinline__ void Load_from_Shared(ThreadWorkT &thread_work) // SmemStorageT &smem) { /*typedef typename util::VectorType<VertexId, KernelPolicy::NUM_ELEMENT_PER_SHARED_LOAD>::Type LoadT; SizeT thread_input_start = threadIdx.x * KernelPolicy::NUM_ELEMENT_PER_SHARED_LOAD; if (thread_input_start >= thread_data.block_num_elements) { thread_data.num_elements = 0; } else { LoadT *keys_in = (LoadT*)(vertices + thread_input_start); *((LoadT*)thread_data.vertices) = *keys_in; if (thread_input_start + KernelPolicy::NUM_ELEMENT_PER_SHARED_LOAD >= thread_data.block_num_elements) { } else thread_data.num_elements = KernelPolicy::NUM_ELEMENT_PER_SHARED_LOAD; }*/ int thread_pos = thread_work.warp_id * (KernelPolicy::GLOBAL_LOAD_SIZE << KernelPolicy::LOG_WARP_SIZE) + thread_work.lane_id; thread_work.num_elements = 0; #pragma unroll for (int i = 0; i < KernelPolicy::GLOBAL_LOAD_SIZE; i++) { if (thread_pos < smem.num_elements) { thread_work.vertices[i] = smem.vertices[thread_pos]; thread_work.num_elements++; } else thread_work.vertices[i] = util::InvalidValue<VertexId>(); thread_pos += KernelPolicy::WARP_SIZE; } } __device__ __forceinline__ void Store_to_Global(ThreadWorkT &thread_work) // SmemStorageT &smem) { // temp_space[threadIdx.x] = thread_data.num_elements; /*if (threadIdx.x == 0) num_elements = 0; __syncthreads(); WarpScanT(cub_storage.scan_space[thread_data.warp_id]).ExclusiveSum(thread_data.num_elements, thread_offset); int warp_offset; if (thread_data.lane_id == KernelPolicy::WARP_SIZE_MASK) { warp_offset = atomicAdd(&num_elements, thread_offset + thread_data.num_elements); //num_elements = thread_data.block_num_elements; } __syncthreads();*/ SizeT thread_offset = 0; //, block_size = 0; BlockScanT(smem.cub_storage.scan_space) .ExclusiveSum(thread_work.num_elements, thread_offset); //, block_size); if (threadIdx.x == KernelPolicy::THREADS - 1) { smem.block_offset = atomicAdd(thread_work.d_output_counter, thread_offset + thread_work.num_elements); // block_size); // if (//block_offset > thread_data.input_queue_length || // num_elements > KernelPolicy::THREADS * // KernelPolicy::GLOBAL_LOAD_SIZE) printf("(%4d, %4d) : num_elements = // %d, block_offset = %d, input_queue_length = %d\n", blockIdx.x, // threadIdx.x, num_elements, block_offset, // thread_data.input_queue_length); } __syncthreads(); // if (thread_data.lane_id == KernelPolicy::WARP_SIZE_MASK) // warp_offset += block_offset; // warp_offset = cub::ShuffleIndex(warp_offset, // KernelPolicy::WARP_SIZE_MASK); thread_offset += smem.block_offset; //__syncthreads(); //#pragma unroll for (int i = 0; i < /*KernelPolicy::GLOBAL_LOAD_SIZE*/ thread_work.num_elements; i++) { // if (i == thread_data.num_elements) break; // thread_data.d_keys_out[thread_offset + i] = thread_data.vertices[i]; util::io::ModifiedStore<util::io::st::cg>::St( thread_work.vertices[i], 
thread_work.d_keys_out + (thread_offset + i)); } } __device__ __forceinline__ void Store_to_Shared(ThreadWorkT &thread_work) // SmemStorageT &smem) { SizeT thread_offset = 0, block_num_elements; // thread_work.num_elements = 0; //#pragma unroll // for (int i=0; i<thread_work.num_elements;i++) // if (util::isValid(thread_work.vertices[i])) thread_work.num_elements ++; BlockScanT(smem.cub_storage.scan_space) .ExclusiveSum(thread_work.num_elements, thread_offset, block_num_elements); if (threadIdx.x == 0) smem.num_elements = block_num_elements; // thread_work.num_elements = 0; //#pragma unroll for (int i = 0; i < thread_work.num_elements; i++) { // if (!util::isValid(thread_work.vertices[i])) continue; smem.vertices[thread_offset + i] = thread_work.vertices[i]; // thread_work.num_elements ++; } __syncthreads(); } __device__ __forceinline__ void Local_Compact(ThreadWorkT &thread_work) // SmemStorageT &smem) { int temp_size = 0; #pragma unroll for (int i = 0; i < /*thread_data.num_elements*/ KernelPolicy::GLOBAL_LOAD_SIZE; i++) { if (!util::isValid(thread_work.vertices[i])) continue; // if (temp_size != i) thread_work.vertices[temp_size] = thread_work.vertices[i]; temp_size++; } thread_work.num_elements = temp_size; } __device__ __forceinline__ void BitMask_Cull(ThreadWorkT &thread_work) // SmemStorageT &smem) { #pragma unroll for (int i = 0; i < KernelPolicy::GLOBAL_LOAD_SIZE /* thread_data.num_elements*/; i++) { if (!util::isValid(thread_work.vertices[i])) continue; // Location of mask byte to read SizeT mask_byte_offset = (thread_work.vertices[i] //& KernelPolicy::ELEMENT_ID_MASK ) >> 3; // Bit in mask byte corresponding to current vertex id unsigned char mask_bit = 1 << (thread_work.vertices[i] & 7); // Read byte from visited mask in tex // unsigned char tex_mask_byte = tex1Dfetch( // gunrock::oprtr::cull_filter::BitmaskTex<unsigned // char>::ref,//cta->t_bitmask[0], mask_byte_offset); // unsigned char tex_mask_byte = cta->d_visited_mask[mask_byte_offset]; unsigned char tex_mask_byte = _ldg(thread_work.d_visited_mask + mask_byte_offset); if (mask_bit & tex_mask_byte) { // Seen it thread_work.vertices[i] = util::InvalidValue<VertexId>(); } else { // unsigned char mask_byte = tex_mask_byte; // util::io::ModifiedLoad<util::io::ld::cg>::Ld( // mask_byte, cta->d_visited_mask + mask_byte_offset); // mask_byte = cta->d_visited_mask[mask_byte_offset]; // mask_byte |= tex_mask_byte; // if (mask_bit & mask_byte) { // Seen it // tile->element_id[LOAD][VEC] = util::InvalidValue<VertexId>(); //} else { // Update with best effort // mask_byte |= mask_bit; tex_mask_byte |= mask_bit; util::io::ModifiedStore<util::io::st::cg>::St( tex_mask_byte, // mask_byte, thread_work.d_visited_mask + mask_byte_offset); // thread_work.d_visited_mask[mask_byte_offset] |= mask_bit; /// thread_data.d_visited_mask [mask_byte_offset] = tex_mask_byte; //} } } } template <typename DummyT, bool ENABLE_IDEMPOTENCE> struct VertexC { static __device__ __forceinline__ void Cull(ThreadWorkT &thread_work) // SmemStorageT &smem) {} }; template <typename DummyT> struct VertexC<DummyT, true> { static __device__ __forceinline__ void Cull(ThreadWorkT &thread_work) // SmemStorageT &smem) { #pragma unroll for (int i = 0; i < KernelPolicy::GLOBAL_LOAD_SIZE /*thread_data.num_elements*/; i++) { if (!util::isValid(thread_work.vertices[i])) continue; VertexId row_id = thread_work.vertices[i]; //& KernelPolicy::ELEMENT_ID_MASK; if (thread_work.d_labels[row_id] != util::MaxValue<LabelT>()) thread_work.vertices[i] = util::InvalidValue<VertexId>(); } 
} }; template <typename DummyT> struct VertexC<DummyT, false> { static __device__ __forceinline__ void Cull(ThreadWorkT &thread_work) // SmemStorageT &smem) {} }; __device__ __forceinline__ void Vertex_Cull(ThreadWorkT &thread_work) // SmemStorageT &smem) { VertexC<SizeT, Problem::ENABLE_IDEMPOTENCE>::Cull(thread_work); } __device__ __forceinline__ void History_Cull(ThreadWorkT &thread_work) // SmemStorageT &smem) { #pragma unroll for (int i = 0; i < KernelPolicy::GLOBAL_LOAD_SIZE /*thread_data.num_elements*/; i++) { if (!util::isValid(thread_work.vertices[i])) continue; int hash = (thread_work.vertices[i]) & KernelPolicy::BLOCK_HASH_MASK; VertexId retrieved = smem.block_hash[hash]; if (retrieved == thread_work.vertices[i]) // Seen it thread_work.vertices[i] = util::InvalidValue<VertexId>(); else // Update it smem.block_hash[hash] = thread_work.vertices[i]; } } __device__ __forceinline__ void Warp_Cull(ThreadWorkT &thread_work) // SmemStorageT &smem) { #pragma unroll for (int i = 0; i < KernelPolicy::GLOBAL_LOAD_SIZE /* thread_data.num_elements*/; i++) { if (!util::isValid(thread_work.vertices[i])) continue; // int warp_id = threadIdx.x >> 5; int hash = thread_work.vertices[i] & (KernelPolicy::WARP_HASH_MASK); smem.warp_hash[thread_work.warp_id][hash] = thread_work.vertices[i]; // thread_work.warp_hash[hash] = thread_work.vertices[i]; VertexId retrieved = smem.warp_hash[thread_work.warp_id][hash]; // VertexId retrieved = thread_work.warp_hash[hash]; if (retrieved == thread_work.vertices[i]) { smem.warp_hash[thread_work.warp_id][hash] = threadIdx.x; // thread_work.warp_hash[hash] = threadIdx.x; VertexId tid = smem.warp_hash[thread_work.warp_id][hash]; // VertexId tid = thread_work.warp_hash[hash]; if (tid != threadIdx.x) thread_work.vertices[i] = util::InvalidValue<VertexId>(); } } } template <typename DummyT, bool ENABLE_IDEMPOTENCE, bool MARK_PREDECESSORS> struct VertexP { static __device__ __forceinline__ void Process(ThreadWorkT &thread_work) // SmemStorageT &smem) {} }; template <typename DummyT> struct VertexP<DummyT, true, false> { static __device__ __forceinline__ void Process(ThreadWorkT &thread_work) // SmemStorageT &smem) { //#pragma unroll for (int i = 0; i < /*KernelPolicy::GLOBAL_LOAD_SIZE*/ thread_work.num_elements; i++) { if (!util::isValid(thread_work.vertices[i])) continue; VertexId row_id = thread_work.vertices[i]; // & KernelPolicy::ELEMENT_ID_MASK; if (thread_work.d_labels[row_id] != util::MaxValue<LabelT>()) { thread_work.vertices[i] = util::InvalidValue<VertexId>(); } else { if (Functor::CondFilter( util::InvalidValue<VertexId>(), row_id, thread_work.d_data_slice, util::InvalidValue<SizeT>(), thread_work.label, util::InvalidValue<SizeT>(), util::InvalidValue<SizeT>())) { Functor::ApplyFilter(util::InvalidValue<VertexId>(), row_id, thread_work.d_data_slice, util::InvalidValue<SizeT>(), thread_work.label, util::InvalidValue<SizeT>(), util::InvalidValue<SizeT>()); } else thread_work.vertices[i] = util::InvalidValue<VertexId>(); } } } }; template <typename DummyT, bool MARK_PREDECESSORS> struct VertexP<DummyT, false, MARK_PREDECESSORS> { static __device__ __forceinline__ void Process(ThreadWorkT &thread_work) // SmemStorageT &smem) { #pragma unroll for (int i = 0; i < KernelPolicy::GLOBAL_LOAD_SIZE /* thread_data.num_elements*/; i++) { if (!util::isValid(thread_work.vertices[i])) continue; if (Functor::CondFilter( util::InvalidValue<VertexId>(), thread_work.vertices[i], thread_work.d_data_slice, util::InvalidValue<SizeT>(), thread_work.label, util::InvalidValue<SizeT>(), 
util::InvalidValue<SizeT>())) { Functor::ApplyFilter( util::InvalidValue<VertexId>(), thread_work.vertices[i], thread_work.d_data_slice, util::InvalidValue<SizeT>(), thread_work.label, util::InvalidValue<SizeT>(), util::InvalidValue<SizeT>()); } else thread_work.vertices[i] = util::InvalidValue<VertexId>(); } } }; __device__ __forceinline__ void Vertex_Process(ThreadWorkT &thread_work) // SmemStorageT &smem) { VertexP<SizeT, Problem::ENABLE_IDEMPOTENCE, Problem::MARK_PREDECESSORS>::Process(thread_work); } template <typename DummyT, bool ENABLE_IDEMPOTENCE> struct Kernel_ { static __device__ __forceinline__ void Invoke(CtaT &cta, ThreadWorkT &thread_work) {} }; template <typename DummyT> struct Kernel_<DummyT, true> { static __device__ __forceinline__ void Invoke(CtaT &cta, ThreadWorkT &thread_work) // SmemStorageT &smem) { cta.Load_from_Global(thread_work); // cta.Warp_Cull (thread_work); // cta.History_Cull (thread_work); cta.BitMask_Cull(thread_work); cta.Local_Compact(thread_work); cta.Store_to_Shared(thread_work); cta.Load_from_Shared(thread_work); if (thread_work.num_elements > 0) { // cta.Vertex_Cull (thread_work); // cta. Local_Compact (thread_data); cta.Vertex_Process(thread_work); cta.Local_Compact(thread_work); } cta.Store_to_Global(thread_work); } }; template <typename DummyT> struct Kernel_<DummyT, false> { static __device__ __forceinline__ void Invoke(CtaT &cta, ThreadWorkT &thread_work) // SmemStorageT &smem) { cta.Load_from_Global(thread_work); // cta.Vertex_Cull (thread_work); cta.Vertex_Process(thread_work); cta.Local_Compact(thread_work); cta.Store_to_Global(thread_work); } }; __device__ __forceinline__ void Kernel(ThreadWorkT &thread_work) // SmemStorageT &smem) { Kernel_<SizeT, Problem::ENABLE_IDEMPOTENCE>::Invoke(*this, thread_work); /*Load_from_Global(); if (Problem::ENABLE_IDEMPOTENCE) { BitMask_Cull (); Vertex_Cull (); History_Cull (); Warp_Cull (); } else Vertex_Cull (); Vertex_Process (); Local_Compact (); Store_to_Global ();*/ } }; // end of Cta } // namespace compacted_cull_filter } // namespace oprtr } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
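// ----------------------------------------------------------------------------
// Editor's note: a minimal standalone sketch (not part of Gunrock) of the
// bitmask-cull idea used by BitMask_Cull above. One "visited" bit is kept per
// vertex: byte index = id >> 3, bit index = id & 7. The kernel name, the -1
// invalid sentinel, and the launch shape are illustrative assumptions.
// ----------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void BitmaskCullSketch(
    int *vertices,                     // frontier; culled entries become -1
    int num_vertices,
    unsigned char *d_visited_mask) {   // one bit per vertex id
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= num_vertices) return;
  int v = vertices[i];
  if (v < 0) return;                                        // already invalid
  unsigned int byte_offset = ((unsigned int)v) >> 3;        // mask byte to read
  unsigned char mask_bit = (unsigned char)(1 << (v & 7));   // bit for this vertex
  unsigned char mask_byte = d_visited_mask[byte_offset];
  if (mask_byte & mask_bit) {
    vertices[i] = -1;  // seen it: cull the duplicate
  } else {
    // Best-effort, racy update (mirrors the ModifiedStore above): concurrent
    // writers may admit a few duplicates; the exact label test catches them.
    d_visited_mask[byte_offset] = (unsigned char)(mask_byte | mask_bit);
  }
}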
#pragma once #include <gunrock/util/srts_grid.cuh> #include <gunrock/util/reduction/soa/cooperative_soa_reduction.cuh> #include <gunrock/util/scan/soa/serial_soa_scan.cuh> #include <gunrock/util/scan/soa/warp_soa_scan.cuh> namespace gunrock { namespace util { namespace scan { namespace soa { /** * Cooperative SOA scan in raking smem grid hierarchies */ template <typename RakingSoaDetails, typename SecondaryRakingSoaDetails = typename RakingSoaDetails::SecondaryRakingSoaDetails> struct CooperativeSoaGridScan; /** * Cooperative SOA tile scan */ template <int VEC_SIZE, // Length of vector-loads (e.g., vec-1, vec-2, vec-4) bool EXCLUSIVE = true> // Whether or not this is an exclusive scan struct CooperativeSoaTileScan { //--------------------------------------------------------------------- // Iteration structures for extracting partials from raking lanes and // using them to seed scans of tile vectors //--------------------------------------------------------------------- // Next lane/load template <int LANE, int TOTAL_LANES> struct ScanLane { template <typename RakingSoaDetails, typename TileSoa, typename ReductionOp> static __device__ __forceinline__ void Invoke( RakingSoaDetails raking_soa_details, TileSoa tile_soa, ReductionOp scan_op) { // Retrieve partial reduction from raking grid typename RakingSoaDetails::TileTuple exclusive_partial; raking_soa_details.lane_partials.Get(exclusive_partial, LANE, 0); // Scan the partials in this lane/load SerialSoaScan<VEC_SIZE, EXCLUSIVE>::Scan(tile_soa, exclusive_partial, LANE, scan_op); // Next load ScanLane<LANE + 1, TOTAL_LANES>::Invoke(raking_soa_details, tile_soa, scan_op); } }; // Terminate template <int TOTAL_LANES> struct ScanLane<TOTAL_LANES, TOTAL_LANES> { template <typename RakingSoaDetails, typename TileSoa, typename ReductionOp> static __device__ __forceinline__ void Invoke( RakingSoaDetails raking_soa_details, TileSoa tile_soa, ReductionOp scan_op) {} }; //--------------------------------------------------------------------- // Interface //--------------------------------------------------------------------- /* * Scan a single tile where carry is assigned (or updated if REDUCE_INTO_CARRY * is set) with the total aggregate only in raking threads. * * No post-synchronization needed before grid reuse. */ template <bool REDUCE_INTO_CARRY, typename RakingSoaDetails, typename TileSoa, typename TileTuple, typename ReductionOp> static __device__ __forceinline__ void ScanTileWithCarry( RakingSoaDetails raking_soa_details, TileSoa tile_soa, TileTuple &carry, ReductionOp scan_op) { // Reduce vectors in tile, placing resulting partial into corresponding // raking grid lanes reduction::soa::CooperativeSoaTileReduction<VEC_SIZE>::template ReduceLane< 0, RakingSoaDetails::SCAN_LANES>::Invoke(raking_soa_details, tile_soa, scan_op); __syncthreads(); CooperativeSoaGridScan<RakingSoaDetails>::template ScanTileWithCarry< REDUCE_INTO_CARRY>(raking_soa_details, carry, scan_op); __syncthreads(); // Scan partials in tile, retrieving resulting partial from raking grid lane // partial ScanLane<0, RakingSoaDetails::SCAN_LANES>::Invoke(raking_soa_details, tile_soa, scan_op); } /* * Scan a single tile. Total aggregate is computed and returned in all * threads. * * No post-synchronization needed before grid reuse.
*/ template <typename RakingSoaDetails, typename TileSoa, typename TileTuple, typename ReductionOp> static __device__ __forceinline__ void ScanTile( TileTuple &retval, RakingSoaDetails raking_soa_details, TileSoa tile_soa, ReductionOp scan_op) { // Reduce vectors in tile, placing resulting partial into corresponding // raking grid lanes reduction::soa::CooperativeSoaTileReduction<VEC_SIZE>::template ReduceLane< 0, RakingSoaDetails::SCAN_LANES>::Invoke(raking_soa_details, tile_soa, scan_op); __syncthreads(); CooperativeSoaGridScan<RakingSoaDetails>::ScanTile(raking_soa_details, scan_op); __syncthreads(); // Scan partials in tile, retrieving resulting partial from raking grid lane // partial ScanLane<0, RakingSoaDetails::SCAN_LANES>::Invoke(raking_soa_details, tile_soa, scan_op); // Return last thread's inclusive partial retval = raking_soa_details.CumulativePartial(); } }; /****************************************************************************** * CooperativeSoaGridScan ******************************************************************************/ /** * Cooperative SOA raking grid reduction (specialized for last-level of raking * grid) */ template <typename RakingSoaDetails> struct CooperativeSoaGridScan<RakingSoaDetails, NullType> { typedef typename RakingSoaDetails::TileTuple TileTuple; /* * Scan in last-level raking grid. */ template <typename ReductionOp> static __device__ __forceinline__ void ScanTile( RakingSoaDetails raking_soa_details, ReductionOp scan_op) { if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Raking reduction TileTuple inclusive_partial; reduction::soa::SerialSoaReduce<RakingSoaDetails::PARTIALS_PER_SEG>:: Reduce(inclusive_partial, raking_soa_details.raking_segments, scan_op); // Exclusive warp scan TileTuple exclusive_partial = WarpSoaScan<RakingSoaDetails::LOG_RAKING_THREADS>::Scan( inclusive_partial, raking_soa_details.warpscan_partials, scan_op); // Exclusive raking scan SerialSoaScan<RakingSoaDetails::PARTIALS_PER_SEG>::Scan( raking_soa_details.raking_segments, exclusive_partial, scan_op); } } /* * Scan in last-level raking grid. 
Carry-in/out is updated only in raking * threads (homogeneously) */ template <bool REDUCE_INTO_CARRY, typename ReductionOp> static __device__ __forceinline__ void ScanTileWithCarry( RakingSoaDetails raking_soa_details, TileTuple &carry, ReductionOp scan_op) { if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Raking reduction TileTuple inclusive_partial; reduction::soa::SerialSoaReduce<RakingSoaDetails::PARTIALS_PER_SEG>:: Reduce(inclusive_partial, raking_soa_details.raking_segments, scan_op); // Exclusive warp scan, get total TileTuple warpscan_total; TileTuple exclusive_partial = WarpSoaScan<RakingSoaDetails::LOG_RAKING_THREADS>::Scan( inclusive_partial, warpscan_total, raking_soa_details.warpscan_partials, scan_op); // Seed exclusive partial with carry-in if (REDUCE_INTO_CARRY) { if (!ReductionOp::IDENTITY_STRIDES && (threadIdx.x == 0)) { // Thread-zero can't use the exclusive partial from the warpscan // because it contains garbage exclusive_partial = carry; } else { // Seed exclusive partial with the carry partial exclusive_partial = scan_op(carry, exclusive_partial); } // Update carry carry = scan_op(carry, warpscan_total); } else { // Set carry carry = warpscan_total; } // Exclusive raking scan SerialSoaScan<RakingSoaDetails::PARTIALS_PER_SEG>::Scan( raking_soa_details.raking_segments, exclusive_partial, scan_op); } } }; /** * Cooperative SOA raking grid scan (specialized for two-level raking grids) */ template <typename RakingSoaDetails, typename SecondaryRakingSoaDetails> struct CooperativeSoaGridScan { typedef typename RakingSoaDetails::TileTuple TileTuple; /* * Scan in raking grid hierarchy. */ template <typename ReductionOp> static __device__ __forceinline__ void ScanTile( RakingSoaDetails raking_soa_details, ReductionOp scan_op) { if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Raking reduction TileTuple inclusive_partial; reduction::soa::SerialSoaReduce<RakingSoaDetails::PARTIALS_PER_SEG>:: Reduce(inclusive_partial, raking_soa_details.raking_segments, scan_op); // Store partial reduction into next raking grid raking_soa_details.secondary_details.lane_partials.Set(inclusive_partial, 0, 0); } __syncthreads(); // Collectively scan in next grid CooperativeSoaGridScan<SecondaryRakingSoaDetails>::ScanTile( raking_soa_details.secondary_details, scan_op); __syncthreads(); if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Retrieve partial reduction from next raking grid TileTuple exclusive_partial; raking_soa_details.secondary_details.lane_partials.Get(exclusive_partial, 0, 0); // Exclusive raking scan SerialSoaScan<RakingSoaDetails::PARTIALS_PER_SEG>::Scan( raking_soa_details.raking_segments, exclusive_partial, scan_op); } } /* * Scan in raking grid hierarchy.
Carry-in/out is updated only in raking * threads (homogeneously) */ template <bool REDUCE_INTO_CARRY, typename ReductionOp> static __device__ __forceinline__ void ScanTileWithCarry( RakingSoaDetails raking_soa_details, TileTuple &carry, ReductionOp scan_op) { if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Raking reduction TileTuple inclusive_partial; reduction::soa::SerialSoaReduce<RakingSoaDetails::PARTIALS_PER_SEG>:: Reduce(inclusive_partial, raking_soa_details.raking_segments, scan_op); // Store partial reduction into next raking grid raking_soa_details.secondary_details.lane_partials.Set(inclusive_partial, 0, 0); } __syncthreads(); // Collectively scan in next grid CooperativeSoaGridScan<SecondaryRakingSoaDetails>:: template ScanTileWithCarry<REDUCE_INTO_CARRY>( raking_soa_details.secondary_details, carry, scan_op); __syncthreads(); if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Retrieve partial reduction from next raking grid TileTuple exclusive_partial; raking_soa_details.secondary_details.lane_partials.Get(exclusive_partial, 0, 0); // Exclusive raking scan SerialSoaScan<RakingSoaDetails::PARTIALS_PER_SEG>::Scan( raking_soa_details.raking_segments, exclusive_partial, scan_op); } } }; } // namespace soa } // namespace scan } // namespace util } // namespace gunrock
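// ----------------------------------------------------------------------------
// Editor's note: the grid scan above composes three pieces: a serial raking
// reduction, an exclusive warp scan over the raking threads, and a serial
// raking scan seeded with the warp-scan result. The middle piece can be
// written with shuffle intrinsics as in this sketch; WarpExclusiveSum is a
// hypothetical name, not an API of this header.
// ----------------------------------------------------------------------------
__device__ __forceinline__ int WarpExclusiveSum(int value) {
  int inclusive = value;
  // Kogge-Stone inclusive scan across the 32 lanes of a warp.
  for (int offset = 1; offset < 32; offset <<= 1) {
    int up = __shfl_up_sync(0xffffffff, inclusive, offset);
    if ((threadIdx.x & 31) >= offset) inclusive += up;
  }
  return inclusive - value;  // for sums, exclusive = inclusive - own value
}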
@page @model Sweetalert2Model @{ ViewData["Title"] = "SweetAlert2"; ViewData["PageName"] = "notifications_sweetalert2"; ViewData["Category1"] = "Notifications"; ViewData["Heading"] = "<i class='subheader-icon fal fa-exclamation-circle'></i> SweetAlert2 <sup class='badge badge-primary fw-500'>ADDON</sup>"; ViewData["PageDescription"] = "A beautiful, responsive, customizable, accessible (WAI-ARIA) replacement for JavaScript's popup boxes with no dependencies"; } @section HeadBlock { <link rel="stylesheet" media="screen, print" href="~/css/notifications/sweetalert2/sweetalert2.bundle.css"> <link rel="stylesheet" media="screen, print" href="~/css/theme-demo.css"> } <div class="alert alert-primary"> <div class="d-flex flex-start w-100"> <div class="mr-2 hidden-md-down"> <span class="icon-stack icon-stack-lg"> <i class="base base-6 icon-stack-3x opacity-100 color-primary-500"></i> <i class="base base-10 icon-stack-2x opacity-100 color-primary-300 fa-flip-vertical"></i> <i class="ni ni-blog-read icon-stack-1x opacity-100 color-white"></i> </span> </div> <div class="d-flex flex-fill"> <div class="flex-fill"> <span class="h5">About</span> <p>SweetAlert2 is a JavaScript library that helps us create alerts in our web applications. SweetAlert2 is a replacement for the default JavaScript pop-up boxes. It has zero dependencies, is customizable, well structured, accessible (WAI-ARIA) and responsive. It needs promise.js for IE11 support and is currently not supported in IE10.</p> <p class="m-0"> Find in-depth guidelines, tutorials and more on sweetalert2's <a href="https://sweetalert2.github.io/#usage" target="_blank">Official Documentation</a> </p> </div> </div> </div> </div> <div class="row"> <div class="col-xl-12"> <div id="panel-1" class="panel"> <div class="panel-hdr"> <h2> SweetAlert2 <span class="fw-300"><i>Examples</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> SweetAlert2 automatically centers itself on the page and looks great whether you're using a desktop computer, mobile or tablet. It's even highly customizable, as you can see below!
</div> <table class="table table-bordered table-hover"> <tbody> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">1</span> A basic message </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-1">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">2</span> A title with a text under </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-2">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">3</span> A modal with a title, an error icon, a text, and a footer </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-3">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">4</span> A modal window with a long content inside: </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-4">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">5</span> Custom HTML description and buttons with ARIA labels </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-5">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">6</span> A custom positioned dialog </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-6">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">7</span> A confirm dialog, with a function attached to the "Confirm"-button... </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-7">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">8</span> ... and by passing a parameter, you can execute something else for "Cancel". 
</td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-8">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">9</span> A message with a custom image and CSS animation disabled </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-9">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">10</span> A message with custom width, padding, background and animated Nyan Cat </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-10">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">11</span> A message with auto close timer </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-11">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">12</span> Right-to-left support for Arabic, Hebrew, and other RTL languages </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-12">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">13</span> AJAX request example </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-13">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">14</span> Dynamic queue example </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-14">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">15</span> Chaining modals (queue) example </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-15">Try me!</a> </td> </tr> <tr> <td style="width: 40%; vertical-align: middle;"> <span class="d-inline-flex border border-primary text-primary width-1 height-1 rounded-circle fw-500 mr-2 align-items-center justify-content-center">16</span> Timer functions example </td> <td> <a href="javascript:void(0);" class="btn btn-outline-primary" id="js-sweetalert2-example-16">Try me!</a> </td> </tr> </tbody> </table> </div> </div> </div> </div> </div> @section ScriptsBlock { <script src="~/js/notifications/sweetalert2/sweetalert2.bundle.js"></script> <script> $(document).ready(function () { "use strict"; //A basic message $("#js-sweetalert2-example-1").on("click", function () { Swal.fire("Any fool can use a computer"); }); //A title with a text under
$("#js-sweetalert2-example-2").on("click", function () { Swal.fire("The Internet?", "That thing is still around?", "question"); }); //A modal with a title, an error icon, a text, and a footer $("#js-sweetalert2-example-3").on("click", function () { Swal.fire({ type: "error", title: "Oops...", text: "Something went wrong!", footer: "<a href>Why do I have this issue?</a>" }); }); //A modal window with a long content inside: $("#js-sweetalert2-example-4").on("click", function () { Swal.fire({ imageUrl: "https://placeholder.pics/svg/300x1500", imageHeight: 1500, imageAlt: "A tall image" }); }); //Custom HTML description and buttons with ARIA labels $("#js-sweetalert2-example-5").on("click", function () { Swal.fire({ title: "<strong>HTML <u>example</u></strong>", type: "info", html: "You can use <b>bold text</b>, " + '<a href="//github.com">links</a> ' + "and other HTML tags", showCloseButton: true, showCancelButton: true, focusConfirm: false, confirmButtonText: '<i class="@(Settings.Theme.IconPrefix) fa-thumbs-up"></i> Great!', confirmButtonAriaLabel: "Thumbs up, great!", cancelButtonText: '<i class="@(Settings.Theme.IconPrefix) fa-thumbs-down"></i>', cancelButtonAriaLabel: "Thumbs down" }); }); //A custom positioned dialog $("#js-sweetalert2-example-6").on("click", function () { Swal.fire({ position: "top-end", type: "success", title: "Your work has been saved", showConfirmButton: false, timer: 1500 }); }); //A confirm dialog, with a function attached to the "Confirm"-button... $("#js-sweetalert2-example-7").on("click", function () { Swal.fire({ title: "Are you sure?", text: "You won't be able to revert this!", type: "warning", showCancelButton: true, confirmButtonText: "Yes, delete it!" }).then(function (result) { if (result.value) { Swal.fire("Deleted!", "Your file has been deleted.", "success"); } }); }); // ... and by passing a parameter, you can execute something else for "Cancel". 
$("#js-sweetalert2-example-8").on("click", function () { var swalWithBootstrapButtons = Swal.mixin({ customClass: { confirmButton: "btn btn-primary", cancelButton: "btn btn-danger mr-2" }, buttonsStyling: false }); swalWithBootstrapButtons .fire({ title: "Are you sure?", text: "You won't be able to revert this!", type: "warning", showCancelButton: true, confirmButtonText: "Yes, delete it!", cancelButtonText: "No, cancel!", reverseButtons: true }) .then(function (result) { if (result.value) { swalWithBootstrapButtons.fire( "Deleted!", "Your file has been deleted.", "success" ); } else if ( // Read more about handling dismissals result.dismiss === Swal.DismissReason.cancel ) { swalWithBootstrapButtons.fire( "Cancelled", "Your imaginary file is safe :)", "error" ); } }); }); // A message with a custom image and CSS animation disabled $("#js-sweetalert2-example-9").on("click", function () { Swal.fire({ title: "Sweet!", text: "Modal with a custom image.", imageUrl: "https://unsplash.it/400/200", imageWidth: 400, imageHeight: 200, imageAlt: "Custom image", animation: false }); }); //A message with custom width, padding, background and animated Nyan Cat $("#js-sweetalert2-example-10").on("click", function () { Swal.fire({ title: "Custom width, padding, background.", width: 600, padding: "3em", background: "#fff url(/images/trees.png)", backdrop: '\n\t\t\t rgba(0,0,123,0.4)\n\t\t\t url("/images/nyan-cat.gif")\n\t\t\t center left\n\t\t\t no-repeat\n\t\t\t ' }); }); // A message with auto close timer $("#js-sweetalert2-example-11").on("click", function () { var timerInterval; Swal.fire({ title: "Auto close alert!", html: "I will close in <strong></strong> seconds.", timer: 2000, onBeforeOpen: function onBeforeOpen() { Swal.showLoading(); timerInterval = setInterval(function () { Swal.getContent().querySelector( "strong" ).textContent = Swal.getTimerLeft(); }, 100); }, onClose: function onClose() { clearInterval(timerInterval); } }).then(function (result) { if ( // Read more about handling dismissals result.dismiss === Swal.DismissReason.timer ) { console.log("I was closed by the timer"); } }); }); //Right-to-left support for Arabic, Hebrew, and other RTL languages $("#js-sweetalert2-example-12").on("click", function () { Swal.fire({ title: "هل تريد الاستمرار؟", type: "question", customClass: { icon: "swal2-arabic-question-mark" }, confirmButtonText: "نعم", cancelButtonText: "لا", showCancelButton: true, showCloseButton: true }); }); //AJAX request example $("#js-sweetalert2-example-13").on("click", function () { Swal.fire({ title: "Submit your Github username", input: "text", inputAttributes: { autocapitalize: "off" }, showCancelButton: true, confirmButtonText: "Look up", showLoaderOnConfirm: true, preConfirm: function preConfirm(login) { return fetch("//api.github.com/users/".concat(login)) .then(function (response) { if (!response.ok) { throw new Error(response.statusText); } return response.json(); }) .catch(function (error) { Swal.showValidationMessage("Request failed: ".concat(error)); }); }, allowOutsideClick: function allowOutsideClick() { return !Swal.isLoading(); } }).then(function (result) { if (result.value) { Swal.fire({ title: "".concat(result.value.login, "'s avatar"), imageUrl: result.value.avatar_url }); } }); }); //Dynamic queue example $("#js-sweetalert2-example-14").on("click", function () { var ipAPI = "https://api.ipify.org?format=json"; Swal.queue([{ title: "Your public IP", confirmButtonText: "Show my public IP", text: "Your public IP will be received " + "via AJAX request", 
showLoaderOnConfirm: true, preConfirm: function preConfirm() { return fetch(ipAPI) .then(function (response) { return response.json(); }) .then(function (data) { return Swal.insertQueueStep(data.ip); }) .catch(function () { Swal.insertQueueStep({ type: "error", title: "Unable to get your public IP" }); }); } }]); }); //Chaining modals (queue) example $("#js-sweetalert2-example-15").on("click", function () { Swal.mixin({ input: "text", confirmButtonText: 'Next <i class="@(Settings.Theme.IconPrefix) fa-chevron-right"></i>', showCancelButton: true, progressSteps: ["1", "2", "3"] }) .queue([{ title: "Question 1", text: "Chaining swal2 modals is easy" }, "Question 2", "Question 3" ]) .then(function (result) { if (result.value) { Swal.fire({ title: "All done!", html: "Your answers: <pre><code>" + JSON.stringify(result.value) + "</code></pre>", confirmButtonText: "Lovely!" }); } }); }); //Timer functions example $("#js-sweetalert2-example-16").on("click", function () { var timerInterval; Swal.fire({ title: "Auto close alert!", html: "I will close in <strong></strong> seconds.<br/><br/>" + '<button id="increase" class="btn btn-warning">' + "I need 5 more seconds!" + "</button><br/>" + '<button id="stop" class="btn btn-danger">' + "Please stop the timer!!" + "</button><br/>" + '<button id="resume" class="btn btn-success" disabled>' + "Phew... you can restart now!" + "</button><br/>" + '<button id="toggle" class="btn btn-primary">' + "Toggle" + "</button>", timer: 10000, onBeforeOpen: function onBeforeOpen() { var content = Swal.getContent(); var $ = content.querySelector.bind(content); var stop = $("#stop"); var resume = $("#resume"); var toggle = $("#toggle"); var increase = $("#increase"); Swal.showLoading(); function toggleButtons() { stop.disabled = !Swal.isTimerRunning(); resume.disabled = Swal.isTimerRunning(); } stop.addEventListener("click", function () { Swal.stopTimer(); toggleButtons(); }); resume.addEventListener("click", function () { Swal.resumeTimer(); toggleButtons(); }); toggle.addEventListener("click", function () { Swal.toggleTimer(); toggleButtons(); }); increase.addEventListener("click", function () { Swal.increaseTimer(5000); }); timerInterval = setInterval(function () { Swal.getContent().querySelector("strong").textContent = ( Swal.getTimerLeft() / 1000 ).toFixed(0); }, 100); }, onClose: function onClose() { clearInterval(timerInterval); } }); }); }); </script> }
@using lsc.Common @using lsc.Model @using lsc.Model.Enume @model EnterCustomer @{ ViewData["Title"] = "添加客户"; Layout = "~/Pages/_Layout.cshtml"; List<DistrictInfo> ProvinceList = ViewBag.ProvinceList; } <blockquote class="layui-elem-quote"> 添加客户信息 </blockquote> <div class="manage-form-container"> <form class="layui-form" method="post" id="addform" action="/EnterCustom/SaveEnterCustom"> <div class="layui-form-item"> <label class="layui-form-label">客户全称</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" name="EnterName" id="EnterName" value="@(Model!=null?Model.EnterName:"")" placeholder="请输入客户名称" autocomplete="off" /> </div> </div> @*<div class="layui-form-item"> <label class="layui-form-label">客户简称</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" name="Abbreviation" id="Abbreviation" value="@(Model!=null?Model.Abbreviation:"")" placeholder="请输入客户简称" autocomplete="off" /> </div> </div>*@ <div class="layui-form-item"> <label class="layui-form-label">客户类型</label> <div class="layui-input-block"> <select class="layui-form-select" name="CustomerType" lay-search> <option value=""></option> <option value="1" @(Model != null && Model.CustomerType == CustomerTypeEnum.Dealer ? "selected" : "")>代理经销商</option> @*<option value="2" @(Model != null && Model.CustomerType == CustomerTypeEnum.Ordinary ? "selected" : "")>普通客户</option> <option value="3" @(Model != null && Model.CustomerType == CustomerTypeEnum.BigCustomer ? "selected" : "")>集团大客户</option> <option value="4" @(Model != null && Model.CustomerType == CustomerTypeEnum.Cooperation ? "selected" : "")>业务合作商</option> <option value="5" @(Model != null && Model.CustomerType == CustomerTypeEnum.Same ? "selected" : "")>怀疑同行</option>*@ <option value="6" @(Model != null && Model.CustomerType == CustomerTypeEnum.Colleges ? "selected" : "")>高校</option> <option value="7" @(Model != null && Model.CustomerType == CustomerTypeEnum.Commission ? "selected" : "")>教委</option> <option value="8" @(Model != null && Model.CustomerType == CustomerTypeEnum.VocationalSchools ? "selected" : "")>中职</option> <option value="9" @(Model != null && Model.CustomerType == CustomerTypeEnum.MiddleSchool ? "selected" : "")>中学</option> <option value="10" @(Model != null && Model.CustomerType == CustomerTypeEnum.PrimarySchool ? "selected" : "")>小学</option> <option value="11" @(Model != null && Model.CustomerType == CustomerTypeEnum.Special ? "selected" : "")>特教</option> <option value="12" @(Model != null && Model.CustomerType == CustomerTypeEnum.Prison ? "selected" : "")>监狱</option> <option value="13" @(Model != null && Model.CustomerType == CustomerTypeEnum.JDS ? "selected" : "")>戒毒所</option> <option value="14" @(Model != null && Model.CustomerType == CustomerTypeEnum.Judicial ? "selected" : "")>公检法</option> <option value="15" @(Model != null && Model.CustomerType == CustomerTypeEnum.ArmedPolice ? "selected" : "")>武警部队</option> <option value="16" @(Model != null && Model.CustomerType == CustomerTypeEnum.Hospital ? "selected" : "")>医院</option> <option value="17" @(Model != null && Model.CustomerType == CustomerTypeEnum.Other ? "selected" : "")>其他客户</option> </select> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">关系等级</label> <div class="layui-input-block"> <select class="layui-form-select" name="Relationship"> <option value=""></option> <option value="1" @(Model != null && Model.Relationship == RelationshipEnume.Intimate ? 
"selected" : "")>密切</option> <option value="2" @(Model != null && Model.Relationship == RelationshipEnume.Better ? "selected" : "")>较好</option> <option value="3" @(Model != null && Model.Relationship == RelationshipEnume.Commonly ? "selected" : "")>一般</option> <option value="4" @(Model != null && Model.Relationship == RelationshipEnume.Poor ? "selected" : "")>较差</option> </select> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">阶段</label> <div class="layui-input-block"> <select class="layui-form-select" name="Phase"> <option value=""></option> <option value="1" @(Model != null && Model.Phase == PhaseEnume.Pre_sale ? "selected" : "")>售前跟踪</option> <option value="2" @(Model != null && Model.Phase == PhaseEnume.Demand_Confirmation ? "selected" : "")>需求确定</option> <option value="3" @(Model != null && Model.Phase == PhaseEnume.In_Sales ? "selected" : "")>售中跟单</option> <option value="4" @(Model != null && Model.Phase == PhaseEnume.Sign_Contract ? "selected" : "")>签约洽谈</option> <option value="5" @(Model != null && Model.Phase == PhaseEnume.After_Sale ? "selected" : "")>成交售后</option> <option value="6" @(Model != null && Model.Phase == PhaseEnume.Invalid ? "selected" : "")>跟单失败</option> <option value="7" @(Model != null && Model.Phase == PhaseEnume.Shelve ? "selected" : "")>暂且搁置</option> <option value="8" @(Model != null && Model.Phase == PhaseEnume.Other ? "selected" : "")>其他阶段</option> </select> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">价值评估</label> <div class="layui-input-block"> <select class="layui-form-select" name="ValueGrade"> <option value=""></option> <option value="1" @(Model != null && Model.ValueGrade == ValueGradeEnume.Senior ? "selected" : "")>高</option> <option value="2" @(Model != null && Model.ValueGrade == ValueGradeEnume.Intermediate ? "selected" : "")>中</option> <option value="3" @(Model != null && Model.ValueGrade == ValueGradeEnume.Lower ? "selected" : "")>低</option> </select> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">客户来源</label> <div class="layui-input-block"> <select class="layui-form-select" name="Source"> <option value=""></option> <option value="1" @(Model != null && Model.Source == CustSource.CustTelephone ? "selected" : "")>客户来电</option> <option value="2" @(Model != null && Model.Source == CustSource.Excavate ? "selected" : "")>主动挖掘</option> <option value="3" @(Model != null && Model.Source == CustSource.WebConsulting ? "selected" : "")>网站咨询</option> <option value="4" @(Model != null && Model.Source == CustSource.Introduction ? "selected" : "")>客户介绍</option> <option value="6" @(Model != null && Model.Source == CustSource.Tender ? "selected" : "")>招标</option> <option value="7" @(Model != null && Model.Source == CustSource.Exhibition ? "selected" : "")>展会</option> <option value="8" @(Model != null && Model.Source == CustSource.QQqun ? "selected" : "")>QQ&微信群</option> <option value="5" @(Model != null && Model.Source == CustSource.Other ? 
"selected" : "")>其他来源</option> </select> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">省份</label> <div class="layui-input-block"> <select class="layui-form-select" name="Province" lay-filter="Province" lay-search> <option value=""></option> @if (ProvinceList != null && ProvinceList.Count > 0) { foreach (var p in ProvinceList) { <option value="@p.Name" data-id="@p.ID" @(Model!=null && Model.Province==(p.Name)?"selected":"")>@p.Name</option> } } </select> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">城市</label> <div class="layui-input-block"> <select class="layui-form-select" name="City" lay-filter="City" lay-search> <option value=""></option> @if (Model != null) { <option value="@Model.City" selected data-id="">@Model.City</option> } </select> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">热点客户</label> <div class="layui-input-block"> <input type="checkbox" name="IsHeat" lay-skin="switch" @(Model != null && Model.IsHeat ? "checked" : "")> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">热度</label> <div class="layui-input-block"> <select class="layui-form-select" name="DegreeOfHeat"> <option value=""></option> <option value="1" @(Model != null && Model.DegreeOfHeat == DegreeOfHeatEnume.Senior ? "selected" : "")>高热</option> <option value="2" @(Model != null && Model.DegreeOfHeat == DegreeOfHeatEnume.Intermediate ? "selected" : "")>中热</option> <option value="3" @(Model != null && Model.DegreeOfHeat == DegreeOfHeatEnume.Lower ? "selected" : "")>低热</option> </select> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">热点分类</label> <div class="layui-input-block"> <select class="layui-form-select" name="HeatTYPE"> <option value=""></option> <option value="1" @(Model != null && Model.HeatTYPE == HeatTypeEnum.Intentional ? "selected" : "")>有意向客户</option> <option value="2" @(Model != null && Model.HeatTYPE == HeatTypeEnum.Key_Account ? "selected" : "")>重点客户</option> <option value="3" @(Model != null && Model.HeatTYPE == HeatTypeEnum.Hopeful ? "selected" : "")>有望签单客户</option> </select> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">热点说明</label> <div class="layui-input-block"> <textarea placeholder="请输入热点说明" class="layui-textarea" name="HeatMsg">@(Model != null ? Model.HeatMsg:"")</textarea> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">手机号</label> <div class="layui-input-block"> <input type="text" placeholder="请输入手机号" onchange="telonchanged()" @(Model!=null&& !Model.Telephone.IsNull() ? "readonly" : "") class="layui-input layui-form-text" autocomplete="off" name="Telephone" id="Telephone" value="@(Model != null ? Model.Telephone:"")" /> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">座机号</label> <div class="layui-input-block"> <input type="text" placeholder="请输入手机号" onchange="phonechanged()" @(Model!=null&& !Model.Landline.IsNull() ? "readonly" : "") class="layui-input layui-form-text" autocomplete="off" name="Landline" value="@(Model != null ? Model.Landline:"")" /> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">传真</label> <div class="layui-input-block"> <input type="text" placeholder="请输入传真" class="layui-input layui-form-text" autocomplete="off" name="FaxNumber" @(Model != null ? 
Model.FaxNumber : "") /> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">邮编</label> <div class="layui-input-block"> <input type="text" placeholder="请输入邮编" class="layui-input layui-form-text" autocomplete="off" name="ZipCode" @(Model != null ? Model.ZipCode : "") /> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">邮箱</label> <div class="layui-input-block"> <input type="text" placeholder="请输入邮箱" class="layui-input layui-form-text" autocomplete="off" ay-verify="email" name="Email" @(Model != null ? Model.Email : "") /> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">网址</label> <div class="layui-input-block"> <input type="text" placeholder="请输入网址" class="layui-input layui-form-text" autocomplete="off" ay-verify="url" name="WebSit" @(Model != null ? Model.WebSit : "") /> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">地址</label> <div class="layui-input-block"> <input type="text" placeholder="请输入地址" class="layui-input layui-form-text" autocomplete="off" name="Address" @(Model != null ? Model.Address : "") /> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">开票资料</label> <div class="layui-input-block"> <textarea placeholder="请输入开票资料" class="layui-textarea" name="InvoiceMsg">@(Model != null ? Model.InvoiceMsg:"")</textarea> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">客户简介</label> <div class="layui-input-block"> <textarea placeholder="请输入客户简介" class="layui-textarea" name="CustAbstract">@(Model != null ? Model.CustAbstract:"")</textarea> </div> </div> <div class="layui-form-item"> <label class="layui-form-label">备注</label> <div class="layui-input-block"> <textarea placeholder="请输入备注信息" class="layui-textarea" name="Rem">@(Model != null ? Model.Rem:"")</textarea> </div> </div> <input type="hidden" name="ID" value="@(Model!=null ? Model.ID:0)" id="ID" /> <input type="hidden" name="mobile" id="mobile" value="@(Model!=null ? 
Model.Telephone + Model.Landline:"")" /> <div class="layui-form-item"> <div class="layui-input-block"> <button class="layui-btn" lay-submit lay-filter="*">保存</button> </div> </div> </form> </div> @section Scripts{ <script src="~/layui/jquery.validate.js"></script> <script src="~/layui/jquery.form.js"></script> <script type="text/javascript"> var form, layer; layui.use(['form', 'element', 'layer'], function () { layer = layui.layer; form = layui.form; form.on('select(Province)', function (data) { citylist(); }); }) jQuery.validator.addMethod("isPhone", function (value, element) { var length = value.length; var mobile = /^(((13[0-9]{1})|(15[0-9]{1})|(18[0-9]{1})|(17[0-9]{1}))+\d{8})$/; return this.optional(element) || (length == 11 && mobile.test(value)); }, "请填写正确的手机号码");// the default validation message can be customized jQuery.validator.addMethod("isTel", function (value, element) { var length = value.length; var phone = /^\d{3,4}-\d{7,8}$/; return this.optional(element) || (phone.test(value)); }, "请填写正确的固定电话");// the default validation message can be customized $('#addform').validate({ ignore:"", rules: { EnterName: { required: true, maxlength:64, remote: { url: "/EnterCustom/ExistsEnterName", type: 'get', data: { id: function () { return $("#ID").val() }, EnterName: function () { return $("#EnterName").val() } }, dataFilter: function (data, type) { var jdata = JSON.parse(data) if (jdata.result) { return false } else { return true } } } }, CustomerType: { required: true }, Telephone: { isPhone: true }, Landline: { isTel: true }, mobile: { required: true }, Source: { required: true }, Province: { required: true }, City: { required: true }, Email: { email: true }, WebSit: { url: true }, Address: { maxlength: 126 }, Phase: { required: true } }, messages: { EnterName: { required: "请输入客户名称", maxlength:"客户名称最多64个字", remote:"客户名称已存在" }, CustomerType: { required: "请选择客户类型" }, Telephone: { isPhone:"请输入正确格式的手机号" }, Landline: { isTel:"请输入正确格式的固定电话号" }, mobile: { required:"手机号和固话号至少填一个" }, Source: { required: "请选择客户来源" }, Province: { required: "请选择省份" }, City: { required: "请选择城市" }, Email: { email: "请输入正确格式的邮箱" }, WebSit: { url: "请输入正确格式的网址" }, Address: { maxlength: "地址最多126个字" }, Phase: { required: "请选择现在所处的阶段" } }, errorPlacement: function (error, element) { console.log(element) if (element.attr("name") == "mobile") { error.insertAfter("#Telephone"); } else { error.insertAfter(element); } //if (element.is(":radio")) // error.appendTo(element.parent().next().next()); //else if (element.is(":checkbox")) // error.appendTo(element.next()); //else // error.appendTo(element.parent().next()); }, submitHandler: function (form) { layer.load(0, { shade: false }) $(form).ajaxSubmit(function (res) { if (res.code == 1) { layer.msg('保存成功', { icon: 6 }); //window.location = '/EnterCustom/Index' window.location = '/EnterCustom/AddEnterCustContacts?type=1&id=0&EnterCustID=' + res.id } else { layer.msg('保存失败', { icon: 5 }); } }) } }) citylist = function () { var pid = $("select[name='Province']").find('option:selected').attr('data-id') $.get('/EnterCustom/GetCityList?id=' + pid, function (result) { if (result.code == 1) { $("select[name='City'] option").each(function () { if ($(this).val() != '') { $(this).remove(); } }) for (var i = 0; i < result.citylist.length; i++) { $("select[name='City']").append("<option value='" + result.citylist[i].name + "'>" + result.citylist[i].name + "</option>") } form.render('select'); } }) } var telonchanged = function () { console.log($("input[name='Telephone']").val()); $("#mobile").val($("input[name='Telephone']").val() +
$("input[name='Landline']").val()); } var phonechanged = function () { $("#mobile").val($("input[name='Landline']").val() + $("input[name='Telephone']").val()); } </script> }
 @{ ViewBag.Title = "User"; } <div style="width:100%;height:100%"> <div class="panel-body" style="padding-bottom:0px;"> <div class="panel panel-default"> <div class="panel-heading">查询条件</div> <div class="panel-body"> <form id="formSearch" class="form-horizontal"> <div class="form-group" style="margin-top:15px"> <label class="control-label col-sm-1" for="txt_search_userName">用户名</label> <div class="col-sm-3"> <input type="text" class="form-control" id="txt_search_userName"> </div> <div class="col-sm-4" style="text-align:left;"> <button type="button" style="margin-left:50px" id="btn_query" class="btn btn-primary">查询</button> </div> </div> </form> </div> </div> <div id="toolbar" class="btn-group"> <button id="btn_add" type="button" class="btn btn-default"> <span class="glyphicon glyphicon-plus" aria-hidden="true"></span>新增 </button> <button id="btn_edit" type="button" class="btn btn-default"> <span class="glyphicon glyphicon-pencil" aria-hidden="true"></span>修改 </button> <button id="btn_delete" type="button" class="btn btn-default"> <span class="glyphicon glyphicon-remove" aria-hidden="true"></span>删除 </button> </div> <table id="tb_users" style="height:15%"></table> </div> </div> <form id="edit-form" class="form-inline"> <div class="modal fade" id="myModal" tabindex="-1" role="dialog" aria-labelledby="myModalLabel"> <div class="modal-dialog" role="document" style="width:65%"> <div class="modal-content"> <div class="modal-header"> <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button> <h4 class="modal-title" id="myModalLabel">新增</h4> </div> <div class="modal-body"> <div class="form-group"> <label for="txt_userID">用户ID</label> <input type="text" name="UserID" class="form-control has-feedback" id="txt_userID" placeholder="自动生成" disabled> </div> <div class="form-group"> <label for="txt_userName">用户名</label> <input type="text" name="UserName" class="form-control" id="txt_userName" placeholder="用户名"> </div> <div class="form-group show"> <label for="slt_userRoles">角色</label> <select id="slt_userRoles" class="select2 form-control" style="width:60%"></select> </div> <div class="form-group"> <label for="txt_password"> 密码</label> <input type="password" name="Password" class="form-control" id="txt_password" placeholder="密码"> </div> <div class="form-group"> <label for="txt_realName">真实姓名</label> <input type="text" name="RealName" class="form-control" id="txt_realName" placeholder="真实姓名"> </div> <div class="form-group"> <label for="txt_mobile"> 手机</label> <input type="text" name="Mobile" class="form-control" id="txt_mobile" placeholder="手机"> </div> <div class="form-group"> <label for="txt_email"> 邮箱</label> <input type="text" name="Email" class="form-control" id="txt_email" placeholder="邮箱"> </div> <div class="form-group"> <label> 锁定</label> <div class="checkbox"> <label> <input id="ckb_isEnlocked" type="checkbox"> </label> </div> </div> </div> <div class="modal-footer"> <button type="button" class="btn btn-default" data-dismiss="modal"><span class="glyphicon glyphicon-remove" aria-hidden="true"></span>关闭</button> <button type="submit" id="btn_submit" class="btn btn-primary"><span class="glyphicon glyphicon-floppy-disk"></span>保存</button> </div> </div> </div> </div> </form> <style> .form-group { padding: 3px; } .form-group > label { width: 85px; text-align: right; } .form-group > input { padding-right: 42.5px; } .panel { margin-bottom: 5px; } .panel-body { padding: 5px; } </style> @section scripts { <script type="text/javascript"> var 
$btnAdd = "#btn_add", $btnEdit = "#btn_edit", $btnDelete = "#btn_delete", $btnQuery = "#btn_query", $modalLabel = "#myModalLabel", $modal = "#myModal", $formID = "#edit-form", $tableID = "#tb_users", $queryUrl = "/User/Query", $createUrl = "/User/Create", $editUrl = "/User/Edit", $deleteUrl = "/User/Delete", $roleQueryUrl = "../Role/Query"; $toolbar = "#toolbar"; function initEditForm(data) { $("#slt_userRoles").val(data.RoleIDs).trigger("change"); $("#txt_userID").val(data.UserID); $("#txt_userName").val(data.Name); $("#txt_realName").val(data.RealName); $("#txt_mobile").val(data.Mobile); $("#txt_email").val(data.Email); $("#ckb_isEnlocked").prop("checked", data.IsLocked); } function getFormData() { var data = {}; data.Name = $("#txt_userName").val(); data.RealName = $("#txt_realName").val(); data.Mobile = $("#txt_mobile").val(); data.Email = $("#txt_email").val(); data.Password = $("#txt_password").val(); data.IsLocked = $("#ckb_isEnlocked").prop("checked"); data.RoleIDs = $("#slt_userRoles").val(); return data; } function $Post(url, func) { $.ajax({ type: 'POST', url: url, data: null, success:func }); } $(function () { //1.初始化Table var oTable = new TableInit(); oTable.Init(); //2.初始化Button的点击事件 var oButtonInit = new ButtonInit(); oButtonInit.Init(); }); var TableInit = function () { var oTableInit = new Object(); function isLockedFormatter(value) { if (value) { return "<span class='label label-danger'>" + value + "</span>"; } else { return "<span class='label label-success'>" + value + "</span>"; } }; //初始化Table oTableInit.Init = function () { $($tableID).bootstrapTable({ url: $queryUrl, //请求后台的URL(*) method: 'post', //请求方式(*) toolbar: $toolbar, //工具按钮用哪个容器 striped: true, //是否显示行间隔色 cache: false, //是否使用缓存,默认为true,所以一般情况下需要设置一下这个属性(*) pagination: true, //是否显示分页(*) sortable: false, //是否启用排序 sortOrder: "asc", //排序方式 queryParams: oTableInit.queryParams,//传递参数(*) responseHandler: function (res) { return res.Data; }, sidePagination: "server", //分页方式:client客户端分页,server服务端分页(*) pageNumber: 1, //初始化加载第一页,默认第一页 pageSize: 10, //每页的记录行数(*) pageList: [10, 25, 50, 100], //可供选择的每页的行数(*) strictSearch: true, showColumns: true, //是否显示所有的列 showRefresh: true, //是否显示刷新按钮 minimumCountColumns: 2, //最少允许的列数 clickToSelect: true, //是否启用点击选中行 height: 400, //行高,如果没有设置height属性,表格自动根据记录条数觉得表格高度 uniqueId: "ID", //每一行的唯一标识,一般为主键列 showToggle: true, //是否显示详细视图和列表视图的切换按钮 cardView: false, //是否显示详细视图 detailView: false, //是否显示父子表 columns: [{ checkbox: true }, { field: 'Name', title: '名称' }, { field: 'RealName', title: '真实姓名' }, { formatter: isLockedFormatter, field: 'IsLocked', title: '锁定' } , { field: 'Mobile', title: '手机' } , { field: 'Email', title: '邮箱' }, { field: 'CreatedOn', title: '创建时间' }, { field: 'CreatedBy', title: '创建人' }, { field: 'LastUpdatedOn', title: '最后更新时间' }, { field: 'LastUpdatedBy', title: '最后更新人' }] }); }; //得到查询的参数 oTableInit.queryParams = function (params) { var temp = { //这里的键的名字和控制器的变量名必须一直,这边改动,控制器也需要改成一样的 Limit: params.limit, //页面大小 Offset: (params.offset), //页码 Name: $("#txt_search_userName").val() }; return temp; }; return oTableInit; }; var ButtonInit = function () { var oInit = new Object(); var isCreate, postdata = {}; var InitForm = function (args) { var editForm = $($formID).data('bootstrapValidator'); if (editForm) { isCreate = args.isCreate; postdata.ID = args.ID; editForm.resetForm(); if (isCreate) { editForm.enableFieldValidators('Password', true); } else { editForm.enableFieldValidators('Password', false); } return; } $Post($roleQueryUrl,function (data, textStatus, jqXHR) { 
$("#slt_userRoles").select2({ multiple: true, data: $.Enumerable.From(data.Data.rows).Select(function (r, i) { return { id: r.ID, text: r.Name }; }).ToArray() }) }); $($formID).bootstrapValidator({ message: 'This value is not valid', feedbackIcons: { valid: 'glyphicon glyphicon-ok', invalid: 'glyphicon glyphicon-remove', validating: 'glyphicon glyphicon-refresh' }, submitHandler: function (validator, form, submitButton) { $.extend(postdata, getFormData()); $.ajax({ type: "post", url: isCreate ? $createUrl : $editUrl, data: postdata, success: function (data, status) { Ewin.alertResponseData(data, '提交数据成功'); $($modal).modal('hide'); $($tableID).bootstrapTable('refresh'); }, error: function () { Ewin.error('Error'); }, complete: function () { } }); }, fields: { UserName: { validators: { notEmpty: { message: 'The user name is required' } } }, RealName: { validators: { notEmpty: { message: 'The real name is required' } } }, //Mobile: { // validators: { // phone: { // message: 'The mobile erro', // country: 'CN' // } // } //}, Email: { validators: { emailAddress: { message: 'The email erro' } } }, Password: { validators: { stringLength: { min: 3, message: 'The password must greater than 3 length' }, notEmpty: { message: 'The password is required' } } } } }); }; oInit.Init = function () { InitForm(); //新增数据click事件注册 $($btnAdd).click(function () { $($modalLabel).text("新增"); $($modal).find(".form-control").val(""); $("#ckb_isEnlocked").prop("checked", true); $("#slt_userRoles").val(null).trigger("change"); InitForm({ isCreate: true, ID: '' }); $($modal).modal(); }); //编辑数据click事件注册 $($btnEdit).click(function () { var arrselections = $($tableID).bootstrapTable('getSelections'); if (arrselections.length > 1) { Ewin.warning('只能选择一行进行编辑'); return; } if (arrselections.length <= 0) { Ewin.warning('请选择有效数据'); return; } $($modalLabel).text("编辑"); $($modal).find(".form-control").val(""); initEditForm(arrselections[0]); InitForm({ isCreate: false, ID: arrselections[0].ID }); $($modal).modal(); }); //删除数据click事件注册 $($btnDelete).click(function () { var arrselections = $($tableID).bootstrapTable('getSelections'); if (arrselections.length <= 0) { Ewin.warning('请选择有效数据'); return; } Ewin.confirm({ message: "确认要删除选择的数据吗?" }).on(function (e) { if (!e) { return; } $.ajax({ type: "post", url: $deleteUrl, data: { ids: $.Enumerable.From(arrselections).Select('$.ID').ToArray() }, success: function (data, status) { if (status == "success") { Ewin.alertResponseData(data, '删除成功'); $($tableID).bootstrapTable('refresh'); } }, dataType: "json", error: function () { Ewin.error('Error'); }, complete: function () { } }); }); }); //条件查询click事件注册 $($btnQuery).click(function () { $($tableID).bootstrapTable('refresh'); }); }; return oInit; }; </script> }
@model SmartAdmin.Domain.Models.Notification @{ /**/ ViewBag.Title = "消息推送"; ViewData["PageName"] = "notifications_index"; ViewData["Heading"] = "<i class='fal fa-comments-alt text-primary'></i> 消息推送"; ViewData["Category1"] = "系统管理"; ViewData["PageDescription"] = ""; } @section HeadBlock { <link href="~/css/formplugins/bootstrap-daterangepicker/bootstrap-daterangepicker.css" rel="stylesheet" asp-append-version="true" /> <link href="~/js/easyui/themes/insdep/easyui.css" rel="stylesheet" asp-append-version="true" /> } <div class="row"> <div class="col-lg-12 col-xl-12"> <div id="panel-1" class="panel"> <div class="panel-hdr"> <h2> 消息推送 </h2> <div class="panel-toolbar"> <button class="btn btn-panel bg-transparent fs-xl w-auto h-auto rounded-0" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"><i class="fal fa-window-minimize"></i></button> <button class="btn btn-panel bg-transparent fs-xl w-auto h-auto rounded-0" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"><i class="fal fa-expand"></i></button> @*<button class="btn btn-panel bg-transparent fs-xl w-auto h-auto rounded-0" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"><i class="fal fa-times"></i></button>*@ </div> </div> <div class="panel-container enable-loader show"> <div class="loader"><i class="fal fa-spinner-third fa-spin-4x fs-xxl"></i></div> <div class="panel-content py-2 rounded-bottom border-faded border-left-0 border-right-0 text-muted bg-faded bg-subtlelight-fade"> <div class="row no-gutters align-items-center"> <div class="col"> <!-- 开启授权控制请参考 @@if (Html.IsAuthorize("Create") --> <div class="btn-group btn-group-sm"> <button name="searchbutton" onclick="reloadData()" class="btn btn-default"> <span class="fal fa-search mr-1"></span> 查询 </button> </div> <div class="btn-group btn-group-sm"> <button name="appendbutton" onclick="appendData()" class="btn btn-default"> <span class="fal fa-plus mr-1"></span> 发布新事件消息 </button> </div> <div class="btn-group btn-group-sm"> <button name="deletebutton" onclick="removeData()" class="btn btn-default"> <span class="fal fa-times mr-1"></span> 删除 </button> </div> <div class="btn-group btn-group-sm"> <button name="savebutton" onclick="acceptChanges()" class="btn btn-default"> <span class="fal fa-save mr-1"></span> 保存 </button> </div> <div class="btn-group btn-group-sm"> <button name="cancelbutton" onclick="rejectChanges()" class="btn btn-default"> <span class="fal fa-ban mr-1"></span> 取消 </button> </div> <div class="btn-group btn-group-sm "> <button name="exportbutton" onclick="exportexcel()" class="btn btn-default"> <span class="fal fa-file-excel mr-1"></span> 导出 </button> </div> </div> </div> </div> <div class="panel-content"> <div class="table-responsive"> <table id="notifications_datagrid"> </table> </div> </div> </div> </div> </div> </div> @section ScriptsBlock { <script src="~/js/dependency/moment/moment.js" asp-append-version="true"></script> <script src="~/js/dependency/numeral/numeral.min.js" asp-append-version="true"></script> <script src="~/js/formplugins/bootstrap-daterangepicker/bootstrap-daterangepicker.js" asp-append-version="true"></script> <script src="~/js/easyui/jquery.easyui.min.js" asp-append-version="true"></script> <script src="~/js/easyui/plugins/datagrid-filter.js" asp-append-version="true"></script> <script src="~/js/easyui/plugins/columns-ext.js" asp-append-version="true"></script> <script 
src="~/js/easyui/plugins/columns-reset.js" asp-append-version="true"></script> <script src="~/js/easyui/locale/easyui-lang-zh_CN.js" asp-append-version="true"></script> <script src="~/js/easyui/jquery.easyui.component.js" asp-append-version="true"></script> <script src="~/js/jquery.extend.formatter.js" asp-append-version="true"></script> <script src="~/js/jquery.custom.extend.js" asp-append-version="true"></script> <script src="~/js/plugin/jquery.serializejson/jquery.serializejson.js" asp-append-version="true"></script> <script type="text/javascript"> //全屏事件 document.addEventListener('panel.onfullscreen', () => { $dg.treegrid('resize'); }); //是否强制从后台取值 const REQUIRBACKEND = false; //是否开启行内编辑 const EDITINLINE = true; //上传导入参数设定 const entityname = "Notification"; var notification = {}; //执行导出下载Excel function exportexcel() { const filterRules = JSON.stringify($dg.datagrid('options').filterRules); //console.log(filterRules); $.messager.progress({ title: '正在执行导出!' }); let formData = new FormData(); formData.append('filterRules', filterRules); formData.append('sort', 'Id'); formData.append('order', 'asc'); $.postDownload('/Notifications/ExportExcel', formData).then(res => { $.messager.progress('close'); toastr.success('导出成功!'); }).catch(err => { //console.log(err); $.messager.progress('close'); $.messager.alert('失败', err.statusText, 'error'); }); } var editIndex = undefined; //重新加载数据 function reloadData() { $dg.datagrid('uncheckAll'); $dg.datagrid('reload'); } //关闭编辑状态 function endEditing() { if (editIndex === undefined) { return true; } if ($dg.datagrid('validateRow', editIndex)) { $dg.datagrid('endEdit', editIndex); return true; } else { const invalidinput = $('input.validatebox-invalid',$dg.datagrid('getPanel')); const fieldnames = invalidinput.map((index, item) => { return $(item).attr('placeholder') || $(item).attr('id'); }); $.messager.alert('提示', `${Array.from(fieldnames)} 输入有误.`, 'error'); return false; } } //单击列开启编辑功能 function onClickCell(index, field) { notification = $dg.datagrid('getRows')[index]; const _actions = ['action', 'ck']; if (!EDITINLINE || $.inArray(field, _actions) >= 0) { return; } if (editIndex !== index) { if (endEditing()) { $dg.datagrid('selectRow', index) .datagrid('beginEdit', index); hook = true; editIndex = index; const ed = $dg.datagrid('getEditor', { index: index, field: field }); if (ed) { ($(ed.target).data('textbox') ? 
$(ed.target).textbox('textbox') : $(ed.target)).focus(); } } else { $dg.datagrid('selectRow', editIndex); } } } //新增记录 async function appendData() { notification = { Read: false, From: '', To: 'ALL', Group: '', PublishDate: new Date(), }; if (endEditing()) { //对必填字段进行默认值初始化 $dg.datagrid('insertRow', { index: 0, row: notification }); editIndex = 0; $dg.datagrid('selectRow', editIndex) .datagrid('beginEdit', editIndex); hook = true; } } //删除编辑的行 function removeData() { if (EDITINLINE) { if (editIndex !== undefined) { $dg.datagrid('cancelEdit', editIndex) .datagrid('deleteRow', editIndex); editIndex = undefined; hook = true; }else{ const rows = $dg.datagrid('getChecked'); rows.slice().reverse().forEach(row => { const rowindex = $dg.datagrid('getRowIndex', row); $dg.datagrid('deleteRow', rowindex); hook = true; }); } } else { deletechecked(); } } //删除该行 function deleteRow(id) { $.messager.confirm('确认', '你确定要删除该记录?', result => { if (result) { $.post('/Notifications/DeleteCheckedAsync', { id: [id] }) .done(response => { if (response.success) { toastr.error('成功删除'); reloadData(); } else { $.messager.alert('错误', response.err,'error'); } }) .fail((jqXHR, textStatus, errorThrown) => { $.messager.alert('异常', `${jqXHR.status}: ${jqXHR.statusText} `, 'error'); }); } }); } //删除选中的行 function deletechecked() { const id = $dg.datagrid('getChecked').filter(item=>item.Id!=null && item.Id > 0).map(item => { return item.Id; });; if (id.length > 0) { $.messager.confirm('确认', `你确定要删除这 <span class='badge badge-icon position-relative'>${id.length} </span> 行记录?`, result => { if (result) { $.post('/Notifications/DeleteChecked', { id: id }) .done(response => { if (response.success) { toastr.error(`成功删除【${id.length}】行记录`); reloadData(); } else { $.messager.alert('错误', response.err,'error'); } }) .fail((jqXHR, textStatus, errorThrown) => { $.messager.alert('异常', `${jqXHR.status}: ${jqXHR.statusText} `, 'error'); }); } }); } else { $.messager.alert('提示', '请先选择要删除的记录!','question'); } } //提交保存后台数据库 function acceptChanges() { if (endEditing()) { if ($dg.datagrid('getChanges').length > 0) { const inserted = $dg.datagrid('getChanges', 'inserted').map(item => { item.TrackingState = 1; return item; }); const updated = $dg.datagrid('getChanges', 'updated').map(item => { item.TrackingState = 2 return item; }); const deleted = $dg.datagrid('getChanges', 'deleted').map(item => { item.TrackingState = 3 return item; }); //过滤已删除的重复项 const changed = inserted.concat(updated.filter(item => { return !deleted.includes(item); })).concat(deleted); //console.table(changed); $.post('/Notifications/AcceptChanges', { notifications: changed }) .done(response => { //console.log(response); if (response.success) { toastr.success('保存成功'); $dg.datagrid('acceptChanges'); reloadData(); hook = false; } else { $.messager.alert('错误', response.err, 'error'); } }) .fail((jqXHR, textStatus, errorThrown) => { $.messager.alert('异常', `${jqXHR.status}: ${jqXHR.statusText} `, 'error'); }); } } } function rejectChanges() { $dg.datagrid('rejectChanges'); editIndex = undefined; hook = false; } function getChanges() { const rows = $dg.datagrid('getChanges'); //console.log(rows.length + ' rows are changed!'); } //弹出明细信息 async function showdetailswindow(id,index) { var notification = $dg.datagrid('getRows')[index]; if (REQUIRBACKEND) { notification = await $.get('/Notifications/GetItemAsync/' + id); } opendetailwindow(notification,'Modified'); } //初始化定义datagrid var $dg = $('#notifications_datagrid'); $(() => { //定义datagrid结构 $dg.datagrid({ rownumbers: true, 
checkOnSelect: false, selectOnCheck: true, idField: 'Id', sortName: 'Id', sortOrder: 'desc', remoteFilter: true, singleSelect: false, method: 'get', onClickCell: onClickCell, pagination: true, clientPaging: false, striped: true, height: 670, pageSize: 15, pageList: [15, 20, 50, 100, 500, 2000], onBeforeLoad: function () { $('.enable-loader').removeClass('enable-loader') }, onLoadSuccess: function (data) { editIndex = undefined; $("button[name*='deletebutton']").prop("disabled", true); $("button[name*='savebutton']").prop("disabled", true); $("button[name*='cancelbutton']").prop("disabled", true); }, onCheck: function () { $("button[name*='deletebutton']").prop("disabled", false); }, onUncheck: function () { console.log('onUncheck'); }, onSelect: function (index, row) { notification = row; }, onBeginEdit: function (index, row) { }, onEndEdit: function (index, row) { editIndex = undefined; }, onBeforeEdit: function (index, row) { row.editing = true; $("button[name*='deletebutton']").prop("disabled", false); $("button[name*='cancelbutton']").prop("disabled", false); $("button[name*='savebutton']").prop("disabled", false); $(this).datagrid('refreshRow', index); }, onAfterEdit: function (index, row) { row.editing = false; $(this).datagrid('refreshRow', index); }, onCancelEdit: function (index, row) { row.editing = false; $("button[name*='savebutton']").prop("disabled", true); $("button[name*='cancelbutton']").prop("disabled", true); $("button[name*='deletebutton']").prop("disabled", true); $(this).datagrid('refreshRow', index); }, frozenColumns: [[ /*开启CheckBox选择功能*/ { field: 'ck', checkbox: true }, ]], columns: [[ { /*类型*/ field: 'Group', title: '<span class="required">@Html.DisplayNameFor(model => model.Group)</span>', width: 100, align: 'right', hidden: false, editor: { type: 'combobox', options: { prompt: '@Html.DescriptionFor(model => model.Group)', required: true, data: [ { value: '操作日志', text: '操作日志' }, { value: '审批记录', text: '审批记录' }, { value: '审计记录', text: '审计记录' } ], } }, sortable: true, resizable: true }, { /*发出时间*/ field: 'PublishDate', title: '<span class="required">@Html.DisplayNameFor(model => model.PublishDate)</span>', width: 150, align: 'right', hidden: false, editor: { type: 'datetimebox', options: { prompt: '@Html.DescriptionFor(model => model.PublishDate)', required: true, showSeconds: true } }, formatter: datetimeformatter, sortable: true, resizable: true }, { /*主题*/ field: 'Title', title: '<span class="required">@Html.DisplayNameFor(model => model.Title)</span>', width: 220, hidden: false, editor: { type: 'textbox', options: { prompt: '@Html.DescriptionFor(model => model.Title)', required: true, validType: 'length[0,128]' } }, sortable: true, resizable: true }, { /*消息内容*/ field: 'Content', title: '@Html.DisplayNameFor(model => model.Content)', width: 300, hidden: false, editor: { type: 'textbox', options: { prompt: '@Html.DescriptionFor(model => model.Content)', required: false, validType: 'length[0,255]' } }, sortable: true, resizable: true }, { /*链接*/ field: 'Link', title: '@Html.DisplayNameFor(model => model.Link)', width: 160, hidden: false, editor: { type: 'textbox', options: { prompt: '@Html.DescriptionFor(model => model.Link)', required: false, validType: 'length[0,255]' } }, sortable: true, resizable: true }, { /*发布源*/ field: 'Publisher', title: '@Html.DisplayNameFor(model => model.Publisher)', width: 160, hidden: false, editor: { type: 'textbox', options: { prompt: '@Html.DescriptionFor(model => model.Publisher)', required: false, validType: 'length[0, 128]' } }, 
sortable: true, resizable: true }, { /*已读*/ field: 'Read', title: '<span class="required">@Html.DisplayNameFor(model => model.Read)</span>', width: 80, align: 'center', hidden: false, editor: { type: 'checkbox', options: { id:'editor_read', prompt: '@Html.DescriptionFor(model => model.Read)', required: true } }, formatter: booleanformatter, sortable: true, resizable: true }, { /*From*/ field: 'From', title: '@Html.DisplayNameFor(model => model.From)', width: 140, hidden: false, editor: { type: 'textbox', options: { prompt: '@Html.DescriptionFor(model => model.From)', required: false, validType: 'length[0,50]' } }, sortable: true, resizable: true }, { /*From*/ field: 'To', title: '@Html.DisplayNameFor(model => model.To)', width: 120, hidden: false, editor: { type: 'textbox', options: { prompt: '@Html.DescriptionFor(model => model.To)', required: false, validType: 'length[0,50]' } }, sortable: true, resizable: true } ]] }) .datagrid('enableFilter', [ { /*类型*/ field: 'Group', type: 'combobox', options: { data: [ { value: '操作日志', text: '操作日志' }, { value: '审批记录', text: '审批记录' }, { value: '审计记录', text: '审计记录' } ], onChange: value => { $dg.datagrid('addFilterRule', { field: 'Group', op: 'equal', value: value }); $dg.datagrid('doFilter'); } } }, { /*发出时间*/ field: 'PublishDate', type: 'dateRange', options: { onChange: value => { $dg.datagrid('addFilterRule', { field: 'PublishDate', op: 'between', value: value }); $dg.datagrid('doFilter'); } } }, { /*已读*/ field: 'Read', type: 'booleanfilter' }, ]) .datagrid('load', '/Notifications/GetData'); }); </script> }
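// The Excel export above relies on $.postDownload from jquery.custom.extend.js,
// whose source is not included here. A sketch of what such a helper could look
// like, assuming a fetch-based implementation (an illustration, not the
// project's actual code); the 'export.xlsx' file name is a placeholder:
$.postDownload = $.postDownload || function (url, formData) {
    return fetch(url, { method: 'POST', body: formData }).then(function (res) {
        if (!res.ok) { throw res; }                  // surfaces as .catch(err), which has .statusText
        return res.blob().then(function (blob) {
            var a = document.createElement('a');
            a.href = URL.createObjectURL(blob);      // serve the blob from a temporary object URL
            a.download = 'export.xlsx';              // hypothetical file name
            document.body.appendChild(a);
            a.click();
            a.remove();
            URL.revokeObjectURL(a.href);
        });
    });
};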
@using lsc.Model @using lsc.Model.Enume @model List<lsc.Model.EnterCustomer> @{ ViewData["Title"] = "客户管理"; Layout = "~/Pages/_Layout.cshtml"; List<DistrictInfo> ProvinceList = ViewBag.ProvinceList; List<UserRoleJurisdiction> userrolejurlist = ViewData["userrolejurlist"] as List<UserRoleJurisdiction>; List<ModuleInfo> modulelist = ViewData["modulelist"] as List<ModuleInfo>; var module = modulelist.FirstOrDefault(x => x.Name == "客户信息管理"); UserRoleJurisdiction usrrolejur = null; if (module != null) { usrrolejur = userrolejurlist.FirstOrDefault(x => x.ModuleID == module.ID); } List<EnterCustContacts> eclist = ViewBag.eclist; List<EnterCustPhaseLog> ecplogList = ViewBag.ecplogList; } <blockquote class="layui-elem-quote"> 在客户管理中展示了自己管理的客户信息,可以对客户进行相关操作 <a class="layui-btn layui-btn-normal" href="/EnterCustom/AddEnterCustom?id=0">添加客户</a> <a class="layui-btn layui-btn-normal" href="/EnterCustom/AddEnterCustomQuick">快速录入客户</a> <a href="http://Resources.lsc.com:8082\docfile\excel\CRM客户信息模板.xlsx" class="layui-btn layui-btn-normal">下载模板</a> <button class="layui-btn test" lay-data="{url: '/UploadApi/uploadEnterCustom', accept: 'file'}">上传客户信息</button> </blockquote> <fieldset class="layui-elem-field layui-field-title" style="margin-top: 20px;"> <legend>客户信息综合查询</legend> </fieldset> <div class="layui-fluid"> <div class="layui-row"> <form class="layui-form" method="post" action="/EnterCustom/Index" id="queryform"> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">客户名称</label> <div class="layui-input-inline"> <input type="text" class="layui-input" name="EnterName" value="@(!string.IsNullOrEmpty(ViewBag.EnterName)? ViewBag.EnterName:"")" /> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">客户类型</label> <div class="layui-input-inline"> <select class="layui-form-select" name="CustomerType" lay-search> <option value=""></option> <option value="1" @(ViewBag.CustomerType==1?"selected":"" )>代理经销商</option> @*<option value="2" @(ViewBag.CustomerType==2?"selected":"" )>普通客户</option> <option value="3" @(ViewBag.CustomerType==3?"selected":"" )>集团大客户</option> <option value="4" @(ViewBag.CustomerType==4?"selected":"" )>业务合作商</option> <option value="5" @(ViewBag.CustomerType==5?"selected":"" )>怀疑同行</option>*@ <option value="6" @(ViewBag.CustomerType == 6 ? "selected" : "")>高校</option> <option value="7" @(ViewBag.CustomerType == 7 ? "selected" : "")>教委</option> <option value="8" @(ViewBag.CustomerType == 8 ? "selected" : "")>中职</option> <option value="9" @(ViewBag.CustomerType == 9 ? "selected" : "")>中学</option> <option value="10" @(ViewBag.CustomerType == 10 ? "selected" : "")>小学</option> <option value="11" @(ViewBag.CustomerType == 11 ? "selected" : "")>特教</option> <option value="12" @(ViewBag.CustomerType == 12 ? "selected" : "")>监狱</option> <option value="13" @(ViewBag.CustomerType == 13 ? "selected" : "")>戒毒所</option> <option value="14" @(ViewBag.CustomerType == 14 ? "selected" : "")>公检法</option> <option value="15" @(ViewBag.CustomerType == 15 ? "selected" : "")>武警部队</option> <option value="16" @(ViewBag.CustomerType == 16 ? "selected" : "")>医院</option> <option value="17" @(ViewBag.CustomerType == 17 ? 
"selected" : "")>其他客户</option> </select> </div> </div> <div class=" layui-col-md4 query-from-item"> <label class="layui-form-label">关系等级</label> <div class="layui-input-inline"> <select class="layui-form-select" name="Relationship"> <option value=""></option> <option value="1" @(ViewBag.Relationship == 1?"selected":"" )>密切</option> <option value="2" @(ViewBag.Relationship == 2?"selected":"" )>较好</option> <option value="3" @(ViewBag.Relationship == 3?"selected":"" )>一般</option> <option value="4" @(ViewBag.Relationship == 4?"selected":"" )>较差</option> </select> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">阶段</label> <div class="layui-input-inline"> <select class="layui-form-select" name="Phase"> <option value=""></option> <option value="1" @(ViewBag.Phase == 1?"selected":"" )>售前跟踪</option> <option value="2" @(ViewBag.Phase == 2?"selected":"" )>需求确定</option> <option value="3" @(ViewBag.Phase == 3?"selected":"" )>售中跟单</option> <option value="4" @(ViewBag.Phase == 4?"selected":"" )>签约洽谈</option> <option value="5" @(ViewBag.Phase == 5?"selected":"" )>成交售后</option> <option value="6" @(ViewBag.Phase == 6?"selected":"" )>跟单失败</option> <option value="7" @(ViewBag.Phase == 7?"selected":"" )>暂且搁置</option> <option value="8" @(ViewBag.Phase == 8?"selected":"" )>其他阶段</option> </select> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">价值评估</label> <div class="layui-input-inline"> <select class="layui-form-select" name="ValueGrade"> <option value=""></option> <option value="1" @(ViewBag.ValueGrade == 1?"selected":"" )>高</option> <option value="2" @(ViewBag.ValueGrade == 2?"selected":"" )>中</option> <option value="3" @(ViewBag.ValueGrade == 3?"selected":"" )>低</option> </select> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">客户来源</label> <div class="layui-input-inline"> <select class="layui-form-select" name="Source"> <option value=""></option> <option value="1" @(ViewBag.Source == 1?"selected":"" )>客户来电</option> <option value="2" @(ViewBag.Source == 2?"selected":"" )>主动挖掘</option> <option value="3" @(ViewBag.Source == 3?"selected":"" )>网站咨询</option> <option value="4" @(ViewBag.Source == 4?"selected":"" )>客户介绍</option> <option value="5" @(ViewBag.Source == 5?"selected":"" )>其他来源</option> <option value="6" @(ViewBag.Source == 6 ? "selected" : "" )>招标</option> <option value="7" @(ViewBag.Source == 7 ? "selected" : "" )>展会</option> <option value="8" @(ViewBag.Source == 8 ? "selected" : "" )>QQ&微信群</option> </select> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">省份</label> <div class="layui-input-inline"> <select class="layui-form-select" name="Province" lay-filter="Province" lay-search> <option value=""></option> @if (ProvinceList != null) { foreach (var p in ProvinceList) { <option value="@p.Name" data-id="@p.ID" @(ViewBag.Province == p.Name ? "selected":"" )>@p.Name</option> } } </select> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">城市</label> <div class="layui-input-inline"> <select class="layui-form-select" name="City" lay-filter="City" lay-search> <option value="@ViewBag.City">@ViewBag.City</option> </select> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">热点客户</label> <div class="layui-input-inline"> <input type="checkbox" name="IsHeat" lay-skin="switch" @(ViewBag.IsHeat==true? 
"checked":"")> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">热度</label> <div class="layui-input-inline"> <select class="layui-form-select" name="DegreeOfHeat"> <option value=""></option> <option value="1" @(ViewBag.DegreeOfHeat==1?"selected":"") >高热</option> <option value="2" @(ViewBag.DegreeOfHeat==2?"selected":"") >中热</option> <option value="3" @(ViewBag.DegreeOfHeat==3?"selected":"") >低热</option> </select> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">最近追踪</label> <div class="layui-input-inline"> <select class="layui-form-select" name="UpdateTime"> <option value=""></option> <option value="1" @(ViewBag.UpdateTime == 1?"selected":"") >7天未追踪</option> <option value="2" @(ViewBag.UpdateTime == 2?"selected":"") >15天未追踪</option> <option value="3" @(ViewBag.UpdateTime == 3?"selected":"") >30天未追踪</option> <option value="4" @(ViewBag.UpdateTime == 4?"selected":"") >60天未追踪</option> <option value="5" @(ViewBag.UpdateTime == 5?"selected":"") >90天未追踪</option> <option value="6" @(ViewBag.UpdateTime == 6 ? "selected" : "")>最近三天追踪</option> <option value="7" @(ViewBag.UpdateTime == 7 ? "selected" : "")>今天追踪</option> </select> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">手机号</label> <div class="layui-input-inline"> <input type="text" class="layui-input" name="Telephone" value="@(!string.IsNullOrEmpty(ViewBag.Telephone)? ViewBag.Telephone:"")" /> </div> </div> <div class="layui-col-md4 query-from-item"> <label class="layui-form-label">QQ号</label> <div class="layui-input-inline"> <input type="text" class="layui-input" name="QQ" value="@(!string.IsNullOrEmpty(ViewBag.QQ)? ViewBag.QQ:"")" /> </div> </div> <div class="layui-col-md4 query-from-item"> <div class="layui-input-block"> <input type="hidden" value="1" name="page" /> <button class="layui-btn" lay-submit lay-filter="*">查询</button> </div> </div> </form> </div> <div class="layui-row" id="content_table"> <table class="layui-table" lay-filter="test"> <thead> <tr> @*<th lay-data="{field:'EnterName', width:250}">客户名称</th> <th lay-data="{field:'CustomerType', width:100}">种类</th> <th lay-data="{field:'Relationship', width:90}">最近联系记录</th> <th lay-data="{field:'ValueGrade', width:90}">联系人</th> <th lay-data="{field:'Source', width:90}">客户来源</th> <th lay-data="{field:'Phase', width:100}">阶段</th> <th lay-data="{field:'IsHeat', width:90}">是否热点</th> <th lay-data="{field:'UpdateTime', width:160}">跟踪时间</th> <th lay-data="{field:'Rem', width:200}">备注</th> <th lay-data="{field:'set', width:520}">操作</th>*@ <th>客户名称</th> <th >种类</th> <th >最近联系记录</th> <th >联系人</th> <th >客户来源</th> <th >阶段</th> <th >是否热点</th> <th >跟踪时间</th> <th >备注</th> <th >操作</th> </tr> </thead> <tbody> @if (Model != null && Model.Count > 0) { foreach (var ec in Model) { <tr class="laytable-cell-1-picurl"> <td><a href="/EnterCustom/EnterCustomInfo?id=@(ec.ID)">@ec.EnterName</a></td> @*<td>@ec.CustAbstract</td>*@ <td>@ec.CustomerType.TryToStr()</td> <td> @if (ecplogList!=null && ecplogList.Count>0) { var log = ecplogList.FirstOrDefault(x => x.EnterCustomerID == ec.ID); if (log!=null) { @Html.Raw(log.Rem) } } </td> <td> @if (!string.IsNullOrEmpty(ec.Telephone) || !string.IsNullOrEmpty(ec.Landline)) { <p>@(ec.Telephone)|@(ec.Landline)</p> <hr> } @if (eclist != null && eclist.Count > 0) { var lists = eclist.Where(x => x.EnterCustID == ec.ID); if (lists != null) { foreach (var l in lists) { <p>@(l.Name):@(l.Telephone)|@(l.Landline)</p> <hr> } } } </td> 
<td>@ec.Source.TryToStr()</td> <td><a href="/EnterCustom/EnterCustPhaseLogList?id=@(ec.ID)" style="color:#F581B1">@ec.Phase.TryToStr()</a></td> <td>@(ec.IsHeat ? "是" : "否")</td> @*<td>@ec.HeatTYPE.TryToStr()</td>*@ <td>@ec.UpdateTime.ToString("yyyy-MM-dd hh:mm:ss")</td> <td>@ec.Rem</td> <td> <div class="layui-btn-group"> <a href="javascript:;" class="layui-btn layui-btn-small" onclick="updatephase('@(ec.ID)')">更新阶段</a> <a href="/EnterCustom/AddSalesProject?EnterCustomerID=@(ec.ID)" class="layui-btn layui-btn-small">添加项目</a> <a href="javascript:;" onclick="showcontroller('@(ec.ID)')" class="layui-btn layui-btn-small">查看联系人</a> <a href="javascript:;" onclick="addecc('0','@(ec.ID)')" class="layui-btn layui-btn-small">添加联系人</a> <a href="javascript:;" class="layui-btn layui-btn-small" onclick="entercallback('@(ec.ID)')">放入客户池</a> @if (usrrolejur != null && usrrolejur.IsEdit) { <a href="/EnterCustom/AddEnterCustom?id=@(ec.ID)" class="layui-btn layui-btn-small">编辑</a> } <a href="javascript:;" onclick="addplan('@(ec.ID)')" class="layui-btn layui-btn-small">添加计划</a> @if (usrrolejur != null && usrrolejur.IsDelete) { <a href="javascript:;" class="layui-btn layui-btn-danger layui-btn-small" onclick="del('@(ec.ID)')">删除</a> } </div> </td> </tr> } } </tbody> </table> <div id="page"></div> </div> </div> @section Scripts{ <script type="text/javascript"> var layer, form layui.use(['form', 'element', 'layer', 'laypage', 'upload','table'], function () { form = layui.form layer = layui.layer var laypage = layui.laypage var upload = layui.upload var table = layui.table; // 文件上传组件 upload.render({ elem: '.test', exts: 'xlsx', data: {UserID:@(ViewBag.UserID),UserName:"@(ViewBag.UserName)"} , done: function (res, index, upload) { console.log(res) //获取当前触发上传的元素,一般用于 elem 绑定 class 的情况,注意:此乃 layui 2.1.0 新增 if (res.code == 0 && res.msg == 'OK') { if (res.data != null && res.data.src != null) { layer.msg('上传成功,部分信息有错误', { icon: 6 }); $(".layui-elem-quote").append("<a class=\"layui-btn layui-btn-normal\" href=\"" + res.data.src +"\" target=\"_blank\">点击查看失败信息</a>") } else { layer.msg('上传成功', { icon: 6 }); } } } }) form.on('submit(*)', function (data) { $("input[name='page']").val(1) $.post('/EnterCustom/Index', data.field, function (res) { }) //return false; //阻止表单跳转。如果需要表单跳转,去掉这段即可。 }); form.on('select(Province)', function (data) { citylist(); }); //分页 laypage.render({ elem: 'page' //分页容器的id , count: @(ViewBag.count) //总页数 ,limit:20 , skin: '#1E9FFF' //自定义选中色值 //,skip: true //开启跳页 ,curr: @(ViewBag.pageIndex) //获取起始页 , jump: function (obj, first) { console.log(obj) if (!first) { $("input[name='page']").val(obj.curr) $("#queryform").submit(); } } , hash: 'fenye' //自定义hash值 }); }) //$(function () { // $.post('/EnterCustom/EnterCustomlist', { page:1 }, function (res) { // $("#content_table").html(res) // }) //}) entercallback = function (id) { layer.confirm('是否放入客户池?', { icon: 3, title: '提示' }, function (index) { $.get('/EnterCustom/EnterCallback?id=' + id, function (res) { if (res.code == 1) { layer.msg('成功', { icon: 6 }); window.location = '/EnterCustom/Index' } else { layer.msg('失败', { icon: 5 }); } }); layer.close(index); }); } del = function (id) { layer.confirm('是否删除?', { icon: 3, title: '删除提示' }, function (index) { $.get('/EnterCustom/DelEnter?id=' + id, function (res) { if (res.code == 1) { layer.msg('成功', { icon: 6 }); window.location = '/EnterCustom/Index' } else { layer.msg('失败', { icon: 5 }); } }) layer.close(index); }); } var showcontroller = function (id) { layer.open({ type: 2, title: '联系人列表', shadeClose: 
true, shade: 0.8, area: ['1200px', '75%'], content: '/EnterCustom/EnterCustContactsList?EnterCustID=' + id }); } citylist = function () { var pid = $("select[name='Province']").find('option:selected').attr('data-id') $("select[name='City'] option").each(function () { if ($(this).val() != '') { $(this).remove(); } }) if (pid == undefined) { form.render('select'); return; } $.get('/EnterCustom/GetCityList?id=' + pid, function (result) { if (result.code == 1) { for (var i = 0; i < result.citylist.length; i++) { $("select[name='City']").append("<option value='" + result.citylist[i].name + "'>" + result.citylist[i].name + "</option>") } form.render('select'); } }) } updatephase = function(id){ layer.open({ type: 2, title: '更新客户所处的阶段', shadeClose: true, shade: 0.8, area: ['1200px', '70%'], content: '/EnterCustom/AddEnterCustPhaseLog?id=' + id }); } addplan = function (id) { layer.open({ type: 2, title: '添加工作计划', shadeClose: true, shade: 0.8, area: ['400px', '40%'], content: '/EnterCustom/AddWorkPlan?EnterCustID=' + id }); } addecc = function (id,eid) { layer.open({ type: 2, title: '编辑联系人', shadeClose: true, shade: 0.8, area: ['600px', '80%'], content: '/EnterCustom/AddEnterCustContacts?id=' + id + '&EnterCustID=' + eid }); } </script> }
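// One thing worth noting in the script above: the submit(*) handler issues an
// AJAX POST whose response callback is empty, and because "return false" stays
// commented out the form then navigates and POSTs to /EnterCustom/Index a
// second time, so the query runs twice. If the server-rendered page is the
// desired result, the AJAX call can simply be dropped; a suggested
// simplification (an assumption about intent, not the original behavior):
form.on('submit(*)', function (data) {
    $("input[name='page']").val(1);   // reset to the first page before querying
    return true;                      // let the normal form POST render the results
});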
@page @model AlteditorModel @{ ViewData["Title"] = "AltEditor (beta)"; ViewData["PageName"] = "datatables_alteditor"; ViewData["Category1"] = "Datatables"; ViewData["Heading"] = "<i class='subheader-icon fal fa-table'></i> DataTables: <span class='fw-300'>AltEditor (beta)</span> <sup class='badge badge-primary fw-500'>ADDON</sup>"; ViewData["PageDescription"] = "Custom made editor plugin designed for Datatables"; } @section HeadBlock { <link rel="stylesheet" media="screen, print" href="~/css/datagrid/datatables/datatables.bundle.css"> <link rel="stylesheet" media="screen, print" href="~/css/theme-demo.css"> } <div class="alert alert-primary"> <div class="d-flex flex-start w-100"> <div class="mr-2 hidden-md-down"> <span class="icon-stack icon-stack-lg"> <i class="base base-2 icon-stack-3x opacity-100 color-primary-500"></i> <i class="base base-2 icon-stack-2x opacity-100 color-primary-300"></i> <i class="@(Settings.Theme.IconPrefix) fa-info icon-stack-1x opacity-100 color-white"></i> </span> </div> <div class="d-flex flex-fill"> <div class="flex-fill"> <span class="h5">About</span> <p> DataTables AltEditor is a MIT licensed free editor. The plugin adds capabilities to add, edit and delete rows in your datatables through the use of modals. We have modified the editor extensively to be used with @Settings.AppName and make your job a little easier. This current version of AltEditor is exclusive to @Settings.App and we intend to keep it up to date to be compatible with DataTables. </p> <p class="m-0"> You can find the definitions of its elements on their <a href="https://github.com/KasperOlesen/DataTable-AltEditor" target="_blank">official github</a> page. Note: Only use the exclusive plugin included with this WebApp as the one on github may not be compatible with @Settings.App. </p> </div> </div> </div> </div> <div class="row"> <div class="col-xl-12"> <div id="panel-1" class="panel"> <div class="panel-hdr"> <h2> Example <span class="fw-300"><i>Table</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> <p> Datatable accepts the following callback functions as arguments: <code>onAddRow(datatable, rowdata, success, error)</code>, <code>onEditRow(datatable, rowdata, success, error)</code>, <code>onDeleteRow(datatable, rowdata, success, error)</code> </p> <p> In the most common case, these function should call <code>$.ajax </code>as expected by the webservice. The two functions success and error should be passed as arguments to <code>$.ajax</code>. Webservice must return the modified row in JSON format, because the success() function expects this. Otherwise you have to write your own success() callback (e.g. refreshing the whole table). 
</p> </div> <ul class="nav nav-tabs" role="tablist"> <li class="nav-item"> <a class="nav-link active p-3" data-toggle="tab" href="#tab_default-1" role="tab"> <i class="fal fa-table text-success"></i> <span class="hidden-sm-down ml-1">Alt-Editor Example</span> </a> </li> <li class="nav-item"> <a class="nav-link p-3" data-toggle="tab" href="#tab_default-2" role="tab"> <i class="fal fa-cog text-info"></i> <span class="hidden-sm-down ml-1">Supported Modifiers</span> </a> </li> </ul> <div class="tab-content pt-4"> <div class="tab-pane fade show active" id="tab_default-1" role="tabpanel"> <div class="row"> <div class="col-xl-12"> <table id="dt-basic-example" class="table table-bordered table-hover table-striped w-100"></table> </div> <div class="col-xl-12"> <hr class="mt-5 mb-5"> <h5>Event <i>logs (AJAX Calls)</i></h5> <div id="app-eventlog" class="alert alert-primary p-1 h-auto my-3"></div> </div> </div> </div> <div class="tab-pane fade" id="tab_default-2" role="tabpanel"> <div class="alert alert-info"> <strong> IE Support </strong> <br> The latest update for Alt-Editor has dropped support for IE in general. We have included the latest version of Alt-editor (catered for SmartAdmin) inside <code>src/custom/plugins/datatables-alteditor/datatables-alteditor-latest.js</code> found only in the HTML flavor. You may switch to this latest version of Alt editor to gain access to the <strong>Support Modifiers</strong> below. </div> <div class="row"> <div class="col-12"> <table class="table table-bordered table-striped"> <thead> <tr> <th>Column option</th> <th>Accepted values</th> <th>Description</th> </tr> </thead> <tbody> <tr> <td><code>type</code></td> <td><code>"text" | "select" | "hidden" | ...</code></td> <td>Type of HTML input to be shown. The value <code>readonly</code> is accepted for backward compatibility, but deprecated.</td> </tr> <tr> <td><code>readonly</code></td> <td><code>true | false</code></td> <td>Add <code>readonly</code> HTML attribute</td> </tr> <tr> <td><code>disabled</code></td> <td><code>true | false</code></td> <td>Add <code>disabled</code> HTML attribute</td> </tr> <tr> <td><code>required</code></td> <td><code>true | false</code></td> <td>Add <code>required</code> HTML attribute</td> </tr> <tr> <td><code>hoverMsg</code></td> <td><code>"some msg"</code></td> <td>The message will appear as a tooltip over the input field.</td> </tr> <tr> <td><code>unique</code></td> <td><code>true | false</code></td> <td>Ensure that no two rows have the same value. The check is performed client side, not server side. Set HTML <code>"data-unique"</code> attribute. (Probably there's some issue with this).</td> </tr> <tr> <td><code>uniqueMsg</code></td> <td><code>"some msg"</code></td> <td>An error message that is displayed when the unique constraint is not respected. Set HTML <code>"data-uniqueMsg"</code> attribute.</td> </tr> <tr> <td><code>special</code></td> <td><code>"any string"</code></td> <td>Set HTML <code>"data-special"</code> attribute (don't know what's that needed for).</td> </tr> <tr> <td><code>editorOnChange</code></td> <td>function</td> <td>Custom onchange function. 
It will take as arguments the jquery event and the altEditor object.</td> </tr> <tr> <td colspan="3" class="py-4"><strong>Options for columns with type <code>"text"</code>:</strong></td> </tr> <tr> <td><code>pattern</code></td> <td><code>r.e.</code></td> <td>The typed text will be matched against given regular expression, before submit.</td> </tr> <tr> <td><code>msg</code></td> <td><code>"some msg"</code></td> <td>An error message that is displayed in case pattern is not matched. Set HTML <code>"data-errorMsg"</code> attribute.</td> </tr> <tr> <td><code>maxLength</code></td> <td><code>integer</code></td> <td>Set HTML <code>"maxlength"</code> attribute.</td> </tr> <tr> <td><code>datepicker</code></td> <td><code>{}</code></td> <td>Enable a datepicker component. jQuery-UI plugin must be linked. More datepicker configuration options may be passed within the object.</td> </tr> <tr> <td><code>datetimepicker</code></td> <td><code>{}</code></td> <td>Enable a datetimepicker component. jQuery datetimepicker plugin must be linked. More datetimepicker configuration options may be passed within the object.</td> </tr> <tr> <td colspan="3" class="py-4"><strong>Options for columns with type <code>"select"</code>:</strong></td> </tr> <tr> <td><code>options</code></td> <td><code>["a", "b", "c"]</code> or <code>{"a":"A", "b":"B", "c":"C"}</code></td> <td>The options that shall be presented.</td> </tr> <tr> <td><code>select2</code></td> <td><code>{}</code></td> <td>Enable a select2 component. Select2 jQuery plugin must be linked. More select2 configuration options may be passed within the object.</td> </tr> <tr> <td><code>multiple</code></td> <td><code>true | false</code></td> <td>Set HTML <code>"multiple"</code> attribute.</td> </tr> <tr> <td colspan="3" class="py-4"><strong>Options for columns with type <code>"textarea"</code>:</strong></td> </tr> <tr> <td><code>rows</code></td> <td><code>integer</code></td> <td>Set HTML <code>"rows"</code> attribute.</td> </tr> <tr> <td><code>cols</code></td> <td><code>integer</code></td> <td>Set HTML <code>"cols"</code> attribute.</td> </tr> </tbody> </table> </div> </div> </div> </div> </div> </div> </div> </div> </div> @section ScriptsBlock { <script src="~/js/datagrid/datatables/datatables.bundle.js"></script> <script> $(document).ready( function () { /* NOTES: Column id --------------------------------------------------- Please always keep in mind that DataTable framework allows two different kinds of "rows": Arrays and Objects. In first case columns are indexed through integers; in second case columns are indexed by their attribute name. Usually JSON's use the Object approach, but we cannot be sure. Row key --------------------------------------------------- There is no default key in the table. Inside your callback functions, probably you will need a row key to build URL's, in that case you can get them from the rowdata parameter. COLUMN DEFINITIONS: title = "string" - title name on table header th and on form labels --------------------------------------------------- id = "string" - id assigned to imput element when editing/adding in modal --------------------------------------------------- data = "string" - data name from the dataset --------------------------------------------------- type = "text" | "select" | "hidden" | "readonly" - Type of HTML input to be shown. --------------------------------------------------- hoverMsg = "some msg" - The message will appear as a tooltip over the input field. --------------------------------------------------- pattern = r.e. 
- If type is "input", the typed text will be matched against given regular expression, before submit. --------------------------------------------------- msg = "some string" - An error message that is displayed in case pattern is not matched. Set HTML "data-errorMsg" attribute. --------------------------------------------------- maxLength = integer - If type is "input", set HTML "maxlength" attribute. --------------------------------------------------- options = ["a", "b", "c"] - If type is "select", the options that shall be presented. --------------------------------------------------- select2 = {} - If type is "select", enable a select2 component. Select2 jQuery plugin must be linked. More select2 configuration options may be passed within the array. --------------------------------------------------- datepicker = {} - If type is "text", enable a datepicker component. jQuery-UI plugin must be linked. More datepicker configuration options may be passed within the array. --------------------------------------------------- multiple = true | false - Set HTML "multiple" attribute (for use with select2). --------------------------------------------------- unique = true | false - Ensure that no two rows have the same value. The check is performed client side, not server side. Set HTML "data-unique" attribute. (Probably there's some issue with this). --------------------------------------------------- uniqueMsg = "some string" - An error message that is displayed when the unique constraint is not respected. Set HTML "data-uniqueMsg" attribute. --------------------------------------------------- special = "any string" - Set HTML "data-special" attribute (don't know what's that needed for). --------------------------------------------------- defaultValue = "any string" - Adds a default value when adding a row --------------------------------------------------- */ // Event Lot var events = $("#app-eventlog"); // Column Definitions var columnSet = [{ title: "RowId", id: "DT_RowId", data: "DT_RowId", placeholderMsg: "Server Generated ID", "visible": false, "searchable": false, type: "readonly" }, { title: "Status", id: "status", data: "status", type: "select", "options": [ "active", "inactive", "disabled", "partial" ] }, { title: "IP Address", id: "ipAddress", data: "ipAddress", type: "text", pattern: "((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}", placeholderMsg: "e.g 82.84.86.88", errorMsg: "*Invalid address - Enter valid ip.", hoverMsg: "(Optional) - Ex: 82.84.86.88", unique: true, uniqueMsg: "Already exists. IP must be unique!", required: true }, { title: "Port Number", id: "port", data: "port", type: "number", pattern: "^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$", placeholderMsg: "e.g 6112", errorMsg: "*Invalid port - Enter valid port or range.", hoverMsg: "Ex: 6112 (single) or 6111:6333 (range)", unique: false, required: true }, { title: "Activation Date", id: "adate", data: "adate", type: "date", pattern: "((?:19|20)\d\d)-(0?[1-9]|1[012])-([12][0-9]|3[01]|0?[1-9])", placeholderMsg: "yyyy-mm-dd", errorMsg: "*Invalid date format. 
Format must be yyyy-mm-dd" }, { title: "User Email", id: "user", data: "user", type: "text", pattern: "^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$", placeholderMsg: "user@@domain.com", errorMsg: "*Invalid email - Enter valid email.", unique: true, required: true, uniqueMsg: "Email already in use" }, { title: "Package", id: "package", data: "package", type: "select", "options": [ "free", "silver", "gold", "platinum", "payg" ] }, { title: "Acc. Balance", id: "balance", data: "balance", type: "number", placeholderMsg: "Amount due", defaultValue: "0" }] /* start data table */ var myTable = $('#dt-basic-example').dataTable({ /* check datatable buttons page for more info on how this DOM structure works */ dom: "<'row mb-3'<'col-sm-12 col-md-6 d-flex align-items-center justify-content-start'f><'col-sm-12 col-md-6 d-flex align-items-center justify-content-end'B>>" + "<'row'<'col-sm-12'tr>>" + "<'row'<'col-sm-12 col-md-5'i><'col-sm-12 col-md-7'p>>", ajax: "/media/data/server-demo.json", columns: columnSet, /* selecting multiple rows will not work */ select: 'single', /* altEditor at work */ altEditor: true, responsive: true, /* buttons uses classes from bootstrap, see buttons page for more details */ buttons: [ { extend: 'selected', text: '<i class="@(Settings.Theme.IconPrefix) fa-times mr-1"></i> Delete', name: 'delete', className: 'btn-primary btn-sm mr-1' }, { extend: 'selected', text: '<i class="@(Settings.Theme.IconPrefix) fa-edit mr-1"></i> Edit', name: 'edit', className: 'btn-primary btn-sm mr-1' }, { text: '<i class="@(Settings.Theme.IconPrefix) fa-plus mr-1"></i> Add', name: 'add', className: 'btn-success btn-sm mr-1' }, { text: '<i class="@(Settings.Theme.IconPrefix) fa-sync mr-1"></i> Synchronize', name: 'refresh', className: 'btn-primary btn-sm' } ], columnDefs: [ { targets: 1, render: function(data, type, full, meta) { var badge = { "active": {'title': 'Active', 'class': 'badge-success'}, "inactive": {'title': 'Inactive', 'class': 'badge-warning'}, "disabled": {'title': 'Disabled', 'class': 'badge-danger'}, "partial": {'title': 'Partial', 'class': 'bg-danger-100 text-white'} }; if (typeof badge[data] === 'undefined') { return data; } return '<span class="badge ' + badge[data].class + ' badge-pill">' + badge[data].title + '</span>'; }, }, { targets: 7, type: 'currency', render: function(data, type, full, meta) { //var number = Number(data.replace(/[^0-9.-]+/g,"")); if (data >= 0) { return '<span class="text-success fw-500">$'+data+'</span>'; } else { return '<span class="text-danger fw-500">$'+data+'</span>'; } }, }, { targets: 6, render: function(data, type, full, meta) { var package = { "free": {'title': 'Free', 'class': 'bg-fusion-50', 'info': 'Free users are restricted to 30 days of use'}, "silver": {'title': 'Silver', 'class': 'bg-fusion-50 bg-fusion-gradient'}, "gold": {'title': 'Gold', 'class': 'bg-warning-500 bg-warning-gradient'}, "platinum": {'title': 'Platinum', 'class': 'bg-trans-gradient'}, "payg": {'title': 'PAYG', 'class': 'bg-success-500 bg-success-gradient'} }; if (typeof package[data] === 'undefined') { return data; } return '<div class="has-popover d-flex align-items-center"><span class="d-inline-block rounded-circle mr-2 ' + package[data].class + '" style="width:15px; height:15px;"></span><span>' + package[data].title + '</span></div>'; }, }, ], /* default callback for insertion: mock webservice, always success */ onAddRow: function(dt, rowdata, success, error) { 
console.log("Missing AJAX configuration for INSERT"); success(rowdata); // demo only below: events.prepend('<p class="text-success fw-500">'+JSON.stringify(rowdata, null, 4) + '</p>'); }, onEditRow: function(dt, rowdata, success, error) { console.log("Missing AJAX configuration for UPDATE"); success(rowdata); // demo only below: events.prepend('<p class="text-info fw-500">'+JSON.stringify(rowdata, null, 4) + '</p>'); }, onDeleteRow: function(dt, rowdata, success, error) { console.log("Missing AJAX configuration for DELETE"); success(rowdata); // demo only below: events.prepend('<p class="text-danger fw-500">'+JSON.stringify(rowdata, null, 4) + '</p>'); }, }); }); </script> }
#addin nuget:?package=Cake.Coveralls&version=1.0.1 #addin nuget:?package=Cake.FileHelpers&version=4.0.1 //#addin nuget:?package=Cake.Issues&version=0.9.1 #addin nuget:?package=Cake.AppVeyor&version=5.0.1 #addin nuget:?package=Cake.ReSharperReports&version=0.11.1 // TOOLS #tool nuget:?package=GitReleaseManager&version=0.11.0 #tool nuget:?package=GitVersion.CommandLine&version=5.6.7 #tool nuget:?package=coveralls.io&version=1.4.2 #tool nuget:?package=OpenCover&version=4.7.922 #tool nuget:?package=ReportGenerator&version=4.8.7 #tool nuget:?package=JetBrains.ReSharper.CommandLineTools&version=2020.3.4 // ARGUMENTS var target = Argument("target", "Default"); if (string.IsNullOrWhiteSpace(target)) { target = "Default"; } var buildConfig = Argument("buildConfig", "Release"); if (string.IsNullOrEmpty(buildConfig)) { buildConfig = "Release"; } // Build configuration var repoOwner = "sharparchitecture"; var repoName = "Sharp-Architecture"; var local = BuildSystem.IsLocalBuild; var isPullRequest = AppVeyor.Environment.PullRequest.IsPullRequest; var isRepository = StringComparer.OrdinalIgnoreCase.Equals($"{repoOwner}/{repoName}", AppVeyor.Environment.Repository.Name); var isDebugBuild = string.Equals(buildConfig, "Debug", StringComparison.OrdinalIgnoreCase); var isReleaseBuild = string.Equals(buildConfig, "Release", StringComparison.OrdinalIgnoreCase); var isDevelopBranch = StringComparer.OrdinalIgnoreCase.Equals("develop", AppVeyor.Environment.Repository.Branch); var isReleaseBranch = AppVeyor.Environment.Repository.Branch.IndexOf("releases/", StringComparison.OrdinalIgnoreCase) >= 0 || AppVeyor.Environment.Repository.Branch.IndexOf("hotfixes/", StringComparison.OrdinalIgnoreCase) >= 0; var isTagged = AppVeyor.Environment.Repository.Tag.IsTag; var appVeyorJobId = AppVeyor.Environment.JobId; // Solution settings // Calculate version and commit hash GitVersion semVersion = GitVersion(); var nugetVersion = semVersion.NuGetVersion; var buildVersion = semVersion.FullBuildMetaData; var informationalVersion = semVersion.InformationalVersion; var nextMajorRelease = $"{semVersion.Major+1}.0.0"; var commitHash = semVersion.Sha; var milestone = semVersion.MajorMinorPatch; // Artifacts var artifactsDir = "./Drops"; var artifactsDirAbsolutePath = MakeAbsolute(Directory(artifactsDir)); var testCoverageOutputFile = new FilePath(artifactsDir + "/OpenCover.xml"); var codeCoverageReportDir = artifactsDir + "/CodeCoverageReport"; var codeInspectionsOutputFile = artifactsDir + "/Inspections/CodeInspections.xml"; var duplicateFinderOutputFile = artifactsDir + "/Inspections/CodeDuplicates.xml"; var packagesDir = artifactsDir + "/packages"; var srcDir = "./Src"; var testsRootDir = srcDir + "/tests"; var solutionFile = new FilePath(srcDir + "/SharpArch.sln"); var samplesDir = "./Samples"; var coverageFilter="+[SharpArch*]* -[SharpArch.Tests*]* -[SharpArch.Xunit*]* -[SharpArch.Infrastructure]SharpArch.Infrastructure.Logging.*"; string githubToken = null; // SETUP / TEARDOWN Setup((context) => { Information("Building version {0} (tagged: {1}, local: {2}, release branch: {3})...", nugetVersion, isTagged, local, isReleaseBranch); CreateDirectory(artifactsDir); CleanDirectory(artifactsDir); githubToken = context.EnvironmentVariable("GITHUB_TOKEN"); }); Teardown((context) => { // Executed AFTER the last task. 
}); Task("SetVersion") .Does(() => { CreateAssemblyInfo($"{srcDir}/Common/AssemblyVersion.cs", new AssemblyInfoSettings{ FileVersion = semVersion.MajorMinorPatch, InformationalVersion = semVersion.InformationalVersion, Version = semVersion.MajorMinorPatch }); }); Task("UpdateAppVeyorBuildNumber") .WithCriteria(() => AppVeyor.IsRunningOnAppVeyor) .ContinueOnError() .Does(() => { AppVeyor.UpdateBuildVersion(buildVersion); }); Task("Restore") .DoesForEach(GetFiles(solutionFile.ToString()).Union(GetFiles($"{samplesDir}/**/*.sln")), (sln) => { Information("Running in {0}", sln.GetDirectory().FullPath); DotNetCoreRestore(sln.GetDirectory().FullPath); } ); Task("InspectCode") .Does(() => { DupFinder(solutionFile, new DupFinderSettings { CachesHome = "./tmp/DupFinderCaches", DiscardCost = 70, DiscardFieldsName = false, DiscardLiterals = false, NormalizeTypes = true, ShowStats = true, ShowText = true, OutputFile = duplicateFinderOutputFile, ExcludePattern = new string [] { "../Docker/**/*", "Solution Items/**/*", "Tests/**/*", "Samples/**/*" } }); ReSharperReports( duplicateFinderOutputFile, System.IO.Path.ChangeExtension(duplicateFinderOutputFile, "html") ); InspectCode(solutionFile, new InspectCodeSettings() { OutputFile = codeInspectionsOutputFile, Profile = "SharpArch.AutoLoad.DotSettings", CachesHome = "./tmp/ReSharperCaches", SolutionWideAnalysis = true }); ReSharperReports( codeInspectionsOutputFile, System.IO.Path.ChangeExtension(codeInspectionsOutputFile, "html") ); }); Task("RunXunitTests") .DoesForEach(GetFiles(solutionFile.ToString()).Union(GetFiles($"{samplesDir}/**/*.sln")), (testProj) => { var projectPath = testProj.GetDirectory(); var projectFilename = testProj.GetFilenameWithoutExtension(); Information("Calculating code coverage for {0} ...", projectFilename); var openCoverSettings = new OpenCoverSettings { OldStyle = true, ReturnTargetCodeOffset = 0, ArgumentCustomization = args => args.Append("-mergeoutput").Append("-hideskipped:File;Filter;Attribute"), WorkingDirectory = projectPath, } .WithFilter(coverageFilter) .ExcludeByAttribute("*.ExcludeFromCodeCoverage*") .ExcludeByFile("*/*Designer.cs"); Func<string,ProcessArgumentBuilder> buildProcessArgs = (buildCfg) => { var pb = new ProcessArgumentBuilder() .AppendSwitch("--configuration", buildCfg) .AppendSwitch("--filter", "Category!=IntegrationTests") .AppendSwitch("--results-directory", artifactsDirAbsolutePath.FullPath) .Append("--no-build"); if (!local) { pb.AppendSwitch("--test-adapter-path", ".") .AppendSwitch("--logger", $"AppVeyor"); } else { pb.AppendSwitch("--logger", $"trx;LogFileName={projectFilename}.trx"); } return pb; }; // run open cover for debug build configuration OpenCover( tool => tool.DotNetCoreTool(projectPath.FullPath, "test", buildProcessArgs("Debug") ), testCoverageOutputFile, openCoverSettings); // run tests again if Release mode was requested if (isReleaseBuild) { Information("Running Release mode tests for {0}", projectFilename.ToString()); DotNetCoreTool(testProj.FullPath, "test", buildProcessArgs("Release") ); } }) .DeferOnError(); Task("CleanPreviousTestResults") .Does(() => { if (FileExists(testCoverageOutputFile)) DeleteFile(testCoverageOutputFile); DeleteFiles(artifactsDir + "/*.trx"); if (DirectoryExists(codeCoverageReportDir)) DeleteDirectory(codeCoverageReportDir, new DeleteDirectorySettings{ Recursive = true, Force = true }); }); Task("GenerateCoverageReport") .WithCriteria(() => local) .Does(() => { ReportGenerator(testCoverageOutputFile, codeCoverageReportDir); }); Task("RunUnitTests") 
.IsDependentOn("Build") .IsDependentOn("CleanPreviousTestResults") .IsDependentOn("RunXunitTests") .IsDependentOn("GenerateCoverageReport") .Does(() => { Information("Done Test"); }) .Finally(() => { if (!local) { CoverallsIo(testCoverageOutputFile); } }); Task("Build") .IsDependentOn("SetVersion") .IsDependentOn("UpdateAppVeyorBuildNumber") .IsDependentOn("Restore") .DoesForEach(GetFiles($"{srcDir}/**/*.sln").Union(GetFiles($"{samplesDir}/**/*.sln")), (solutionFile) => { var slnPath = solutionFile.GetDirectory().FullPath; var sln = solutionFile.GetFilenameWithoutExtension(); if (isReleaseBuild) { Information("Running {0} {1} build to calculate code coverage", sln, "Debug"); // need Debug mode build for code coverage calculation DotNetCoreBuild(slnPath, new DotNetCoreBuildSettings { NoRestore = true, Configuration = "Debug", }); } Information("Running {0} {1} build in {2}", sln, buildConfig, slnPath); DotNetCoreBuild(slnPath, new DotNetCoreBuildSettings { NoRestore = true, Configuration = buildConfig, }); }); Task("CreateNugetPackages") .Does(() => { Action<string> buildPackage = (string projectName) => { var projectFileName=projectName; Information("Pack {0}", projectFileName); DotNetCorePack(projectFileName, new DotNetCorePackSettings { Configuration = buildConfig, OutputDirectory = packagesDir, NoBuild = true, NoRestore = true, ArgumentCustomization = args => args.Append($"-p:Version={nugetVersion}") }); }; if (isTagged) { var releaseNotes = $"https://github.com/{repoOwner}/{repoName}/releases/tag/{milestone}"; Information("Updating ReleaseNotes Link: {0}", releaseNotes); XmlPoke("./Directory.Build.props", "/Project/PropertyGroup[@Label=\"Package\"]/PackageReleaseNotes", releaseNotes ); } foreach(var projectName in new[] {$"{solutionFile}"}) { buildPackage(projectName); }; }); Task("CreateRelease") .WithCriteria(() => isRepository && isReleaseBranch && !isPullRequest) .Does(() => { GitReleaseManagerCreate(githubToken, repoOwner, repoName, new GitReleaseManagerCreateSettings { Milestone = milestone, TargetCommitish = "master" }); }); Task("CloseMilestone") .WithCriteria(() => isRepository && isTagged && !isPullRequest) .Does(() => { GitReleaseManagerClose(githubToken, repoOwner, repoName, milestone); }); Task("Default") .IsDependentOn("UpdateAppVeyorBuildNumber") .IsDependentOn("Build") .IsDependentOn("RunUnitTests") // .IsDependentOn("InspectCode") .IsDependentOn("CreateNugetPackages") .IsDependentOn("CreateRelease") .IsDependentOn("CloseMilestone") .Does( () => {} ); // EXECUTION RunTarget(target);
#tool nuget:?package=vswhere&version=2.7.1 // Cake Addins #addin nuget:?package=Cake.FileHelpers&version=3.2.1 #addin nuget:?package=Newtonsoft.Json&version=11.0.2 using System; using System.Text.RegularExpressions; using System.Xml; using System.Xml.Linq; using Newtonsoft.Json; using Newtonsoft.Json.Linq; var TARGET = Argument ("t", Argument ("target", "ci")); var MAX_CPU_COUNT = Argument("maxcpucount", 0); // Lists all the artifacts and their versions for com.android.support.* // https://dl.google.com/dl/android/maven2/com/android/support/group-index.xml // Master list of all the packages in the repo: // https://dl.google.com/dl/android/maven2/master-index.xml var REF_DOCS_URL = "https://bosstoragemirror.blob.core.windows.net/android-docs-scraper/a7/a712886a8b4ee709f32d51823223039883d38734/play-services-firebase.zip"; var REF_METADATA_URL = "https://bosstoragemirror.blob.core.windows.net/android-docs-scraper/a7/a712886a8b4ee709f32d51823223039883d38734/play-services-firebase-metadata.xml"; // These are a bunch of parameter names in the txt format which binding projects can use var REF_PARAMNAMES_URL = "https://bosstoragemirror.blob.core.windows.net/android-docs-scraper/a7/a712886a8b4ee709f32d51823223039883d38734/play-services-firebase-paramnames.txt"; // Resolve Xamarin.Android installation var XAMARIN_ANDROID_PATH = EnvironmentVariable ("XAMARIN_ANDROID_PATH"); var ANDROID_SDK_BASE_VERSION = "v1.0"; var ANDROID_SDK_VERSION = "v9.0"; string AndroidSdkBuildTools = $"29.0.2"; if (string.IsNullOrEmpty(XAMARIN_ANDROID_PATH)) { if (IsRunningOnWindows()) { var vsInstallPath = VSWhereLatest(new VSWhereLatestSettings { Requires = "Component.Xamarin", IncludePrerelease = true }); XAMARIN_ANDROID_PATH = vsInstallPath.Combine("Common7/IDE/ReferenceAssemblies/Microsoft/Framework/MonoAndroid").FullPath; } else { if (DirectoryExists("/Library/Frameworks/Xamarin.Android.framework/Versions/Current/lib/xamarin.android/xbuild-frameworks/MonoAndroid")) XAMARIN_ANDROID_PATH = "/Library/Frameworks/Xamarin.Android.framework/Versions/Current/lib/xamarin.android/xbuild-frameworks/MonoAndroid"; else XAMARIN_ANDROID_PATH = "/Library/Frameworks/Xamarin.Android.framework/Versions/Current/lib/xbuild-frameworks/MonoAndroid"; } } if (!DirectoryExists($"{XAMARIN_ANDROID_PATH}/{ANDROID_SDK_VERSION}")) throw new Exception($"Unable to find Xamarin.Android {ANDROID_SDK_VERSION} at {XAMARIN_ANDROID_PATH}."); // Load all the git variables var BUILD_COMMIT = EnvironmentVariable("BUILD_COMMIT") ?? "DEV"; var BUILD_NUMBER = EnvironmentVariable("BUILD_NUMBER") ?? 
"DEBUG"; var BUILD_TIMESTAMP = DateTime.UtcNow.ToString(); var REQUIRED_DOTNET_TOOLS = new [] { "xamarin-android-binderator", "xamarin.androidx.migration.tool" }; string nuget_version_template = // "71.vvvv.0-preview3" // pre AndroidX version "1xx.yy.zz.ww-suffix" // AndroidX version preview //"1xx.yy.zz" // AndroidX version stable/release ; string nuget_version_suffix = ""; string[] Configs = new [] { "Debug", "Release" }; var MONODROID_PATH = "/Library/Frameworks/Xamarin.Android.framework/Versions/Current/lib/mandroid/platforms/" + ANDROID_SDK_VERSION + "/"; if (IsRunningOnWindows ()) { var vsInstallPath = VSWhereLatest (new VSWhereLatestSettings { Requires = "Component.Xamarin", IncludePrerelease = true }); MONODROID_PATH = vsInstallPath.Combine ("Common7/IDE/ReferenceAssemblies/Microsoft/Framework/MonoAndroid/" + ANDROID_SDK_VERSION).FullPath; } var MSCORLIB_PATH = "/Library/Frameworks/Xamarin.Android.framework/Libraries/mono/2.1/"; if (IsRunningOnWindows ()) { var DOTNETDIR = new DirectoryPath (Environment.GetFolderPath (Environment.SpecialFolder.Windows)).Combine ("Microsoft.NET/"); if (DirectoryExists (DOTNETDIR.Combine ("Framework64"))) MSCORLIB_PATH = MakeAbsolute (DOTNETDIR.Combine("Framework64/v4.0.30319/")).FullPath; else MSCORLIB_PATH = MakeAbsolute (DOTNETDIR.Combine("Framework/v4.0.30319/")).FullPath; } string JAVA_HOME = EnvironmentVariable ("JAVA_HOME") ?? Argument ("java_home", ""); string ANDROID_HOME = EnvironmentVariable ("ANDROID_HOME") ?? Argument ("android_home", ""); string ANDROID_SDK_ROOT = EnvironmentVariable ("ANDROID_SDK_ROOT") ?? Argument ("android_sdk_root", ""); // Log some variables Information ($"JAVA_HOME : {JAVA_HOME}"); Information ($"ANDROID_HOME : {ANDROID_HOME}"); Information ($"ANDROID_SDK_ROOT : {ANDROID_SDK_ROOT}"); Information ($"MONODROID_PATH : {MONODROID_PATH}"); Information ($"MSCORLIB_PATH : {MSCORLIB_PATH}"); Information ($"XAMARIN_ANDROID_PATH : {XAMARIN_ANDROID_PATH}"); Information ($"ANDROID_SDK_VERSION : {ANDROID_SDK_VERSION}"); Information ($"BUILD_COMMIT: : {BUILD_COMMIT}"); Information ($"BUILD_NUMBER: : {BUILD_NUMBER}"); Information ($"BUILD_TIMESTAMP: : {BUILD_TIMESTAMP}"); // You shouldn't have to configure anything below here // ###################################################### void RunProcess(FilePath fileName, string processArguments) { var exitCode = StartProcess(fileName, processArguments); if (exitCode != 0) throw new Exception ($"Process {fileName} exited with code {exitCode}."); } void RunGradle(DirectoryPath root, string target) { root = MakeAbsolute(root); var proc = IsRunningOnWindows() ? root.CombineWithFilePath("gradlew.bat").FullPath : "bash"; var args = IsRunningOnWindows() ? 
"" : root.CombineWithFilePath("gradlew").FullPath; args += $" {target} -p {root}"; var exitCode = StartProcess(proc, args); if (exitCode != 0) throw new Exception($"Gradle exited with code {exitCode}."); } Task("javadocs") .Does(() => { EnsureDirectoryExists("./externals/"); if (!FileExists("./externals/docs.zip")) DownloadFile(REF_DOCS_URL, "./externals/docs.zip"); if (!DirectoryExists("./externals/docs")) Unzip ("./externals/docs.zip", "./externals/docs"); if (!FileExists("./externals/paramnames.txt")) DownloadFile(REF_PARAMNAMES_URL, "./externals/paramnames.txt"); if (!FileExists("./externals/paramnames.xml")) DownloadFile(REF_METADATA_URL, "./externals/paramnames.xml"); var astJar = new FilePath("./util/JavaASTParameterNames-1.0.jar"); var sourcesJars = GetFiles("./externals/**/*-sources.jar"); foreach (var srcJar in sourcesJars) { var srcJarPath = MakeAbsolute(srcJar).FullPath; var outTxtPath = srcJarPath.Replace("-sources.jar", "-paramnames.txt"); var outXmlPath = srcJarPath.Replace("-sources.jar", "-paramnames.xml"); StartProcess("java", "-jar \"" + MakeAbsolute(astJar).FullPath + "\" --text \"" + srcJarPath + "\" \"" + outTxtPath + "\""); StartProcess("java", "-jar \"" + MakeAbsolute(astJar).FullPath + "\" --xml \"" + srcJarPath + "\" \"" + outXmlPath + "\""); } }); Task("tools-update") .Does ( () => { /* dotnet tool uninstall -g Cake.Tool dotnet tool install -g Cake.Tool dotnet tool uninstall -g xamarin.androidbinderator.tool dotnet tool install -g xamarin.androidbinderator.tool dotnet tool uninstall -g xamarin.androidx.migration.tool dotnet tool install -g xamarin.androidx.migration.tool StartProcess("dotnet", "tool uninstall -g Cake.Tool"); StartProcess("dotnet", "tool install -g Cake.Tool"); */ StartProcess("dotnet", "tool uninstall -g xamarin.androidbinderator.tool"); StartProcess("dotnet", "tool install -g xamarin.androidbinderator.tool"); StartProcess("dotnet", "tool uninstall -g xamarin.androidx.migration.tool"); StartProcess("dotnet", "tool install -g xamarin.androidx.migration.tool"); } ); Task("binderate") .IsDependentOn("javadocs") .IsDependentOn("binderate-config-verify") .Does(() => { var configFile = MakeAbsolute(new FilePath("./config.json")).FullPath; var basePath = MakeAbsolute(new DirectoryPath ("./")).FullPath; RunProcess("xamarin-android-binderator", $"--config=\"{configFile}\" --basepath=\"{basePath}\""); RunTarget("binderate-prepare-dependencies-samples-packages-config"); RunTarget("binderate-prepare-dependencies-samples-packagereferences"); }); Task("binderate-prepare-dependencies-samples-packagereferences") .Does ( () => { // needed for offline builds 28.0.0.1 to 28.0.0.3 EnsureDirectoryExists("./output/"); EnsureDirectoryExists("./externals/"); FilePathCollection files = GetFiles("./samples/**/*.csproj"); foreach(FilePath file in files) { Information($"File: {file}"); XmlDocument xml = new XmlDocument(); xml.Load($"{file}"); } } ); Task("binderate-prepare-dependencies-samples-packages-config") .Does ( () => { // needed for offline builds 28.0.0.1 to 28.0.0.3 EnsureDirectoryExists("./output/"); EnsureDirectoryExists("./externals/"); FilePathCollection files = GetFiles("./samples/**/packages.config"); foreach(FilePath file in files) { Information($"File: {file}"); XmlDocument xml = new XmlDocument(); xml.Load($"{file}"); XmlNodeList list = xml.SelectNodes("/packages/package"); foreach (XmlNode xn in list) { string id = xn.Attributes["id"].Value; //Get attribute-id //string text = xn["Text"].InnerText; //Get Text Node string v = 
xn.Attributes["version"].Value; //Get attribute-id Information($" id : {id}"); Information($" version: {v}"); string url = $"https://www.nuget.org/api/v2/package/{id}/{v}"; string file1 = $"./externals/{id.ToLower()}.{v}.nupkg"; try { if ( ! FileExists(file1) ) { DownloadFile(url, file1); } } catch (System.Exception) { Error($"Unable to download {url}"); } } } return; } ); JArray binderator_json_array = null; Task("binderate-config-verify") .IsDependentOn("binderate-fix") .Does ( () => { using (StreamReader reader = System.IO.File.OpenText(@"./config.json")) { JsonTextReader jtr = new JsonTextReader(reader); JArray ja = (JArray)JToken.ReadFrom(jtr); Information("config.json"); //Information($"{ja}"); foreach(JObject jo in ja[0]["artifacts"]) { bool? dependency_only = (bool?) jo["dependencyOnly"]; if ( dependency_only == true) { continue; } string version = (string) jo["version"]; string nuget_version = (string) jo["nugetVersion"]; Information($"groupId = {jo["groupId"]}"); Information($"artifactId = {jo["artifactId"]}"); Information($"version = {version}"); Information($"nuget_version = {nuget_version}"); Information($"nugetId = {jo["nugetId"]}"); string[] version_parsed = nuget_version.Split(new string[] {"."}, StringSplitOptions.None); string nuget_version_new = nuget_version_template; string version_parsed_xx = version_parsed[0]; string version_parsed_yy = version_parsed[1]; string version_parsed_zz = version_parsed[2]; Information($"version_parsed_xx = {version_parsed_xx}"); if ( version_parsed_xx.Length == 1 ) { version_parsed_xx = string.Concat("0", version_parsed_xx); } Information($"version_parsed_xx = {version_parsed_xx}"); nuget_version_new = nuget_version_new.Replace("1xx", version_parsed_xx); nuget_version_new = nuget_version_new.Replace("yy", version_parsed_yy); nuget_version_new = nuget_version_new.Replace("zz", version_parsed_zz); if (version_parsed.Length == 4) { nuget_version_new = nuget_version_new.Replace("ww", version_parsed[3]); } else { nuget_version_new = nuget_version_new.Replace(".ww", ""); } nuget_version_new = nuget_version_new.Replace("-suffix", nuget_version_suffix); Information($"nuget_version_new = {nuget_version_new}"); Information($"nuget_version = {nuget_version}"); if( ! 
nuget_version_new.Contains($"{nuget_version}") )
                    {
                        // AndroidX version
                        //
                        // pre AndroidX version
                        Error("check config.json for nuget id - pre AndroidX version");
                        Error ($" groupId = {jo["groupId"]}");
                        Error ($" artifactId = {jo["artifactId"]}");
                        Error ($" version = {version}");
                        Error ($" nuget_version = {nuget_version}");
                        Error ($" nugetId = {jo["nugetId"]}");
                        Warning($" expected : ");
                        Warning($" nuget_version = {nuget_version_new}");
                        throw new Exception("check config.json for nuget id");
                    }
                }
            }
        }
    );
Task("binderate-diff")
    .IsDependentOn("binderate")
    .Does
    (
        () =>
        {
            EnsureDirectoryExists("./output/");
            // "git diff master:config.json config.json" > ./output/config.json.diff-from-master.txt
            string process = "git";
            string process_args = "diff master:config.json config.json";
            IEnumerable<string> redirectedStandardOutput;
            ProcessSettings process_settings = new ProcessSettings ()
            {
                Arguments = process_args,
                RedirectStandardOutput = true
            };
            int exitCodeWithoutArguments = StartProcess(process, process_settings, out redirectedStandardOutput);
            System.IO.File.WriteAllLines("./output/config.json.diff-from-master.txt", redirectedStandardOutput.ToArray());
            Information("Exit code: {0}", exitCodeWithoutArguments);
        }
    );
Task("binderate-fix")
    .Does
    (
        () =>
        {
            using (StreamReader reader = System.IO.File.OpenText(@"./config.json"))
            {
                JsonTextReader jtr = new JsonTextReader(reader);
                binderator_json_array = (JArray)JToken.ReadFrom(jtr);
            }
            Warning("config.json fixing missing folder structure ...");
            foreach(JObject jo in binderator_json_array[0]["artifacts"])
            {
                string groupId = (string) jo["groupId"];
                string artifactId = (string) jo["artifactId"];
                Information($" Verifying files for :");
                Information($" group : {groupId}");
                Information($" artifact : {artifactId}");
                bool? dependency_only = (bool?) jo["dependencyOnly"];
                if ( dependency_only == true)
                {
                    continue;
                }
                string dir_group = $"source/{groupId}";
                if ( ! DirectoryExists(dir_group) )
                {
                    Warning($" Creating {dir_group}");
                    CreateDirectory(dir_group);
                }
                string dir_artifact = $"{dir_group}/{artifactId}";
                if ( ! DirectoryExists(dir_artifact) )
                {
                    Warning($" Creating artifact folder : {dir_artifact}");
                    CreateDirectory(dir_artifact);
                    CreateDirectory($"{dir_artifact}/Transforms/");
                    CreateDirectory($"{dir_artifact}/Additions/");
                }
                else
                {
                    continue;
                }
                if ( ! FileExists($"{dir_artifact}/Transforms/Metadata.xml"))
                {
                    Warning($" Creating file : {dir_artifact}/Transforms/Metadata.xml");
                    CopyFile
                    (
                        $"./source/template-group-id/template-artifact/Transforms/Metadata.xml",
                        $"{dir_artifact}/Transforms/Metadata.xml"
                    );
                }
                if ( ! FileExists($"{dir_artifact}/Transforms/Metadata.Namespaces.xml"))
                {
                    Warning($" Creating file : {dir_artifact}/Transforms/Metadata.Namespaces.xml");
                    CopyFile
                    (
                        $"./source/template-group-id/template-artifact/Transforms/Metadata.Namespaces.xml",
                        $"{dir_artifact}/Transforms/Metadata.Namespaces.xml"
                    );
                }
                if ( ! FileExists($"{dir_artifact}/Transforms/Metadata.ParameterNames.xml"))
                {
                    Warning($" Creating file : {dir_artifact}/Transforms/Metadata.ParameterNames.xml");
                    CopyFile
                    (
                        $"./source/template-group-id/template-artifact/Transforms/Metadata.ParameterNames.xml",
                        $"{dir_artifact}/Transforms/Metadata.ParameterNames.xml"
                    );
                }
                if ( ! FileExists($"{dir_artifact}/Transforms/EnumFields.xml"))
                {
                    Warning($" Creating file : {dir_artifact}/Transforms/EnumFields.xml");
                    CopyFile
                    (
                        $"./source/template-group-id/template-artifact/Transforms/EnumFields.xml",
                        $"{dir_artifact}/Transforms/EnumFields.xml"
                    );
                }
                if ( !
FileExists($"{dir_artifact}/Transforms/EnumMethods.xml")) { Warning($" Creating file : {dir_artifact}/EnumMethods.xml"); CopyFile ( $"./source/template-group-id/template-artifact/Transforms/EnumMethods.xml", $"{dir_artifact}/Transforms/EnumMethods.xml" ); } if ( ! FileExists($"{dir_artifact}/Additions/Additions.cs")) { Warning($" Creating file : {dir_artifact}/Additions/Additions.cs"); CopyFile ( $"./source/template-group-id/template-artifact/Additions/Additions.cs", $"{dir_artifact}/Additions/Additions.cs" ); } } return; } ); Task("mergetargets") .Does(() => { /***************************** * BEGIN: Merge all the .targets together into one for the sake of compiling samples ******************************/ var generatedTargets = GetFiles("./generated/*/Xamarin.*.targets"); // Load the doc to append to, and the doc to append var xFileRoot = System.Xml.Linq.XDocument.Parse("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n</Project>"); System.Xml.Linq.XNamespace nsRoot = xFileRoot.Root.Name.Namespace; foreach (var generatedTarget in generatedTargets) { var xFileChild = System.Xml.Linq.XDocument.Load (MakeAbsolute (generatedTarget).FullPath); System.Xml.Linq.XNamespace nsChild = xFileRoot.Root.Name.Namespace; // Add all the elements under <Project> into the existing file's <Project> node foreach (var xItemToAdd in xFileChild.Element (nsChild + "Project").Elements ()) xFileRoot.Element (nsRoot + "Project").Add (xItemToAdd); } // Inject a property to prevent errors from missing assemblies in .targets // this allows us to use one big .targets file in all the projects and not have to figure out which specific // ones each project needs to reference for development purposes if (!xFileRoot.Descendants (nsRoot + "XamarinBuildResourceMergeThrowOnMissingAssembly").Any ()) { xFileRoot.Element (nsRoot + "Project") .AddFirst (new System.Xml.Linq.XElement (nsRoot + "PropertyGroup", new System.Xml.Linq.XElement (nsRoot + "XamarinBuildResourceMergeThrowOnMissingAssembly", false))); } xFileRoot.Save ("./generated/generated.targets"); /***************************** * END: Merge all the .targets together into one for the sake of compiling samples ******************************/ }); Task("libs-native") .Does(() => { string root = "./source/com.google.android.play/core.extensions/"; RunGradle(root, "build"); string outputDir = "./externals/com.xamarin.google.android.play.core.extensions/"; EnsureDirectoryExists(outputDir); CleanDirectories(outputDir); CopyFileToDirectory($"{root}/extensions-aar/build/outputs/aar/extensions-aar-release.aar", outputDir); Unzip($"{outputDir}/extensions-aar-release.aar", outputDir); MoveFile($"{outputDir}/classes.jar", $"{outputDir}/extensions.jar"); }); Task("libs") .IsDependentOn("libs-native") .Does(() => { Configs = new string[] { "Release" }; foreach(string config in Configs) { var settings = new DotNetCoreMSBuildSettings() .SetConfiguration(config) .SetMaxCpuCount(MAX_CPU_COUNT) .EnableBinaryLogger("./output/libs.binlog"); settings.Properties.Add("DesignTimeBuild", new [] { "false" }); settings.Properties.Add("AndroidSdkBuildToolsVersion", new [] { AndroidSdkBuildTools }); if (!string.IsNullOrEmpty(ANDROID_HOME)) { settings.Properties.Add("AndroidSdkDirectory", new [] { $"{ANDROID_HOME}" } ); } DotNetCoreRestore("./generated/GooglePlayServices.sln", new DotNetCoreRestoreSettings { MSBuildSettings = settings.EnableBinaryLogger("./output/restore.binlog") }); 
DotNetCoreMSBuild("./generated/GooglePlayServices.sln", settings); } }); Task("samples-directory-build-targets") .Does ( () => { Information("samples Director.Build.targets from config.json ..."); using (StreamReader reader = System.IO.File.OpenText(@"./config.json")) { JsonTextReader jtr = new JsonTextReader(reader); binderator_json_array = (JArray)JToken.ReadFrom(jtr); } foreach(JObject jo in binderator_json_array[0]["artifacts"]) { string version = (string) jo["version"]; string nuget_version = (string) jo["nugetVersion"]; Information($"groupId = {jo["groupId"]}"); Information($"artifactId = {jo["artifactId"]}"); Information($"version = {version}"); Information($"nuget_version = {nuget_version}"); Information($"nugetId = {jo["nugetId"]}"); } XmlDocument doc = new XmlDocument(); XmlElement element_p = doc.CreateElement( string.Empty, "Project", string.Empty ); doc.AppendChild( element_p ); XmlElement element_ig = doc.CreateElement( string.Empty, "ItemGroup", string.Empty ); element_p.AppendChild(element_ig); foreach(JObject jo in binderator_json_array[0]["artifacts"]) { string version = (string) jo["version"]; string nuget_version = (string) jo["nugetVersion"]; Information($"groupId = {jo["groupId"]}"); Information($"artifactId = {jo["artifactId"]}"); Information($"version = {version}"); Information($"nuget_version = {nuget_version}"); Information($"nugetId = {jo["nugetId"]}"); XmlElement element_pr = doc.CreateElement( string.Empty, "PackageReference", string.Empty ); element_ig.AppendChild(element_pr); XmlAttribute attr_update = doc.CreateAttribute("Update"); attr_update.Value = (string) jo["nugetId"]; element_pr.Attributes.Append(attr_update); XmlAttribute attr_version = doc.CreateAttribute("Version"); attr_version.Value = nuget_version; element_pr.Attributes.Append(attr_version); } doc.Save( System.IO.Path.Combine("samples", "Directory.Build.targets" )); return; } ); Task("samples") .IsDependentOn("libs") .IsDependentOn("samples-directory-build-targets") .IsDependentOn("mergetargets") .IsDependentOn("allbindingprojectrefs") .Does ( () => { Configs = new string[] { "Debug", "Release" }; DeleteDirectories(GetDirectories("./samples/**/bin/"), new DeleteDirectorySettings() { Force = true, Recursive = true }); DeleteDirectories(GetDirectories("./samples/**/obj/"), new DeleteDirectorySettings() { Force = true, Recursive = true }); EnsureDirectoryExists($@"./output/failed/"); var sampleSlns = GetFiles("./samples/all/**/*.sln") .Concat(GetFiles("./samples/com.google.android.gms/**/*.sln")) .Concat(GetFiles("./samples/com.google.firebase/**/*.sln")) ; foreach(string config in Configs) { foreach (var sampleSln in sampleSlns) { string filename_sln = sampleSln.GetFilenameWithoutExtension().ToString(); if ( ! 
filename_sln.Contains("BuildAll") ) { NuGetRestore(sampleSln, new NuGetRestoreSettings { }); // R8 errors } if ( sampleSln.ToString().Contains("com.google.android.gms/play-services-cast/CastingCall.sln") || sampleSln.ToString().Contains("com.google.android.gms/play-services-games/BeGenerous.sln") || sampleSln.ToString().Contains("com.google.android.gms/play-services-wallet/AndroidPayQuickstart.sln") || sampleSln.ToString().Contains("com.google.firebase/firebase-analytics/FirebaseAnalyticsQuickstart.sln") || sampleSln.ToString().Contains("com.google.firebase/firebase-storage/FirebaseStorageQuickstart.sln") ) { // skip problematic samples for now continue; } Information($"Solution: {filename_sln}"); string bl = MakeAbsolute(new FilePath($"./output/{filename_sln}{config}.sample.binlog")).FullPath; try { MSBuild ( sampleSln, c => { c.Configuration = config; c.Properties.Add("DesignTimeBuild", new [] { "false" }); c.BinaryLogger = new MSBuildBinaryLogSettings { Enabled = true, FileName = bl }; if (! string.IsNullOrEmpty(ANDROID_HOME)) { c.Properties.Add("AndroidSdkDirectory", new [] { $"{ANDROID_HOME}" } ); } } ); } catch (Exception exc) { Error($"Error: {exc}"); Error($" bl: {bl}"); Error($" bl: {bl.Replace($@"output", $@"output/failed")}"); if ( FileExists(bl) ) { DeleteFile(bl); } MoveFile(bl, bl.Replace($@"output", $@"output/failed")); } } } DeleteFiles(".output/system.*/nupkg"); DeleteFiles(".output/microsoft.*/nupkg"); DeleteFiles(".output/xamarin.android.support.*/nupkg"); DeleteFiles(".output/xamarin.android.arch.*/nupkg"); DeleteFiles(".output/xamarin.build.download.*/nupkg"); }); Task("allbindingprojectrefs") .Does(() => { Action<string,string> generateTargets = (string pattern, string file) => { var xmlns = (XNamespace)"http://schemas.microsoft.com/developer/msbuild/2003"; var itemGroup = new XElement(xmlns + "ItemGroup"); foreach (var nupkg in GetFiles(pattern)) { var filename = nupkg.GetFilenameWithoutExtension(); var match = Regex.Match(filename.ToString(), @"(.+?)\.(\d+[\.0-9\-a-zA-Z]+)"); itemGroup.Add(new XElement(xmlns + "PackageReference", new XAttribute("Include", match.Groups[1]), new XAttribute("Version", match.Groups[2]))); } var xdoc = new XDocument(new XElement(xmlns + "Project", itemGroup)); xdoc.Save(file); }; generateTargets("./output/Xamarin.Firebase.*.nupkg", "./output/FirebasePackages.targets"); generateTargets("./output/Xamarin.GooglePlayServices.*.nupkg", "./output/PlayServicesPackages.targets"); generateTargets("./output/Xamarin.Google.MLKit.*.nupkg", "./output/Google.MLKit.targets"); generateTargets("./output/Xamarin.Google.Play.*.nupkg", "./output/Google.Play.targets"); }); Task("nuget") .IsDependentOn("libs") .Does(() => { var outputPath = new DirectoryPath("./output"); var settings = new DotNetCoreMSBuildSettings() .SetConfiguration("Release") .SetMaxCpuCount(MAX_CPU_COUNT) .EnableBinaryLogger ("./output/nuget.binlog"); settings.Targets.Clear(); settings.Targets.Add("Pack"); settings.Properties.Add("PackageOutputPath", new [] { MakeAbsolute(outputPath).FullPath }); settings.Properties.Add("PackageRequireLicenseAcceptance", new [] { "true" }); settings.Properties.Add("DesignTimeBuild", new [] { "false" }); settings.Properties.Add("AndroidSdkBuildToolsVersion", new [] { $"{AndroidSdkBuildTools}" }); if (! 
string.IsNullOrEmpty(ANDROID_HOME))
        {
            settings.Properties.Add("AndroidSdkDirectory", new[] { $"{ANDROID_HOME}" });
        }
        DotNetCoreMSBuild ("./generated/GooglePlayServices.sln", settings);
    });
Task ("merge")
    .IsDependentOn ("libs")
    .Does (() =>
    {
        var allDlls = GetFiles ($"./generated/*/bin/Release/monoandroid*/Xamarin.GooglePlayServices.*.dll")
            + GetFiles ($"./generated/*/bin/Release/monoandroid*/Xamarin.Firebase.*.dll");
        var mergeDlls = allDlls
            .GroupBy(d => new FileInfo(d.FullPath).Name)
            .Select(g => g.FirstOrDefault())
            .ToList();
        EnsureDirectoryExists("./output/");
        RunProcess("androidx-migrator",
            $"merge" +
            $" --assembly " + string.Join(" --assembly ", mergeDlls) +
            $" --output ./output/GooglePlayServices.Merged.dll" +
            $" --search \"{XAMARIN_ANDROID_PATH}/{ANDROID_SDK_VERSION}\" " +
            $" --search \"{XAMARIN_ANDROID_PATH}/{ANDROID_SDK_BASE_VERSION}\" " +
            $" --inject-assemblyname");
    });
Task ("ci-setup")
    .WithCriteria (!BuildSystem.IsLocalBuild)
    .Does (() =>
    {
        var glob = "./source/AssemblyInfo.cs";
        ReplaceTextInFiles(glob, "{BUILD_COMMIT}", BUILD_COMMIT);
        ReplaceTextInFiles(glob, "{BUILD_NUMBER}", BUILD_NUMBER);
        ReplaceTextInFiles(glob, "{BUILD_TIMESTAMP}", BUILD_TIMESTAMP);
    });
Task("nuget-dependencies")
    .Does
    (
        () =>
        {
            // placeholder: download URL for the ICanHasDotnetCore tool, not consumed yet
            string icanhasdotnet = "https://icanhasdot.net/Downloads/ICanHasDotnetCore.zip";
        }
    );
// Task ("genapi")
// .IsDependentOn ("libs")
// .Does (() =>
// {
// var GenApiToolPath = GetFiles ("./tools/**/GenAPI.exe").FirstOrDefault ();
// // For some reason GenAPI.exe can't handle absolute paths on mac/unix properly, so always make them relative
// // GenAPI.exe -libPath:$(MONOANDROID) -out:Some.generated.cs -w:TypeForwards ./relative/path/to/Assembly.dll
// var libDirPrefix = IsRunningOnWindows () ? "output/" : "";
// var libs = new FilePath [] {
// "./" + libDirPrefix + "Xamarin.Android.Support.Compat.dll",
// "./" + libDirPrefix + "Xamarin.Android.Support.Core.UI.dll",
// "./" + libDirPrefix + "Xamarin.Android.Support.Core.Utils.dll",
// "./" + libDirPrefix + "Xamarin.Android.Support.Fragment.dll",
// "./" + libDirPrefix + "Xamarin.Android.Support.Media.Compat.dll",
// };
// foreach (var lib in libs) {
// var genName = lib.GetFilename () + ".generated.cs";
// var libPath = IsRunningOnWindows () ? MakeAbsolute (lib).FullPath : lib.FullPath;
// var monoDroidPath = IsRunningOnWindows () ? "\"" + MONODROID_PATH + "\"" : MONODROID_PATH;
// Information ("GenAPI: {0}", lib.FullPath);
// StartProcess (GenApiToolPath, new ProcessSettings {
// Arguments = string.Format("-libPath:{0} -out:{1}{2} -w:TypeForwards {3}",
// monoDroidPath + "," + MSCORLIB_PATH,
// IsRunningOnWindows () ?
"" : "./", // genName, // libPath), // WorkingDirectory = "./output/", // }); // } // MSBuild ("./GooglePlayServices.TypeForwarders.sln", c => c.Configuration = BUILD_CONFIG); // CopyFile ("./support-v4/source/bin/" + BUILD_CONFIG + "/Xamarin.Android.Support.v4.dll", "./output/Xamarin.Android.Support.v4.dll"); // }); // Task ("docs-api-diff") // .Does (async () => // { // var nupkgFiles = GetFiles ("./**/output/*.nupkg"); //get all of the nugets in the output // Information ("Found ({0}) Nuget's to Diff", nupkgFiles.Count ()); // foreach (var nupkgFile in nupkgFiles) //loop through each nuget that is found // { // Information("Diffing: {0}", nupkgFile); // await BuildApiDiff(nupkgFile); // } // }); Task ("clean") .Does (() => { if (DirectoryExists ("./externals")) DeleteDirectory ("./externals", new DeleteDirectorySettings { Recursive = true, Force = true }); if (DirectoryExists ("./generated")) DeleteDirectory ("./generated", new DeleteDirectorySettings { Recursive = true, Force = true }); CleanDirectories ("./**/packages"); CleanDirectories("./**/bin"); CleanDirectories("./**/obj"); }); Task ("ci") .IsDependentOn ("ci-setup") //.IsDependentOn ("tools-check") //.IsDependentOn ("inject-variables") .IsDependentOn ("binderate") .IsDependentOn ("nuget") //.IsDependentOn ("merge") .IsDependentOn ("samples"); RunTarget (TARGET);
@model puck.core.Models.PuckImage
@using puck.core.Models.EditorSettings
@{
    var settings = this.PuckEditorSettings<CropsEditorSettings>(inherit:false,modelTypeOverride:typeof(puck.core.Base.BaseModel))
        ?? new CropsEditorSettings() { Crops=new List<puck.core.Models.CropInfo>() };
    var guid = Guid.NewGuid();
}
<div data-guid="@guid.ToString()" class="puckImage puckimage_@ViewData.ModelMetadata.PropertyName puckimage_@ViewData.TemplateInfo.HtmlFieldPrefix.Replace("[","_").Replace("]","_").Replace(".","_")">
    <div style="display:none;" class="propName" data-propName="@ViewData.TemplateInfo.HtmlFieldPrefix"></div>
    <ul class="nav nav-tabs" role="tablist" id="myTab">
        <li class="nav-item active"><a class="nav-link active" role="tab" @*data-toggle="tab"*@ href="#">Image info</a></li>
        <li class="nav-item"><a @*data-toggle="tab"*@ class="nav-link" href="#">Crops</a></li>
    </ul>
    <div class="tab-content">
        <div class="tab-pane active" role="tabpanel" id="_puckImage_info_tab_@ViewData.ModelMetadata.PropertyName">
            <div class="noedit settings_display_image">
                @if (!string.IsNullOrEmpty(Model?.Path))
                {
                    <img alt="image" src="@Model.Path" width="100" />
                    <a href="@Model.Path" target="_blank" style="display:block;">
                        @{ var value = Model.Path.TrimEnd('/'); }
                        @if (value.IndexOf("/") > -1)
                        {
                            @value.Substring(value.LastIndexOf("/") + 1)
                        }
                        else
                        {
                            @value
                        }
                    </a>
                }
            </div>
            @Html.HiddenFor(x=>x.Path, new { id = "" })
            <div class="editor-label" style="">
                @Html.LabelFor(x => x.Description)
                @Html.ValidationMessageFor(x => x.Description)
            </div>
            <div class="editor-field">
                @Html.EditorFor(x => x.Description)
            </div>
            @if (!string.IsNullOrEmpty(Model?.Extension))
            {
                <div class="field">
                    <span>Extension:</span>@Html.EditorFor(x => x.Extension)
                </div>
            }
            @if (!string.IsNullOrEmpty(Model?.Width?.ToString()))
            {
                <div class="field">
                    <span>Width:</span>@Html.EditorFor(x => x.Width)
                </div>
            }
            @if (!string.IsNullOrEmpty(Model?.Height?.ToString()))
            {
                <div class="field">
                    <span>Height:</span>@Html.EditorFor(x => x.Height)
                </div>
            }
            @if (!string.IsNullOrEmpty(Model?.Size?.ToString()))
            {
                <div class="field">
                    <span>Size:</span>@Html.EditorFor(x => x.Size)
                </div>
            }
            <br />
            @Html.EditorFor(x => x.File)
            <div class="d-none">@Html.EditorFor(x=>x.CropUrls)</div>
        </div>
        <div class="tab-pane " role="tabpanel" id="_puckImage_crops_tab_@ViewData.ModelMetadata.PropertyName">
            <div class="cropSizes">
                @foreach (var c in settings.Crops ?? new List<puck.core.Models.CropInfo>())
                {
                    <div class="cropSize" data-alias="@c.Alias" data-width="@c.Width" data-height="@c.Height">
                        <div>
                            <span>@c.Alias</span>
                            <br />
                            <span>@c.Width<text>x</text>@c.Height</span>
                        </div>
                    </div>
                }
            </div>
            <div class="zoom-container p-2">
                <div class="minus">
                    <i class="fas fa-search-minus"></i>
                </div>
                <span data-zoom="1" class="zoom-amount">1</span>
                <div class="plus">
                    <i class="fas fa-search-plus"></i>
                </div>
            </div>
            <div class=""></div>
            <div class="message">
                @if (settings.Crops == null || settings.Crops.Count == 0)
                {
                    <div>Looks like you haven't set up any crops. Click <a class="addCropSizesLink" href="#settings?path=/puck/settings/editorparameters&stype=@(typeof(CropsEditorSettings).FullName)&mtype=BaseModel">here</a> to add some crop sizes.</div>
                }
            </div>
            <div class="viewport" style="height:auto;max-height:600px;width:500px;">
            </div>
        </div>
    </div>
    <div class="selected_content">
        <div style="display:none;">
            @Html.EditorFor(x => x.Crops)
        </div>
        @foreach (var Crops in Model?.Crops ??
new List<puck.core.Models.CropModel> { })
        {
            <div style="display:none;" class="cropInfo" data-alias="@Crops.Alias" data-width="@Crops.Width" data-height="@Crops.Height" data-left="@Crops.Left" data-top="@Crops.Top" data-right="@Crops.Right" data-bottom="@Crops.Bottom" data-zoom="@(Crops.Zoom??1)" data-zoom-left="@Crops.ZoomLeft" data-zoom-top="@Crops.ZoomTop" data-crop-left="@Crops.CropLeft" data-crop-top="@Crops.CropTop">
            </div>
        }
    </div>
</div>
<script type="text/javascript">
    onAfterDom(function () {
        var guid = "@guid.ToString()";
        var modelType = "@Html.Raw(ViewBag.Level0Type.Name)";
        var propname = "@Html.Raw(ViewData.TemplateInfo.HtmlFieldPrefix)";
        var isPrePopulated = '@ViewBag.IsPrePopulated' == 'True';
        var escapePropname = function (str) {
            return str.replace(/\[/g, "_").replace(/\]/g, "_").replace(/\./g, "_");
        }
        var container = $("[data-guid='" + guid + "']");
        var selected_container = container.find(".selected_content:first");
        var getPropName = function () {
            return container.find("div:hidden.propName:first").attr("data-propName");
        }
        //container.parents(".editor-field:first").css({ clear: "both" });
        container.find(".addCropSizesLink").click(function (e) {
            if (!canChangeMainContent()) {
                e.preventDefault();
            }
        });
        var setValue = function () {
            selected_container.find("input:hidden").remove();
            container.find(".cropSizes:first .cropSize").each(function (i) {
                var el = $(this);
                selected_container.append(
                    "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].Alias' value='" + el.attr("data-alias") + "'/>"
                    + "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].Width' value='" + el.attr("data-width") + "'/>"
                    + "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].Height' value='" + el.attr("data-height") + "'/>"
                    + (el.attr("data-left") == undefined ? "" : "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].Left' value='" + el.attr("data-left") + "'/>")
                    + (el.attr("data-top") == undefined ? "" : "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].Top' value='" + el.attr("data-top") + "'/>")
                    + (el.attr("data-right") == undefined ? "" : "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].Right' value='" + el.attr("data-right") + "'/>")
                    + (el.attr("data-bottom") == undefined ? "" : "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].Bottom' value='" + el.attr("data-bottom") + "'/>")
                    + (el.attr("data-zoom") == undefined ? "" : "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].Zoom' value='" + el.attr("data-zoom") + "'/>")
                    + (el.attr("data-zoom-left") == undefined ? "" : "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].ZoomLeft' value='" + el.attr("data-zoom-left") + "'/>")
                    + (el.attr("data-zoom-top") == undefined ? "" : "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].ZoomTop' value='" + el.attr("data-zoom-top") + "'/>")
                    // gate CropLeft/CropTop on their own attributes (previously both checked data-zoom-top, which could emit 'undefined' values)
                    + (el.attr("data-crop-left") == undefined ? "" : "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].CropLeft' value='" + el.attr("data-crop-left") + "'/>")
                    + (el.attr("data-crop-top") == undefined ?
"" : "<input type='hidden' name='" + getPropName() + ".Crops[" + i + "].CropTop' value='" + el.attr("data-crop-top") + "'/>") ); }); } var zoomAmountContainer = container.find(".zoom-amount"); var viewportWidth = 500; if ($(window).width() < viewportWidth) viewportWidth = $(window).width() - 30; var originalWidth = @(Model?.Width??0); var originalHeight = @(Model?.Height??0); var imageUrl = '@(Model?.Path??"")'; var viewport = container.find(".viewport"); var selected_container = container.find(".selected_content"); var message_container = container.find(".message"); var setupDrag = function (viewport,img,zoom) { } var setupImage = function (cropAlias, cropWidth, cropHeight, leftFrac, topFrac, rightFrac, bottomFrac, zoom,zoomLeft,zoomTop) { //viewport.find("img").remove(); zoom = zoom || 1; zoomAmountContainer.attr("data-zoom", zoom).html(zoom); var img = $("<img/>").attr({ src: imageUrl }); if (viewport.find("img").length > 0) viewport.find("img").remove(); if (viewport.find("img").length == 0) { viewport.append(img).css({ overflow: "hidden" }); } else img = viewport.find("img"); var doSetup = function () { if (zoom > 1) { img.css({ cursor: "move" }); } else { img.css({ cursor: "default" }); } viewport.css({ width: viewportWidth }).find(".cropper").remove(); var imgWidth; var originalImgWidth; var cropperWidth; var cropperHeight; var aRatio = originalWidth / originalHeight; var resize = false; message_container.html(""); if (cropWidth > originalWidth || cropHeight > originalHeight) { imgWidth = cropWidth > viewportWidth ? viewportWidth : cropWidth; resize = true; if (!(originalWidth == 0 && originalHeight == 0)) message_container.html("image is smaller than target crop size, the image will be resized to fit."); } else { imgWidth = originalWidth > viewportWidth ? 
viewportWidth : originalWidth; originalImgWidth = imgWidth; imgWidth *= zoom; //dragContainer.css({ width: imgWidth + "px" }); if (zoom > 1) { viewport.css({ height: 600 + "px" }); } } var cropARatio = cropWidth / cropHeight; var imgHeight = imgWidth / aRatio; widthRatio = originalWidth / originalImgWidth; cropperWidth = cropWidth / widthRatio; cropperHeight = cropperWidth / cropARatio; img.css({ width: imgWidth + "px", height: imgHeight + "px" }); if (imgWidth < viewportWidth) viewport.css({ width: imgWidth }); viewport.css({ height: imgHeight }); //setupDrag(viewport, img, zoom); if (!resize) { var cropper = $("<div/>").attr({ class: "cropper" }).css({ width: cropperWidth, height: cropperHeight, zIndex: 2 }); var left = 0; var top = 0; if (leftFrac != undefined && leftFrac != "" && topFrac != undefined && topFrac != "") { left = (imgWidth * leftFrac) + Math.abs(zoomLeft); top = (imgHeight * topFrac) + Math.abs(zoomTop); //debugger; } else { left = (imgWidth - cropperWidth) / 2; top = (imgHeight - cropperHeight) / 2; } if (img.hasClass("ui-draggable") && img.data("uiDraggable")) { try { img.draggable("destroy"); } catch (error) { console.error(error); } img.data("uiDraggable", ""); } if (zoom > 1) { //debugger; img.css({ position: "absolute", left: zoomLeft + "px", top: zoomTop + "px" }); img.draggable({ drag: function (e, ui) { if (ui.position.left > 0) { ui.position.left = 0; } if (ui.position.top > 0) { ui.position.top = 0; } //console.log("l", ui.position.left, "p", (imgWidth - viewport.width())); //console.log("t", ui.position.top, "p", (imgHeight - viewport.height())); var l = ui.position.left + imgWidth + viewport.width(); if (ui.position.left + imgWidth < viewport.width()) { //debugger; ui.position.left = viewport.width() - imgWidth; } var t = ui.position.top + viewport.height(); if (ui.position.top < (viewport.height() - imgHeight)) { ui.position.top = viewport.height() - imgHeight; } //console.log("drag", e, ui); //console.log("vp ol", viewport.offset().left); //console.log("vp ot", viewport.offset().top); } }); } //debugger; if (left + cropperWidth > viewport.width()) { offsetLeft = (left + cropperWidth) - viewport.width(); img.css({ left: img.position().left - offsetLeft }); left -= offsetLeft; } if (top + cropperHeight > viewport.height()) { offsetTop = (top + cropperHeight) - viewport.height(); img.css({ top: img.position().top - offsetTop }); top -= offsetTop; } cropper.css({ left: left + "px", top: top + "px" }).draggable({ containment: 'parent' , stop: function (event, ui) { setPositions(cropAlias, ui.position.left, ui.position.top, imgWidth, imgHeight, cropperWidth, cropperHeight, zoom, img.position().left, img.position().top); setValue(); } }); viewport.append(cropper); //debugger; setPositions(cropAlias, left, top, imgWidth, imgHeight, cropperWidth, cropperHeight, zoom, img.position().left, img.position().top); setValue(); } } img.load(function () { doSetup(); var callSetup = function () { setTimeout(function () { if (img.is(":visible")) doSetup(); else callSetup(); }, 500); } callSetup(); }); } container.find(".minus").click(function () { if (zoomAmountContainer.attr("data-zoom") == "1") return; zoomAmountContainer.attr("data-zoom", zoomAmountContainer.attr("data-zoom") - 0.5).html(zoomAmountContainer.attr("data-zoom")); var cropSize = container.find(".cropSize.active"); cropSize.attr("data-zoom", zoomAmountContainer.attr("data-zoom")); //viewport.find("img").css({left:1,top:1}); initCropSize(cropSize); setTimeout(function () { viewport.find("img").get(0).style.left = 
"1px"; viewport.find("img").get(0).style.top = "1px"; },1000); }); container.find(".plus").click(function () { zoomAmountContainer.attr("data-zoom", zoomAmountContainer.attr("data-zoom") - 0 + 0.5).html(zoomAmountContainer.attr("data-zoom")); var cropSize = container.find(".cropSize.active"); cropSize.attr("data-zoom",zoomAmountContainer.attr("data-zoom")); viewport.find("img").css({left:1,top:1}); initCropSize(cropSize); }); var setPositions = function (cropAlias,posLeft,posTop,imgWidth,imgHeight,cropperWidth,cropperHeight,zoom,zoomLeft,zoomTop) { var leftF = (zoomLeft + posLeft) / imgWidth; var cropLeft = (Math.abs(zoomLeft) + posLeft) / imgWidth; var topF = (zoomTop + posTop) / imgHeight; var cropTop = (Math.abs(zoomTop) + posTop) / imgHeight; var rightF = (imgWidth - (Math.abs(zoomLeft) + posLeft + cropperWidth)) / imgWidth; var bottomF = (imgHeight - (Math.abs(zoomTop) + posTop + cropperHeight)) / imgHeight; var cropSize = container.find(".cropSizes:first .cropSize[data-alias='" + cropAlias + "']"); if (cropSize.length == 0) return; cropSize.attr("data-left", leftF); cropSize.attr("data-top", topF); cropSize.attr("data-right", rightF); cropSize.attr("data-bottom", bottomF); cropSize.attr("data-zoom", zoom); cropSize.attr("data-zoom-left", zoomLeft); cropSize.attr("data-zoom-top", zoomTop); cropSize.attr("data-crop-left", cropLeft); cropSize.attr("data-crop-top", cropTop); //console.log(leftF, topF, rightF, bottomF); } var initCropSize = function (el) { container.find(".cropSize").removeClass("active"); el.addClass("active"); var alias = el.attr("data-alias"); var width = el.attr("data-width"); var height = el.attr("data-height"); var left = el.attr("data-left"); var top = el.attr("data-top"); var right = el.attr("data-right"); var bottom = el.attr("data-bottom"); var zoom = el.attr("data-zoom"); var zoomLeft = el.attr("data-zoom-left"); var zoomTop = el.attr("data-zoom-top"); setupImage(alias, width, height, left, top, right, bottom,zoom,zoomLeft,zoomTop); } container.find(".cropSizes:first .cropSize").click(function () { var el = $(this); var zoomAmount = el.attr("data-zoom"); container.find(".zoom-amount").html(zoomAmount).attr("zoom-amount",zoomAmount); initCropSize(el); }); var load = function () { container.find(".selected_content:first .cropInfo").each(function () { var el = $(this); var alias = el.attr("data-alias"); var width = el.attr("data-width"); var height = el.attr("data-height"); var left = el.attr("data-left"); var top = el.attr("data-top"); var right = el.attr("data-right"); var bottom = el.attr("data-bottom"); var zoom = el.attr("data-zoom"); var zoomLeft = el.attr("data-zoom-left"); var zoomTop = el.attr("data-zoom-top"); var cropLeft = el.attr("data-crop-left"); var cropTop = el.attr("data-crop-top"); var elCropSize = container.find(".cropSizes:first .cropSize[data-alias='" + alias + "']"); if (elCropSize.length == 0) return; var cropSizeWidth = elCropSize.attr("data-width"); var cropSizeHeight = elCropSize.attr("data-height"); if (width != cropSizeWidth || height != cropSizeHeight) return; elCropSize.attr("data-left", left); elCropSize.attr("data-top", top); elCropSize.attr("data-right", right); elCropSize.attr("data-bottom", bottom); elCropSize.attr("data-zoom", zoom); elCropSize.attr("data-zoom-left", zoomLeft); elCropSize.attr("data-zoom-top", zoomTop); elCropSize.attr("data-crop-left", cropLeft); elCropSize.attr("data-crop-top",cropTop); }); //debugger; container.find(".cropSizes:first .cropSize").each(function () { var el = $(this); initCropSize(el); }); var 
zoomAmount = container.find(".cropSize.active").attr("data-zoom");
            container.find(".zoom-amount").html(zoomAmount).attr("data-zoom", zoomAmount);
        }
        load();
        container.find(".cropSize:first").click();
        setValue();
    });
</script>
<style>
    .zoom-container .plus {
        display: inline-block;
    }

    .zoom-container .plus i, .zoom-container .minus i {
        padding: 5px;
        cursor: pointer;
    }

    .zoom-container .minus {
        display: inline-block;
    }

    .zoom-container .plus, .zoom-container .minus {
        font-size: 14px;
    }

    .zoom-container .zoom-amount {
        background: #ddd;
        padding: 5px;
        color: #fff;
        font-weight: bold;
    }

    .puckImage .field div {
        display: inline;
    }

    .puckImage .cropSize {
        border: 1px solid #dddddd;
        margin: 0px;
        padding: 0px;
        display: inline-block;
        cursor: pointer;
    }

    .puckImage .cropSize.active {
        background: #eeeeee /*00ff7f*/;
    }

    .puckImage .cropSize > div {
        padding: 8px;
    }

    .puckImage .cropper {
        border: 1px solid #dddddd;
        background-color: rgba(255, 216, 0, 0.5);
        position: absolute !important;
        top: 0px;
        left: 0px;
    }

    .puckImage .viewport {
        position: relative;
    }
</style>
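// Consumer-side sketch (an assumption, not part of puck.core's API): the crop
// editor above persists Left/Top/Right/Bottom as fractions of the displayed
// image, so a hypothetical helper can recover a pixel rectangle on the
// original image. Assumes zoom == 1, where Left/Top coincide with
// CropLeft/CropTop; the method and parameter names are illustrative only.
public static (int X, int Y, int Width, int Height) CropFractionsToPixels(
    double left, double top, double right, double bottom,
    int originalWidth, int originalHeight)
{
    // Each fraction is the margin between the crop box and the image edge,
    // so the box size is what remains after removing the opposing margins.
    var x = (int)System.Math.Round(originalWidth * left);
    var y = (int)System.Math.Round(originalHeight * top);
    var w = (int)System.Math.Round(originalWidth * (1 - left - right));
    var h = (int)System.Math.Round(originalHeight * (1 - top - bottom));
    return (x, y, w, h);
}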
@page @model SearchModel @{ ViewData["Title"] = "Search"; ViewData["PageName"] = "page_search"; ViewData["Category1"] = "Page Views"; ViewData["PreemptiveClass"] = "layout-composed"; } @section HeadBlock { <link rel="stylesheet" media="screen, print" href="~/css/fa-solid.css"> <link rel="stylesheet" media="screen, print" href="~/css/fa-brands.css"> } <div class="px-3 px-sm-5 pt-4"> <h1 class="mb-4"> 160 Results for "@Settings.AppName" <small class="mb-3"> Request time (0.23 seconds) </small> </h1> <div class="input-group input-group-lg mb-5 shadow-1 rounded"> <input type="text" class="form-control shadow-inset-2" id="filter-icon" aria-label="type 2 or more letters" placeholder="Search anything..." value="@Settings.AppName responsive webapp"> <div class="input-group-append"> <button class="btn btn-primary hidden-sm-down" type="button"><i class="@(Settings.Theme.IconPrefix) fa-search mr-lg-2"></i><span class="hidden-md-down">Search</span></button> <button type="button" class="btn btn-primary dropdown-toggle dropdown-toggle-split" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"> <span class="sr-only">Toggle Dropdown</span> </button> <div class="dropdown-menu dropdown-menu-right"> <a class="dropdown-item" href="#">Data</a> <a class="dropdown-item" href="#">Images</a> <a class="dropdown-item" href="#">Users</a> <div role="separator" class="dropdown-divider"></div> <a class="dropdown-item active" href="#">Everything</a> </div> </div> </div> </div> <ul class="nav nav-tabs nav-tabs-clean px-3 px-sm-5" role="tablist"> <li class="nav-item"> <a class="nav-link active bg-transparent fs-lg fw-400" data-toggle="tab" href="#tab-all" role="tab">All</a> </li> <li class="nav-item"> <a class="nav-link bg-transparent fs-lg fw-400" data-toggle="tab" href="#tab-images" role="tab">Image</a> </li> </ul> <div class="d-flex px-3 px-sm-5 py-4"> <div class="custom-control custom-switch d-inline-flex"> <input type="checkbox" class="custom-control-input" id="customSwitch1" checked=""> <label class="custom-control-label fw-500 text-dark" for="customSwitch1"><span class="hidden-sm-down">Activate</span> Location</label> </div> <div class="d-inline-block"> <a href="javascript:void(0);" class="text-dark fw-500 dropdown-toggle ml-4" data-toggle="dropdown">Safe Search: Moderate</a> <div class="dropdown-menu"> <a class="dropdown-item px-3 py-2" href="#"> <p class="m-0">Strict</p> <p class="m-0 opacity-50">No Adult Content</p> </a> <a class="dropdown-item active px-3 py-2" href="#"> <p class="m-0">Moderate</p> <p class="m-0 opacity-50">No explicit video or images</p> </a> <a class="dropdown-item px-3 py-2" href="#"> <p class="m-0">Off</p> <p class="m-0 opacity-50">Don't filter adult content</p> </a> </div> </div> <div class="d-inline-block"> <a href="javascript:void(0);" class="text-dark fw-500 dropdown-toggle ml-4" data-toggle="dropdown">All Types</a> <div class="dropdown-menu"> <a class="dropdown-item px-3 py-2" href="#">Data</a> <a class="dropdown-item px-3 py-2" href="#">Images</a> <a class="dropdown-item px-3 py-2" href="#">Media</a> <div class="dropdown-divider"></div> <a class="dropdown-item px-3 py-2 active" href="#">All</a> </div> </div> </div> <div class="px-3 px-sm-5 pb-4"> <div class="tab-content"> <div class="tab-pane show active" id="tab-all" role="tabpanel" aria-labelledby="tab-all"> <div class="card"> <ul class="list-group list-group-flush"> <li class="list-group-item py-4 px-4"> <a href="javascript:void(0)" class="fs-lg fw-500">@Settings.App - Responsive Dashboard Template</a> <div 
class="fs-xs mt-1"> <a href="javascript:void(0)" class="text-success">http://wrapbootstrap.com/themes/admin</a> </div> <div class="mt-2"> <span class="text-muted">Dec 12, 2013</span> - Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book... </div> <div class="d-flex flex-row mt-3"> <div class="d-block text-truncate text-truncate-lg border-faded p-2"> <a href="javascript:void(0)" class="fs-md fw-500">Support us - @Settings.App WebApp</a> <details> <summary class="d-block text-truncate text-truncate-lg text-muted fs-xs">He consider is to vows, focuses </summary> <p class="d-block text-truncate text-truncate-lg fs-md">Sufficiently the secure our represent on eagerly magazine where expl</p> </details> </div> <div class="d-block text-truncate text-truncate-lg border-faded p-2 ml-2"> <a href="javascript:void(0)" class="fs-md fw-500">Contact - @Settings.App WebApp</a> <details> <summary class="d-block text-truncate text-truncate-lg text-muted fs-xs">Sufficiently the secure our represent on eagerly magazine where expl</summary> <p class="d-block text-truncate text-truncate-lg fs-md">Sufficiently the secure our represent on eagerly magazine where expl</p> </details> </div> <div class="d-block text-truncate text-truncate-lg border-faded p-2 ml-2"> <a href="javascript:void(0)" class="fs-md fw-500">About us</a> <details> <summary class="d-block text-truncate text-truncate-lg text-muted fs-xs">Which been italic, his based due and yet was hazar</summary> <p class="d-block text-truncate text-truncate-lg fs-md">Sufficiently the secure our represent on eagerly magazine where expl</p> </details> </div> </div> </li> <li class="list-group-item py-4 px-4"> <a href="javascript:void(0)" class="fs-lg fw-500">Develop your webapp with ease using @Settings.AppName</a> <div class="fs-xs mt-1"> <a href="javascript:void(0)" class="text-success">http://wrapbootstrap.com/themes/admin</a> </div> <div class="mt-2"> Acquired concept to however, lots have it human its after suspicious good clock, line tall and writing be he are was increased technology be client would, and in top children peacefully. The consider is to vows, focuses completely I to proposal room. Joke. That particularly problem uniforms, burden military the to ill road, people. </div> <span class="fs-sm d-flex align-items-center mt-3"> <a href="#" class="mr-2 mt-1" title="@Settings.AppName Colors"> <span class="d-block img-share" style="background-image:url('/img/thumbs/ng-thumb-2.png'); background-size: cover;"></span> </a> <a href="#" class="mr-2 mt-1" title="@Settings.AppName Alerts"> <span class="d-block img-share" style="background-image:url('/img/thumbs/ng-thumb-3.png'); background-size: cover;"></span> </a> <a href="#" class="mr-2 mt-1" title="@Settings.AppName Progress"> <span class="d-block img-share" style="background-image:url('/img/thumbs/ng-thumb-4.png'); background-size: cover;"></span> </a> </span> </li> <li class="list-group-item py-4 px-4"> <a href="javascript:void(0)" class="fs-lg fw-500">45,000 downloads for @Settings.App.</a> <div class="fs-xs mt-1"> <a href="javascript:void(0)" class="text-success">http://wrapbootstrap.com/themes/admin</a> </div> <div class="mt-2"> Greatest both welcoming the turn spineless, to which been italic, his based due and yet was hazardous necessary pros blind a okay. Written missions of and remedies. 
Have prior though point economics, real he please. </div> </li> <li class="list-group-item py-4 px-4"> <a href="javascript:void(0)" class="fs-lg fw-500">Company project Timeline Stock @Settings.AppName</a> <div class="fs-xs mt-1"> <a href="javascript:void(0)" class="text-success">http://wrapbootstrap.com/themes/admin</a> </div> <div class="mt-2"> Once freshlybrewed of it as and big behind he sufficiently the secure our represent on eagerly magazine where explanation every he they should a go presentations. </div> </li> <li class="list-group-item py-6 px-4"> <h3 class="fw-400" role="heading">Videos</h3> <div class="scrolling-wrapper"> <div class="card shadow-0 d-inline-flex mr-2" style="width: 200px;"> <div class="card-img-top position-relative" style="background-image:url('/img/thumbs/ng-thumb-video-1.png');display: block;height: 110px;width: 200px;"> <div class="w-100 h-100 d-flex align-items-center justify-content-center"> <i class="fas fa-play-circle display-3 m-0 opacity-50"></i> </div> </div> <div class="card-body py-3 px-2"> <a href="#" class="stretched-link fs-lg fw-500">@Settings.App Introduction</a> <p class="fs-xs mt-1">Some quick example text to build on the card title and make up the bulk of the card's content.</p> <span class="text-success fs-xs">YouTube</span> <span class="text-muted fs-xs opacity-50">- Jul 18, 2023 </span> </div> </div> <div class="card shadow-0 d-inline-flex mr-2" style="width: 200px;"> <div class="card-img-top position-relative" style="background-image:url('/img/thumbs/ng-thumb-video-2.png');display: block;height: 110px;width: 200px;"> <div class="w-100 h-100 d-flex align-items-center justify-content-center"> <i class="fas fa-play-circle display-3 m-0 opacity-50"></i> </div> </div> <div class="card-body py-3 px-2"> <a href="#" class="stretched-link fs-lg fw-500">Advanced layouts</a> <p class="fs-xs mt-1">Some quick example text to build on the card title and make up the bulk of the card's content.</p> <span class="text-success fs-xs">YouTube</span> <span class="text-muted fs-xs opacity-50">- Jan 15, 2023 </span> </div> </div> </div> </li> <li class="list-group-item py-4 px-4"> <a href="javascript:void(0)" class="fs-lg fw-500">Online software and management toolkit using @Settings.App UI</a> <div class="fs-xs mt-1"> <a href="javascript:void(0)" class="text-success">http://wrapbootstrap.com/themes/admin</a> </div> <div class="mt-2"> Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Aliquam laoreet turpis sed leo gravida interdum. Aliquam efficitur tempor enim ac pretium. Integer scelerisque dui sed ex laoreet congue. </div> </li> <li class="list-group-item py-4 px-4"> <a href="javascript:void(0)" class="fs-lg fw-500">Bootstrap Templates & Themes from WrapBootstrap</a> <div class="fs-xs mt-1"> <a href="javascript:void(0)" class="text-success">http://wrapbootstrap.com/themes/admin</a> </div> <div class="mt-2"> Been the as I have stairs familiarity he everything our except with now, there's he literature stage all the himself bed and skyline overhauls at how may maintain didn't depend train tone, sign he reassuring complete reached chance. 
</div> </li> <li class="list-group-item py-4 px-4"> <a href="javascript:void(0)" class="fs-lg fw-500">It impenetrable hazardous best.</a> <div class="fs-xs mt-1"> <a href="javascript:void(0)" class="text-success">http://wrapbootstrap.com/themes/admin</a> </div> <div class="mt-2"> Must and agency is she something sentences that italic, to would trust, a the far was for small quarter following thought, any at something over there of being me. Frequency; And cheerful, tone the at their lobby, are was the between touch have play suppliers, eye stash too the that. </div> </li> <li class="list-group-item py-4 px-4"> <a href="javascript:void(0)" class="fs-lg fw-500">Temple of appeared a somewhere, hands</a> <div class="fs-xs mt-1"> <a href="javascript:void(0)" class="text-success">http://fakewebsites.com/web-app/peoples-theme</a> </div> <div class="mt-2"> Time. Reason that staple in her many a do comments less recommended. Easier just dull two harmonics. Strenuous much back to over career here. Let's of with business the completely for anger get that up software god mostly my in phase he on knowing posterity drew he or each any that brown said in and have he nonsense, evils then first wait neuter. </div> </li> <li class="list-group-item py-4 px-4"> <a href="javascript:void(0)" class="fs-lg fw-500">You did to and bed seen, documents violin to groundtem</a> <div class="fs-xs mt-1"> <a href="javascript:void(0)" class="text-success">http://gotbootstrap.com/templates/kiwik-rickshaw-admin</a> </div> <div class="mt-2"> The wrong findings. Only into the considerations, stuffed puzzles was enough and how queen, and sooner person, different in to pass not. Able mellower to having as easier at live of process anger more, such, what avarice attention from warned congress, you the his o'clock enterprises then, an velocity a belong, samples copy and occasion films and day secure that dissolute by her ever the headline take what two as example, it is about but employed one in sleeping eyes the like. </div> </li> <li class="list-group-item py-4 px-4"> <a href="javascript:void(0)" class="fs-lg fw-500">To she enjoying world could to</a> <div class="fs-xs mt-1"> <a href="javascript:void(0)" class="text-success">http://sundaytimesnyc.com/business/admin</a> </div> <div class="mt-2"> Decades the and the of deceleration been from with key a the pass in such safe motors it abused years temple a she have rush one a yes, it hesitated of he refinements. Day into of was concise principles, those with but from behind most ideas been their explain text to their and spots present events do was, a best odd one is one in some we explains way one attempt. 
</div> </li> </ul> </div> </div> <div class="tab-pane" id="tab-images" role="tabpanel" aria-labelledby="tab-images"> <div class="py-2"> <div class="scrolling-wrapper demo-h-spacing"> <a href="#" class="btn btn-pills btn-outline-secondary d-inline-block js-waves-off"> chart </a> <a href="#" class="btn btn-pills btn-outline-secondary d-inline-block js-waves-off"> theme </a> <a href="#" class="btn btn-pills btn-outline-secondary d-inline-block js-waves-off"> layout </a> <a href="#" class="btn btn-pills btn-outline-secondary d-inline-block js-waves-off"> admin dashboard </a> <a href="#" class="btn btn-pills btn-outline-secondary d-inline-block js-waves-off"> bootstrap </a> <a href="#" class="btn btn-pills btn-outline-secondary d-inline-block js-waves-off"> app </a> <a href="#" class="btn btn-pills btn-outline-secondary d-inline-block js-waves-off"> header </a> <a href="#" class="btn btn-pills btn-outline-secondary d-inline-block js-waves-off"> awesome </a> <a href="#" class="btn btn-pills btn-outline-secondary d-inline-block js-waves-off"> responsive </a> </div> </div> <div class="row"> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/1.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/2.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/3.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> 
</a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/5.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/6.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/10.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/14.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div 
class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/12.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/4.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/1.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/2.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div 
class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/3.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/5.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/6.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/10.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/14.png" 
alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/12.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/4.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/15.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/13.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left 
pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/11.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/1.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/2.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> <small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/3.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> <div class="col-6 col-sm-4 col-md-4 col-lg-3 col-xl-2 py-2 px-2"> <a href="#" class="position-relative show-child-on-hover d-block shadow-hover"> <div class="position-absolute pos-top pos-left pos-right pos-bottom show-on-hover-parent"> <div class="position-absolute pos-top pos-bottom pos-left pos-right opacity-20 bg-primary-500 rounded"></div> <div class="position-absolute py-1 px-2 m-2 pos-bottom pos-right bg-primary text-white rounded opacity-90"> 
<small>394x222</small> </div> <div class="position-absolute pos-top pos-bottom pos-left pos-right d-flex align-items-center justify-content-center text-white"> <i class="@(Settings.Theme.IconPrefix) fa-plus-circle display-2 m-0 opacity-90"></i> </div> </div> <img src="~/img/demo/search/4.png" alt="Search @Settings.App Thumbnail" class="img-thumbnail"> </a> </div> </div> </div> </div> <ul class="pagination my-4"> <li class="page-item"> <a class="page-link" href="#" aria-label="Previous"> <span aria-hidden="true">«</span> </a> </li> <li class="page-item active"> <a class="page-link" href="#">1</a> </li> <li class="page-item"> <a class="page-link" href="#">2</a> </li> <li class="page-item"> <a class="page-link" href="#">3</a> </li> <li class="page-item"> <a class="page-link" href="#">4</a> </li> <li class="page-item"> <a class="page-link" href="#">5</a> </li> <li class="page-item"> <a class="page-link" href="#">6</a> </li> <li class="page-item"> <a class="page-link" href="#" aria-label="Next"> <span aria-hidden="true">»</span> </a> </li> </ul> </div> @section ScriptsBlock { <script> initApp.pushSettings("layout-composed", false); </script> }
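@*
    Each thumbnail card above pairs "show-child-on-hover" on the anchor with
    "show-on-hover-parent" on the overlay so the zoom icon and size badge only
    appear on mouse-over. A minimal sketch of how such utility classes are
    commonly implemented (an assumption for illustration; the theme's actual
    stylesheet may differ):

    .show-child-on-hover .show-on-hover-parent { opacity: 0; transition: opacity .2s ease; }
    .show-child-on-hover:hover .show-on-hover-parent { opacity: 1; }
*@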
@{ ViewBag.MainMenu = "Settings"; ViewBag.LeftMenu = "Material"; ViewBag.Title = "图文消息"; Layout = "~/Views/Shared/_Layout.cshtml"; } <link rel="stylesheet" type="text/css" href="/Scripts/wangEditor/css/wangEditor.min.css"> <script type="text/javascript" src='/Scripts/wangEditor/js/wangEditor.min.js'></script> <style> .wangEditor-container .wangEditor-txt p, .wangEditor-container .wangEditor-txt h1, .wangEditor-container .wangEditor-txt h2, .wangEditor-container .wangEditor-txt h3, .wangEditor-container .wangEditor-txt h4, .wangEditor-container .wangEditor-txt h5 { margin: 0px 0; line-height: 1.8; } </style> <style type="text/css"> .divImageMaterialContainer { width: 170px; background-color: #FFF; } .tableImageMaterialContainer { width: 100%; border-collapse: collapse; border: solid #E7E7EB; border-width: 1px 0 0 1px; } .tableImageMaterialContainer td { border: 1px solid #E7E7EB; } .divArticleMaterialContainer { background-color: #FFF; font-size: 13px; } .tableArticleMaterialContainer { width: 100%; border-collapse: collapse; border: solid #E7E7EB; border-width: 1px 0 0 1px; } .tableArticleMaterialContainer td { border: 1px solid #E7E7EB; } .tableNoBorder { border: 0px; } .tableNoBorder td { border: 0px; } .divFirstArticleTitle { position: absolute; line-height: 24px; padding-left: 7px; vertical-align: middle; left: 0px; right: 0px; bottom: 0px; height: 24px; background-color: #000000; color: white; filter: alpha(opacity=70); -moz-opacity: 0.7; -khtml-opacity: 0.7; opacity: 0.7; } .divArticleFirstItem { cursor: pointer; border: 2px solid #FFF; position: relative; height: 100px; } .divArticleFirstItem:hover { border: 2px solid #393; } .divArticleItem { cursor: pointer; margin-top: 10px; margin-left: 5px; margin-right: 5px; border: 2px solid #FFF; } .divArticleItem:hover { border: 2px solid #393; } .divArticleItemActive { border: 2px solid skyblue; } </style> <script type="text/javascript"> var _mode = "create"; // or "modify" var _id; // Article list (WeixinAddArticleMaterialArgs) var _articleList; var _currentArticleIndex = -1; var _validator; // Rich-text editor var _editor; $(document).ready(function () { _validator = $("#form").validate({ ignore: "", // also validate hidden fields; disabled by default since jQuery Validate 1.9 onfocusout: false, onkeyup: false, showErrors: showValidationErrors, rules: { "txtName": "required", "txtTitle": "required", "hiddenMediaId": "required" }, messages: { "txtName": "请输入素材名称;", "txtTitle": "请输入标题;", "hiddenMediaId": "请选择封面图片;" } }); //_editor = $('#txtContent').wangEditor({ // 'menuConfig': [ // ['viewSourceCode'], // ['bold', 'underline', 'italic', 'foreColor', 'backgroundColor', 'strikethrough'], // ['blockquote', 'fontFamily', 'fontSize', 'setHead', 'list', 'justify'], // ['createLink', 'unLink', 'insertTable'], // ['insertLocation'], // ['undo', 'redo', 'fullScreen'] // ] //}); _editor = new wangEditor('divContentEditor'); _editor.config.menus = [ 'source', '|', 'bold', 'underline', 'italic', 'strikethrough', 'eraser', 'forecolor', 'bgcolor', '|', 'quote', 'fontfamily', 'fontsize', 'head', 'unorderlist', 'orderlist', 'alignleft', 'aligncenter', 'alignright', '|', 'link', 'unlink', 'table', '|', 'undo', 'redo', 'fullscreen' ]; _editor.create(); _id = getQueryString("id"); if (_id == undefined || _id == "") { // a brand-new material starts with one article by default addArticle(); } else { loadData(_id); } }); function loadData(id) { _mode = "modify"; $("#spanTitle").html("修改图文素材"); // $("#btnRemove").show(); var loadLayerIndex = layer.load(0, { shade: [0.2, '#fff'] }); $.ajax({ url: "/Api/Material/GetArticleMaterial?id=" + id, type: "POST", dataType: "json", success:
function (data, status, jqXHR) { layer.close(loadLayerIndex); if (data.Success) { _articleList = data.Data; // $("#txtId").val(pointCommodity.Id); $("#txtName").val(_articleList.name); //if (_articleList.weixinStatus != 2) //{ // $("#divFailedMessage").show(); //} if (_articleList.weixinStatus == 0) { $("#divRemoveCurrent").show(); $("#btnSaveAndPublish").show(); $("#btnSaveAndPublish").removeAttr("disabled"); $("#btnSaveAndPublish2").show(); $("#btnSaveAndPublish2").removeAttr("disabled"); $("#spanPublish").html("尚未发布"); } else { $("#divRemoveCurrent").hide(); $("#btnSaveAndPublish").hide(); $("#btnSaveAndPublish2").hide(); $("#spanPublish").html("已发布"); } switchArticle(0); } else { layerAlert(data.Message); } }, error: function (xmlHttpRequest) { layer.close(loadLayerIndex); alert("Error: " + xmlHttpRequest.status); } }); } // Add an article and make it the selected one function addArticle() { if (_articleList == null) _articleList = new Object(); if (_articleList.articles == undefined || _articleList.articles == null) { _articleList.articles = new Array(); } //Infrastructure.ArticleMaterialItemEntity var article = new Object(); article.title = "请输入标题"; article.thumb_media_id = ""; article.show_cover_pic = 0; article.author = "请输入作者"; article.digest = ""; article.content = ""; article.url = ""; article.content_source_url = ""; article.thumbUrl = ""; article.thumbName = ""; article.imgMappingList = new Array(); _articleList.articles[_articleList.articles.length] = article; switchArticle(_articleList.articles.length - 1); } function showArticleList() { if (_articleList.articles == undefined || _articleList.articles == null || _articleList.articles.length == 0) { document.getElementById('divArticleList').innerHTML = ""; return; } var gettpl = document.getElementById('articleListTemplate').innerHTML; laytpl(gettpl).render(_articleList.articles, function (html) { document.getElementById('divArticleList').innerHTML = html; }); } // Select and load the article at the given index function switchArticle(index) { // save the current article before switching saveCurrentArticle(); _currentArticleIndex = index; showArticleList(); var article = _articleList.articles[index]; $("#txtTitle").val(article.title); $("#txtAuthor").val(article.author); $("#txtContentSourceUrl").val(article.content_source_url); if (article.show_cover_pic == 1) { $("#chkShowCoverPic").attr("checked", "checked"); } else { $("#chkShowCoverPic").removeAttr("checked"); } $("#txtDigest").val(article.digest); $("#hiddenMediaId").val(article.thumb_media_id); _editor.$txt.html(article.content); showMaterialImage(article); } // Save the current article (copy the form fields back into the model) function saveCurrentArticle() { if (_currentArticleIndex < 0) return; var article = _articleList.articles[_currentArticleIndex]; if (article == null) return; article.title = $("#txtTitle").val(); article.author = $("#txtAuthor").val(); article.content_source_url = $("#txtContentSourceUrl").val(); article.show_cover_pic = $("#chkShowCoverPic").is(":checked") ?
1 : 0; article.digest = $("#txtDigest").val(); article.content = _editor.$txt.html(); } function showMaterialImageSelect() { //alert(0); layer.open({ type: 2, area: ['760px', '450px'], //宽高 closeBtn: false, title: "", shift: _layerShift, content: '/Material/ImageSelect' }); } function __selectMaterialImageResult(image, layerIndex) { if (layerIndex != undefined && layerIndex != null) { layer.close(layerIndex); } var article = _articleList.articles[_currentArticleIndex]; article.thumb_media_id = image.MediaId; article.thumbUrl = image.Url; article.thumbName = image.Name; //showMaterialImage(article); switchArticle(_currentArticleIndex); } function showMaterialImage(article) { if (article.thumb_media_id != undefined && article.thumb_media_id != null && article.thumb_media_id != "") { $("#divMaterialImage_Select").hide(); $("#divMaterialImage_Image").show(); var gettpl = document.getElementById('tableMaterialImageTemplate').innerHTML; laytpl(gettpl).render(article, function (html) { document.getElementById('divMaterialImage_Image').innerHTML = html; }); } else { $("#divMaterialImage_Select").show(); $("#divMaterialImage_Image").hide(); } } function removeMaterialImage() { var article = _articleList.articles[_currentArticleIndex]; article.thumb_media_id = ""; article.thumbUrl = ""; article.thumbName = ""; //showMaterialImage(article); switchArticle(_currentArticleIndex); } function validateArticle() { for (var i = 0; i < _articleList.articles.length; i++) { var article = _articleList.articles[i]; if (article.title == "" || article.thumb_media_id == "") { switchArticle(i); return false; } } return true; } function save(callback) { saveCurrentArticle(); if (validateArticle() == false) { if (_validator.form() == false) { return; } } _articleList.name = $("#txtName").val(); var url = "/Api/Material/AddArticleMaterial"; if (_articleList.id != undefined && _articleList.id != null && _articleList.id != "") { url = "/Api/Material/UpdateArticleMaterial"; } var loadLayerIndex = layer.load(0, { shade: [0.2, '#fff'] }); $.ajax({ url: url, type: "POST", dataType: "json", data: JSON.stringify(_articleList), success: function (data, status, jqXHR) { // alert(data); layer.close(loadLayerIndex); if (data.Success) { if (_mode == "create") { layerAlert("保存成功。", function () { var loadLayerIndex = layer.load(0, { shade: [0.2, '#fff'] }); window.location.href = "/Material/ArticleEdit?id=" + data.Data.Id; }); } else { if (callback != undefined && callback != null) { callback(); } else { layerAlert("保存成功。"); } } } else { layerAlert(data.Message, function () { if (_mode == "create" && data.Data.Id != null && data.Data.Id != "") { window.location.href = "/Material/ArticleEdit?id=" + data.Data.Id; } }); } }, error: function (xmlHttpRequest) { layer.close(loadLayerIndex); alert("Error: " + xmlHttpRequest.status); } }); } function moveUp() { if (_currentArticleIndex <= 0) return; saveCurrentArticle(); var currentArticle = _articleList.articles[_currentArticleIndex]; _articleList.articles.splice(_currentArticleIndex, 1); var newIndex = _currentArticleIndex - 1; _articleList.articles.splice(newIndex, 0, currentArticle); _currentArticleIndex = -1; switchArticle(newIndex); } function moveDown() { if (_currentArticleIndex >= _articleList.articles.length - 1) return; saveCurrentArticle(); var currentArticle = _articleList.articles[_currentArticleIndex]; _articleList.articles.splice(_currentArticleIndex, 1); var newIndex = _currentArticleIndex + 1; _articleList.articles.splice(newIndex, 0, currentArticle); _currentArticleIndex = 
-1; switchArticle(newIndex); } function removeCurrent() { if (_currentArticleIndex < 0) return; if (_articleList.articles.length == 1) return; _articleList.articles.splice(_currentArticleIndex, 1); if (_articleList.articles.length == 0) { addArticle(); } _currentArticleIndex = -1; switchArticle(0); } function uploadFile() { fileUploadArgs = new Object(); fileUploadArgs.uploadMethod = "UploadToWeixinImg"; fileUploadArgs.getUploadResultApi = "/Api/Settings/GetUploadToWeixinImgResult?fileId={0}"; __showFileUpload(getUploadResult, fileUploadArgs); } function getUploadResult(fileServiceAddress, result) { // alert(JSON.stringify(result)); //当前文章 var article = _articleList.articles[_currentArticleIndex]; if (article == null) return; if (article.imgMappingList == undefined || article.imgMappingList == null) { article.imgMappingList = new Array(); } var url = fileServiceAddress + result.Data.StoreFilePath; var materialImgMapping = new Object(); materialImgMapping.Id = result.Data.Id; materialImgMapping.FileUrl = url; materialImgMapping.WeixinUrl = result.Data.WeixinUrl; article.imgMappingList[article.imgMappingList.length] = materialImgMapping; _editor.$txt.append("<img src='" + url + "' style='max-width:100%' />"); } function publish() { var confirmLayerIndex = layer.confirm("是否确认发布该素材到微信后台?<br/>注意:受限于微信接口,图文素材在发布到微信后台之后,您就不可以再添加或删除文章,但是可以对既有文章进行修改。", { btn: ['确认', '取消'], //按钮 shade: [0.4, '#393D49'], title: false, closeBtn: false, shift: _layerShift }, function () { layer.close(confirmLayerIndex); save(doPublish); }); } function doPublish() { var loadLayerIndex = layer.load(0, { shade: [0.2, '#fff'] }); $.ajax({ url: "/Api/Material/PublishArticleMaterial?id=" + _id, type: "POST", dataType: "json", success: function (data, status, jqXHR) { layer.close(loadLayerIndex); if (data.Success) { layerAlert("发布成功。", function () { var loadLayerIndex = layer.load(0, { shade: [0.2, '#fff'] }); window.location.href = "/Material/ArticleEdit?id=" + _id; }); } else { layerAlert(data.Message); } }, error: function (xmlHttpRequest) { layer.close(loadLayerIndex); alert("Error: " + xmlHttpRequest.status); } }); } </script> <script id="tableMaterialImageTemplate" type="text/html"> <div class="divImageMaterialContainer"> <table class="tableImageMaterialContainer"> <tr> <td height="150" align="center"><img style="max-width:166px; max-height:100%" src="{{ d.thumbUrl }}" /></td> </tr> <tr> <td height="30" style="word-wrap: break-word; word-break: break-all; "> <span class="font_gray_15" style="margin-left: 10px; ">{{ d.thumbName }}</span> </td> </tr> <tr> <td height="30" valign="middle" bgcolor="#F4F5F9"> <div> @*<div style="float: left; margin-left: 10px;"> <img src="/Content/Images/ico_edit.jpg" width="20" height="20"> </div>*@ <div style="float: right; margin-right: 10px;"> <img src="/Content/Images/ico_remove.jpg" width="20" height="20" onclick="removeMaterialImage()"> </div> <div style="clear: both"></div> </div> </td> </tr> </table> </div> </script> <script id="articleListTemplate" type="text/html"> <div class="divArticleMaterialContainer"> <table class="tableArticleMaterialContainer"> <tr> <td valign="top"> <div> <div class="divArticleFirstItem {{# if(_currentArticleIndex == 0){ }}divArticleItemActive{{# } }}" onclick="switchArticle(0)"> <div style="position:absolute; left:0px; right:0px; height:100px; overflow:hidden"> <img src="{{ d[0].thumbUrl }}" style=" width:100%;"> </div> <div class="divFirstArticleTitle"> {{ d[0].title }} </div> </div> {{# for(var i = 1, len = d.length; i < len; i++){ }} <div 
class="divDotLine" style="margin-top:10px;"> </div> <div class="divArticleItem {{# if(_currentArticleIndex == i){ }}divArticleItemActive{{# } }}" onclick="switchArticle({{i}})"> <table class="tableNoBorder" width="100%" border="0" cellspacing="0" cellpadding="0"> <tr> <td>{{ d[i].title }}</td> <td width="75" align="right"><img src="{{ d[i].thumbUrl }}" width="70" height="70"></td> </tr> </table> </div> {{# } }} </div> </td> </tr> {{# if(_articleList.weixinStatus==undefined || _articleList.weixinStatus == 0){ }} <tr> <td height="50" align="center" valign="middle" bgcolor="#F4F5F9"> <div> <a href="javascript:void(0)" onclick="addArticle()">+ 点击添加 </a> </div> </td> </tr> {{# } }} </table> </div> </script> @*<div id="divContentTips"> 配置公众平台账号,才能让你的店铺与微信公众号关联。 <a href="#">查看详细&gt;</a> </div>*@ @*<div style=" margin-top:20px;"> <table width="100%" border="0" cellspacing="0" cellpadding="0"> <tr> <td class="td_ContentTab_active">图文消息</td> <td class="td_ContentTab">图片库</td> <td>&nbsp;</td> </tr> <tr> <td colspan="3" bgcolor="#EEEEEE" height="2"></td> </tr> </table> </div>*@ <div style="margin-left:20px; margin-right:20px;"> <div style=" margin-top:15px; "> <span id="spanTitle" class="font_black_24">新建图文素材</span> <span id="spanPublish" style="color:gray;margin-left:20px;"></span> <div style="float:right;margin-left:20px;"> <input name="btnSaveAndPublish" type="button" class="btn_blue" id="btnSaveAndPublish" value="保存并发布到微信后台" onclick="publish()" disabled /> </div> <div style="float:right"> <input name="btnSave" type="button" class="btn_blue" id="btnSave" value="保存" onclick="save()" /> </div> <div style="clear:both"></div> </div> <div style="margin-top:15px; padding-left:20px;padding-top:5px; padding-bottom:5px; border: 1px dotted red; "> 注意:受限于微信接口,图文素材在发布到微信后台之后,您就不可以再添加或删除素材中的文章,但是可以对既有文章进行修改。 </div> @*<div id="divFailedMessage" style="margin-top:10px; color:red;display:none"> 未能成功同步到微信素材库,请点击“保存”按钮重试。 </div>*@ <form id="form"> <div style="margin-top:20px; padding-left:0px;"> <table width="100%" border="0" cellpadding="5" cellspacing="0"> <tr> <td width="120">名称:</td> <td><input id="txtName" name="txtName" type="text" class="input_16" style="width:300px; " maxlength="50" /></td> </tr> </table> </div> <div style="margin-top:20px;"> <table width="100%" border="0" cellspacing="0" cellpadding="0"> <tr> <td width="260" valign="top"> <div id="divArticleList"> </div> <div class="divDotBorder_gray" style="margin-top:10px;"> <div style="float:left"> <a href="javascript:void(0)" onclick="moveUp()">上移</a> <a href="javascript:void(0)" onclick="moveDown()">下移</a> </div> <div id="divRemoveCurrent" style="float:right"> <a href="javascript:void(0)" onclick="removeCurrent()">删除</a> </div> <div style="clear:both"></div> </div> </td> <td width="20"></td> <td valign="top"> <div> <div> <input id="txtTitle" name="txtTitle" type="text" class="input_16" maxlength="64" /> </div> <div class="divDotLine" style="margin-top:10px;"> </div> <div> <input id="txtAuthor" name="txtAuthor" type="text" class="input_16" style="margin-top:10px;" maxlength="8" /> </div> <div style="margin-top:10px;"> <div id="divContentEditor" style='height:300px; '></div> </div> <div> <div style="float:right;"> <input name="btnUpload" type="button" class="btn_white" id="btnUpload" value="上传图片" onclick="uploadFile()" /> </div> <div style="clear:both"></div> </div> <div style="margin-top:10px;"> 原文链接 </div> <div style="margin-top:1px;"> <input id="txtContentSourceUrl" name="txtContentSourceUrl" type="text" class="input_16" style="margin-top:10px;" 
maxlength="500" /> </div> <div style="margin-top:10px;"> <span>封面</span> <span>大图片建议尺寸:900像素 * 500像素</span> </div> <input type="hidden" id="hiddenMediaId" name="hiddenMediaId" /> <div id="divMaterialImage_Select" style="margin-top:10px;"> <div class="divDotBorder_gray" style="width:150px; height:150px; cursor:pointer" onclick="showMaterialImageSelect()"> <table height="100%" style="vertical-align:middle" width="100%" border="0" align="center" cellpadding="0" cellspacing="0"> <tr> <td align="center" valign="middle"> <div> <img src="/Content/Images/plus.jpg" width="35" height="35"> </div> <div style="margin-top:10px;"> 从素材库选择 </div> </td> </tr> </table> </div> </div> <div id="divMaterialImage_Image"> </div> <div style="margin-top:10px;"> <input name="" id="chkShowCoverPic" type="checkbox" class="input_checkbox" value="" /> <label for="chkShowCoverPic">封面图片显示在正文中</label><span style="margin-left:30px;"></span> </div> <div style="margin-top:10px;"> <span>摘要</span> <span>选填,如果不填写会默认抓取正文前54个字</span> </div> <div style="margin-top:10px;"> <textarea name="txtDigest" rows="4" class="input_18" id="txtDigest" maxlength="120"></textarea> </div> </div> </td> </tr> </table> </div> </form> </div> <div style=" margin-top:25px;"> <div style="float:right;margin-left:20px;"> <input name="btnSaveAndPublish2" type="button" class="btn_blue" id="btnSaveAndPublish2" value="保存并发布到微信后台" onclick="publish()" disabled /> </div> <div style="float:right"> <input name="btnSave" type="button" class="btn_blue" id="btnSave" value="保存" onclick="save()" /> </div> <div style="clear:both"></div> </div> @Helpers.FileUpload()
// https://www.nuget.org/api/v2/package/Newtonsoft.Json/12.0.2 #r "sha256:b9b4e633ea6c728bad5f7cbbef7f8b842f7e10181731dbe5ec3cd995a6f60287" // from elm-fullstack-separate-assemblies-4505d5fa0951dbb5d83383b17058704c58ebc674-linux-x64.zip #r "sha256:67d1550a5b06e9b361fdc9220062dd960e036e7daaa063e92380d186f93089cf" using System; using System.Collections.Generic; using System.Collections.Immutable; using System.Linq; int loadCompositionLimitFileCount = 110; int loadCompositionLimitAggregateFileSize = 800_000; int loadCompositionLimitMaximumPathLength = 200; public class RequestStructure { public IReadOnlyList<ElmMakeRequestStructure> ElmMakeRequest; public IReadOnlyList<string> FormatElmModuleTextRequest; public IReadOnlyList<string> LoadCompositionRequest; } public class ResponseStructure { [Newtonsoft.Json.JsonProperty(NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public IReadOnlyList<ElmMakeResponseStructure> ElmMakeResponse; [Newtonsoft.Json.JsonProperty(NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public IReadOnlyList<FormatElmModuleTextResponseStructure> FormatElmModuleTextResponse; [Newtonsoft.Json.JsonProperty(NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public IReadOnlyList<LoadCompositionResponseStructure> LoadCompositionResponse; [Newtonsoft.Json.JsonProperty(NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public IReadOnlyList<string> ErrorResponse; } public class ElmMakeRequestStructure { public IReadOnlyList<FileWithPath> files; public IReadOnlyList<string> entryPointFilePathFromWorkingDirectory; public IReadOnlyList<string> workingDirectoryPath; public bool makeOptionDebug; } public class FormatElmModuleTextResponseStructure { public Maybe<string> formattedText; public ProcessOutput processOutput; } public class FileWithPath { public IReadOnlyList<string> path; public string contentBase64; } public class ElmMakeResponseStructure { public ProcessOutput processOutput; public Maybe<string> outputFileContentBase64; public ProcessOutput reportJsonProcessOutput; } public struct ProcessOutput { public string standardError; public string standardOutput; public int exitCode; } public class LoadCompositionResponseStructure { public string compositionId; public IReadOnlyList<FileWithPath> filesAsFlatList; public string urlInCommit; } string GetSerialResponseFromSerialRequest(string serializedRequest) { var request = Newtonsoft.Json.JsonConvert.DeserializeObject<RequestStructure>(serializedRequest); var response = GetResponseFromRequest(request); return Newtonsoft.Json.JsonConvert.SerializeObject(response); } ResponseStructure GetResponseFromRequest(RequestStructure request) { var elmMakeRequest = request.ElmMakeRequest?.FirstOrDefault(); if (elmMakeRequest != null) { return new ResponseStructure { ElmMakeResponse = ImmutableList.Create(ElmMake(elmMakeRequest)) }; } var formatElmModuleTextRequest = request.FormatElmModuleTextRequest?.FirstOrDefault(); if (formatElmModuleTextRequest != null) { return new ResponseStructure { FormatElmModuleTextResponse = ImmutableList.Create(ElmFormat.FormatElmModuleText(formatElmModuleTextRequest)) }; } var loadCompositionRequest = request.LoadCompositionRequest?.FirstOrDefault(); if (loadCompositionRequest != null) { var sourcePath = loadCompositionRequest; if (!(Uri.TryCreate(sourcePath, UriKind.Absolute, out var uriResult) && (uriResult.Scheme == Uri.UriSchemeHttp || uriResult.Scheme == Uri.UriSchemeHttps))) { return new ResponseStructure { ErrorResponse = ImmutableList.Create("This string 
is not a supported URL: '" + sourcePath + "'") }; } var loadFromGitResult = Pine.LoadFromGitHubOrGitLab.LoadFromUrl(sourcePath); if (loadFromGitResult?.Ok == null) { return new ResponseStructure { ErrorResponse = ImmutableList.Create( "Failed to load from path '" + sourcePath + "': " + loadFromGitResult?.Err) }; } if (loadFromGitResult?.Ok?.tree == null) { return new ResponseStructure { ErrorResponse = ImmutableList.Create("Did not find a tree object at '" + sourcePath + "'") }; } var composition = Pine.Composition.FromTreeWithStringPath(loadFromGitResult.Ok.tree); var compositionId = Pine.CommonConversion.StringBase16FromByteArray(Pine.Composition.GetHash(composition)); var blobs = loadFromGitResult.Ok.tree.EnumerateBlobsTransitive() .ToImmutableList(); var urlInCommit = loadFromGitResult.Ok.urlInCommit; ResponseStructure responseErrorExceedingLimit(string limitName) { return new ResponseStructure { ErrorResponse = ImmutableList.Create("Composition " + compositionId + " from " + urlInCommit + " exceeds supported limits: " + limitName) }; } var fileCount = blobs.Count(); if (loadCompositionLimitFileCount < fileCount) { return responseErrorExceedingLimit("File count: " + fileCount); } var aggregateFileSize = blobs.Sum(file => file.blobContent.Count); if (loadCompositionLimitAggregateFileSize < aggregateFileSize) { return responseErrorExceedingLimit("Aggregate file size: " + aggregateFileSize); } var maximumPathLength = blobs.Max(file => file.path.Sum(pathElement => pathElement.Length)); if (loadCompositionLimitMaximumPathLength < maximumPathLength) { return responseErrorExceedingLimit("Maximum path length: " + maximumPathLength); } var filesAsFlatList = blobs .Select(file => new FileWithPath { path = file.path, contentBase64 = Convert.ToBase64String(file.blobContent.ToArray()), }) .ToImmutableList(); return new ResponseStructure { LoadCompositionResponse = ImmutableList.Create( new LoadCompositionResponseStructure { compositionId = compositionId, filesAsFlatList = filesAsFlatList, urlInCommit = urlInCommit, }) }; } return new ResponseStructure { ErrorResponse = ImmutableList.Create("This request does not encode any supported case.") }; } ElmMakeResponseStructure ElmMake(ElmMakeRequestStructure elmMakeRequest) { var elmCodeFiles = elmMakeRequest.files .ToImmutableDictionary( file => (IImmutableList<string>)file.path.ToImmutableList(), file => (IReadOnlyList<byte>)Convert.FromBase64String(file.contentBase64)); var environmentFiles = elmCodeFiles.Select(file => (path: file.Key, content: file.Value)).ToImmutableList(); var entryPointFilePathFromWorkingDirectory = MakePlatformSpecificPath(elmMakeRequest.entryPointFilePathFromWorkingDirectory); var elmMakeOutputFileName = "elm-make-output.html"; var commandLineCommonArguments = "make " + entryPointFilePathFromWorkingDirectory + " " + (elmMakeRequest.makeOptionDebug ? "--debug" : ""); var commandLineArguments = commandLineCommonArguments + " --output=" + elmMakeOutputFileName; var reportJsonCommandLineArguments = commandLineCommonArguments + " --report=json"; (Pine.ExecutableFile.ProcessOutput processOutput, IReadOnlyCollection<(IImmutableList<string> path, IReadOnlyList<byte> content)> resultingFiles) commandResultsFromArguments(string arguments) { return Pine.ExecutableFile.ExecuteFileWithArguments( environmentFiles, GetElmExecutableFile, arguments, new Dictionary<string, string>() { // Avoid elm make failing on `getAppUserDataDirectory`. 
/* Also, work around problems with elm make like this: -- HTTP PROBLEM ---------------------------------------------------------------- The following HTTP request failed: <https://github.com/elm/core/zipball/1.0.0/> Here is the error message I was able to extract: HttpExceptionRequest Request { host = "github.com" port = 443 secure = True requestHeaders = [("User-Agent","elm/0.19.0"),("Accept-Encoding","gzip")] path = "/elm/core/zipball/1.0.0/" queryString = "" method = "GET" proxy = Nothing rawBody = False redirectCount = 10 responseTimeout = ResponseTimeoutDefault requestVersion = HTTP/1.1 } (StatusCodeException (Response {responseStatus = Status {statusCode = 429, statusMessage = "Too Many Requests"}, responseVersion = HTTP/1.1, responseHeaders = [("Server","GitHub.com"),("Date","Sun, 18 Nov 2018 16:53:18 GMT"),("Content-Type","text/html"),("Transfer-Encoding","chunked"),("Status","429 Too Many Requests"),("Retry-After","120") To avoid elm make failing with this error, break isolation here and reuse elm home directory. An alternative would be retrying when this error is parsed from `commandResults.processOutput.StandardError`. */ {"ELM_HOME", GetElmHomeDirectory()}, }, workingDirectory: elmMakeRequest.workingDirectoryPath.ToImmutableList()); } var commandResults = commandResultsFromArguments(commandLineArguments); var newFiles = commandResults.resultingFiles .Where(file => !environmentFiles.Any(inputFile => inputFile.Item1.SequenceEqual(file.path))) .Select(file => new FileWithPath { path = file.path, contentBase64 = Convert.ToBase64String(file.content.ToArray()), }) .ToImmutableList(); var outputFile = newFiles .Where(file => file.path.LastOrDefault() == elmMakeOutputFileName) .FirstOrDefault(); var outputFileContentBase64 = outputFile?.contentBase64; var processOutput = new ProcessOutput { standardOutput = commandResults.processOutput.StandardOutput, standardError = commandResults.processOutput.StandardError, exitCode = commandResults.processOutput.ExitCode, }; var reportJsonCommandResults = commandResultsFromArguments(reportJsonCommandLineArguments); var reportJsonProcessOutput = new ProcessOutput { standardOutput = reportJsonCommandResults.processOutput.StandardOutput, standardError = reportJsonCommandResults.processOutput.StandardError, exitCode = reportJsonCommandResults.processOutput.ExitCode, }; var responseStructure = new ElmMakeResponseStructure { processOutput = processOutput, outputFileContentBase64 = Maybe<string>.NothingFromNull(outputFileContentBase64), reportJsonProcessOutput = reportJsonProcessOutput, }; return responseStructure; } string MakePlatformSpecificPath(IReadOnlyList<string> path) => string.Join(System.IO.Path.DirectorySeparatorChar.ToString(), path); static public byte[] GetElmExecutableFile => Pine.CommonConversion.DecompressGzip(GetElmExecutableFileCompressedGzip); static public byte[] GetElmExecutableFileCompressedGzip => Pine.BlobLibrary.GetBlobWithSHA256(Pine.CommonConversion.ByteArrayFromStringBase16( System.Runtime.InteropServices.RuntimeInformation.IsOSPlatform(System.Runtime.InteropServices.OSPlatform.Linux) ? 
/* Loaded 2019-10-29 from https://github.com/elm/compiler/releases/download/0.19.1/binary-for-linux-64-bit.gz */ "e44af52bb27f725a973478e589d990a6428e115fe1bb14f03833134d6c0f155c" : /* Loaded 2019-10-29 from https://github.com/elm/compiler/releases/download/0.19.1/binary-for-windows-64-bit.gz */ "d1bf666298cbe3c5447b9ca0ea608552d750e5d232f9845c2af11907b654903b")); static public string overrideElmMakeHomeDirectory = null; static string elmHomeDirectory; static public string GetElmHomeDirectory() { elmHomeDirectory = overrideElmMakeHomeDirectory ?? elmHomeDirectory ?? System.IO.Path.Combine(Pine.Filesystem.CreateRandomDirectoryInTempDirectory(), "elm-home"); System.IO.Directory.CreateDirectory(elmHomeDirectory); return elmHomeDirectory; } static public class ElmFormat { static public FormatElmModuleTextResponseStructure FormatElmModuleText(string originalModuleText) { var elmModuleFileName = "ElmModuleToFormat.elm"; var elmModuleFilePath = ImmutableList.Create(elmModuleFileName); var elmFormatResult = Pine.ExecutableFile.ExecuteFileWithArguments( ImmutableList.Create( ((IImmutableList<string>)elmModuleFilePath, (IReadOnlyList<byte>)System.Text.Encoding.UTF8.GetBytes(originalModuleText))), GetElmFormatExecutableFile, " " + elmModuleFileName + " --yes", environmentStrings: null); var resultingFile = elmFormatResult.resultingFiles .FirstOrDefault(file => file.path.SequenceEqual(elmModuleFilePath)) .content; var formattedText = resultingFile == null ? null : System.Text.Encoding.UTF8.GetString(resultingFile.ToArray()); var processOutput = new ProcessOutput { standardOutput = elmFormatResult.processOutput.StandardOutput, standardError = elmFormatResult.processOutput.StandardError, exitCode = elmFormatResult.processOutput.ExitCode, }; return new FormatElmModuleTextResponseStructure { processOutput = processOutput, formattedText = Maybe<string>.NothingFromNull(formattedText), }; } static public byte[] GetElmFormatExecutableFile => Pine.CommonConversion.DecompressGzip(GetElmFormatExecutableFileCompressedGzip); static public byte[] GetElmFormatExecutableFileCompressedGzip => Pine.BlobLibrary.GetBlobWithSHA256(Pine.CommonConversion.ByteArrayFromStringBase16( System.Runtime.InteropServices.RuntimeInformation.IsOSPlatform(System.Runtime.InteropServices.OSPlatform.Linux) ? /* Loaded 2020-08-12 from https://github.com/avh4/elm-format/releases/download/0.8.3/elm-format-0.8.3-linux-x64.tgz */ "488a7eab12837d66aaed8eb23b80647a02c87c38daf6f1a3c4e60fff59fe01be" : /* Loaded 2020-08-12 from https://github.com/avh4/elm-format/releases/download/0.8.3/elm-format-0.8.3-win-i386.zip */ "5fc848a7215f400aae60bd02101809c63bd084e0972b9a8962633afc81a53cbd")); } public class Maybe<JustT> { [Newtonsoft.Json.JsonProperty(NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public IReadOnlyList<object> Nothing; [Newtonsoft.Json.JsonProperty(NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public IReadOnlyList<JustT> Just; static public Maybe<JustT> just(JustT j) => new Maybe<JustT> { Just = ImmutableList.Create(j) }; static public Maybe<JustT> nothing() => new Maybe<JustT> { Nothing = ImmutableList<object>.Empty }; static public Maybe<JustT> NothingFromNull(JustT maybeNull) => maybeNull == null ? nothing() : new Maybe<JustT> { Just = ImmutableList.Create(maybeNull) }; } string InterfaceToHost_Request(string request) { return GetSerialResponseFromSerialRequest(request); }
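/*
   Usage sketch (illustrative only): the host hands InterfaceToHost_Request a
   serialized RequestStructure and receives a serialized ResponseStructure back.
   Formatting an Elm module, for example, could look like this:

   var request = new RequestStructure
   {
       FormatElmModuleTextRequest = ImmutableList.Create("module Main exposing (..)\n\n\nmain =\n    0\n"),
   };

   var responseJson = InterfaceToHost_Request(Newtonsoft.Json.JsonConvert.SerializeObject(request));

   // responseJson deserializes to a ResponseStructure whose FormatElmModuleTextResponse
   // element carries the formatted module text (as Maybe<string>) together with the
   // elm-format process output.
*/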
@model Sheng.WeixinConstruction.Client.Shell.Models.LuckyTicketDrawResultViewModel @{ ViewBag.SubTitle = "活动"; Layout = "~/Views/Shared/_LayoutBlank.cshtml"; } <style type="text/css"> body { margin-bottom: 0.55rem; } .campaignName { font-size: 0.14rem; font-weight: bold; } .campaignDescription { font-size: 0.13rem; color: #666; } #divMemberInfo { font-size: 0.14rem; line-height: 0.14rem; text-align: center; margin-top: 0.05rem; } #divShareMask { position: absolute; top: 0px; bottom: 0px; left: 0px; right: 0px; width: 100%; height: 100%; background-color: black; text-align: right; filter: alpha(opacity=80); -moz-opacity: 0.8; -khtml-opacity: 0.8; opacity: 0.8; } .divLuckyTicketItem { text-align: center; font-size: 0.2rem; color: #FF7F00; background-image: url(/Content/Images/bg_star.jpg); background-repeat: repeat; padding-top: 0.05rem; padding-bottom: 0.05rem; margin-bottom: 0.05rem; } #divFooter { position: fixed; bottom: 0px; left: 0px; right: 0px; height: 0.4rem; font-size: 0.14rem; text-align: center; background-color: #F5F5F5; } </style> <script> var _currentPage = 1; var _totalPage = 1; var _status = @((int)Model.CampaignBundle.Campaign.Status); var _campaignId = getQueryString("campaignId"); $(document).ready(function () { var jsApiConfigStr = "@Newtonsoft.Json.JsonConvert.SerializeObject(Model.JsApiConfig)"; jsApiConfigStr = jsApiConfigStr.replace(new RegExp("&quot;", "gm"), "\""); jsApiConfigStr = jsApiConfigStr.replace(new RegExp("\r\n", "gm"), ""); jsApiConfigStr = jsApiConfigStr.replace(new RegExp("\n", "gm"), ""); var jsApiConfig = eval('(' + jsApiConfigStr + ')'); wx.config(jsApiConfig); loadData(); }); wx.ready(function () { wx.onMenuShareTimeline({ title: '@Model.CampaignBundle.Campaign.ShareTimelineTitle', // share title link: '@Request.Url.Scheme://@Request.Url.Host/Campaign/LuckyTicketShare/@Model.CampaignBundle.Campaign.Domain?campaignId=@Model.CampaignBundle.Campaign.Id&memberId=@ViewBag.Member.Id', // share link imgUrl: '@Model.CampaignBundle.Campaign.ShareImageUrl', // share icon success: function () { shareSuccess("ShareTimeline"); }, cancel: function () { // callback invoked if the user cancels the share } }); wx.onMenuShareAppMessage({ title: '@Model.CampaignBundle.Campaign.ShareAppMessageTitle', // share title desc: '@Model.CampaignBundle.Campaign.ShareAppMessageDescription', // share description link: '@Request.Url.Scheme://@Request.Url.Host/Campaign/LuckyTicketShare/@Model.CampaignBundle.Campaign.Domain?campaignId=@Model.CampaignBundle.Campaign.Id&memberId=@ViewBag.Member.Id', // share link imgUrl: '@Model.CampaignBundle.Campaign.ShareImageUrl', // share icon type: 'link', // share type: music, video or link; defaults to link dataUrl: '', // data link, required when type is music or video; empty by default success: function () { shareSuccess("ShareAppMessage"); }, cancel: function () { // callback invoked if the user cancels the share } }); }); wx.error(function (res) { alert("error:" + res); }); function shareSuccess(type) { var loadLayerIndex = layer.open({ type: 2, shadeClose: false, content: '请稍候...'
}); $.ajax({ url: "/Api/Campaign/" + type + "/@ViewBag.Domain.Id?campaignId=" + _campaignId, type: "POST", dataType: "json", success: function (data, status, jqXHR) { // alert(data); layer.close(loadLayerIndex); if (data.Success) { var resultObj = data.Data; if (resultObj.Point == 0) return; var msg = ""; if (resultObj.Point > 0) { msg += "获得积分:" + resultObj.Point; } if (msg != "") { layerAlertBtn(msg); } } else { layerAlert(data.Message); } }, error: function (xmlHttpRequest) { layer.close(loadLayerIndex); //alert("Error: " + xmlHttpRequest.status); } }); } function loadData(targetPage) { if (targetPage > _totalPage) return; var loadLayerIndex = layer.open({ type: 2, shadeClose: false, content: '请稍候...' }); var args = new Object(); args.Page = targetPage || 1; args.PageSize = 10; args.CampaignId = _campaignId; $.ajax({ url: "/Api/Campaign/GetLuckyTicketWinLogList/@ViewBag.Domain.Id", type: "POST", dataType: "json", data: JSON.stringify(args), success: function (data, status, jqXHR) { layer.close(loadLayerIndex); if (data.Success) { var resultObj = data.Data; _currentPage = resultObj.Page; _totalPage = resultObj.TotalPage; if (_currentPage >= _totalPage) { $("#divPagingContainer").html("没有更多了"); } if(_currentPage==1) { document.getElementById('ticketNumberItemContainer').innerHTML = ""; } var gettpl = document.getElementById('ticketNumberItemTemplate').innerHTML; laytpl(gettpl).render(resultObj.ItemList, function (html) { document.getElementById('ticketNumberItemContainer').innerHTML += html; }); } else { layerAlert(data.Message); } }, error: function (xmlHttpRequest) { layer.close(loadLayerIndex); //alert("Error: " + xmlHttpRequest.status); } }); } function showCampaignPage(){ window.location.href= "/Campaign/LuckyTicket/@ViewBag.Domain.Id?campaignId=" + _campaignId } function showCampaignDescription() { var loadLayerIndex = layer.open({ type: 2, shadeClose: false, content: '请稍候...' 
}); $.ajax({ url: "/Api/Campaign/GetCampaignDescription/@ViewBag.Domain.Id?id=" + _campaignId, type: "POST", dataType: "json", success: function (data, status, jqXHR) { layer.close(loadLayerIndex); if (data.Success) { var resultObj = data.Data; var gettpl = document.getElementById('campaignDescription').innerHTML; laytpl(gettpl).render(resultObj, function (html) { var pageii = layer.open({ type: 1, content: html, shadeClose: false, style: 'position:fixed; left:0.1rem; top:0.1rem;right:0.1rem; bottom:0.1rem; border:none;' }); }); } else { layerAlert(data.Message); } }, error: function (xmlHttpRequest) { layer.close(loadLayerIndex); //alert("Error: " + xmlHttpRequest.status); } }); } </script> <script type="text/html" id="campaignDescription"> <div style="position: fixed; top: 0.1rem; left: 0.1rem; right: 0.1rem; bottom: 0.45rem; overflow: auto;"> {{ d }} </div> <div style="position: fixed; bottom: 0.1rem; left: 0.1rem; right: 0.1rem;"> <div class="divRectangle_Gray" style="margin-top:0.1rem;" onclick="layer.closeAll()"> 关 闭 </div> </div> </script> <script type="text/html" id="ticketNumberItemTemplate"> {{# for(var i = 0, len = d.length; i < len; i=i+1){ }} {{# var headimg = fitHeadImage(d[i].Headimgurl,46) }} <div style="margin-bottom:0.25rem;"> <div class="divLuckyTicketItem"> {{ d[i].TicketNumber }} </div> <div style="margin-left:0.15rem;margin-right:0.15rem; color: #535353;margin-top:0.1rem;"> <table width="100%" border="0" cellspacing="0" cellpadding="0"> <tr> <td style="width:0.6rem;" valign="top"> <img src="{{headimg}}" style="width:0.5rem; height:0.5rem"> </td> <td valign="top"> <div> 微信昵称: {{# if(d[i].NickName != undefined && d[i].NickName != null){ }} {{ d[i].NickName }} {{# } }} </div> <div style="margin-top:0.07rem;"> 中奖时间:{{ d[i].WinTime }} </div> <div style="margin-top:0.07rem;"> 中奖说明: {{# if(d[i].WinRemark != undefined && d[i].WinRemark != null){ }} {{ d[i].WinRemark }} {{# } }} </div> </td> </tr> </table> </div> </div> {{# } }} </script> <div id="divImageContainer"> <img src="@Model.CampaignBundle.Campaign.ImageUrl" name="img" id="img" style="width: 100%;"> </div> <div class="divContent"> <div style="margin-top: 0.06rem" class="campaignName"> @Model.CampaignBundle.Campaign.Name </div> <div style="margin-top: 0.06rem" class="campaignDescription"> @Html.Raw(Model.CampaignBundle.Campaign.Introduction) </div> <div class="divRectangle" style="margin-top:0.15rem;padding-left:0.1rem;padding-right:0.1rem; padding-top:0.1rem;padding-bottom:0.1rem; font-size:0.13rem;"> <div class="defaultColor"> 上次开奖时间:@Model.CampaignBundle.LuckyTicket.LastDrawTime </div> </div> <div style="margin-top:0.15rem; color: #535353;"> 中奖信息: </div> <div id="ticketNumberItemContainer" style="margin-top:0.1rem;font-size:0.13rem;"> </div> <div id="divPagingContainer" class="divPagingContainer" style="margin-top:0.1rem; text-align:center" onclick="loadData(_currentPage+1)"> 查看更多 </div> </div> <div id="divFooter"> <table align="center" border="0" style="height:100%;width:100%"> <tr> <td valign="middle" align="center" width="50%"> <input name="" type="button" class="button" value="活动说明" style="width:90%" onclick="showCampaignDescription()"> </td> <td valign="middle" align="center" width="50%"> <input name="" type="button" class="button" value="返回活动页面" style="width:90%" onclick="showCampaignPage()"> </td> </tr> </table> </div>
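@*
    Note on the wx.config bootstrap at the top of this view: the serialized
    JsApiConfig is unescaped with string replacements and then materialized via
    eval('(' + jsApiConfigStr + ')'). An equivalent that avoids eval, assuming the
    unescaped string is valid JSON (a sketch, not part of the original page):

    var jsApiConfig = JSON.parse(
        jsApiConfigStr.replace(/&quot;/g, '"').replace(/\r?\n/g, ''));
*@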
@using puck.core.Abstract.EditorSettings @using puck.core.Models.EditorSettings @using puck.core.Models.EditorSettings.Attributes @model List<puck.core.Models.PuckReference> @{ I_Content_Picker_Settings settings = this.PuckEditorSettings<ContentPickerEditorSettingsAttribute>(); if (settings == null) { settings = this.PuckEditorSettings<ContentPickerEditorSettings>() ?? new puck.core.Models.EditorSettings.ContentPickerEditorSettings() { /*AllowDuplicates = false,*/ AllowUnpublished = true, MaxPick = 5, /*SelectionType = "both",*/ StartPaths = null, AllowedTypes="", Types = new Type[] { } }; } var startPath = ""; List<string> startIds = new List<string>(); string allowedTypes = ""; if (settings.Types != null && settings.Types.Any()) { allowedTypes = string.Join(",", settings.Types.Select(x => x.Name)); } else if (!string.IsNullOrEmpty(settings.AllowedTypes)) { allowedTypes = settings.AllowedTypes; } if (settings.StartPathIds!=null && settings.StartPathIds.Any()) { foreach (var startIdStr in settings.StartPathIds) { Guid startId; if (Guid.TryParse(startIdStr, out startId)) { startIds.Add(startId.ToString()); } } } else if (settings.StartPaths != null && settings.StartPaths.Count > 0) { startIds.AddRange(settings.StartPaths.Select(x=>x.Id.ToString())); } var guid = Guid.NewGuid(); var validationDummyGuid = Guid.NewGuid(); } <div data-guid="@guid.ToString()" class="editor_container puckpicker puckpicker_@ViewData.ModelMetadata.PropertyName puckpicker_@ViewData.TemplateInfo.HtmlFieldPrefix.Replace("[","_").Replace("]","_").Replace(".","_")"> <div style="display:none;" class="propName" data-propName="@ViewData.TemplateInfo.HtmlFieldPrefix"></div> <div style="display:none;" class="containerTrigger"></div> @if (ViewData.ModelMetadata.IsRequired) { <span class="field-validation-valid validationDummySummary" data-valmsg-for="v@(validationDummyGuid.ToString().Replace("-",""))" data-valmsg-replace="true"></span> <input class="validationDummy" type="hidden" data-val="true" data-val-required="The @ViewData.TemplateInfo.HtmlFieldPrefix field is required." 
id="@validationDummyGuid.ToString()" name="v@(validationDummyGuid.ToString().Replace("-",""))" /> } <div> <button type="button" class="btn btn-light addItem"><i class="fas fa-plus"></i></button> </div> <ul class="nodisplay contentTree nomenu"> <li class="node" data-id="00000000-0000-0000-0000-000000000000" data-path="/" data-children_path="/"></li> </ul> <div class="selected_content"> @if (Model != null) { foreach (var m in Model) { if (!string.IsNullOrEmpty(m.Variant)) { <div class="selected_node" data-nodename="" data-variant="@m.Variant" data-id="@m.Id"> <i class="fas fa-sort puckPickerSortHandle mr-3"></i><span class="nname"></span> - <span class="v">@m.Variant</span> <a href="#content?id=@(m.Id)&variant=@(m.Variant)" class="view">View</a> &nbsp;<i class='fas fa-minus-circle removeItem'></i> </div> } else { <div class="selected_node" data-variant="null" data-id="@m.Id"> <i class="fas fa-sort puckPickerSortHandle mr-3"></i> <span class="nname"></span>&nbsp; <i class='fas fa-minus-circle removeItem'></i> </div> } } } </div> </div> <script type="text/javascript"> onAfterDom(function () { var guid = "@guid.ToString()"; var modelType = "@Html.Raw(ViewBag.Level0Type.Name)"; var propname = "@Html.Raw(ViewData.TemplateInfo.HtmlFieldPrefix)"; var isPrePopulated = '@ViewBag.IsPrePopulated' == 'True'; var escapePropname = function (str) { return str.replace(/\[/g, "_").replace(/\]/g,"_").replace(/\./g,"_"); } var container = $("[data-guid='"+guid+"']"); var init = function (container) { var validationDummy = container.find(".validationDummy"); validationDummyCounter++; if (((validationDummy.attr("name")||"").match(/_/g) || []).length < 2) validationDummy.attr("name", validationDummy.attr("name") + "_" + validationDummyCounter); validationDummy.attr("data-val-required", "The " + container.parents(".fieldwrapper:first").find(".editor-label label").html() + " field is required."); container.find(".validationDummySummary").attr("data-valmsg-for", validationDummy.attr("name")); var getPropName = function () { return container.find("div:hidden.propName:first").attr("data-propName"); } container.on("puckListEditorUpdate", function (e, container) { init(container); }); container.find(".containerTrigger").unbind().click(function (e) { container = $(this).parents(".editor_container:first"); selected_container = container.find(".selected_content"); //console.log("new container",container); }); var allowedTypesCSV = "@Html.Raw(allowedTypes)"; var allowedTypes = []; var tempAllowedTypes = allowedTypesCSV.split(","); for (var i = 0; i < tempAllowedTypes.length; i++) { var aType = tempAllowedTypes[i]; if (aType && aType.trim() && aType.trim() != ",") allowedTypes.push(aType.trim()); } var selected_container = container.find(".selected_content"); var selected_content = function () { return selected_container.find(".selected_node"); } var _startPath = "@startPath" == "" ? 
startPath : "@startPath"; var _startIdsStr = "@string.Join(",",startIds)"; var _startIds = _startIdsStr.split(","); var _startPaths = []; container.find("ul.content li:first").attr("data-children_path", _startPath); var maxPick = "@settings.MaxPick"; var selectionType = "both"; @*"@settings.SelectionType";*@ //both|variant|node var allowUnpublished = "@settings.AllowUnpublished" == "True"; //Razor renders "True"/"False", so compare to get a real boolean var allowDuplicates = false;@*"@settings.AllowDuplicates"=='True';*@ var setValue = function () { //console.log("container",container); container.find("input:hidden:not(.validationDummy)").remove(); validationDummy.val(""); selected_container.find(".selected_node").each(function (i) { validationDummy.val("1"); var node = $(this); container.append( "<input type='hidden' name='" + getPropName() + "[" + i + "].Id' value='" + node.attr("data-id") + "'/>" + "<input type='hidden' name='" + getPropName() + "[" + i + "].Variant' value='" + node.attr("data-variant") + "'/>" ); }); } setValue(); if (selected_container.hasClass("ui-sortable") && selected_container.data("uiSortable")) { try { selected_container.sortable("destroy"); } catch (error) { } selected_container.data("uiSortable", ""); } selected_container.sortable({axis:"y", handle: ".puckPickerSortHandle", stop: function (event, ui) { setValue(); }}); var loadData = function () { var ids = ""; selected_container.find(".selected_node").each(function () { ids += $(this).attr("data-id") + ","; }); if (!ids) return; getContentModels(ids, function (res) { for (var i = 0; i < res.length; i++) { var el = selected_container.find(".selected_node[data-id='" + res[i].Id + "']"); el.attr("data-nodename", res[i].NodeName); el.find(".nname").html(res[i].NodeName); el.addClass("exists"); } selected_container.find(".selected_node:not(.exists) .nname").html("- could not find this node, possibly deleted -"); }); } loadData(); selected_container.off("click.remove").on("click.remove", "i.removeItem", function (e) { var el = $(this).parent(); if (overlayEl) { overlayEl.find(".node[data-id='" + el.attr("data-id") + "']").removeClass("selected"); overlayEl.find(".node[data-id='" + el.attr("data-id") + "'] .variants:first .variant[data-variant='"+el.attr("data-variant")+"']").removeClass("selected"); } el.remove(); setValue(); }); var overlayEl = undefined; getContentModels(_startIdsStr || emptyGuid, function (res) { for (var i = 0; i < res.length; i++) { _startPaths.push(res[i].Path); } container.find(".addItem").unbind().click(function (e) { if (selected_container.find(".selected_node").length >= maxPick) { msg(undefined, "you cannot select any more, max selection is set to " + maxPick, undefined, undefined); return false; } var tree = container.find("ul.contentTree").clone().removeClass("nodisplay"); overlayEl = overlay(tree, 400, undefined, undefined, "Content Picker", true); el = overlayEl.find(".node:first"); var _startPathsCopy = _startPaths.length > 0 ?
_startPaths.slice(0) : window.startPaths.slice(0); getDrawContent(emptyGuid, el, false, function () { if (overlayEl.find(".node").length == 1) { overlayEl.find("ul.contentTree").before($("<p/>").html("there is no content to select.")); } var afterDrawContent = function () { selected_container.find(".selected_node").each(function () { var selected = $(this); overlayEl.find(".node[data-id='" + selected.attr("data-id") + "']").addClass("selected"); overlayEl.find(".node[data-id='" + selected.attr("data-id") + "'] .variants:first .variant[data-variant='" + selected.attr("data-variant") + "']").addClass("selected"); }); el.find(".node").each(function () { var n = $(this); if (allowedTypes.length > 0 && !allowedTypes.includes(n.attr("data-type"))) { n.addClass("disallowed"); } }); } afterDrawContent(); loadTreePaths(_startPathsCopy, el.parents("ul:first"), afterDrawContent, false,false); }, false, _startPathsCopy); overlayEl.on("click", ".node span", function (e) { var clicked = $(this); var node = clicked.parents(".node:first"); if (node.data("disabled")) { msg(undefined,"you are not authorized to select this node"); return; } if (node.hasClass("disallowed")) { msg(undefined,"this selection is disallowed. type is \"" + node.attr("data-type") + "\" and allowed types are \""+allowedTypes.join(",")+"\""); return; } var isVariantSelection = clicked.hasClass("variant"); if (node.attr("data-published") == "false" && !allowUnpublished) { return false; } var variant; if (!isVariantSelection) { //changed to only allow variant selection var variants = node.attr("data-variants").split(","); var path = node.attr("data-path"); variants.sort(function (a, b) { var aOrder = getVariantOrder(a, path); var bOrder = getVariantOrder(b, path); return aOrder - bOrder; }); //variant = node.find(".variant:first").attr("data-variant"); variant = variants[0]; isVariantSelection = true; } else variant = clicked.attr("data-variant"); //check if node is already selected if (selected_container.find(".selected_node").filter("[data-id='" + node.attr("data-id") + "'][data-variant='" + variant + "']").length > 0 && !allowDuplicates) { //remove it from selected list selected_container.find(".selected_node").filter("[data-id='" + node.attr("data-id") + "'][data-variant='" + variant + "']").remove(); if (selected_container.find(".selected_node").filter("[data-id='" + node.attr("data-id") + "']").length == 0) node.removeClass("selected"); node.find(".variants:first .variant[data-variant='" + variant + "']").removeClass("selected"); setValue(); return; } if (selected_container.find(".selected_node").length >= maxPick) { msg(undefined, "you cannot select any more, max selection is set to " + maxPick, undefined, undefined); return false; } node.addClass("selected"); node.find(".variants:first .variant[data-variant='" + variant + "']").addClass("selected"); if (isVariantSelection && (selectionType == "variant" || selectionType == "both")) { selected_container.append( "<div class='selected_node' data-nodename='" + node.attr("data-nodename") + "' data-variant='" + variant + "' data-id='" + node.attr("data-id") + "'>" +"<i class=\"fas fa-sort puckPickerSortHandle mr-3\"></i>" + "<span>" + node.attr("data-nodename") + "</span> - <span class='v'>" + variant + "</span>" + "&nbsp;<a href='#content?id=" + node.attr("data-id") + "&variant=" + variant + "' class='view'>View</a> &nbsp;<i class='fas fa-minus-circle removeItem'></i></div>" ); } else if (!isVariantSelection && (selectionType == "node" || selectionType == "both")) { 
selected_container.append( "<div class='selected_node' data-variant='' data-id='" + node.attr("data-id") + "'><i class=\"fas fa-sort puckPickerSortHandle mr-3\"></i><span>" + node.attr("data-nodename") + "</span>&nbsp;<i class='fas fa-minus-circle removeItem'></i></div>" ); } setValue(); //reinitialize sortable since refresh doesn't seem to work if (selected_container.hasClass("ui-sortable") && selected_container.data("uiSortable")) { selected_container.sortable("destroy"); selected_container.data("uiSortable",""); selected_container.sortable({axis:"y", handle: ".puckPickerSortHandle", stop: function (event, ui) { setValue(); }}); } }); overlayEl.on("click", "ul.contentTree li.node i.expand", function () { //get children content var node = $(this).parents(".node:first"); var descendants = node.find("ul:first"); if (descendants.length > 0) {//show if (descendants.first().is(":hidden")) { node.find("i.expand:first").removeClass("fa-chevron-right").addClass("fa-chevron-down"); descendants.show(); } else {//hide node.find("i.expand:first").removeClass("fa-chevron-down").addClass("fa-chevron-right"); descendants.hide(); } } else { var _startPathsCopy = _startPaths.length > 0 ? _startPaths.slice(0) : window.startPaths.slice(0); getDrawContent(node.attr("data-id"), node, false, function () { node.find(".loader").hide(); node.find("i.expand:first").show(); selected_container.find(".selected_node").each(function () { var selected = $(this); overlayEl.find(".node[data-id='" + selected.attr("data-id") + "']").addClass("selected"); overlayEl.find(".node[data-id='" + selected.attr("data-id") + "'] .variants:first .variant[data-variant='"+selected.attr("data-variant")+"']").addClass("selected"); }); node.find(".node").each(function () { var n = $(this); if (allowedTypes.length > 0 && !allowedTypes.includes(n.attr("data-type"))) { n.addClass("disallowed"); } }); }, false, _startPathsCopy); node.find(".loader").show(); node.find("i.expand:first").removeClass("fa-chevron-right").addClass("fa-chevron-down").hide(); } }); }); }); selected_container.off("click.view").on("click.view", ".selected_node .view", function (e) { e.preventDefault(); var el = $(this); var selectedNode = el.parent(); var id = selectedNode.attr("data-id"); var variant = selectedNode.attr("data-variant"); var nodename = selectedNode.attr("data-nodename"); var container = $("<div />"); var inner = $("<div data-tabPrefix='overlayContent_' class='cont_inner'></div>").css({ width: "100%", height: "100%" }); var msgContainer = $("<div class='msgTop'></div>"); container.append(msgContainer).append(inner); var overlayEl = overlay(container, "90%", undefined, top, nodename); displayMarkup(null, undefined, variant, undefined, id, inner, undefined); }); } init(container); }); </script>
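// ---------------------------------------------------------------------------
// Sketch (added for clarity): setValue() above serializes each selection into
// indexed hidden inputs named "<PropName>[i].Id" and "<PropName>[i].Variant",
// which ASP.NET model binding maps onto the view's List<PuckReference> model.
// The shape below is inferred from the view; the real
// puck.core.Models.PuckReference may carry additional members.
// ---------------------------------------------------------------------------
using System;
using System.Collections.Generic;

public class PuckReferenceSketch
{
    public Guid Id { get; set; }        // data-id of the picked node
    public string Variant { get; set; } // data-variant, e.g. "en-gb"; "null" for node picks
}

// A post body of MyProp[0].Id=<guid>&MyProp[0].Variant=en-gb binds to a
// List<PuckReferenceSketch> with one entry whose Variant is "en-gb".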
using System; using System.Diagnostics; using System.IO; using System.Xml; using System.Collections.Generic; using System.Reflection; using System.Runtime.InteropServices; using System.Linq; using Foundation; using AppKit; using ObjCRuntime; // ------------------------------------------------------------------------- // StoryboardInflator for macOS by Kevin Mullins for Microsoft, Inc. // ------------------------------------------------------------------------- // This library provides limited support for using Storyboard files to // build and display the user interface for a Xamarin.Mac based workbook app. // Because macOS expects a compiled storyboard to be part of a running app's // bundle along with its executable code, the typical mechanisms to create // Outlets, Actions and Segues will not work. Instead, this library applies // the following hack to simulate Outlets: // // 1) For Windows and Views (or View-based controls such as NSButton), set the // Identifier property in Interface Builder to match the name of the // "Outlet". The StoryboardBinder will populate any like-named Public // Property on any class it binds. // // 2) For Menu Items, the StoryboardBinder will use the Title of the Menu // Item + "MenuItem" to populate any like-named Public Property on any // class that it binds. For example, the "New" menu item would bind to // the "NewMenuItem" property in the class. // // 3) For Toolbar Items, the StoryboardBinder will use the Label of the // Item + "ToolbarItem" to populate any like-named Public Property on any // class that it binds. For example, the "Color" toolbar item would bind to // the "ColorToolbarItem" property in the class. // // LIMITATIONS: Custom Segues are currently not supported. Instead, the app must // use the StoryboardInflator to inflate and populate any NIB that will then // need to be manually displayed. /// <summary> /// Defines the type of a Scene or Source Object used in a Segue. /// </summary> public enum StoryboardObjectType { /// <summary> /// The application (<c>NSApplication</c>) as defined in the Storyboard. /// </summary> Application, /// <summary> /// A Menu Item (<c>NSMenuItem</c>). /// </summary> MenuItem, /// <summary> /// A Window Controller (<c>NSWindowController</c>). /// </summary> WindowController, /// <summary> /// A Toolbar Item (<c>NSToolbarItem</c>). /// </summary> ToolbarItem, /// <summary> /// A View Controller (<c>NSViewController</c>). /// </summary> ViewController, /// <summary> /// A Split View Controller (<c>NSSplitViewController</c>). /// </summary> SplitViewController, /// <summary> /// A Button (<c>NSButton</c>). /// </summary> Button, /// <summary> /// An unknown object type. /// </summary> Unknown } /// <summary> /// Defines the kind of Segue as defined in the Storyboard. /// </summary> public enum StoryboardSegueType { /// <summary> /// Displays the destination controller as a non-modal Window. /// </summary> Show, /// <summary> /// Displays the destination controller as a modal window. /// </summary> Modal, /// <summary> /// Displays the destination controller as a sheet. /// </summary> Sheet, /// <summary> /// Displays the destination controller as a popover. /// </summary> Popover, /// <summary> /// Displays the destination controller using a Custom Segue Class. /// </summary> Custom, /// <summary> /// Defines a containment relationship between the source object and the /// destination controller. /// </summary> Relationship }
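// A minimal sketch (illustrative names, not part of the library) of the Outlet
// hack described above: a workbook class exposes public properties whose names
// match the Identifier set in Interface Builder, or Title + "MenuItem" /
// Label + "ToolbarItem", and StoryboardBinder fills them in when it binds.
public class MainWindowHandler : NSObject
{
    // Populated from a window whose Identifier is "MainWindow".
    public NSWindow MainWindow { get; set; }

    // Populated from the "New" menu item (Title + "MenuItem").
    public NSMenuItem NewMenuItem { get; set; }

    // Populated from the "Color" toolbar item (Label + "ToolbarItem").
    public NSToolbarItem ColorToolbarItem { get; set; }

    // Matched by name from a selector such as "newDocument:" via BindAction.
    public void NewDocument (NSObject sender)
    {
        Console.WriteLine ("New clicked");
    }
}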
/// <summary> /// Holds the definition of a Segue as read from a Storyboard. /// </summary> public class StoryboardSegueDefinition : NSObject { #region Computed Properties /// <summary> /// Gets or sets the inflator used to load NIBs from the Storyboard. /// </summary> /// <value>The inflator.</value> public StoryboardInflator Inflator { get; set; } /// <summary> /// Gets or sets the type of the scene that the segue belongs to. /// </summary> /// <value>The type of the scene.</value> public StoryboardObjectType SceneType { get; set;} /// <summary> /// Gets or sets the scene identifier. /// </summary> /// <value>The scene identifier.</value> public string SceneID { get; set; } /// <summary> /// Gets or sets the type of the source object. /// </summary> /// <value>The type of the source object.</value> public StoryboardObjectType SourceObjectType { get; set;} /// <summary> /// Gets or sets the source object identifier. /// </summary> /// <value>The source object identifier.</value> public string SourceObjectID { get; set; } /// <summary> /// Gets or sets the destination controller identifier. /// </summary> /// <value>The destination controller identifier.</value> public string DestinationControllerID { get; set;} /// <summary> /// Gets or sets the kind of the segue. /// </summary> /// <value>The kind of the segue.</value> public StoryboardSegueType SegueKind { get; set; } /// <summary> /// Gets or sets the segue identifier. /// </summary> /// <value>The segue identifier.</value> public string SegueIdentifier { get; set;} /// <summary> /// Gets or sets the relationship. /// </summary> /// <remarks>This is only populated from containment Segues of <c>StoryboardSegueType.Relationship</c>.</remarks> /// <value>The relationship.</value> public string Relationship { get; set;} /// <summary> /// Gets or sets the popover anchor identifier. /// </summary> /// <value>The popover anchor identifier.</value> public string PopoverAnchorID { get; set;} /// <summary> /// Gets or sets the popover behavior. /// </summary> /// <value>The popover behavior.</value> public NSPopoverBehavior PopoverBehavior { get; set; } = NSPopoverBehavior.Transient; /// <summary> /// Gets or sets the edge the popover will be displayed from. /// </summary> /// <remarks>0 = Left, 1 = Top, 2 = Right, 3 = Bottom</remarks> /// <value>The popover edge.</value> public nuint PopoverEdge { get; set; } = 0; #endregion #region Constructors /// <summary> /// Initializes a new instance of the <see cref="T:StoryboardSegueDefinition"/> class. /// </summary> /// <param name="type">The <c>StoryboardSegueType</c> of the Segue.</param> /// <param name="inflator">The Inflator used to load NIB files.</param> public StoryboardSegueDefinition (StoryboardSegueType type, StoryboardInflator inflator) { // Initialize SegueKind = type; Inflator = inflator; } /// <summary> /// Initializes a new instance of the <see cref="T:StoryboardSegueDefinition"/> class.
/// </summary> /// <param name="type">The <c>string</c> type of the Segue.</param> /// <param name="inflator">The Inflator used to load NIB files.</param> public StoryboardSegueDefinition (string type, StoryboardInflator inflator) { // Initialize switch (type) { case "show": SegueKind = StoryboardSegueType.Show; break; case "modal": SegueKind = StoryboardSegueType.Modal; break; case "sheet": SegueKind = StoryboardSegueType.Sheet; break; case "popover": SegueKind = StoryboardSegueType.Popover; break; case "custom": SegueKind = StoryboardSegueType.Custom; break; case "relationship": SegueKind = StoryboardSegueType.Relationship; break; } Inflator = inflator; } #endregion #region Private Methods /// <summary> /// Prepares to execute a segue, loading the destination NIB and calling <c>PrepareForSegue</c> /// on the Source Controller so it can prepare the destination controller before it is /// presented to the user. /// </summary> /// <returns>The <c>NSStoryboardSegue</c> representing this Segue Definition.</returns> /// <param name="sender">The object that is launching the segue.</param> /// <param name="sourceController">Source controller for the segue.</param> private NSStoryboardSegue PrepareForSegue (NSObject sender, NSObject sourceController) { // Attempt to load destination var destinationController = Inflator.InstantiateControllerForPartialIdentifier (DestinationControllerID); // Was the NIB found? if (destinationController == null) return null; // Build new Segue var segue = new NSStoryboardSegue (SegueIdentifier, sourceController, destinationController); // Does the class contain the PrepareForSegue method? var controllerType = sourceController.GetType (); var methodInfo = controllerType.GetMethod ("PrepareForSegue"); if (methodInfo != null) { // Yes, wire up action to class methodInfo.Invoke (sourceController, new [] { segue, sender }); } // Return segue return segue; } /// <summary> /// Presents the non-modal window to the user. /// </summary> /// <param name="segue">The <c>NSStoryboardSegue</c> to execute.</param> private void PresentNonModalWindow (NSStoryboardSegue segue) { NSWindowController windowController = null; // Take action based on the controller type if (segue.DestinationController is NSWindowController) { // Display the window to the user windowController = segue.DestinationController as NSWindowController; } else if (segue.DestinationController is NSViewController) { // Build a Window and Window Controller for this view var viewController = segue.DestinationController as NSViewController; var window = new NSWindow (viewController.View.Bounds, (NSWindowStyle.Titled | NSWindowStyle.Closable | NSWindowStyle.Miniaturizable | NSWindowStyle.Resizable), NSBackingStore.Buffered, false); windowController = new NSWindowController (window); // Attach the View Controller to the Window window.ContentView = viewController.View; window.ContentViewController = viewController; } // Found? if (windowController == null) return; // Present window controller windowController.Window.MakeKeyAndOrderFront ((NSObject)NSApplication.SharedApplication.Delegate); } /// <summary> /// Presents the modal window to the user.
/// </summary> /// <param name="segue">The <c>NSStoryboardSegue</c> to execute.</param> private void PresentModalWindow (NSStoryboardSegue segue) { NSWindowController windowController = null; // Take action based on the controller type if (segue.DestinationController is NSWindowController) { // Display the window to the user windowController = segue.DestinationController as NSWindowController; } else if (segue.DestinationController is NSViewController) { // Build a Window and Window Controller for this view var viewController = segue.DestinationController as NSViewController; var window = new NSWindow (viewController.View.Bounds, (NSWindowStyle.Titled | NSWindowStyle.Closable | NSWindowStyle.Miniaturizable | NSWindowStyle.Resizable), NSBackingStore.Buffered, false); windowController = new NSWindowController (window); // Attach the View Controller to the Window window.ContentView = viewController.View; window.ContentViewController = viewController; } // Found? if (windowController == null) return; // Present window controller NSApplication.SharedApplication.RunModalForWindow (windowController.Window); } /// <summary> /// Presents the Window or View to the user as a sheet attached to the parent /// Window. /// </summary> /// <param name="segue">The <c>NSStoryboardSegue</c> to execute.</param> private void PresentSheet (NSStoryboardSegue segue) { NSViewController viewController = null; // Take action based on the controller type if (segue.DestinationController is NSWindowController) { // Display the window to the user var windowController = segue.DestinationController as NSWindowController; viewController = windowController.Window.ContentViewController; } else if (segue.DestinationController is NSViewController) { // Grab view controller viewController = segue.DestinationController as NSViewController; } // Found? if (viewController == null) return; // Present window controller if (segue.SourceController is NSWindowController) { var sourceController = segue.SourceController as NSWindowController; sourceController.Window.ContentViewController.PresentViewControllerAsSheet (viewController); } else if (segue.SourceController is NSViewController) { var sourceController = segue.SourceController as NSViewController; sourceController.PresentViewControllerAsSheet (viewController); } } /// <summary> /// Presents the Window or View to the user as a popover attached to a parent /// View. /// </summary> /// <param name="segue">The <c>NSStoryboardSegue</c> to execute.</param> /// <param name="sender">The <c>NSView</c> based element that the popover will be attached to.</param> private void PresentPopover (NSStoryboardSegue segue, NSObject sender) { NSViewController viewController = null; NSView view = null; // Take action based on the controller type if (segue.DestinationController is NSWindowController) { // Display the window to the user var windowController = segue.DestinationController as NSWindowController; viewController = windowController.Window.ContentViewController; } else if (segue.DestinationController is NSViewController) { // Grab view controller viewController = segue.DestinationController as NSViewController; } // Found? 
if (viewController == null) return; // Take action based on sender type if (sender is NSToolbarItem) { var item = sender as NSToolbarItem; if (item.View == null) { // Default to presenting as a sheet PresentSheet (segue); return; } else { // Use the item's view view = item.View; } } else { // It's a view based control view = sender as NSView; } // Present window controller if (segue.SourceController is NSWindowController) { var sourceController = segue.SourceController as NSWindowController; sourceController.Window.ContentViewController.PresentViewController (viewController, viewController.View.Bounds, view, PopoverEdge, PopoverBehavior); } else if (segue.SourceController is NSViewController) { var sourceController = segue.SourceController as NSViewController; sourceController.PresentViewController (viewController, viewController.View.Bounds, view, PopoverEdge, PopoverBehavior); } } #endregion #region Public Methods /// <summary> /// Performs the segue as specified in this Segue Definition. /// </summary> /// <param name="sender">The object that is launching the segue.</param> /// <param name="sourceController">Source controller for the segue.</param> public void PerformSegue (NSObject sender, NSObject sourceController) { //NSApplication.SharedApplication.KeyWindow.Title = "Loading Segue"; // Prepare for segue var segue = PrepareForSegue (sender, sourceController); // Did the NIB load? if (segue == null) return; //NSApplication.SharedApplication.KeyWindow.Title = "Segue Loaded"; // Take action based on the Segue type switch (SegueKind) { case StoryboardSegueType.Show: PresentNonModalWindow (segue); break; case StoryboardSegueType.Modal: PresentModalWindow (segue); break; case StoryboardSegueType.Sheet: PresentSheet (segue); break; case StoryboardSegueType.Popover: PresentPopover (segue, sender); break; } } /// <summary> /// Sets the popover behavior. /// </summary> /// <param name="behavior">Behavior as a string value.</param> public void SetPopoverBehavior (string behavior) { // Anything to do? if (behavior == null) return; // Take action based on value switch (behavior) { case "transient": PopoverBehavior = NSPopoverBehavior.Transient; break; case "semitransient": PopoverBehavior = NSPopoverBehavior.Semitransient; break; case "applicationDefined": PopoverBehavior = NSPopoverBehavior.ApplicationDefined; break; } } /// <summary> /// Sets the popover edge. /// </summary> /// <param name="edge">Edge as a string value.</param> public void SetPopoverEdge (string edge) { // Anything to do? if (edge == null) return; // Take action based on value switch (edge) { case "minX": PopoverEdge = 0; break; case "minY": PopoverEdge = 1; break; case "maxX": PopoverEdge = 2; break; case "maxY": PopoverEdge = 3; break; } } #endregion }
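// Sketch (illustrative values): manually building and firing a popover segue.
// Normally LoadSegueDefinitions creates these from the .storyboard XML, so this
// just shows how the pieces above fit together.
public static class SegueSketch
{
    public static void ShowPopover (StoryboardInflator inflator, NSButton anchor, NSViewController host)
    {
        var segue = new StoryboardSegueDefinition ("popover", inflator) {
            // Partial Identifier of the destination scene in the Storyboard.
            DestinationControllerID = "PopoverController",
            SegueIdentifier = "ShowPopoverSegue"
        };
        segue.SetPopoverBehavior ("transient"); // transient | semitransient | applicationDefined
        segue.SetPopoverEdge ("maxY");          // minX=0, minY=1, maxX=2, maxY=3
        // Loads the destination NIB, calls PrepareForSegue on the host if
        // present, then presents the popover anchored to the button.
        segue.PerformSegue (anchor, host);
    }
}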
/// <summary> /// Helper class used to compile a storyboard included in a workbook. /// </summary> public static class StoryboardCompiler { #region Public Methods /// <summary> /// Compiles the named storyboard. /// </summary> /// <param name="storyboardName">The name of the storyboard to compile, without the `.storyboard` extension.</param> public static void Compile (string storyboardName) { // Assemble the Interface Builder compiler call var dir = Directory.GetCurrentDirectory (); var command = "ibtool"; var arguments = $"\"{dir}/{storyboardName}.storyboard\" --compile \"{dir}/{storyboardName}.storyboardc\""; // Prepare to call the system to invoke the ibtool var startInfo = new ProcessStartInfo () { FileName = command, Arguments = arguments, UseShellExecute = false, CreateNoWindow = true, RedirectStandardOutput = true, RedirectStandardError = true, RedirectStandardInput = true, UserName = System.Environment.UserName }; // Invoke the ibtool and write the results to the // console using (Process process = Process.Start (startInfo)) { // Monitor for exit process.WaitForExit (); using (var output = process.StandardOutput) { var results = output.ReadToEnd (); Console.WriteLine ("Results: {0}", (results == "") ? "Successful" : results); } } } #endregion }
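// Sketch: a typical end-to-end workbook flow tying the pieces together. It
// assumes a "Main.storyboard" file sits next to the workbook and that a
// handler class such as MainWindowHandler (sketched earlier) exists.
public static class StoryboardFlowSketch
{
    public static NSObject LoadInitialScene ()
    {
        // 1) Compile the raw storyboard to Main.storyboardc with ibtool.
        StoryboardCompiler.Compile ("Main");

        // 2) Inflate the compiled bundle; the shared app delegate becomes
        //    the First Responder for every loaded NIB.
        var inflator = new StoryboardInflator ("Main.storyboardc");

        // 3) Instantiate the designated entry point and bind its "Outlets"
        //    and "Actions" onto the handler via the naming hack.
        var handler = new MainWindowHandler ();
        return inflator.InstantiateInitialController (handler);
    }
}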
/// <summary> /// The StoryboardBinder class provides a mechanism to bind objects inflated from a /// compiled Storyboard NIB file to classes in a workbook. Currently it can bind: /// NSMenu, NSToolbar, NSWindowController, NSWindow, NSViewController, NSView and /// any NSView-based controls (such as NSButton). /// /// It uses the following hack to support Outlets: a) NSWindow and NSView based /// controls are bound to Public Properties matching their Identifier property, /// b) NSMenuItems are bound to Public Properties matching Title + "MenuItem", /// c) NSToolbarItems are bound to Public Properties matching Label + "ToolbarItem". /// /// Custom Segues are currently not supported. For many standard items (such as /// NSButton and NSImageView), the binder will attempt to bind images to `.png` files of /// the same name in an `Images` directory in the workbook's directory. /// </summary> public static class StoryboardBinder { #region Public Methods /// <summary> /// Bind the specified storyboardClass and workbookClass. For high-level classes /// like NSWindowController and NSViewController, the binder will walk down the /// tree of objects binding lower-level items along the chain. /// </summary> /// <param name="storyboardClass">The high-level storyboard (or workbook) class to bind from.</param> /// <param name="workbookClass">The workbook class to bind to.</param> public static void Bind (NSObject storyboardClass, NSObject workbookClass) { // Take action based on the source object type if (storyboardClass is NSMenu) { BindMenu (storyboardClass as NSMenu, workbookClass); } else if (storyboardClass is NSWindowController) { BindWindowController (storyboardClass as NSWindowController, workbookClass); } else if (storyboardClass is NSWindow) { BindWindow (storyboardClass as NSWindow, workbookClass); } else if (storyboardClass is NSViewController) { BindViewController (storyboardClass as NSViewController, workbookClass); } else if (storyboardClass is NSView) { BindView (storyboardClass as NSView, workbookClass); } else if (storyboardClass is NSToolbar) { BindToolbar (storyboardClass as NSToolbar, workbookClass); } } /// <summary> /// Attempts to bind an NSImage that has been specified in Interface Builder in the /// form `TheImage.png` to a like-named image file in the `Images` directory in the /// same directory as the workbook. /// </summary> /// <returns>The NSImage loaded from the `Images` directory or the passed-in NSImage if not found.</returns> /// <param name="image">The source NSImage as specified in Interface Builder.</param> public static NSImage BindImage (NSImage image) { // Has an image been specified? if (image != null && image.Name != null && image.Name.Contains (".png")) { // Yes, attempt to load the image resource var imageName = $"Images/{image.Name}"; // Does the file exist? if (File.Exists (imageName)) { // Yes, load new image image = new NSImage (imageName); } } return image; } /// <summary> /// Binds any Menu Item to a Public Property in a workbook class that matches /// Title + "MenuItem". For example, the "New" Menu Item to the "NewMenuItem" /// Property. /// </summary> /// <remarks>Any image specified in an item will also be bound where possible.</remarks> /// <param name="menu">The NSMenu to bind.</param> /// <param name="workbookClass">The workbook class to bind to.</param> public static void BindMenu (NSMenu menu, NSObject workbookClass) { // Bind all menu items for (nint n = 0; n < menu.Count; ++n) { var menuItem = menu.ItemAt (n); // Bind identity using a hack var propertyName = $"{menuItem.Title.Replace (" ", "").Replace ("…", "")}MenuItem"; BindProperty (propertyName, workbookClass, menuItem); // Bind image menuItem.Image = BindImage (menuItem.Image); // Bind actions BindAction (menuItem.Action, workbookClass, menuItem); // Has segue? if (menuItem.Target is StoryboardSegueDefinition) { var segue = menuItem.Target as StoryboardSegueDefinition; // Bind it menuItem.Activated += (sender, e) => { segue.PerformSegue (menuItem, workbookClass); }; } // Scan sub menus if (menuItem.Submenu != null) { BindMenu (menuItem.Submenu, workbookClass); } } } /// <summary> /// Binds any Toolbar Item to a Public Property in a workbook class that matches /// Label + "ToolbarItem". For example, the "Color" Toolbar Item to the "ColorToolbarItem" /// Property. /// </summary> /// <remarks>Any image specified in an item will also be bound where possible.</remarks> /// <param name="toolbar">The NSToolbar to bind.</param> /// <param name="workbookClass">The workbook class to bind to.</param> public static void BindToolbar (NSToolbar toolbar, NSObject workbookClass) { // Bind all items on the Toolbar foreach (NSToolbarItem item in toolbar.Items) { // Bind identity using a hack var propertyName = $"{item.Label.Replace (" ", "")}ToolbarItem"; BindProperty (propertyName, workbookClass, item); // Bind Images item.Image = BindImage (item.Image); // Bind actions BindAction (item.Action, workbookClass, item); // Has segue? if (item.Target is StoryboardSegueDefinition) { var segue = item.Target as StoryboardSegueDefinition; // Bind it item.Activated += (sender, e) => { segue.PerformSegue (item, workbookClass); }; } } } /// <summary> /// Binds the Window Controller to the given workbook class. This method will walk down the /// tree of objects binding lower-level classes as well. /// </summary> /// <param name="controller">The NSWindowController to bind from.</param> /// <param name="workbookClass">The workbook class to bind to.</param> public static void BindWindowController (NSWindowController controller, NSObject workbookClass) { // Bind controlled window BindWindow (controller.Window, workbookClass); // Binding to a Window Controller? if (workbookClass is NSWindowController) { // Yes, bind controllers var windowController = workbookClass as NSWindowController; windowController.Window = controller.Window; // Is the window loaded?
if (windowController.IsWindowLoaded) { // Simulate the window being loaded by the workbook's controller instance windowController.WindowWillLoad (); windowController.WindowDidLoad (); } } } /// <summary> /// Binds the Window to the given workbook class. This method will walk down the tree /// of objects binding lower-level classes as well. /// </summary> /// <param name="window">The NSWindow to bind from.</param> /// <param name="workbookClass">The workbook class to bind to.</param> public static void BindWindow (NSWindow window, NSObject workbookClass) { // Bind identity BindProperty (window.Identifier, workbookClass, window); // Does the window have a toolbar? if (window.Toolbar != null) { // Yes, bind it BindToolbar (window.Toolbar, workbookClass); } // Binding to a window instance? if (workbookClass is NSWindow && workbookClass.GetType ().Name == window.Identifier) { // Yes, bind objects var wbWindow = workbookClass as NSWindow; wbWindow.ContentViewController = window.ContentViewController; wbWindow.ContentView = window.ContentView; } // Does the window contain a Split View? if (window.ContentView is NSSplitView) { // Bind split view contents BindSplitView (window.ContentView as NSSplitView, workbookClass); } else { // Bind content view controller BindViewController (window.ContentViewController, workbookClass); } } /// <summary> /// Binds the split view to the given workbook class. This method will walk down the /// tree of objects binding lower-level classes as well. /// </summary> /// <param name="splitView">Split view being bound.</param> /// <param name="workbookClass">The workbook class to bind to.</param> public static void BindSplitView (NSSplitView splitView, NSObject workbookClass) { // Bind identity BindProperty (splitView.Identifier, workbookClass, splitView); // Bind all the attached views foreach (NSView view in splitView.ArrangedSubviews) { // Bind controlled view BindView (view, workbookClass); } // Binding to a view controller? if (workbookClass is NSViewController) { // Yes, bind controllers var viewController = workbookClass as NSViewController; viewController.View = splitView; // Simulate the view being loaded by the workbook's controller instance viewController.ViewDidLoad (); viewController.ViewWillAppear (); viewController.ViewDidAppear (); viewController.ViewWillLayout (); viewController.ViewDidLayout (); } } /// <summary> /// Binds the View Controller to the given workbook class. This method will walk down the /// tree of objects binding lower-level classes as well. /// </summary> /// <param name="controller">The NSViewController to bind from.</param> /// <param name="workbookClass">The workbook class to bind to.</param> public static void BindViewController (NSViewController controller, NSObject workbookClass) { // Bind controlled view BindView (controller.View, workbookClass); // Binding to a view controller? if (workbookClass is NSViewController) { // Yes, bind controllers var viewController = workbookClass as NSViewController; viewController.View = controller.View; // Is the view loaded? if (controller.ViewLoaded) { // Simulate the view being loaded by the workbook's controller instance viewController.ViewDidLoad (); viewController.ViewWillAppear (); viewController.ViewDidAppear (); viewController.ViewWillLayout (); viewController.ViewDidLayout (); } } } /// <summary> /// Binds the View to the given workbook class. This method will traverse all SubViews /// in the given tree.
/// </summary> /// <remarks>Any image specified in a known item will also be bound where possible.</remarks> /// <param name="view">The NSView to bind from.</param> /// <param name="workbookClass">The workbook class to bind to.</param> public static void BindView (NSView view, NSObject workbookClass) { // Bind identity BindProperty (view.Identifier, workbookClass, view); // Bind images for known items if (view is NSButton) { var button = view as NSButton; button.Image = BindImage (button.Image); BindAction (button.Action, workbookClass, button); // Has segue? if (button.Target is StoryboardSegueDefinition) { var segue = button.Target as StoryboardSegueDefinition; // Bind it button.Activated += (sender, e) => { segue.PerformSegue (button, workbookClass); }; } } else if (view is NSPopUpButton) { var popup = view as NSPopUpButton; foreach (NSMenuItem item in popup.Items ()) { item.Image = BindImage (item.Image); } BindAction (popup.Action, workbookClass, popup); } else if (view is NSSegmentedControl) { var segment = view as NSSegmentedControl; for (nint n = 0; n < segment.SegmentCount; ++n) { var image = segment.GetImage (n); segment.SetImage (BindImage (image), n); } BindAction (segment.Action, workbookClass, segment); } else if (view is NSImageView) { var image = view as NSImageView; image.Image = BindImage (image.Image); } // Bind every subview foreach (NSView subview in view.Subviews) { BindView (subview, workbookClass); } } #endregion #region Private Methods /// <summary> /// Attempts to bind an object inflated from a compiled Storyboard to an "Outlet" /// property on the given workbook class. This hack is a workaround since true /// Storyboard Outlets are not supported in workbooks. /// </summary> /// <param name="propertyName">The name of the Public Property to bind the Outlet to.</param> /// <param name="workbookClass">The workbook class being bound to.</param> /// <param name="property">The object inflated from the compiled Storyboard.</param> private static void BindProperty (string propertyName, NSObject workbookClass, NSObject property) { // Anything to process? if (propertyName == null) return; // Does the controller contain the property? var controllerType = workbookClass.GetType (); var propertyInfo = controllerType.GetProperty (propertyName); if (propertyInfo != null && propertyInfo.CanWrite) { // Yes, save value in class var value = Convert.ChangeType (property, propertyInfo.PropertyType); propertyInfo.SetValue (workbookClass, value); } } /// <summary> /// Attempts to bind an object inflated from a compiled Storyboard to an "Action" /// property on the given workbook class. This hack is a workaround since true /// Storyboard Actions are not supported in workbooks. /// </summary> /// <param name="action">The selector that represents the Action to bind.</param> /// <param name="workbookClass">The workbook class being bound to.</param> /// <param name="menuItem">The Menu Item that is the target of the binding.</param> private static void BindAction (Selector action, NSObject workbookClass, NSMenuItem menuItem) { // Anything to process? if (action == null) return; // Switch to .NET style method name var actionName = action.Name.Substring(0,1).ToUpper() + action.Name.Substring(1).Replace (":", ""); // Does the class contain the method?
var controllerType = workbookClass.GetType (); var methodInfo = controllerType.GetMethod (actionName); if (methodInfo != null) { // Yes, wire up action to class menuItem.Activated += (sender, e) => { methodInfo.Invoke (workbookClass, new [] { sender }); }; } } /// <summary> /// Attempts to bind an object inflated from a compiled Storyboard to an "Action" /// property on the given workbook class. This hack is a workaround since true /// Storyboard Actions are not supported in workbooks. /// </summary> /// <param name="action">The selector that represents the Action to bind.</param> /// <param name="workbookClass">The workbook class being bound to.</param> /// <param name="toolbarItem">The Toolbar Item that is the target of the binding.</param> private static void BindAction (Selector action, NSObject workbookClass, NSToolbarItem toolbarItem) { // Anything to process? if (action == null) return; // Switch to .NET style method name var actionName = action.Name.Substring (0, 1).ToUpper () + action.Name.Substring (1).Replace (":", ""); // Does the class contain the method? var controllerType = workbookClass.GetType (); var methodInfo = controllerType.GetMethod (actionName); if (methodInfo != null) { // Yes, wire up action to class toolbarItem.Activated += (sender, e) => { methodInfo.Invoke (workbookClass, new [] { sender }); }; } } /// <summary> /// Attempts to bind an object inflated from a compiled Storyboard to an "Action" /// property on the given workbook class. This hack is a workaround since true /// Storyboard Actions are not supported in workbooks. /// </summary> /// <param name="action">The selector that represents the Action to bind.</param> /// <param name="workbookClass">The workbook class being bound to.</param> /// <param name="control">The control that is the target of the binding.</param> private static void BindAction (Selector action, NSObject workbookClass, NSControl control) { // Anything to process? if (action == null) return; // Switch to .NET style method name var actionName = action.Name.Substring (0, 1).ToUpper () + action.Name.Substring (1).Replace (":", ""); // Does the class contain the method? var controllerType = workbookClass.GetType (); var methodInfo = controllerType.GetMethod (actionName); if (methodInfo != null) { // Yes, wire up action to class control.Activated += (sender, e) => { methodInfo.Invoke (workbookClass, new [] { sender }); }; } } #endregion } /// <summary> /// Helper class that can inflate Views and View Controllers from a compiled Storyboard /// that has been included in a workbook. /// </summary> public class StoryboardInflator : NSObject { #region Computed Properties /// <summary> /// Gets or sets the first responder that will act as the parent to all objects /// instantiated from the Storyboard. /// </summary> /// <value>The first responder.</value> public NSObject FirstResponder { get; set; } /// <summary> /// Gets or sets the bundle that contains the compiled Storyboard. /// </summary> /// <value>The bundle.</value> public NSBundle Bundle { get; set; } /// <summary> /// Gets or sets the main menu identifier. /// </summary> /// <value>The main menu identifier.</value> public string MainMenuIdentifier { get; set; } /// <summary> /// Gets or sets the entry point identifier for the initial object specified in /// the Storyboard. /// </summary> /// <value>The entry point identifier.</value> public string EntryPointIdentifier { get; set; } /// <summary> /// Gets or sets the view controller identifiers to nib names dictionary.
/// </summary> /// <value>The view controller identifiers to nib names.</value> public NSDictionary ViewControllerIdentifiersToNibNames { get; set; } /// <summary> /// Gets or sets the view controller identifiers to UUID dictionary. /// </summary> /// <value>The view controller identifiers to UUID.</value> public NSDictionary ViewControllerIdentifiersToUUIDs { get; set; } /// <summary> /// Gets or sets the segue definitions that were defined in the Storyboard. /// </summary> /// <value>The segue definition collection.</value> public List<StoryboardSegueDefinition> SegueDefinitions { get; set; } = new List<StoryboardSegueDefinition> (); #endregion #region Constructors /// <summary> /// Initializes a new instance of the <see cref="T:StoryboardInflator"/> class. /// </summary> /// <remarks>This constructor sets the First Responder to the AppDelegate.</remarks> /// <param name="bundleFile">The Bundle file that contains the compiled Storyboard.</param> public StoryboardInflator (string bundleFile) { // Initialize Initialize (bundleFile, (NSObject)NSApplication.SharedApplication.Delegate); } /// <summary> /// Initializes a new instance of the <see cref="T:StoryboardInflator"/> class. /// </summary> /// <param name="bundleFile">The Bundle file that contains the compiled Storyboard.</param> /// <param name="owner">The object that will act as the owner (First Responder) of /// any object instantiated from the Storyboard.</param> public StoryboardInflator (string bundleFile, NSObject owner) { // Initialize Initialize (bundleFile, owner); } /// <summary> /// Initialize the specified bundle file and owner. /// </summary> /// <param name="bundleFile">The Bundle file that contains the compiled Storyboard.</param> /// <param name="owner">The object that will act as the owner (First Responder) of /// any object instantiated from the Storyboard.</param> internal void Initialize (string bundleFile, NSObject owner) { // Initialize LoadSegueDefinitions (bundleFile); Bundle = new NSBundle (bundleFile); FirstResponder = owner; // Read the structure of the Storyboard EntryPointIdentifier = Bundle.InfoDictionary.ObjectForKey (new NSString ("NSStoryboardDesignatedEntryPointIdentifier")).ToString (); MainMenuIdentifier = Bundle.InfoDictionary.ObjectForKey (new NSString ("NSStoryboardMainMenu")).ToString (); ViewControllerIdentifiersToNibNames = Bundle.InfoDictionary.ObjectForKey (new NSString ("NSViewControllerIdentifiersToNibNames")) as NSDictionary; // Note: the UUID map uses its own Info.plist key, not the NIB-name key ViewControllerIdentifiersToUUIDs = Bundle.InfoDictionary.ObjectForKey (new NSString ("NSViewControllerIdentifiersToUUIDs")) as NSDictionary; } #endregion #region Private Methods /// <summary> /// Loads the segue definitions from the non-compiled version of the Storyboard.
/// </summary> /// <param name="bundleFile">The Bundle file that contains the compiled Storyboard.</param> private void LoadSegueDefinitions (string bundleFile) { // Open the non-compiled Storyboard var path = bundleFile.Replace (".storyboardc", ".storyboard"); var reader = new XmlTextReader (path); // Track the tree of objects that the Segue is defined in var parsingObject = StoryboardObjectType.Unknown; var parsingID = ""; var sceneType = StoryboardObjectType.Unknown; var sceneID = ""; // Read through the non-compiled Storyboard to find all Segue // definitions while (reader.Read ()) { // Take action based on node type switch (reader.Name) { case "application": sceneType = StoryboardObjectType.Application; var appID = reader.GetAttribute ("id"); if (appID != null) { sceneID = appID; } break; case "menuItem": var title = reader.GetAttribute ("title"); if (title != null) { parsingObject = StoryboardObjectType.MenuItem; parsingID = title; } break; case "windowController": sceneType = StoryboardObjectType.WindowController; var wcID = reader.GetAttribute ("id"); if (wcID != null) { parsingObject = StoryboardObjectType.WindowController; parsingID = wcID; sceneID = wcID; } break; case "toolbarItem": var tbID = reader.GetAttribute ("label"); if (tbID != null) { parsingObject = StoryboardObjectType.ToolbarItem; parsingID = tbID; } break; case "viewController": sceneType = StoryboardObjectType.ViewController; var vcID = reader.GetAttribute ("id"); if (vcID != null) { sceneID = vcID; } break; case "splitViewController": sceneType = StoryboardObjectType.SplitViewController; var svcID = reader.GetAttribute ("id"); if (svcID != null) { sceneID = svcID; } break; case "button": var buttonID = reader.GetAttribute ("identifier"); if (buttonID != null) { parsingObject = StoryboardObjectType.Button; parsingID = buttonID; } break; case "segue": // Read the Segue's Definition var destination = reader.GetAttribute ("destination"); var kind = reader.GetAttribute ("kind"); var identifier = reader.GetAttribute ("identifier"); var id = reader.GetAttribute ("id"); var relationship = reader.GetAttribute ("relationship"); var popoverAnchor = reader.GetAttribute ("popoverAnchorView"); var popoverBehavior = reader.GetAttribute ("popoverBehavior"); var popoverEdge = reader.GetAttribute ("preferredEdge"); // Create a new definition, populate it and add it to // the collection var definition = new StoryboardSegueDefinition (kind, this) { SceneType = sceneType, SceneID = sceneID, SourceObjectType = parsingObject, SourceObjectID = parsingID, DestinationControllerID = destination, SegueIdentifier = (identifier == null) ? id : identifier, Relationship = relationship, PopoverAnchorID = popoverAnchor }; definition.SetPopoverBehavior (popoverBehavior); definition.SetPopoverEdge (popoverEdge); SegueDefinitions.Add (definition); break; } } } /// <summary> /// Pulls the type name from the description. /// </summary> /// <returns>The class type name.</returns> /// <remarks>This method is used for debugging.</remarks> /// <param name="description">The description.</param> private string TypeName (string description) { var posn = description.IndexOf (":"); return description.Substring (1, posn - 1); } /// <summary> /// Pulls the class handle pointer from the description. 
/// </summary> /// <returns>The handle pointer.</returns> /// <remarks>This method is used for debugging.</remarks> /// <param name="description">The description.</param> private IntPtr ClassHandlePointer (string description) { var posn = description.IndexOf (":"); var value = description.Substring (posn + 1, description.Length - posn - 1); return Marshal.StringToHGlobalUni (value); } /// <summary> /// Prints the top objects to the console. /// </summary> /// <remarks>This method is used for debugging.</remarks> /// <param name="topLevelObjects">The collection of top level objects.</param> private void PrintTopObjects (NSArray topLevelObjects) { NSObject element = null; // Scan all top level objects for the window controller for (nuint n = 0; n < topLevelObjects.Count; ++n) { // Access the current object element = topLevelObjects.GetItem<NSObject> (n); Console.WriteLine ("> Type: {0}", TypeName (element.Description)); } } /// <summary> /// Checks every Menu and Menu Item to see if a Segue has been attached to it. /// If so, it sets the <c>StoryboardSegueDefinition</c> as the Menu Item's /// <c>Target</c>. /// </summary> /// <param name="menu">The <c>NSMenu</c> to scan.</param> private void LoadMenuSegueTargets (NSMenu menu) { // Check all items to see if they have been bound to a segue for (nint x = 0; x < menu.Count; ++x) { var menuItem = menu.ItemAt (x); // Has a segue been defined for this item? var segueDefinition = FindSegueDefinition (StoryboardObjectType.MenuItem, menuItem.Title); if (segueDefinition != null) menuItem.Target = segueDefinition; // Scan sub menus if (menuItem.Submenu != null) { LoadMenuSegueTargets (menuItem.Submenu); } } } /// <summary> /// Checks every Toolbar Item to see if a Segue has been attached to it. /// If so, it sets the <c>StoryboardSegueDefinition</c> as the Toolbar Item's /// <c>Target</c>. /// </summary> /// <param name="toolbar">The <c>NSToolbar</c> to scan.</param> private void LoadToolbarItemSegueTargets (NSToolbar toolbar) { // Check all items to see if they have been bound to a segue foreach (NSToolbarItem item in toolbar.Items) { // Has a segue been defined for this item? var segueDefinition = FindSegueDefinition (StoryboardObjectType.ToolbarItem, item.Label); if (segueDefinition != null) { item.Target = segueDefinition; } } } /// <summary> /// Checks every View and Sub View to see if a Segue has been attached to it. /// If so, it sets the <c>StoryboardSegueDefinition</c> as the View's /// <c>Target</c>. /// </summary> /// <param name="view">The <c>NSView</c> to scan.</param> public void LoadViewSegueTargets (NSView view) { // Bind segues for known items if (view is NSButton) { var button = view as NSButton; // Has a segue been defined for this item? var segueDefinition = FindSegueDefinition (StoryboardObjectType.Button, button.Identifier); if (segueDefinition != null) { button.Target = segueDefinition; } } // Bind every subview foreach (NSView subview in view.Subviews) { LoadViewSegueTargets (subview); } } /// <summary> /// Loads the NIB from the specified NIB name and recursively loads any sub NIBs /// for a known set of types such as <c>NSWindowController</c>, <c>NSViewController</c>, /// etc.
/// </summary> /// <returns>The main top level element from the NIB as a known type such as /// <c>NSWindowController</c>, <c>NSViewController</c>, etc.</returns> /// <param name="nibName">The name of the NIB to load.</param> private NSObject LoadNib (string nibName) { NSObject mainElement = null; var topLevelObjects = new NSArray (); NSObject element = null; var subNibName = ""; // Anything to process? if (nibName == null) return null; //Console.WriteLine ("Loading {0} -->", nibName); // Load the given NIB if (Bundle.LoadNibNamed (nibName, FirstResponder, out topLevelObjects)) { // Discovery //Console.WriteLine ("Parsing {0}:", nibName); //PrintTopObjects (topLevelObjects); // Scan all top level object for a known object type for (nuint n = 0; n < topLevelObjects.Count; ++n) { // Access the current object element = topLevelObjects.GetItem<NSObject> (n); // Search for known types if (element is NSMenu) { // Found main menu if (mainElement == null) mainElement = element; var menu = element as NSMenu; LoadMenuSegueTargets (menu); } else if (element is NSWindowController) { // Found window controller mainElement = element; var windowController = element as NSWindowController; // Load the window's content view if (windowController.Window.ContentViewController is NSSplitViewController) { // Access Split View Controller var splitViewController = windowController.Window.ContentViewController as NSSplitViewController; // Manufacture a new Split View and configure it the same as // the Split View from the storyboard var splitView = new NSSplitView (splitViewController.View.Frame) { ArrangesAllSubviews = splitViewController.SplitView.ArrangesAllSubviews, DividerStyle = splitViewController.SplitView.DividerStyle, IsVertical = splitViewController.SplitView.IsVertical }; // Load all the attached views foreach (NSSplitViewItem svItem in splitViewController.SplitViewItems) { subNibName = svItem.ViewController.NibName; var view = LoadNib (subNibName) as NSView; splitView.AddArrangedSubview (view); } splitView.AdjustSubviews (); // Attach to window windowController.Window.ContentView = splitView; } else { subNibName = windowController.Window.ContentViewController.NibName; var contentView = LoadNib (subNibName) as NSView; // Found? if (contentView != null) { // Yes, attach it to the window windowController.Window.ContentViewController.View = contentView; windowController.Window.ContentView = contentView; //Console.WriteLine ("* {0} View Attached", subNibName); } } // Is there a toolbar? if (windowController.Window.Toolbar != null) { // Scan items for attached segues LoadToolbarItemSegueTargets (windowController.Window.Toolbar); } } else if (element is NSViewController) { // Found View Controller if (mainElement == null) mainElement = element; var viewController = element as NSViewController; // Load the controller's view subNibName = viewController.NibName; var view = LoadNib (subNibName) as NSView; viewController.View = view; } else if (element is NSView) { // Found View if (mainElement == null) mainElement = element; var view = element as NSView; LoadViewSegueTargets (view); } } } else { // Report error Console.WriteLine ("Unable to load: {0}", nibName); } // Return the found main element return mainElement; } #endregion #region Public Methods /// <summary> /// Instantiates the main menu from the storyboard. 
/// </summary> /// <returns>The main menu.</returns> public NSMenu InstantiateMainMenu () { return LoadNib (MainMenuIdentifier) as NSMenu; } /// <summary> /// Instantiates the main menu from the storyboard and binds it to the /// given instance of a workbook class. /// </summary> /// <returns>The main menu.</returns> /// <param name="parent">The workbook instance to bind to.</param> public NSMenu InstantiateMainMenu (NSObject parent) { // Inflate menu var menu = LoadNib (MainMenuIdentifier) as NSMenu; // Bind menu to parent class StoryboardBinder.BindMenu (menu, parent); // Return the inflated menu return menu; } /// <summary> /// Instantiates the initial controller as specified in the Storyboard. /// </summary> /// <returns>The initial controller.</returns> public NSObject InstantiateInitialController () { // Load element var element = LoadNib (EntryPointIdentifier); // Is this a window controller? if (element is NSWindowController) { // Yes, display the window to the user var windowController = element as NSWindowController; windowController.Window.MakeKeyAndOrderFront ((NSObject)NSApplication.SharedApplication.Delegate); } // Return loaded object return element; } /// <summary> /// Instantiates the initial controller as specified in the Storyboard and binds it to /// the given instance of a workbook class. /// </summary> /// <returns>The initial controller.</returns> /// <param name="parent">The workbook instance to bind to.</param> public NSObject InstantiateInitialController (NSObject parent) { // Get initial element var element = InstantiateInitialController (); // Bind element to parent class StoryboardBinder.Bind (element, parent); // Return loaded object return element; } /// <summary> /// Instantiates the controller from the NIB of the given name. /// </summary> /// <returns>The controller for the given NIB name.</returns> /// <param name="nibName">Nib name.</param> public NSObject InstantiateControllerForNibName (string nibName) { var element = LoadNib (nibName); // Return loaded object return element; } /// <summary> /// Returns the NIB name for the given Identifier or the empty string ("") if /// not found. /// </summary> /// <returns>The name for identifier or empty string ("") if not found.</returns> /// <param name="identifier">The identifier to search for.</param> public string NibNameForIdentifier (string identifier) { for (int n = 0; n < ViewControllerIdentifiersToNibNames.Keys.Count (); ++n) { // Get the key and value var key = ViewControllerIdentifiersToNibNames.Keys [n].ToString (); var value = ViewControllerIdentifiersToNibNames.Values [n].ToString (); // Found? if (key == identifier) return value; } // Not found, return empty string return ""; } /// <summary> /// Returns the NIB name for the partial Identifier or the empty string ("") if /// not found. /// </summary> /// <returns>The name for identifier or empty string ("") if not found.</returns> /// <param name="identifier">The partial identifier to search for.</param> public string NibNameForPartialIdentifier (string identifier) { for (int n = 0; n < ViewControllerIdentifiersToNibNames.Keys.Count (); ++n) { // Get the key and value var key = ViewControllerIdentifiersToNibNames.Keys [n].ToString (); var value = ViewControllerIdentifiersToNibNames.Values [n].ToString (); // Found? if (key.Contains(identifier)) return value; } // Not found, return empty string return ""; } /// <summary> /// Returns the UUID for the given Identifier or the empty string ("") if /// not found.
/// </summary> /// <returns>The UUID for the identifier or empty string ("") if not found.</returns> /// <param name="identifier">The identifier to search for.</param> public string UUIDForIdentifier (string identifier) { for (int n = 0; n < ViewControllerIdentifiersToUUIDs.Keys.Count (); ++n) { // Get the key and value var key = ViewControllerIdentifiersToUUIDs.Keys [n].ToString (); var value = ViewControllerIdentifiersToUUIDs.Values [n].ToString (); // Found? if (key == identifier) return value; } // Not found, return empty string return ""; } /// <summary> /// Instantiates the controller for the given identifier. /// </summary> /// <returns>The controller for identifier.</returns> /// <param name="identifier">The identifier to instantiate.</param> public NSObject InstantiateControllerForIdentifier (string identifier) { var nibName = NibNameForIdentifier (identifier); // Found? if (nibName == "") { // No return null; } else { // Yes, instantiate named nib return InstantiateControllerForNibName (nibName); } } /// <summary> /// Instantiates the controller for the given partial identifier. /// </summary> /// <returns>The controller for partial identifier.</returns> /// <param name="identifier">The partial identifier to instantiate.</param> public NSObject InstantiateControllerForPartialIdentifier (string identifier) { var nibName = NibNameForPartialIdentifier (identifier); // Found? if (nibName == "") { // No return null; } else { // Yes, instantiate named nib return InstantiateControllerForNibName (nibName); } } /// <summary> /// Finds the segue definition for the given Scene and Source Object. /// </summary> /// <returns>The segue definition or <c>null</c> if not found.</returns> /// <param name="sceneType">Scene type.</param> /// <param name="sceneID">Scene identifier.</param> /// <param name="sourceObjectType">Source object type.</param> /// <param name="sourceObjectID">Source object identifier.</param> public StoryboardSegueDefinition FindSegueDefinition (StoryboardObjectType sceneType, string sceneID, StoryboardObjectType sourceObjectType, string sourceObjectID) { // Scan all definitions foreach (StoryboardSegueDefinition definition in SegueDefinitions) { // Found? if (definition.SceneType == sceneType && definition.SceneID == sceneID && definition.SourceObjectType == sourceObjectType && definition.SourceObjectID == sourceObjectID) { return definition; } } // Not found return null; } /// <summary> /// Finds the segue definition for the given Source Object. /// </summary> /// <returns>The segue definition or <c>null</c> if not found.</returns> /// <param name="sourceObjectType">Source object type.</param> /// <param name="sourceObjectID">Source object identifier.</param> public StoryboardSegueDefinition FindSegueDefinition (StoryboardObjectType sourceObjectType, string sourceObjectID) { // Scan all definitions foreach (StoryboardSegueDefinition definition in SegueDefinitions) { // Found? if (definition.SourceObjectType == sourceObjectType && definition.SourceObjectID == sourceObjectID) { return definition; } } // Not found return null; } /// <summary> /// Finds the segue definition for the given Segue Identifier. /// </summary> /// <returns>The segue definition or <c>null</c> if not found.</returns> /// <param name="identifier">The Segue Identifier to find.</param> public StoryboardSegueDefinition FindSegueDefinition (string identifier) { // Scan all definitions foreach (StoryboardSegueDefinition definition in SegueDefinitions) { // Found?
if (definition.SegueIdentifier == identifier) { return definition; } } // Not found return null; } #endregion }
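A minimal usage sketch of the loader above, assuming the containing class is named StoryboardInflator and takes the storyboard name in its constructor (both are assumptions; only the public methods appear in this file):

// Hypothetical driver code, e.g. in an AppDelegate. "StoryboardInflator" and its
// constructor argument are assumed names; InstantiateMainMenu and
// InstantiateInitialController are the methods defined above.
public override void DidFinishLaunching (NSNotification notification)
{
    var inflator = new StoryboardInflator ("Main");
    // Inflate and bind the main menu, then show the initial window controller.
    NSApplication.SharedApplication.MainMenu = inflator.InstantiateMainMenu (this);
    inflator.InstantiateInitialController (this);
}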
@{ ViewBag.Title = "InformationItemEdit"; Layout = "~/Views/Shared/_LayoutBlank.cshtml"; } <link rel="stylesheet" type="text/css" href="/Scripts/wangEditor/css/wangEditor.min.css"> <script type="text/javascript" src='/Scripts/wangEditor/js/wangEditor.min.js'></script> <style> .wangEditor-container .wangEditor-txt p, .wangEditor-container .wangEditor-txt h1, .wangEditor-container .wangEditor-txt h2, .wangEditor-container .wangEditor-txt h3, .wangEditor-container .wangEditor-txt h4, .wangEditor-container .wangEditor-txt h5 { margin: 0px 0; line-height: 1.8; } </style> <style type="text/css"> .divImageMaterialContainer { width: 170px; float: left; margin-right: 10px; margin-top: 0px; background-color: #FFF; } .tableImageMaterialContainer { width: 100%; border-collapse: collapse; border: solid #E7E7EB; border-width: 1px 0 0 1px; } .tableImageMaterialContainer td { border: 1px solid #E7E7EB; } </style> <script> var _informationId = getQueryString("informationId"); var _categoryId = getQueryString("categoryId"); var _mode = "create";//modify var _validator; var _data = null; var _editor; $(document).ready(function () { $("[keyenter]").keypress(function (e) { if (e.keyCode == 13) { save(); } }); $("#txtImageUrl").blur(function () { loadImage(); }); _validator = $("#form").validate({ onfocusout: false, onkeyup: false, showErrors: showValidationErrors, rules: { "txtName": "required" }, messages: { "txtName": "请输入信息名称;" } }); //_editor = $('#txtDescription').wangEditor({ // 'menuConfig': [ // ['viewSourceCode'], // ['bold', 'underline', 'italic', 'foreColor', 'backgroundColor', 'strikethrough'], // ['blockquote', 'fontFamily', 'fontSize', 'setHead', 'list', 'justify'], // ['createLink', 'unLink', 'insertTable'], // ['insertLocation'], // ['undo', 'redo', 'fullScreen'] // ] //}); _editor = new wangEditor('divDescriptionEditor'); _editor.config.menus = [ 'source', '|', 'bold', 'underline', 'italic', 'strikethrough', 'eraser', 'forecolor', 'bgcolor', '|', 'quote', 'fontfamily', 'fontsize', 'head', 'unorderlist', 'orderlist', 'alignleft', 'aligncenter', 'alignright', '|', 'link', 'unlink', 'table', '|', 'undo', 'redo', 'fullscreen' ]; _editor.create(); load(); }); function load() { var id = getQueryString("id"); if (id == undefined || id == "") { return; } _mode = "modify"; $("#spanTitle").html("修改"); $("#btnRemove").show(); var loadLayerIndex = layer.load(0, { shade: [0.2, '#fff'] }); $.ajax({ url: "/Api/Information/GetInformationItem?id=" + id, type: "POST", dataType: "json", success: function (data, status, jqXHR) { layer.close(loadLayerIndex); if (data.Success) { _data = data.Data; $("#txtId").val(_data.Id); $("#txtName").val(_data.Name); $("#txtImageUrl").val(_data.Image); $("#txtPhoneNumber").val(_data.PhoneNumber); $("#txtIntroduction").val(_data.Introduction); _editor.$txt.html(_data.Description); loadImage(); loadDetailImageList(); } else { layerAlert(data.Message); } }, error: function (xmlHttpRequest) { layer.close(loadLayerIndex); alert("Error: " + xmlHttpRequest.status); } }); } function save() { if (_validator.form() == false) { return; } var loadLayerIndex = layer.load(0, { shade: [0.2, '#fff'] }); var url = "/Api/Information/CreateInformationItem"; if (_mode == "modify") { url = "/Api/Information/UpdateInformationItem"; } if (_data == undefined || _data == null) { _data = new Object(); } _data.Information = _informationId; _data.Category = _categoryId; _data.Name = $("#txtName").val(); _data.Image = $("#txtImageUrl").val(); _data.PhoneNumber = $("#txtPhoneNumber").val(); 
_data.Introduction = $("#txtIntroduction").val(); _data.Description = _editor.$txt.html(); //if (_data.ImageList == undefined || _data.ImageList == null) { // _data.ImageList = new Array(); //} $.ajax({ url: url, type: "POST", dataType: "json", data: JSON.stringify(_data), success: function (data, status, jqXHR) { layer.close(loadLayerIndex); if (data.Success) { var index = parent.layer.getFrameIndex(window.name); if (_mode == "create") { parent.loadDataAndCloseLayer(index); } else { parent.loadDataOnPageAndCloseLayer(index); } } else { layerAlert(data.Message); } }, error: function (xmlHttpRequest) { layer.close(loadLayerIndex); alert("Error: " + xmlHttpRequest.status); } }); } function removeData() { var id = $("#txtId").val(); if (id == undefined || id == "") { return; } var msg = "是否确认删除?" var confirmLayerIndex = layer.confirm(msg, { btn: ['确认', '取消'], //按钮 shade: [0.4, '#393D49'], title: false, closeBtn: false, shift: _layerShift }, function () { layer.close(confirmLayerIndex); var loadLayerIndex = layer.load(0, { shade: [0.2, '#fff'] }); $.ajax({ url: "/Api/Information/RemoveInformationItem?id=" + id, type: "POST", dataType: "json", success: function (data, status, jqXHR) { if (data.Success) { var index = parent.layer.getFrameIndex(window.name); parent.loadDataOnPageAndCloseLayer(index); } else { layer.closeAll(); layerAlert(data.Message); } }, error: function (xmlHttpRequest) { layer.closeAll(); alert("Error: " + xmlHttpRequest.status); } }); }); } function cancel() { var index = parent.layer.getFrameIndex(window.name); parent.layer.close(index); } function loadImage() { $("#image").attr("src", $("#txtImageUrl").val()); } function uploadFile() { __showFileUpload(getUploadResult); } function getUploadResult(fileServiceAddress, result) { var url = fileServiceAddress + result.Data.StoreFilePath; $("#txtImageUrl").val(url); loadImage(); } function removeImage() { $("#txtImageUrl").val(""); loadImage(); } //详细图片 function uploadDetailImage() { __showFileUpload(getUploadDetailImageResult); } function getUploadDetailImageResult(fileServiceAddress, result) { var url = fileServiceAddress + result.Data.StoreFilePath; var imageListItem = new Object(); imageListItem.Id = result.Data.Id; imageListItem.Url = url; if (_data == undefined || _data == null) { _data = new Object(); } if (_data.ImageList == undefined || _data.ImageList == null) { _data.ImageList = new Array(); } _data.ImageList[_data.ImageList.length] = imageListItem; loadDetailImageList(); } function loadDetailImageList() { if (_data == undefined || _data == null || _data.ImageList == undefined || _data.ImageList == null) { document.getElementById('divImageListContainer').innerHTML = ""; return; } var gettpl = document.getElementById('imageListTemplate').innerHTML; laytpl(gettpl).render(_data.ImageList, function (html) { document.getElementById('divImageListContainer').innerHTML = html; }); } function removeDetailImageItem(id) { for (var i = 0; i < _data.ImageList.length; i++) { if (_data.ImageList[i].Id == id) { _data.ImageList.splice(i, 1); break; } } loadDetailImageList(); } /////// ////详细图片 function uploadDescriptionFile() { __showFileUpload(getUploadDescriptionFileResult); } function getUploadDescriptionFileResult(fileServiceAddress, result) { var url = fileServiceAddress + result.Data.StoreFilePath; _editor.$txt.append("<img src='" + url + "' style='max-width:100%' />"); } function closeAllLayer() { layer.closeAll(); } </script> <script id="imageListTemplate" type="text/html"> {{# for(var i = 0, len = d.length; i < len; i++){ }} 
<div class="divImageMaterialContainer"> <table class="tableImageMaterialContainer"> <tr> <td height="150" align="center"><img style="max-width:166px; max-height:100%" src="{{ d[i].Url }}" /></td> </tr> <tr> <td height="30" valign="middle" bgcolor="#F4F5F9"> <div> <div style="float: right; margin-right: 10px;"> <img src="/Content/Images/ico_remove.jpg" width="20" height="20" onclick="removeDetailImageItem('{{ d[i].Id }}')"> </div> <div style="clear: both"></div> </div> </td> </tr> </table> </div> {{# } }} <div style="clear:both"></div> </script> <div style="margin-left:20px; margin-right:20px; margin-top:20px;"> <span id="spanTitle" class="font_black_24">信息</span> </div> <div style=" background-color:#ccc; margin-left:20px; margin-right:20px; margin-top:10px; height:1px;"> </div> <div style=" position:absolute; overflow:auto ;margin-top:25px;left:30px; right:30px; bottom:60px; top:50px; "> <form id="form"> <input type="hidden" id="txtId" /> <table width="100%" border="0" cellspacing="0" cellpadding="0"> <tr> <td width="110" height="36">名称:</td> <td><input id="txtName" name="txtName" type="text" class="input_16" style="width:96%; " maxlength="25" keyenter /></td> </tr> <tr> <td valign="top"> <div style="margin-top:5px;"> 封面图片: </div> </td> <td valign="top"> <div class="divBorder_gray" style="margin-bottom:5px;width:96%;"> <div style="padding:5px;"> <table width="100%" border="0" cellspacing="0" cellpadding="0"> <tr> <td width="120"><img id="image" alt="" style="max-height:100px;" /></td> <td align="right"> <input id="txtImageUrl" name="txtImageUrl" type="hidden" class="input_16" style="width:96%; " maxlength="500" keyenter /> <a href="javascript:void(0)" onclick="uploadFile()">上传新图片</a><br /> <a href="javascript:void(0)" onclick="removeImage()">删除图片</a> </td> </tr> </table> </div> </div> </td> </tr> @*<tr> <td height="36">&nbsp;</td> <td><input id="txtImageUrl" name="txtImageUrl" type="text" class="input_16" style="width:96%; " keyenter /></td> </tr>*@ <tr> <td width="110" height="36" valign="top"> <div style="margin-top:5px;"> 详细图片: </div> </td> <td> <div class="divBorder_gray" style="margin-bottom:5px;width:96%;"> <div style="padding:5px;"> <div id="divImageListContainer"> @*<div style="float:left"> <img src="http://wxcfile1.shengxunwei.com/FileStore/2a58d820-de07-4c8f-80b9-b5cb5a1028b4/eff4f12c-640d-4776-98be-0ccee007e9cf.jpg" /> </div> <div style="clear:both"></div>*@ </div> <div style="text-align:right"><a href="javascript:void(0)" onclick="uploadDetailImage()">上传新图片</a><br /></div> </div> </div> </td> </tr> <tr> <td width="110" height="36">电话:</td> <td><input id="txtPhoneNumber" name="txtPhoneNumber" type="text" class="input_16" style="width:96%; " maxlength="15" keyenter /></td> </tr> <tr> <td width="110" height="36">简要说明:</td> <td> <textarea id='txtIntroduction' rows="3" style='width:96%; ' class="input_16" maxlength="150"></textarea> </td> </tr> <tr> <td width="110" height="36" valign="top"> <div style="margin-top:10px;"> 详细说明: </div> </td> <td> <div style="width:96%; margin-top:10px;"> <div id="divDescriptionEditor" style='height:400px; '></div> </div> </td> </tr> <tr> <td width="110">&nbsp;</td> <td> <div style="width:96%;"> <div style="float:right;"> <input name="btnUpload" type="button" class="btn_white" id="btnUpload" value="上传图片" onclick="uploadDescriptionFile()" /> </div> <div style="clear:both"></div> </div> </td> </tr> </table> </form> </div> <div style=" background-color:#ccc; position:absolute; bottom:55px; left:20px;right:20px; height:1px;"> </div> <div 
style="position:absolute; bottom:15px; left:20px;right:20px;"> <div style="float:left;"> <input name="btnRemove" type="button" class="btn_red" id="btnRemove" value="删 除" style="display:none" onclick="removeData()" /> </div> <div style="float:right"> <input name="btnSave" type="button" class="btn_blue" id="btnSave" value="保 存" onclick="save()" /> <input name="btnCancel" type="button" class="btn_blue" id="btnCancel" value="取 消" onclick="cancel()" /> </div> <div style="clear:both"> </div> </div> @Helpers.FileUpload()
@using System.Globalization @using Abp.Configuration @using Abp.Configuration.Startup @using Abp.Web.Security.AntiForgery @using Volo.PostgreSqlDemo @using Volo.PostgreSqlDemo.Configuration @using Volo.PostgreSqlDemo.SignalR @using Volo.PostgreSqlDemo.Web.Resources @using Volo.PostgreSqlDemo.Web.Views.Shared.Components.SideBarNav @using Volo.PostgreSqlDemo.Web.Views.Shared.Components.SideBarUserArea @using Volo.PostgreSqlDemo.Web.Views.Shared.Components.TopBarLanguageSwitch @using Volo.PostgreSqlDemo.Web.Views.Shared.Components.RightSideBar @inject Volo.PostgreSqlDemo.Timing.AppTimes AppTimes @inject IAbpAntiForgeryManager AbpAntiForgeryManager @inject IMultiTenancyConfig MultiTenancyConfig @inject IWebResourceManager WebResourceManager @inject ISettingManager SettingManager; @{ Layout = null; AbpAntiForgeryManager.SetCookie(Context); var uiTheme = await SettingManager.GetSettingValueAsync(AppSettingNames.UiTheme); } <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no" name="viewport"> <meta name="description" content=""> <meta name="author" content=""> <link rel="shortcut icon" href="~/favicon.ico"> <title>PostgreSqlDemo</title> <environment names="Development"> <link href="~/fonts/roboto/roboto.css" rel="stylesheet" asp-append-version="true" /> <link href="~/fonts/material-icons/materialicons.css" rel="stylesheet" asp-append-version="true" /> <link href="~/lib/bootstrap/dist/css/bootstrap.css" rel="stylesheet" asp-append-version="true" /> <link href="~/lib/bootstrap-select/dist/css/bootstrap-select.css" rel="stylesheet" asp-append-version="true" /> <link href="~/lib/toastr/toastr.css" rel="stylesheet" asp-append-version="true" /> <link href="~/lib/famfamfam-flags/dist/sprite/famfamfam-flags.css" rel="stylesheet" asp-append-version="true" /> <link href="~/lib/font-awesome/css/font-awesome.css" rel="stylesheet" asp-append-version="true" /> <link href="~/lib/Waves/dist/waves.css" rel="stylesheet" asp-append-version="true" /> <link href="~/lib/animate.css/animate.css" rel="stylesheet" asp-append-version="true" /> <link href="~/css/materialize.css" rel="stylesheet" asp-append-version="true" /> <link href="~/css/style.css" rel="stylesheet" asp-append-version="true"> <link href="~/css/themes/all-themes.css" rel="stylesheet" asp-append-version="true" /> <link href="~/view-resources/Views/Shared/_Layout.css" rel="stylesheet" asp-append-version="true" /> </environment> <environment names="Staging,Production"> <link href="~/view-resources/Views/_Bundles/shared-layout.min.css" rel="stylesheet" asp-append-version="true" /> </environment> <!-- View specific styles --> @RenderSection("styles", required: false) <script type="text/javascript"> // This is used to get the application's root path from javascript. It's useful if you're running application in a virtual directory under IIS. 
var abp = abp || {}; abp.appPath = '@ApplicationPath'; </script> </head> <body class="@("theme-" + uiTheme)"> <!-- Page Loader --> <div class="page-loader-wrapper"> <div class="loader"> <div class="preloader"> <div class="spinner-layer pl-red"> <div class="circle-clipper left"> <div class="circle"></div> </div> <div class="circle-clipper right"> <div class="circle"></div> </div> </div> </div> <p>Please wait...</p> </div> </div> <!-- #END# Page Loader --> <!-- Overlay For Sidebars --> <div class="overlay"></div> <!-- #END# Overlay For Sidebars --> <!-- Search Bar --> <div class="search-bar"> <div class="search-icon"> <i class="material-icons">search</i> </div> <input type="text" placeholder="START TYPING..."> <div class="close-search"> <i class="material-icons">close</i> </div> </div> <!-- #END# Search Bar --> <!-- Top Bar --> <nav class="navbar"> <div class="container-fluid"> <div class="navbar-header"> <a href="javascript:void(0);" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar-collapse" aria-expanded="false"></a> <a href="javascript:void(0);" class="bars"></a> <a class="navbar-brand" asp-controller="Home" asp-action="Index"><i class="fa fa-cubes"></i> PostgreSqlDemo</a> </div> <div class="collapse navbar-collapse" id="navbar-collapse"> <ul class="nav navbar-nav navbar-right"> <!-- Call Search --> <li><a href="javascript:void(0);" class="js-search" data-close="true"><i class="material-icons">search</i></a></li> <!-- #END# Call Search --> @await Component.InvokeAsync(typeof(TopBarLanguageSwitchViewComponent)) <li class="pull-right"><a href="javascript:void(0);" class="js-right-sidebar" data-close="true"><i class="material-icons">more_vert</i></a></li> </ul> </div> </div> </nav> <!-- #Top Bar --> <section> <aside id="leftsidebar" class="sidebar"> @await Component.InvokeAsync(typeof(SideBarUserAreaViewComponent)) @await Component.InvokeAsync(typeof(SideBarNavViewComponent), new { activeMenu = ViewBag.CurrentPageName }) <!-- Footer --> <div class="legal"> <div class="copyright"> &copy; @DateTime.Now.Year <a href="javascript:void(0);">PostgreSqlDemo</a>. 
</div> <div class="version"> <b>Version </b> @AppVersionHelper.Version [@AppVersionHelper.ReleaseDate.ToString("yyyyMMdd")] </div> </div> <!-- #Footer --> </aside> @await Component.InvokeAsync(typeof(RightSideBarViewComponent)) </section> <section class="content"> <div class="container-fluid"> @RenderBody() </div> </section> <environment names="Development"> <script src="~/lib/json2/json2.js" asp-append-version="true"></script> <script src="~/lib/jquery/dist/jquery.js" asp-append-version="true"></script> <script src="~/lib/bootstrap/dist/js/bootstrap.js" asp-append-version="true"></script> <script src="~/lib/moment/min/moment-with-locales.js" asp-append-version="true"></script> <script src="~/lib/jquery-validation/dist/jquery.validate.js" asp-append-version="true"></script> <script src="~/lib/blockUI/jquery.blockUI.js" asp-append-version="true"></script> <script src="~/lib/toastr/toastr.js" asp-append-version="true"></script> <script src="~/lib/sweetalert/dist/sweetalert.min.js" asp-append-version="true"></script> <script src="~/lib/spin.js/spin.js" asp-append-version="true"></script> <script src="~/lib/spin.js/jquery.spin.js" asp-append-version="true"></script> <script src="~/lib/bootstrap-select/dist/js/bootstrap-select.js" asp-append-version="true"></script> <script src="~/lib/jquery-slimscroll/jquery.slimscroll.js" asp-append-version="true"></script> <script src="~/lib/Waves/dist/waves.js" asp-append-version="true"></script> <script src="~/lib/push.js/push.js" asp-append-version="true"></script> <script src="~/lib/abp-web-resources/Abp/Framework/scripts/abp.js" asp-append-version="true"></script> <script src="~/lib/abp-web-resources/Abp/Framework/scripts/libs/abp.jquery.js" asp-append-version="true"></script> <script src="~/lib/abp-web-resources/Abp/Framework/scripts/libs/abp.toastr.js" asp-append-version="true"></script> <script src="~/lib/abp-web-resources/Abp/Framework/scripts/libs/abp.blockUI.js" asp-append-version="true"></script> <script src="~/lib/abp-web-resources/Abp/Framework/scripts/libs/abp.spin.js" asp-append-version="true"></script> <script src="~/lib/abp-web-resources/Abp/Framework/scripts/libs/abp.sweet-alert.js" asp-append-version="true"></script> <script src="~/js/admin.js"></script> <script src="~/js/main.js" asp-append-version="true"></script> <script src="~/view-resources/Views/Shared/_Layout.js" asp-append-version="true"></script> @if (SignalRFeature.IsAvailable && !SignalRFeature.IsAspNetCore) { <script src="~/lib/signalr/jquery.signalR.js" asp-append-version="true"></script> } </environment> <environment names="Staging,Production"> <script src="~/view-resources/Views/_Bundles/shared-layout.min.js" asp-append-version="true"></script> </environment> <script> // Localizing momentjs moment.locale('@CultureInfo.CurrentUICulture.Name'); </script> <!-- Dynamic scripts of ABP system (They are created on runtime and can not be bundled) --> <script src="~/AbpServiceProxies/GetAll?v=@(AppTimes.StartupTime.Ticks)" type="text/javascript"></script> <script src="~/AbpScripts/GetScripts?v=@(AppTimes.StartupTime.Ticks)" type="text/javascript"></script> @if (SignalRFeature.IsAvailable) { <!-- SignalR scripts --> if (SignalRFeature.IsAspNetCore) { <script src="~/lib/signalr-client/signalr.min.js" asp-append-version="true"></script> <script src="~/lib/abp-web-resources/Abp/Framework/scripts/libs/abp.signalr-client.js" asp-append-version="true"></script> } else { <script src="~/signalr/hubs"></script> <script src="~/lib/abp-web-resources/Abp/Framework/scripts/libs/abp.signalr.js" 
type="text/javascript"></script> } } @WebResourceManager.RenderScripts() <!-- View specific scripts --> @RenderSection("scripts", required: false) </body> </html>
@model MOE.Common.Models.ViewModel.LinkPivotResultViewModel <table id="AdjustmentTable" class="table table-striped table-bordered table-condensed"> <caption>Adjustments</caption> <thead> <tr> <th class="vertical-center text-center" scope="col">Link</th> <th class="vertical-center text-center" scope="col">Signal</th> <th class="vertical-center text-center" scope="col">Location</th> <th class="vertical-center text-center" scope="col">Link Delta</th> <th class="vertical-center text-center" id="EditLinkDelta" scope="col">Edit Link Delta</th> <th class="vertical-center text-center" scope="col">Offset(+ to Offset)</th> <th class="vertical-center text-center" id="ExistingOffsetLabel" scope="col">Existing Offset</th> <th class="vertical-center text-center" scope="col">New Offset</th> </tr> </thead> <tbody> @foreach (var item in Model.Adjustments) { <tr> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.LinkNumber) </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.SignalId) </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.Location) </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.Delta) </td> <td class="vertical-center"> @Html.TextBoxFor(modelItem => item.Delta, new { id = "uxEditDelta" + @item.LinkNumber, @Class = "DisplayInput", aria_labelledby = "EditLinkDelta", onchange = "AdjustOffset()" }) </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.Adjustment, new { id = "uxAdjustmentLabel" + @item.LinkNumber }) </td> <td class="vertical-center"> <input id="Text@(item.LinkNumber)" title="Modifiable Existing Offset" class="DisplayInput" type="text" onchange="AdjustOffset()" value="0" aria-labelledby="ExistingOffsetLabel" /> </td> <td class="vertical-center"> <span id="uxNewOffsetLabel@(item.LinkNumber)">@item.Adjustment</span> </td> </tr> } </tbody> </table> <table class="table table-body-striped table-bordered table-condensed table-body-hover"> <caption>Approach Link Comparison</caption> <thead> <tr> <th class="vertical-center text-center" rowspan="2">Link</th> <th class="vertical-center text-center" colspan="2">Approaches</th> <th class="vertical-center text-center" colspan="3">Upstream AOG</th> <th class="vertical-center text-center" colspan="3">Downstream AOG</th> <th class="vertical-center text-center" colspan="3">Total Link AOG</th> <th class="vertical-center text-center" rowspan="2">Delta</th> <th class="vertical-center text-center" rowspan="2">AOG Chart</th> </tr> <tr> <th class="vertical-center text-center">Upstream</th> <th class="vertical-center text-center">Downstream</th> <th class="vertical-center text-center">Existing</th> <th class="vertical-center text-center">Predicted</th> <th class="vertical-center text-center">Change</th> <th class="vertical-center text-center">Existing</th> <th class="vertical-center text-center">Predicted</th> <th class="vertical-center text-center">Change</th> <th class="vertical-center text-center">Existing</th> <th class="vertical-center text-center">Predicted</th> <th class="vertical-center text-center">Change</th> </tr> </thead> @foreach (var item in Model.ApproachLinks) { <tbody> <tr> <td class="vertical-center" rowspan="2"> @item.LinkNumber </td> <td class="vertical-center"> @item.UpstreamCombinedLocation </td> <td class="vertical-center"> @item.DownstreamCombinedLocation </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.AOGUpstreamBefore) </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.AOGUpstreamPredicted) </td> <td class="vertical-center " rowspan="2"> <div
class="jsChart-Small" > <canvas id="@item.UpstreamChartName"></canvas> <script type="text/javascript"> var data = { labels: ["", "", "", ""], datasets: [ { data: [@item.UpstreamChartExisting, @item.UpstreamChartPositiveChange, @item.UpstreamChartNegativeChange, @item.UpstreamChartRemaining ], backgroundColor: ["#B7E8A7", "#339933", "#cc0000", "#c0c0c0"], hoverBackgroundColor: ["#BDE8AE", "#8cd98c", "#ff0000", "#e6e6e6"], borderWidth: [1, 1, 1, 1] } ] }; // Get context with jQuery - using jQuery's .get() method. var ctx = $("#@item.UpstreamChartName").get(0).getContext("2d"); var myPieChart = new Chart(ctx, { type: 'pie', data: data, options: { legend: { display: false } } }); </script> </div> </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.AOGDownstreamBefore) </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.AOGDownstreamPredicted) </td> <td class="vertical-center" rowspan="2"> <div class="jsChart-Small"> <canvas id="@item.DownstreamChartName"></canvas> <script type="text/javascript"> var data = { labels: ["", "", "", ""], datasets: [ { data: [@item.DownstreamChartExisting, @item.DownstreamChartPositiveChange, @item.DownstreamChartNegativeChange, @item.DownstreamChartRemaining ], backgroundColor: ["#B7E8A7", "#339933", "#cc0000", "#c0c0c0"], hoverBackgroundColor: ["#BDE8AE", "#8cd98c", "#ff0000", "#e6e6e6"], borderWidth: [1, 1, 1, 1] } ] }; // Get context with jQuery - using jQuery's .get() method. var ctx = $("#@item.DownstreamChartName").get(0).getContext("2d"); var myPieChart = new Chart(ctx, { type: 'pie', data: data, options: { legend: { display: false } } }); </script> </div> </td> <td class="vertical-center BoldText"> @item.AogTotalBefore </td> <td class="vertical-center BoldText"> @item.AogTotalPredicted </td> <td class="vertical-center" rowspan="2"> <div class="jsChart-Small"> <canvas id="@item.TotalChartName"></canvas> <script type="text/javascript"> var data = { labels: ["", "", "", ""], datasets: [ { data: [@item.TotalChartExisting, @item.TotalChartPositiveChange, @item.TotalChartNegativeChange, @item.TotalChartRemaining ], backgroundColor: ["#B7E8A7", "#339933", "#cc0000", "#c0c0c0"], hoverBackgroundColor: ["#BDE8AE", "#8cd98c", "#ff0000", "#e6e6e6"], borderWidth: [1, 1, 1, 1] } ] }; // Get context with jQuery - using jQuery's .get() method. 
var ctx = $("#@item.TotalChartName").get(0).getContext("2d"); var myPieChart = new Chart(ctx, { type: 'pie', data: data, options: { legend: { display: false } } }); </script> </div> </td> <td class="vertical-center" rowspan="2"> @Html.DisplayFor(modelItem => item.Delta) </td> <td class="vertical-center text-left" rowspan="2"> <a href="@item.ResultChartLocation" alt="Link to Result Chart Image" target="_blank"><img class="ResultChartImage" src="@item.ResultChartLocation" alt="Result Chart Image" /></a> <button type="button" onclick="GetPCDOptions('@item.SignalId', '@item.DownSignalID' , '@item.UpstreamApproachDirection' , '@item.DownstreamApproachDirection' , @item.Delta )">PCD Options</button> </td> </tr> <tr> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.Location) </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.DownLocation) </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.PAOGUpstreamBefore)% </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.PAOGUpstreamPredicted)% </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.PAOGDownstreamBefore)% </td> <td class="vertical-center"> @Html.DisplayFor(modelItem => item.PAOGDownstreamPredicted)% </td> <td class="vertical-center BoldText"> @item.PAogTotalBefore% </td> <td class="vertical-center BoldText"> @item.PAogTotalPredicted% </td> </tr> </tbody> } <tfoot> <tr> <th class="vertical-center text-center" colspan="3" rowspan="2">Corridor Summary</th> <th class="vertical-center text-center">@Model.TotalAogUpstreamBefore</th> <th class="vertical-center text-center">@Model.TotalAogUpstreamPredicted</th> <th class="vertical-center text-center" rowspan="2"> <div class="jsChart-Small" > <canvas id="SummaryUpstreamChangeChart"></canvas> <script type="text/javascript"> var data = { labels: ["", "", "", ""], datasets: [ { data: [@Model.TotalUpstreamChartExisting, @Model.TotalUpstreamChartPositiveChange, @Model.TotalUpstreamChartNegativeChange, @Model.TotalUpstreamChartRemaining ], backgroundColor: ["#B7E8A7", "#339933", "#cc0000", "#c0c0c0"], hoverBackgroundColor: ["#BDE8AE", "#8cd98c", "#ff0000", "#e6e6e6"], borderWidth: [1, 1, 1, 1] } ] }; // Get context with jQuery - using jQuery's .get() method. var ctx = $("#SummaryUpstreamChangeChart").get(0).getContext("2d"); var myPieChart = new Chart(ctx, { type: 'pie', data: data, options: { legend: { display: false } } }); </script> </div> </th> <th class="vertical-center text-center">@Model.TotalAogDownstreamBefore</th> <th class="vertical-center text-center">@Model.TotalAogDownstreamPredicted</th> <th class="vertical-center text-center" rowspan="2"> <div class="jsChart-Small" > <canvas id="SummaryDownstreamChangeChart"></canvas> <script type="text/javascript"> var data = { labels: ["", "", "", ""], datasets: [ { data: [@Model.TotalDownstreamChartExisting, @Model.TotalDownstreamChartPositiveChange, @Model.TotalDownstreamChartNegativeChange, @Model.TotalDownstreamChartRemaining ], backgroundColor: ["#B7E8A7", "#339933", "#cc0000", "#c0c0c0"], hoverBackgroundColor: ["#BDE8AE", "#8cd98c", "#ff0000", "#e6e6e6"], borderWidth: [1, 1, 1, 1] } ] }; // Get context with jQuery - using jQuery's .get() method. 
var ctx = $("#SummaryDownstreamChangeChart").get(0).getContext("2d"); var myPieChart = new Chart(ctx, { type: 'pie', data: data, options: { legend: { display: false } } }); </script> </div> </th> <th class="vertical-center text-center">@Model.TotalAogBefore</th> <th class="vertical-center text-center">@Model.TotalAogPredicted</th> <th class="vertical-center text-center" rowspan="2"> <div class="jsChart-Small" > <canvas id="TotalChangeChart"></canvas> <script type="text/javascript"> var data = { labels: ["", "", "", ""], datasets: [ { data: [@Model.TotalChartExisting, @Model.TotalChartPositiveChange, @Model.TotalChartNegativeChange, @Model.TotalChartRemaining ], backgroundColor: ["#B7E8A7", "#339933", "#cc0000", "#c0c0c0"], hoverBackgroundColor: ["#BDE8AE", "#8cd98c", "#ff0000", "#e6e6e6"], borderWidth: [1, 1, 1, 1] } ] }; // Get context with jQuery - using jQuery's .get() method. var ctx = $("#TotalChangeChart").get(0).getContext("2d"); var myPieChart = new Chart(ctx, { type: 'pie', data: data, options: { legend: { display: false } } }); </script> </div> </th> </tr> <tr> <th class="vertical-center text-center">@Model.TotalPaogUpstreamBefore %</th> <th class="vertical-center text-center">@Model.TotalPaogUpstreamPredicted %</th> <th class="vertical-center text-center">@Model.TotalPaogDownstreamBefore %</th> <th class="vertical-center text-center">@Model.TotalPaogDownstreamPredicted %</th> <th class="vertical-center text-center">@Model.TotalPaogBefore %</th> <th class="vertical-center text-center">@Model.TotalPaogPredicted %</th> </tr> </tfoot> </table>
@{ ViewBag.Title = "API Documentation"; var siteBaseUrl = "https://openchargemap.org/site/"; var apiBaseUrl = "https://api.openchargemap.io/v3/"; var apiSandboxBaseUrl = "https://sandbox.api.openchargemap.io/v2/"; } <link rel="stylesheet" href="~/Content/docs.css" type="text/css" /> <div class="row"> <div class="col-sm-4 hidden-xs"><div id="toc" style="position:fixed;"></div></div> <div class="col-sm-8 col-xs-12"> <article> <section> <h2 id="intro"><i class="fa fa-cogs"></i> The Open Charge Map API</h2> <div class="alert alert-danger">Use of the OCM API is subject to <a href="@Url.Action("Terms","About")" class="alert-link">terms and conditions</a>. By using the API you indicate acceptance of these terms.</div> <p>If you wish to export charging location data into your own systems or applications the most flexible way is to use our API, which provides an export in a variety of formats. If you wish to regularly refresh the entire dataset, please clone our data from <a href="https://github.com/openchargemap/ocm-data">GitHub</a>. You can also opt to run your own private <a href="https://github.com/openchargemap/ocm-system/tree/master/API/OCM.Net/OCM.API.Worker">API mirror</a>.</p> <h4>Fair Usage Policy</h4> <p> <strong class="text text-danger">The basic API is provided as a free service with no warranty or service level agreement. Providing this API to you costs us actual money for server resources and data transfer fees. See funding: <a href="https://opencollective.com/openchargemap">https://opencollective.com/openchargemap</a></strong> </p> <p> If you will be calling the API regularly (from an app or server) you <strong>must provide your API key</strong> as an <code>X-API-Key</code> header (case sensitive) or set the <code>key=YourAPIKey</code> url parameter. You should also set your http user-agent to a custom value to help identify your app. </p> <p> <strong>To obtain a free API key Sign In and choose 'my apps' from the profile menu.</strong> </p> <p>Do not repeatedly call the API with duplicate queries. Debounce/throttle your API requests to minimise the work our API has to do. 
The API administrator (Open Charge Map) reserves the right to ban API callers (including automated banning) if callers make excessive/indiscriminate use of the API, at the discretion of the OCM administrator.</p> <p>If you need to make a high volume of queries against the API please host your own API mirror or import the data into your own API.</p> </section> <section> <p><em>API V3 (documentation last updated Nov 2019)</em></p> <h3>Service Base URL: </h3> <p> <code>@Html.Raw(apiBaseUrl)</code> </p> <p> Uptime last 7 days: <a href="https://www.statuscake.com" target="_blank" title="Uptime For Last 7 Days : API Monitoring by StatusCake"><img src="https://app.statuscake.com/button/index.php?Track=41SG6hzhJW&Days=7&Design=5" /></a> last 30 days: <a href="https://www.statuscake.com" target="_blank" title="Uptime For Last 30 Days : API Monitoring by StatusCake"><img src="https://app.statuscake.com/button/index.php?Track=41SG6hzhJW&Days=30&Design=5" /></a> </p> <h2 id="POI"><i class="fa fa-map-marker"></i> Retrieving POI Data</h2> <p> <code>@Html.Raw(apiBaseUrl + "poi/")</code> </p> <h3>Example API Calls: </h3> <p>Return charging location information for the US in JSON format, limited to the first 10 results: <code><a href="@Html.Raw(apiBaseUrl+"poi/")?output=json&countrycode=US&maxresults=10">@Html.Raw(apiBaseUrl + "poi/")?output=json&amp;countrycode=US&amp;maxresults=10</a></code></p> <p> The default output contains a lot of information. Here is the same call as above, but with the most compact output (formatting removed, reference data as IDs instead of full objects, null fields skipped): <code><a href="@Html.Raw(apiBaseUrl+"poi/")?output=json&countrycode=US&maxresults=10&compact=true&verbose=false">@Html.Raw(apiBaseUrl + "poi/")?output=json&amp;countrycode=US&amp;maxresults=10<em>&amp;compact=true&amp;verbose=false</em></a></code> </p> <p>Return KML format results suitable for viewing in google earth/maps etc (UK, max 500 locations): <code><a href="@Html.Raw(apiBaseUrl+"poi/")?output=kml&countrycode=GB&maxresults=500">@Html.Raw(apiBaseUrl + "poi/")?output=kml&amp;countrycode=GB&amp;maxresults=500</a></code></p> <div class="alert alert-warning">Data returned by the API has mixed licensing and applicable copyright attribution (included in results as "Data Provider"). If you require <em>Open</em> licensed data you currently must filter by <code>opendata=true</code> to return only original OCM data.</div> <h3>Service Parameters:</h3> <table class="table table-striped table-bordered table-condensed"> <tr> <th>Parameter</th> <th>Description</th> <th>Default</th> </tr> <tr> <td>key</td> <td>Your API Key. Required for apps/services which intend to make repeated calls to the API either as a <code>key</code> parameter in the call URL or as an <code>X-API-Key</code> http header</td> <td>(blank)</td> </tr> <tr> <td>client</td> <td>Optional custom identifier for your app or service (if you can't set a custom http User Agent header)</td> <td>(blank)</td> </tr> <tr> <td>output</td> <td>json, geojson, xml, csv <em>JSON format is recommended as highest fidelity</em></td> <td>json</td> </tr> <tr> <td>maxresults</td> <td>limit on max number of results returned</td> <td>100</td> </tr> <tr> <td>countrycode</td> <td>GB, US etc.
Single ISO Country Code.</td> <td>(blank)</td> </tr> <tr> <td>countryid</td> <td>exact match on a given numeric country id (comma separated list)</td> <td>(blank)</td> </tr> <tr> <td>latitude</td> <td>latitude reference for distance calculation</td> <td>(blank)</td> </tr> <tr> <td>longitude</td> <td>longitude reference for distance calculation</td> <td>(blank)</td> </tr> <tr> <td>distance</td> <td>return results based on specified distance from specified latitude/longitude</td> <td>(blank)</td> </tr> <tr> <td>distanceunit</td> <td>Miles or KM</td> <td>Miles</td> </tr> <tr> <td>operatorid</td> <td>exact match on a given EVSE operator id (comma separated list)</td> <td>(blank)</td> </tr> <tr> <td>connectiontypeid</td> <td>exact match on a given connection type id (comma separated list)</td> <td>(blank)</td> </tr> <tr> <td>levelid</td> <td>exact match on a given charging level (1-3) id (comma separated list)</td> <td>(blank)</td> </tr> <tr> <td>minpowerkw</td> <td>minimum output power in kW (this information is not known for many locations)</td> <td>(blank)</td> </tr> <tr> <td>usagetypeid</td> <td>exact match on a given usage type id (comma separated list)</td> <td>(blank)</td> </tr> <tr> <td>statustypeid</td> <td>exact match on a given status type id (comma separated list)</td> <td>(blank)</td> </tr> <tr> <td>dataproviderid</td> <td>exact match on a given data provider id (comma separated list). Use opendata=true for only OCM provided ("Open") data.</td> <td>(blank)</td> </tr> <tr> <td>modifiedsince</td> <td>POIs modified since the given date (UTC) e.g. 2016-09-15T09:30</td> <td>(blank)</td> </tr> <tr> <td>opendata</td> <td>true or false. Set to true to include only Open Data licensed content, false to return only non-open licensed data. <span class="label label-warning">By default all available data is returned. You should refer to the license of the original data provider in each case.</span></td> <td>(blank)</td> </tr> <tr> <td>includecomments</td> <td>true or false. Set to true to also include user comments and media items (photos) per charging location. </td> <td>false</td> </tr> <tr> <td>verbose</td> <td>true or false. Set to false to get a smaller result set with null items removed. </td> <td>true</td> </tr> <tr> <td>compact</td> <td>true or false. Set to true to remove reference data objects from output (just returns IDs for common reference data such as DataProvider etc). </td> <td>false</td> </tr> <tr> <td>camelcase</td> <td>true or false. Set to true to get property names in camelCase format. </td> <td>false</td> </tr> <tr> <td>callback</td> <td>specify the name of the JSONP callback (if required), JSON response type only.</td> <td>(blank)</td> </tr> <tr> <td>chargepointid</td> <td>exact match on a given POI id (comma separated list).</td> <td>(blank)</td> </tr> </table> <p>Additionally from v3 of the API onwards you can query using a bounding box, polygon or polyline (for a route etc). See <a href="https://developers.google.com/maps/documentation/utilities/polylinealgorithm">here</a> for more info on polyline encoding.</p> <table class="table table-striped table-bordered table-condensed"> <tr> <th>Parameter</th> <th>Description</th> <th>Default</th> </tr> <tr> <td>boundingbox</td> <td>specify top left and bottom right box corners as: (lat,lng),(lat2,lng2)</td> <td>(blank)</td> </tr> <tr> <td>polygon</td> <td>Specify an encoded polyline for the polygon shape.
Polygon will be automatically closed from the last point to the first point.</td> <td>(blank)</td> </tr> <tr> <td>polyline</td> <td>encoded polyline, use with distance param to increase search distance along line. Polyline is expanded into a polygon to cover the search distance. </td> <td>(blank)</td> </tr> </table> </section> <section> <h2 id="referencedata"><i class="fa fa-table"></i> Retrieving Core Reference Data</h2> <p>Our core list of lookup values is termed Core Reference Data. This is the data you would require in order to present the user with Dropdown lists etc of possible values for Connection Type etc. This only returns content in JSON format.</p> <p> <code>@Html.Raw(apiBaseUrl + "referencedata/")</code> </p> <h3>Example API Calls: </h3> <p>Return all reference data in JSON format: <code><a href="@Html.Raw(apiBaseUrl + "referencedata/")">@Html.Raw(apiBaseUrl + "referencedata/")</a></code></p> <p> Optional filter parameters: </p> <table> <tr> <td>countryid</td> <td>exact match on a given numeric country id (comma separated list)</td> <td>(blank)</td> </tr> </table> </section> <section> <h2 id="comments"><i class="fa fa-comment"></i> Add New Comment/Check-In</h2> <p>To submit a new comment or check-in against a specific POI, use the following API endpoint to POST a JSON format comment.</p> <p> <code>@Html.Raw(apiBaseUrl + "?action=comment_submission&format=json")</code> </p> <p> Your JSON submission should be in the body of your POST and contain the following (for example): </p> <pre> <code> { "ChargePointID": 12345, "CommentTypeID": 10, "UserName": "A. Nickname", "Comment": "This place is awesome, free cake for EV owners!", "Rating": 5, "RelatedURL": "http://awesomevplace.com", "CheckinStatusTypeID": 0 }</code> </pre> <p> ChargePointID is the numeric OCM-ID of the POI location. Values for CommentTypeID and CheckinStatusTypeID can be found in <a href="#referencedata">core reference data</a>. </p> <p> ChargePointID, CommentTypeID and CheckinStatusTypeID are mandatory, all other fields are optional. </p> <div class="row"> <div class="col-sm-6"> <h3>Comment Types</h3> <div id="api-comment-types"> </div> </div> <div class="col-sm-6"> <h3>Check-In Status Types</h3> <div id="api-checkin-types"></div> </div> </div> </section> <!--<h2 id="odata">Direct database access via OData</h2> <p> In addition to the above services there is a read only <a href="http://odata.org">OData</a> service for browsing our database directly at <code>http://api.openchargemap.io/odata/odata.svc</code> which is provided for direct programmatic access to our live database. The advantage of this is it provides an output which very closely matches our internal database structure so no data is lost in translation. You can view this data using a number of free OData browser tools including: <a href="http://metasapiens.com/sesame/data-browser/preview?cn-provider=OData&cn-Uri=http%3a%2f%2fapi.openchargemap.io%2fodata%2fodata.svc%2f" target="_blank">Sesame Data Browser (beta)</a> (external link) </p> --> <section> <h2 id="linking"><i class="fa fa-external-link"></i> Linking to OCM Content and Features</h2> <p>In addition to the API there are a number of standardised URLs which can be used to initiate certain actions; this can be useful to launch from within an app or for hyperlinking.
The user can then sign in/register as required and proceed with the required action:</p> <h3>POI related actions</h3> <table class="table table-striped"> <tr><th>Action</th><th>URL</th></tr> <tr><td>View POI Details</td><td><code>@Html.Raw(siteBaseUrl + "poi/details/{OCM-ID}")</code></td></tr> <tr><td>Add a New POI</td><td><code>@Html.Raw(siteBaseUrl + "poi/add")</code></td></tr> <tr><td>Add a Comment/Check-In to an existing POI</td><td><code>@Html.Raw(siteBaseUrl + "poi/addcomment/{OCM-ID}")</code> where <code>{OCM-ID}</code> is the numeric ID of the POI to add a comment to.</td></tr> <tr><td>Add a Photo to an existing POI</td><td><code>@Html.Raw(siteBaseUrl + "poi/addmediaitem/{OCM-ID}")</code></td></tr> </table> </section> </article> </div> </div> <script type="text/javascript" src="~/js/OCM/SharedLibs/OCM_Data.js"></script> <script type="text/javascript" src="~/js/OCM/TableOfContents.js"></script> <script type="text/javascript"> $(function () { prepareTableOfContents(); fetchReferenceData(); }); var dataAPI = new OCM.API(); function fetchReferenceData() { dataAPI.fetchCoreReferenceData("populateCoreReferenceData") } function populateCoreReferenceData(result) { if (result != null) { dataAPI.referenceData = result; dataAPI.sortCoreReferenceData(); populateRefDataTable("api-comment-types", dataAPI.referenceData.UserCommentTypes); populateRefDataTable("api-checkin-types", dataAPI.referenceData.CheckinStatusTypes); } } function populateRefDataTable(elementId, referenceDataCollection) { var refDataTable = "<table class=\"table table-condensed table-striped\"><tr><th>ID</th><th>Title</th></tr>"; for (var i = 0; i < referenceDataCollection.length; i++) { var item = referenceDataCollection[i]; refDataTable += "<tr><td>" + item.ID + "</td><td>" + item.Title + "</td></tr>"; } refDataTable += "</table>"; $("#" + elementId).html(refDataTable); } </script>
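For completeness, one way a client might call the POI endpoint described above from C#, sending the X-API-Key header and a custom user agent as the fair usage policy requires (the key and app name are placeholders):

using System;
using System.Net.Http;
using System.Threading.Tasks;

class OcmPoiExample
{
    static async Task Main()
    {
        using var http = new HttpClient();
        // API key header and a custom user agent, per the fair usage policy above.
        http.DefaultRequestHeaders.Add("X-API-Key", "YOUR_API_KEY");
        http.DefaultRequestHeaders.UserAgent.ParseAdd("MyEvApp/1.0");

        // Compact JSON for up to 10 UK POIs (see the parameter table above).
        var url = "https://api.openchargemap.io/v3/poi/" +
                  "?output=json&countrycode=GB&maxresults=10&compact=true&verbose=false";
        Console.WriteLine(await http.GetStringAsync(url));
    }
}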
@model LoginViewModel @inject SignInManager<UserEntity> SignInManager @{ Layout = "~/Views/Shared/_Root.cshtml"; ViewData["Title"] = Localizer["Log in"]; } <div id="content"> <div id="logo"> <svg version="1.1" id="layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="150px" height="150px" viewBox="0 0 150 150" enable-background="new 0 0 150 150" xml:space="preserve"> <g> <path fill="#249CD9" d="M14.862,108.752c0-4.834,0-9.668,0-14.502c2.822,0.234,5.724,0.218,8.559,0.177 c2.837-0.044,5.373-0.032,7.792,0.031c2.417,0.061,4.618,0.204,6.768,0.314c2.144,0.112,4.067,0.171,5.826,0.027 c6.028-0.435,10.388-1.677,13.179-4.178c2.777-2.453,4.052-5.926,3.372-10.728c-0.353-2.55-1.283-4.832-2.643-6.819 c-1.354-1.983-2.979-3.735-4.588-5.159c-1.619-1.428-3.146-2.575-4.578-3.457c-1.45-0.889-2.726-1.577-4.385-2.292 c-1.752-0.756-3.808-1.569-6.548-2.771c-2.721-1.193-5.978-2.878-9.351-5.172c-3.367-2.28-6.686-5.242-9.216-8.649 c-2.56-3.401-3.925-7.31-3.912-11.205c-0.02-4.82,1.965-8.03,4.592-10.464c2.652-2.441,5.333-4.632,8.323-7.094 c2.996-2.416,6.258-5.136,10.343-7.215c4.056-2.092,8.541-3.413,13.09-3.384c10.466,0.021,17.306,2.74,22.098,4.72 c-0.637,5.697-1.186,10.913-1.648,15.778c-8.079-0.623-14.598-1.054-22.431-0.424c-2.144,0.167-4.254,0.583-6.333,1.178 c-2.082,0.593-3.896,1.489-5.464,2.618c-1.568,1.131-2.814,2.525-3.735,4.197c-0.923,1.668-1.239,3.832-0.862,6.462 c0.342,2.442,1.123,4.57,2.261,6.414c1.13,1.841,2.595,3.52,4.125,4.93c1.534,1.414,3.075,2.618,4.516,3.564 c1.464,0.956,2.79,1.733,4.45,2.46c1.713,0.754,3.634,1.498,6.324,2.659c2.668,1.153,5.949,2.813,9.487,5.187 c3.526,2.358,7.108,5.457,9.909,9.025c2.831,3.565,4.36,7.607,4.331,11.506c0.003,5.219-1.935,8.666-4.471,11.199 c-2.572,2.538-5.247,4.762-8.28,7.189c-3.042,2.382-6.408,5.009-10.644,6.861c-4.208,1.866-8.97,3.049-13.959,3.021 c-1.661,0.003-3.696-0.292-6.067-0.795c-2.375-0.502-4.742-1.186-7.158-1.875c-2.42-0.69-4.711-1.382-7.028-1.935 C18.59,109.598,16.558,109.184,14.862,108.752z" /> </g> <g> <path fill="#249CD9" d="M134.32,52.104c-19.58,9.17-25.526,11.286-38.813,6.349c2.111,16.338,3.296,24.661,3.554,32.006 c11.717,1.458,7.017,0.989,29.287-1.405c-0.039,3.242-0.035,6.456,0.012,9.704c-22.234-2.836-17.601-3.391-29.32-1.665 c-0.393,9.475-2.304,20.856-5.734,48.908c-4.102,0-8.203,0-12.305,0c0-35.01,0-70.02,0-105.029c17.773,0,35.547,0,53.32,0 C134.32,44.682,134.32,48.393,134.32,52.104z" /> </g> </svg> </div> <div id="content-box" class="clearfix"> <div id="zone-main" class="zone-instance"> <div class="zone-content"> <div data-zone-location="Page" class="block-instance login"> <div class="block-content"> <form asp-controller="Account" asp-action="Login" asp-route-returnurl="@ViewData["ReturnUrl"]" method="post"> <fieldset> <legend>Login</legend> <div class="row"> <div class="col-sm-12"> <div class="login_tips alert alert-danger" style="display:none"> </div> <div asp-validation-summary="All" class="text-danger"></div> <div class="form-group rock-text-box"> <label class="control-label" asp-for="Email">@Localizer["Email"]</label> <div class="control-wrapper"> <input asp-for="Email" class="form-control"> </div> <span class="validation-error help-inline" style="display:none">Username is Required.</span> </div> <div class="form-group rock-text-box"> <label class="control-label" asp-for="Password">@Localizer["Password"]</label> <div class="control-wrapper"> <input type="hidden" name="tbPassword_dvrm" id="tbPassword_dvrm" value="True"> <input asp-for="Password" class="form-control" autocomplete="off" value=""> 
</div> <span class="validation-error help-inline" style="display:none">Password is Required.</span> </div> <div class="checkbox"> <label><input asp-for="RememberMe">@Localizer["Remember me?"]</label> </div> <input type="submit" value="@Localizer["Log in"]" id="btnlogin" class="btn btn-primary"> <a href="/Account/ForgotPassword" id="btnHelp" class="btn btn-link">@Localizer["Forgot your password?"]</a> </div> </div> </fieldset> </form> </div> </div> <div data-zone-location="Page" class="block-instance html-content"> <div class="block-content"> <div class="alert alert-info margin-t-lg"> <strong>Demo Account</strong> To login as a demo administrator use the username 'administrator' with the password of '123456'. </div> </div> </div> </div> </div> </div> </div> <script> var contentPath = '@Url.Content("~")'.substr(0, '@Url.Content("~")'.length - 1); var returnUrl = '@ViewData["ReturnUrl"]'; //回车键 document.onkeydown = function (e) { if (!e) e = window.event; if ((e.keyCode || e.which) == 13) { var btlogin = document.getElementById("btnlogin"); btnlogin.click(); } } // add quick fade-in effect to the page $(document).ready(function () { $("body").attr("id", "splash"); $("#content").sfFadeIn(); //错误提示 if (top.$.cookie('learun_login_error') != null) { switch (top.$.cookie('learun_login_error')) { case "Overdue": formMessage('登录已超时,请重新登录'); break; case "OnLine": formMessage('您的帐号已在其它地方登录,请重新登录'); break; case "-1": formMessage('未知错误,请重新登录'); break; default: break; } top.$.cookie('learun_login_error', '', { path: "/", expires: -1 }); } //是否自动登录 if (top.$.cookie('learn_autologin') == 1) { $("#autologin").attr("checked", 'true'); $("#Email").val(top.$.cookie('learn_username')); $("#Password").val(top.$.cookie('learn_password')); CheckLogin(1); } //设置下次自动登录 $("#RememberMe").click(function () { if (!$(this).attr('checked')) { $(this).attr("checked", 'true'); top.$.cookie('learn_autologin', 1, { path: "/", expires: 7 }); } else { $(this).removeAttr("checked"); top.$.cookie('learn_autologin', '', { path: "/", expires: -1 }); top.$.cookie('learn_username', '', { path: "/", expires: -1 }); top.$.cookie('learn_password', '', { path: "/", expires: -1 }); } }) //登录按钮事件 $("#btnlogin").click(function () { var $username = $("#Email"); var $password = $("#Password"); //var $verifycode = $("#verifycode"); if ($username.val() == "") { $username.focus(); formMessage('请输入账户或手机号或邮箱。'); return false; } else if ($password.val() == "") { $password.focus(); formMessage('请输入密码。'); return false; } // else if ($verifycode.val() == "") { // $verifycode.focus(); // formMessage('请输入验证码。'); // return false; //} else { CheckLogin(0); } }); //点击切换验证码 $("#login_verifycode").click(function () { $("#verifycode").val(''); $("#login_verifycode").attr("src", contentPath + "/Login/VerifyCode?time=" + Math.random()); }); }); //登录验证 function CheckLogin(autologin) { $("#btnlogin").addClass('active').attr('disabled', 'disabled'); $("#btnlogin").find('span').hide(); var username = $.trim($("#Email").val()); var password = $.trim($("#Password").val()); var verifycode = "";// $.trim($("#verifycode").val()); if (top.$.cookie('learn_password') == "" || top.$.cookie('learn_password') == null) { // password = $.md5(password); } SF.utility.loading(true); $.ajax({ url: contentPath + "/Login", data: { Email: $.trim(username), password: $.trim(password), verifycode: verifycode, rememberMe: autologin, returnUrl: "" }, type: "post", dataType: "json", success: function (data) { if (data.state == "success") { if (top.$.cookie('learn_autologin') == 1) { 
top.$.cookie('learn_username', $.trim(username), { path: "/", expires: 7 }); top.$.cookie('learn_password', $.trim(password), { path: "/", expires: 7 }); } if (returnUrl == "") window.location.href = contentPath == "" ? '/' : contentPath; else window.location.href = returnUrl; } else { if (data.message.length >= 30) { SF.dialogs.alert(data.message); } else { formMessage(data.message); } $("#btnlogin").removeClass('active').removeAttr('disabled'); $("#btnlogin").find('span').show(); $("#login_verifycode").trigger("click"); } SF.utility.loading(false); }, error: function (XMLHttpRequest, textStatus, errorThrown) {
// Usually only one of textStatus and errorThrown carries any information
// 'this' inside these callbacks is the options object passed to this ajax call
var data = JSON.parse(XMLHttpRequest.responseText); formMessage(data.message); $("#btnlogin").removeClass('active').removeAttr('disabled'); $("#btnlogin").find('span').show(); SF.utility.loading(false); } }); }
// Show a message in the login tips box; type "success" renders it as a success alert
function formMessage(msg, type) { $('.login_tips').html(""); $('.login_tips').prepend(msg); if (type == "success") { $('.login_tips').removeClass("alert-danger").addClass("alert-success"); } else { $('.login_tips').removeClass("alert-success").addClass("alert-danger"); } $('.login_tips').show(); } </script>
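For reference, here is a minimal sketch of the JSON contract that CheckLogin above assumes for POST /Login. The field names (state, message) are taken from the success and error handlers in the script; the concrete values are illustrative only, since the controller itself is not part of this view:

// Hypothetical response shapes consumed by CheckLogin (an assumption derived
// from the handlers above; the server side is not shown in this file).
var loginSuccess = { state: "success" };  // triggers the redirect branch
var loginFailure = { state: "error", message: "Invalid username or password." };  // any state other than "success" shows data.message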
@model SmartAdmin.WebUI.Data.Models.Tenant @{ ViewData["Title"] = "Tenant Management"; ViewData["PageName"] = "tenants_index"; ViewData["Heading"] = "<i class='fal fa-users text-primary'></i> Tenant Management"; ViewData["Category1"] = "System Management"; ViewData["PageDescription"] = ""; } @section HeadBlock { <link href="~/js/easyui/themes/insdep/easyui.css" rel="stylesheet" asp-append-version="true" /> } <div class="row"> <div class="col-lg-12 col-xl-12"> <div id="panel-1" class="panel"> <div class="panel-hdr"> <h2> Tenant Management </h2> <div class="panel-toolbar"> <button class="btn btn-panel bg-transparent fs-xl w-auto h-auto rounded-0" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"><i class="fal fa-window-minimize"></i></button> <button class="btn btn-panel bg-transparent fs-xl w-auto h-auto rounded-0" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"><i class="fal fa-expand"></i></button> </div> </div> <div class="panel-container show"> <div class="panel-content py-2 rounded-bottom border-faded border-left-0 border-right-0 text-muted bg-faded bg-subtlelight-fade"> <div class="row no-gutters align-items-center"> <div class="col"> <!-- To enable permission checks, see @@if (Html.IsAuthorize("Create") --> <div class="btn-group btn-group-sm"> <button name="searchbutton" class="btn btn-default"> <span class="fal fa-search mr-1"></span> Refresh </button> </div> <div class="btn-group btn-group-sm"> <button name="registbutton" class="btn btn-default"> <span class="fal fa-plus mr-1"></span> Register New Tenant </button> </div> <div class="btn-group btn-group-sm"> <button name="deletebutton" disabled class="btn btn-default"> <span class="fal fa-times mr-1"></span> Delete </button> </div> <div class="btn-group btn-group-sm"> <button name="savebutton" disabled class="btn btn-default"> <span class="fal fa-save mr-1"></span> Save </button> </div> <div class="btn-group btn-group-sm"> <button name="cancelbutton" disabled class="btn btn-default"> <span class="fal fa-ban mr-1"></span> Cancel </button> </div> </div> </div> </div> <div class="panel-content"> <div class="table-responsive"> <table id="tenant_datagrid"></table> </div> </div> </div> </div> </div> </div> <!-- END MAIN CONTENT --> @section ScriptsBlock { <script src="~/js/dependency/moment/moment.js" asp-append-version="true"></script> <script src="~/js/easyui/jquery.easyui.min.js" asp-append-version="true"></script> <script src="~/js/easyui/plugins/datagrid-filter.js" asp-append-version="true"></script> <script src="~/js/easyui/jquery.easyui.component.js" asp-append-version="true"></script> <script type="text/javascript"> (function () { var tenant = { init: function () { this.cacheDom(); this.bindEvents(); }, cacheDom: function () { this.editIndex = undefined; this.$dg = $('#tenant_datagrid').datagrid({ rownumbers: true, checkOnSelect: false, selectOnCheck: false, idField: 'Id', sortName: 'Id', sortOrder: 'desc', remoteFilter: true, singleSelect: false, method: 'get', onClickCell: (index, field) => { this.tenant = this.$dg.datagrid('getRows')[index]; const _actions = ['action', 'ck']; if ($.inArray(field, _actions) >= 0) { return; } if (this.editIndex !== index) { if (this.endEditing()) { this.$dg.datagrid('selectRow', index) .datagrid('beginEdit', index); this.hook = true; this.editIndex = index; const ed = this.$dg.datagrid('getEditor', { index: index, field: field }); if (ed) { ($(ed.target).data('textbox') ?
$(ed.target).textbox('textbox') : $(ed.target)).focus(); } } else { this.$dg.datagrid('selectRow', index); } } }, pagination: true, clientPaging: false, striped: true, onLoadSuccess: () => { this.editIndex = undefined; this.$deletebutton.prop('disabled', true); this.$savebutton.prop('disabled', true); this.$cancelbutton.prop('disabled', true); }, onCheck: () => { this.$deletebutton.prop('disabled', false); }, onUncheck: () => { const checked = this.$dg.datagrid('getChecked').length > 0; this.$deletebutton.prop('disabled', !checked); }, onSelect: (index, row) => { this.tenant = row; }, onBeginEdit: (index, row) => { //const editors = $(this).datagrid('getEditors', index);
}, onEndEdit: (index, row) => { }, onBeforeEdit: (index, row) => { row.editing = true; this.editIndex = index; this.$deletebutton.prop('disabled', false); this.$savebutton.prop('disabled', false); this.$cancelbutton.prop('disabled', false); this.$dg.datagrid('refreshRow', index); }, onAfterEdit: (index, row) => { row.editing = false; this.editIndex = undefined; this.$dg.datagrid('refreshRow', index); }, onCancelEdit: (index, row) => { row.editing = false; this.editIndex = undefined; this.$deletebutton.prop('disabled', true); this.$savebutton.prop('disabled', true); this.$cancelbutton.prop('disabled', true); this.$dg.datagrid('refreshRow', index); }, columns: [[ { field: 'ck', checkbox: true }, { field: 'Name', title: '<span class="text-danger">*</span>Tenant Name', width: 150, sortable: true, resizable: true, editor: { type: 'textbox', options: { prompt: 'Tenant name', required: true, validType: 'length[0,50]' } }, }, { field: 'Description', title: 'Description', width: 150, sortable: true, resizable: true, editor: { type: 'textbox', options: { prompt: 'Description', required: false, validType: 'length[0,128]' } }, }, { field: 'ConnectionStrings', title: 'Database Connection', width: 320, sortable: true, resizable: true, editor: { type: 'textbox', options: { prompt: 'Database connection string', multiline: true, required: false, validType: 'length[0,500]' } }, }, { field: 'Disabled', title: 'Disabled', width: 100, sortable: true, resizable: true, formatter: checkboxformatter, editor: { type: 'checkboxeditor' } } ]] }) .datagrid('enableFilter', [ { field: 'Disabled', type: 'booleanfilter' }]) .datagrid('load', '/Tenants/GetData'); this.$searchbutton = $('button[name="searchbutton"]'); this.$registbutton = $('button[name="registbutton"]'); this.$deletebutton = $('button[name="deletebutton"]'); this.$savebutton = $('button[name="savebutton"]'); this.$cancelbutton = $('button[name="cancelbutton"]'); this.tenant = null; }, bindEvents: function () { document.addEventListener('panel.onfullscreen', () => { setTimeout(() => { this.$dg.datagrid('resize'); }, 200) }); this.$searchbutton.on('click', this.reloadData.bind(this)); this.$registbutton.on('click', this.append.bind(this)); this.$deletebutton.on('click', this.remove.bind(this)); this.$savebutton.on('click', this.acceptChanges.bind(this)); this.$cancelbutton.on('click', this.rejectChanges.bind(this)); }, reloadData: function () { this.$dg.datagrid('unselectAll'); this.$dg.datagrid('uncheckAll'); this.$dg.datagrid('reload'); }, append: function () { this.tenant = { Disabled: false }; if (this.endEditing()) { // Initialize required fields with default values
this.$dg.datagrid('insertRow', { index: 0, row: this.tenant }); this.editIndex = 0; this.$dg.datagrid('selectRow', this.editIndex) .datagrid('beginEdit', this.editIndex); this.hook = true; } }, remove: function () { const rows = this.$dg.datagrid('getChecked'); if (rows.length > 0) { const id = rows.filter(item => item.Id !=
null).map(item => { return item.Id; }); $.messager.confirm('Confirm', `Are you sure you want to delete these <span class='badge badge-icon position-relative'>${id.length}</span> tenants?`, result => { if (result) { $.post('/Tenants/DeleteChecked', { id: id }) .done(response => { if (response.success) { toastr.success('Deleted successfully', 'Confirm', { timeOut: 2000 }); this.reloadData(); } else { $.messager.alert('Error', response.err, 'error'); } }) .fail((jqXHR, textStatus, errorThrown) => { $.messager.alert('Failed', errorThrown, 'error'); }); } }); } else { $.messager.alert('Notice', 'Please select the tenants to delete'); } }, acceptChanges: function () { if (this.endEditing()) { console.log(this.$dg.datagrid('getChanges')); if (this.$dg.datagrid('getChanges').length > 0) { const inserted = this.$dg.datagrid('getChanges', 'inserted').map(item => { item.TrackingState = 1; return item; }); const updated = this.$dg.datagrid('getChanges', 'updated').map(item => { item.TrackingState = 2; return item; }); const deleted = this.$dg.datagrid('getChanges', 'deleted').map(item => { item.TrackingState = 3; return item; });
// Filter out rows that also appear in the deleted set
const changedarray = inserted.concat(updated.filter(item => { return !deleted.includes(item); })).concat(deleted); console.log(changedarray); $.post('/Tenants/SaveData', { tenant: changedarray }) .done(response => { if (response.success) { toastr.success('Saved successfully'); this.$dg.datagrid('acceptChanges'); this.reloadData(); this.hook = false; } else { $.messager.alert('Error', response.err, 'error'); } }) .fail((jqXHR, textStatus, errorThrown) => { $.messager.alert('Exception', `${jqXHR.status}: ${jqXHR.statusText} `, 'error'); }); } } }, rejectChanges: function () { this.$dg.datagrid('rejectChanges'); this.editIndex = undefined; this.hook = false; }, endEditing: function () { if (this.editIndex === undefined) { return true; } if (this.$dg.datagrid('validateRow', this.editIndex)) { this.$dg.datagrid('endEdit', this.editIndex); return true; } else { const invalidinput = $('input.validatebox-invalid', this.$dg.datagrid('getPanel')); const fieldnames = invalidinput.map((index, item) => { return $(item).attr('placeholder') || $(item).attr('id'); }); $.messager.alert('Notice', `Invalid input: ${Array.from(fieldnames)}.`, 'error'); return false; } }, } tenant.init(); })(); </script> }
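As a reference for the change-tracking convention used by acceptChanges above, here is a minimal sketch of one entry in the tenant array posted to /Tenants/SaveData. The TrackingState values 1/2/3 (inserted/updated/deleted) are taken directly from the code; the remaining field names mirror the grid columns, and the sample values are purely illustrative:

// Assumed shape of a changed row sent to /Tenants/SaveData (sample values only).
var changedTenant = {
    Id: 42,                                          // idField of the datagrid
    Name: 'Contoso',                                 // required, max length 50
    Description: 'Trial tenant',                     // optional, max length 128
    ConnectionStrings: 'Server=.;Database=contoso;', // optional, max length 500
    Disabled: false,
    TrackingState: 2                                 // 1 = inserted, 2 = updated, 3 = deleted
};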
@using System.Activities.Statements <!DOCTYPE html> <html> <head> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width" /> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <title>@ViewBag.Title</title> <meta content="catering software, DianCanBao, food ordering system" name="keywords" /> <meta content="尚可思贸易有限公司" name="description" /> <link href="~/Content/CSS/bootstrap.min.css" rel="stylesheet" /> <link rel="stylesheet" href="~/Content/CSS/font-awesome.min.css" /> <script src="~/Content/Js/jquery-2.0.3.min.js"></script> <link href="~/Content/CSS/niqiu.css" rel="stylesheet" /> <!--[if IE 7]> <link rel="stylesheet" href="~/Content/CSS/font-awesome-ie7.min.css" /> <![endif]--> <!-- page specific plugin styles --> <link rel="stylesheet" href="~/Content/CSS/fullcalendar.css" /> <!-- fonts --> @*<link rel="stylesheet" href="http://fonts.googleapis.com/css?family=Open+Sans:400,300" />*@ <!-- ace styles --> <link rel="stylesheet" href="~/Content/CSS/ace.min.css" /> <link rel="stylesheet" href="~/Content/CSS/ace-rtl.min.css" /> <link rel="stylesheet" href="~/Content/CSS/ace-skins.min.css" /> <!--[if lte IE 8]> <link rel="stylesheet" href="~/Content/CSS/ace-ie.min.css" /> <![endif]--> <!-- inline styles related to this page --> <!-- ace settings handler --> <script src="~/Content/Js/ace-extra.min.js"></script> <!-- HTML5 shim and Respond.js IE8 support of HTML5 elements and media queries --> <!--[if lt IE 9]> <script src="~/Content/Js/html5shiv.js"></script> <script src="~/Content/Js/respond.min.js"></script> <![endif]--> </head> <body> <div class="navbar navbar-default" id="navbar"> <script type="text/javascript"> try { ace.settings.check('navbar', 'fixed') } catch (e) { } </script> <div class="navbar-container" id="navbar-container"> <div class="navbar-header pull-left"> <a href="#" class="navbar-brand"> <small> <i class="icon-leaf"></i> Thanks Admin </small> </a><!-- /.brand --> </div><!-- /.navbar-header --> <div class="navbar-header pull-right" role="navigation"> <ul class="nav ace-nav"> <li class="green"> <a data-toggle="dropdown" class="dropdown-toggle" href="#"> <i class="icon-envelope icon-animated-vertical"></i> <span class="badge badge-success">0</span> </a> </li> <li class="light-blue"> <a data-toggle="dropdown" href="#" class="dropdown-toggle"> <img class="nav-user-photo" src="~/Content/avatars/user.jpg" alt="Jason's Photo" /> <span class="user-info"> @if (Session["uname"] != null) { <small>Welcome, @Session["uname"].ToString()</small> } </span> <i class="icon-caret-down"></i> </a> <ul class="user-menu pull-right dropdown-menu dropdown-yellow dropdown-caret dropdown-close"> <li> <a href="#"> <i class="icon-cog"></i> Settings </a> </li> <li> <a href="#"> <i class="icon-user"></i> Profile </a> </li> <li class="divider"></li> <li> <a href="@Url.Action("LogOff","User")"> <i class="icon-off"></i> Logout </a> </li> </ul> </li> </ul><!-- /.ace-nav --> </div><!-- /.navbar-header --> </div><!-- /.container --> </div> <div class="main-container" id="main-container"> <script type="text/javascript"> try { ace.settings.check('main-container', 'fixed') } catch (e) { } </script> <div class="main-container-inner"> <a class="menu-toggler" id="menu-toggler" href="#"> <span class="menu-text"></span> </a> <div class="sidebar" id="sidebar"> <script type="text/javascript"> try { ace.settings.check('sidebar', 'fixed') } catch (e) { } </script> <div class="sidebar-shortcuts" id="sidebar-shortcuts"> <div class="sidebar-shortcuts-large" id="sidebar-shortcuts-large"> <button class="btn btn-success">
<i class="icon-signal"></i> </button> <button class="btn btn-info"> <i class="icon-pencil"></i> </button> <button class="btn btn-warning"> <i class="icon-group"></i> </button> <button class="btn btn-danger"> <i class="icon-cogs"></i> </button> </div> <div class="sidebar-shortcuts-mini" id="sidebar-shortcuts-mini"> <span class="btn btn-success"></span> <span class="btn btn-info"></span> <span class="btn btn-warning"></span> <span class="btn btn-danger"></span> </div> </div> <!-- #sidebar-shortcuts --> <ul class="nav nav-list"> <li class="active"> <a href="#" class="dropdown-toggle"> <i class="icon-dashboard"></i> <span class="menu-text"> Dashboard </span> <b class="arrow icon-angle-down"></b> </a> <ul class="submenu"> <li class="active"> <a href="@Url.Action("Index","Admin")"> <i class="icon-double-angle-right"></i> Information Summary </a> </li> @*<li> <a href="@Url.Action("HomeImgSet","Admin")"> <i class="icon-double-angle-right"></i> Home Page Image Settings </a> </li>*@ <li> <a href="@Url.Action("LogoSet","Admin")"> <i class="icon-double-angle-right"></i> Logo </a> </li> <li> <a href="@Url.Action("WeiXins","Company")"> <i class="icon-double-angle-right"></i> QR Codes </a> </li> <li> <a href="@Url.Action("Introduce","Company")"> <i class="icon-double-angle-right"></i> About Us </a> </li> <li> <a href="@Url.Action("HistroyList","Company")"> <i class="icon-double-angle-right"></i> Timeline </a> </li> @*<li> <a href="@Url.Action("PageOne","Admin")"> <i class="icon-double-angle-right"></i> Home Subpage 1 </a> </li> <li> <a href="@Url.Action("PageTwo","Admin")"> <i class="icon-double-angle-right"></i> Home Subpage 2 </a> </li> <li> <a href="@Url.Action("PageThree","Admin")"> <i class="icon-double-angle-right"></i> Home Subpage 3 </a> </li> <li> <a href="@Url.Action("PageFour","Admin")"> <i class="icon-double-angle-right"></i> Home Subpage 4 </a> </li>*@ <li> <a href="@Url.Action("Agences","Company")"> <i class="icon-double-angle-right"></i> Merchant Applications </a> </li> <li> <a href="@Url.Action("Index","QQ")"> <i class="icon-double-angle-right"></i> QQ Management </a> </li> </ul> </li> <li> <a href="#" class="dropdown-toggle"> <i class="icon-list"></i> <span class="menu-text"> Products </span> <b class="arrow icon-angle-down"></b> </a> <ul class="submenu"> <li> <a href="@Url.Action("Index","Product")"> <i class="icon-double-angle-right"></i> Product Management </a> </li> <li> <a href="@Url.Action("Detail","Product")"> <i class="icon-double-angle-right"></i> Add Product </a> </li> <li> <a href="@Url.Action("CategoryList","Product")"> <i class="icon-double-angle-right"></i> Category Management </a> </li> </ul> </li> <li> <a href="#" class="dropdown-toggle"> <i class="icon-list"></i> <span class="menu-text"> Company</span> <b class="arrow icon-angle-down"></b> </a> <ul class="submenu"> <li> <a href="@Url.Action("Index","Company")"> <i class="icon-double-angle-right"></i> Company Profile </a> </li> <li> <a href="@Url.Action("EditAddress","Company")"> <i class="icon-double-angle-right"></i> Contact Info </a> </li> <li> <a href="@Url.Action("Index","Contact")"> <i class="icon-double-angle-right"></i> Message Management </a> </li> </ul> </li> <li> <a href="#" class="dropdown-toggle"> <i class="icon-user-md"></i> <span class="menu-text"> Users </span> <b class="arrow icon-angle-down"></b> </a> <ul class="submenu"> <li> <a href="@Url.Action("Index","User")"> <i class="icon-double-angle-right"></i> User Management </a> </li> <li> <a href="@Url.Action("EditInfo","User")"> <i class="icon-double-angle-right"></i> Profile Settings </a> </li> </ul> </li> <li> <a href="#" class="dropdown-toggle"> <i class="icon-list"></i> <span class="menu-text"> Case Studies </span> <b class="arrow icon-angle-down"></b> </a> <ul
class="submenu"> <li> <a href="@Url.Action("Index","Example")"> <i class="icon-double-angle-right"></i> 案例管理 </a> </li> <li> <a href="@Url.Action("Detail","Example")"> <i class="icon-double-angle-right"></i> 新增案例 </a> </li> <li> <a href="@Url.Action("CategoryList","Example")"> <i class="icon-double-angle-right"></i> 分类管理 </a> </li> </ul> </li> </ul><!-- /.nav-list --> <div class="sidebar-collapse" id="sidebar-collapse"> <i class="icon-double-angle-left" data-icon1="icon-double-angle-left" data-icon2="icon-double-angle-right"></i> </div> <script type="text/javascript"> try { ace.settings.check('sidebar', 'collapsed') } catch (e) { } </script> </div> <div class="main-content"> <div class="breadcrumbs" id="breadcrumbs"> <script type="text/javascript"> try { ace.settings.check('breadcrumbs', 'fixed') } catch (e) { } </script> <ul class="breadcrumb"> <li> <i class="icon-home home-icon"></i> <a href="@Url.Action("Index","Admin")">Home</a> </li> <li id="currentpart" class="active">控制台</li> </ul><!-- .breadcrumb --> <div class="nav-search" id="nav-search"> <form class="form-search"> <span class="input-icon"> <input type="text" placeholder="Search ..." class="nav-search-input" id="nav-search-input" autocomplete="off" /> <i class="icon-search nav-search-icon"></i> </span> </form> </div><!-- #nav-search --> </div> <div class="page-content"> <div class="page-header"> <h1> <span>控制台</span> <small> <i class="icon-double-angle-right"></i> <span>查看</span> </small> </h1> </div><!-- /.page-header --> <div class="row-fluid"> <div class="span12"> <!-- PAGE CONTENT BEGINS --> @RenderBody() <!-- PAGE CONTENT ENDS --> </div><!-- /.col --> </div><!-- /.row --> </div><!-- /.page-content --> </div><!-- /.main-content --> <div class="ace-settings-container" id="ace-settings-container"> <div class="btn btn-app btn-xs btn-warning ace-settings-btn" id="ace-settings-btn"> <i class="icon-cog bigger-150"></i> </div> <div class="ace-settings-box" id="ace-settings-box"> <div> <div class="pull-left"> <select id="skin-colorpicker" class="hide"> <option data-skin="default" value="#438EB9">#438EB9</option> <option data-skin="skin-1" value="#222A2D">#222A2D</option> <option data-skin="skin-2" value="#C6487E">#C6487E</option> <option data-skin="skin-3" value="#D0D0D0">#D0D0D0</option> </select> </div> <span>&nbsp; Choose Skin</span> </div> <div> <input type="checkbox" class="ace ace-checkbox-2" id="ace-settings-navbar" /> <label class="lbl" for="ace-settings-navbar"> Fixed Navbar</label> </div> <div> <input type="checkbox" class="ace ace-checkbox-2" id="ace-settings-sidebar" /> <label class="lbl" for="ace-settings-sidebar"> Fixed Sidebar</label> </div> <div> <input type="checkbox" class="ace ace-checkbox-2" id="ace-settings-breadcrumbs" /> <label class="lbl" for="ace-settings-breadcrumbs"> Fixed Breadcrumbs</label> </div> <div> <input type="checkbox" class="ace ace-checkbox-2" id="ace-settings-rtl" /> <label class="lbl" for="ace-settings-rtl"> Right To Left (rtl)</label> </div> <div> <input type="checkbox" class="ace ace-checkbox-2" id="ace-settings-add-container" /> <label class="lbl" for="ace-settings-add-container"> Inside <b>.container</b> </label> </div> </div> </div><!-- /#ace-settings-container --> </div><!-- /.main-container-inner --> <a href="#" id="btn-scroll-up" class="btn-scroll-up btn btn-sm btn-inverse"> <i class="icon-double-angle-up icon-only bigger-110"></i> </a> </div><!-- /.main-container --> <!-- basic scripts --> <!--[if !IE]> <script 
src="http://ajax.googleapis.com/ajax/libs/jquery/2.0.3/jquery.min.js"></script> <![endif]--> <!--[if IE]> <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script> <![endif]--> <!--[if IE]> <script type="text/javascript"> window.jQuery || document.write("<script src='~/Content/Js/jquery-1.10.2.min.js'>"+"<"+"/script>"); </script> <![endif]--> <script type="text/javascript"> if ("ontouchend" in document) document.write("<script src='~/Content/Js/jquery.mobile.custom.min.js'>" + "<" + "/script>"); </script> <script src="~/Content/Js/bootstrap.min.js"></script> <script src="~/Content/Js/typeahead-bs2.min.js"></script> <!-- page specific plugin scripts --> <script src="~/Content/Js/jquery-ui-1.10.3.custom.min.js"></script> <script src="~/Content/Js/jquery.ui.touch-punch.min.js"></script> <script src="~/Content/Js/bootbox.min.js"></script> <!-- ace scripts --> <script src="~/Content/Js/ace-elements.min.js"></script> <script src="~/Content/Js/ace.min.js"></script> <!-- inline scripts related to this page --> <script type="text/javascript"> jQuery(function ($) { /* initialize the external events -----------------------------------------------------------------*/ $('#external-events div.external-event').each(function () { // create an Event Object (http://arshaw.com/fullcalendar/docs/event_data/Event_Object/) // it doesn't need to have a start or end var eventObject = { title: $.trim($(this).text()) // use the element's text as the event title }; // store the Event Object in the DOM element so we can get to it later $(this).data('eventObject', eventObject); // make the event draggable using jQuery UI $(this).draggable({ zIndex: 999, revert: true, // will cause the event to go back to its revertDuration: 0 // original position after the drag }); }); /* initialize the calendar -----------------------------------------------------------------*/ }) </script> <div style="display:none"><script src='http://v7.cnzz.com/stat.php?id=155540&web_id=155540' language='JavaScript' charset='gb2312'></script></div> <script src="~/Content/Js/niqiu.js"></script> @RenderSection("scripts", required: false) </body></html>
@{ Layout = null; } @model MIMS.Entity.Model.PSS_PurchasePlan <!DOCTYPE html> <html> <head> <meta name="viewport" content="width=device-width" /> <title>PurchasePlanDetail</title> @*Files required by EasyUI*@ <script type="text/javascript" src="~/Content/jquery-easyui-1.4.5/jquery.min.js"></script> <script type="text/javascript" src="~/Content/jquery-easyui-1.4.5/jquery.easyui.min.js"></script> <link href="~/Content/jquery-easyui-1.4.5/themes/default/easyui.css" rel="stylesheet" /> <link href="~/Content/jquery-easyui-1.4.5/themes/icon.css" rel="stylesheet" /> <link rel="shortcut icon" href="/favicon.ico" /> <link rel="bookmark" href="/favicon.ico" /> <script src="~/Content/My97DatePicker/WdatePicker.js"></script> <script src="~/Scripts/jquery.validate.min.js"></script> @*Local CSS and JS includes*@ <link href="~/Content/Site.css" rel="stylesheet" /> <script src="~/Content/Site.js"></script> <script> $(function () { var status = @Model.PurchaseStatus; var msg = ""; if (status == 0) msg = "Not purchased"; else if (status == 1) msg = "Purchasing"; else msg = "Purchased"; $('#status').text(msg); if (status != 0) { $('#body').layout('remove', 'west'); } else { InitPhaDialoggrid(); } InitGrid(); InitDialog(); $('#ok').click(function () { $('#errdiv').removeClass('dis'); $('#form').submit(); }); $('#cancel').click(function () { $('#dd').dialog('close'); }); });
// Bind and configure the main DataGrid control
function InitGrid() { $('#grid').datagrid({ url: '../PSS_PurchasePlanDetail/LoadList', singleSelect: true, fit: true, nowrap: true, // single-line display
autoRowHeight: false, striped: false, // zebra striping
collapsible: true, // collapsible
pagination: true, pageSize: 20, fitColumns: true, // auto-fit column widths
rownumbers: true, sortName: 'ID', // field easyUI sorts by
sortOrder: 'asc', remoteSort: true, loadMsg: 'Loading, please wait', queryParams: ({ query: '@Model.PurchaseNo' }), // parameters for the async query
columns: [[ { title: 'ID', field: 'ID', hidden: true }, { title: 'Drug Code', field: 'PhaCode', hidden: true }, { title: 'Purchase No.', field: 'PurchaseNo', hidden: true }, { title: 'Drug Name', field: 'PhaName', sortable: true }, { title: 'Specification', field: 'Spec', sortable: true }, { title: 'Unit', field: 'Unit', sortable: true, align: 'center' }, { title: 'Pinyin Code', field: 'PinyinCode', sortable: true, align: 'center' }, { title: 'Origin', field: 'OrginName', sortable: true, align: 'center' }, { title: 'Purchase Price', field: 'InWarehousePrice', sortable: true, align: 'center' }, { title: 'Purchase Qty', field: 'PurchaseNum', sortable: true, align: 'center' }, { title: 'Purchase Amount', field: 'InWarehouseSum', sortable: true, align: 'center' }, { title: 'Stock', field: 'Stock', sortable: true, align: 'center' }, { title: 'Supplier', field: 'CompanyName', sortable: true }, ]], onDblClickRow: function (rowIndex, rowData) { }, onClickRow: function (index, row) { $('#del_id').val(row['ID']); console.log(row['PhaCode']); } }) var p = $('#grid').datagrid('getPager'); $(p).pagination({ pageSize: 20, pageList: [10, 12, 20, 40, 60, 100, 200], beforePageText: 'Page', afterPageText: 'of {pages}', displayMsg: 'Showing {from} - {to} of {total} records', buttons: [{ iconCls: 'icon-remove', handler: function () { if (@Model.PurchaseStatus == 0) {
// Validate the selection before issuing the request (this check previously ran inside the callback, after the request had already been sent)
if ($('#del_id').val() == null || $.trim($('#del_id').val()) == "") return false;
getAjax("../PSS_PurchasePlanDetail/Del", { id: $('#del_id').val() }, function (data) { if (data) { $.messager.alert('Notice', 'Drug deleted successfully!'); $('#dd').dialog('close'); $("#grid").datagrid("reload"); return true; } else { $.messager.alert('Notice', 'Operation failed!'); return false; } }); } else $.messager.alert('Notice', 'Editing is not allowed in the current purchase status!'); } }] }); };
// Drug selector grid
function InitPhaDialoggrid() { $('#phagrid').datagrid({ url: '../PHA_Accounts/LoadList',
fit: true, nowrap: true, // single-line display
autoRowHeight: false, singleSelect: true, striped: false, // zebra striping
collapsible: true, // collapsible
pagination: true, pageSize: 20, fitColumns: true, // auto-fit column widths
rownumbers: true, sortName: 'PhaCode', // field easyUI sorts by
sortOrder: 'asc', remoteSort: true, loadMsg: 'Loading, please wait', queryParams: ({ pinyin: $('#phagrid_dir').val() }), // parameters for the async query
columns: [[ { title: 'ID', field: 'PhaCode', hidden: true }, { title: 'Drug Name', field: 'PhaName', sortable: true }, { title: 'Specification', field: 'Spec', sortable: true }, { title: 'Unit', field: 'Unit', sortable: true, }, { title: 'Origin', field: 'OrginName' } ]], toolbar: '#phagrid_tb', onDblClickRow: function (rowIndex, rowData) { $('#errdiv').attr('class', 'dis'); $('#PurchaseNum').val(""); getAjax("../PHA_Accounts/LoadForm", { phaCode: rowData['PhaCode'], orginID: rowData['OrginID'] }, function (data) { // Parse the JSON response instead of eval'ing it
data = JSON.parse(data); SetWebControls(data); }); $('#dd').dialog('open'); } }) var p = $('#phagrid').datagrid('getPager'); $(p).pagination({ pageSize: 20, pageList: [10, 12, 20, 40, 60, 100, 200], beforePageText: 'Page', afterPageText: 'of {pages}', displayMsg: 'Showing {from} - {to} of {total} records', }); };
// Initialize the dialog
function InitDialog() { $('#dd').dialog({ title: 'Add Drug', width: 700, height: 160, top: ($(window).height() - 160) * 0.5, // center the dialog
left: ($(window).width() - 700) * 0.5, closed: true, cache: false, modal: true, buttons: '#dlg-buttons' }); } </script> <script>
// Form validation and submission
$(function () { $('#form').validate({ rules: { PurchaseNum: { required: true } }, messages: { PurchaseNum: "Purchase quantity is required!" }, errorLabelContainer: '#errdiv>ul', wrapper: 'li', submitHandler: function (form) { var postData = GetWebControls("#form"); getAjax("/PSS_PurchasePlanDetail/AcceptClick", postData, function (data) { if (data) { $.messager.alert('Notice', 'Drug added successfully!'); $('#dd').dialog('close'); $("#grid").datagrid("reload"); return true; } else { $.messager.alert('Notice', 'Operation failed!'); return false; } }); }, invalidHandler: function (form, validator) { // callback when validation fails
return false; } }); }); </script> <style> .dis { display: none; } .tb-col-120 { width: 120px; } .tb-col-80 { width: 80px; } #errdiv { margin-left: 5px; } </style> </head> <body class="easyui-layout" id="body"> <div id="left_part" data-options="region:'west'" style="float:left;width:340px;"> <div id="phaselect"> <div style="position:fixed;width:340px;height:100%;"> <table id="phagrid"></table> </div> <div id="phagrid_tb" style="padding:3px"> <span>Pinyin code:</span> <input id="phagrid_dir" style="border:1px solid #ccc"> <a href="#" class="easyui-linkbutton" plain="true" onclick="InitPhaDialoggrid()">Search</a> </div> </div> </div> <div data-options="region:'center'" style="float:left"> <div data-options="region:'north'" style="height:25%;"> <h2 style="text-align:center;margin-top:20px;">Central Hospital Drug Purchase Order</h2> <div> <p style="font-weight:900;color:red;font-size:medium;text-align:right;margin-right:20%"> No.<span id="PurchaseNoDisplay">@Model.PurchaseNo</span> </p> <div style="margin:10px 0 10px 20px "> <div style="display:inline-block;margin-right:10px">Purchase date: <span id="PurchaseDate" style="color:red">@Model.PurchaseDate.Split(' ')[0]</span></div> <div style="display:inline-block;margin-right:10px">Operator No.: <span id="OperateNo" style="color:red">@Model.OperateNo</span></div> <div style="display:inline-block;margin-right:10px">Operation date: <span id="OperateDate" style="color:red">@Model.OperateDate.Split(' ')[0]</span></div> <div style="display:inline-block;margin-right:10px">Purchase status: <span id="status" style="color:red"></span></div> </div> <div style="margin:0 0 10px 20px">
<p id="OperateNo">备注:@Model.Remark</p> </div> </div> </div> <div data-options="region:'south'" style="height:70%"> <table id="grid"></table> <input id="del_id" type="hidden" name="del_id" value=" " /> </div> </div> <div id="dd" class="easyui-dialog"> <form id="form" name="form" method="post"> <input type="hidden" id="PurchaseNo" name="PurchaseNo" value="@Model.PurchaseNo" /> <input type="hidden" id="PhaCode" name="PhaCode" value=" " /> <input type="hidden" id="OrginID" name="OrginID" value=" " /> <table style="margin:8px"> <tr> <td>药品名:</td> <td><input type="text" id="PhaName" name="PhaName" class="tb-col-120" value=" " disabled="disabled" /></td> <td>规格:</td> <td><input type="text" id="Spec" name="Spec" class="tb-col-80" value=" " disabled="disabled" /></td> <td>单位:</td> <td><input type="text" id="Unit" name="Unit" class="tb-col-80" value="" disabled="disabled" /></td> <td>产地:</td> <td><input type="text" id="OrginName" name="OrginName" class="tb-col-80" value=" " disabled="disabled" /></td> </tr> <tr> <td>进货价格:</td> <td><input type="text" id="InWarehousePrice" name="InWarehousePrice" class="tb-col-120" value=" " disabled="disabled" /></td> <td>采购数量:</td> <td><input type="text" id="PurchaseNum" name="PurchaseNum" class="tb-col-80" value="" /></td> <td>生产厂家:</td> <td colspan="3"><input type="text" id="CompanyName" name="CompanyName" value=" " disabled="disabled" /></td> </tr> </table> <div class="errdiv" id="errdiv"> <ul></ul> </div> <div id="dlg-buttons"> <a id="ok" href="#" class="easyui-linkbutton" iconcls="icon-ok">确定</a> <a id="cancel" href="#" class="easyui-linkbutton" iconcls="icon-cancel">取消</a> </div> </form> </div> </body> </html>
@page @model GeneralModel @{ ViewData["Title"] = "General Docs"; ViewData["PageName"] = "docs_general"; ViewData["Heading"] = "<i class='subheader-icon fal fa-book'></i> Documentation: <span class='fw-300'>General Docs</span>"; ViewData["Category1"] = "Documentation"; ViewData["PageDescription"] = "Product documentation, plugin reference, and online help"; } @section HeadBlock { <link rel="stylesheet" media="screen, print" href="~/css/theme-demo.css"> <link rel="stylesheet" media="screen, print" href="~/css/fa-duotone.css"> <link rel="stylesheet" media="screen, print" href="~/css/fa-brands.css"> } @if (ViewBag.Productionready?.Length > 0) { <div class="alert alert-danger alert-dismissible p-2 mb-4"> <button type="button" class="close" data-dismiss="alert" aria-label="Close"> <span aria-hidden="true"> <i class="fal fa-times"></i> </span> </button> <div class="d-flex flex-start w-100"> <div class="mr-2 hidden-sm-down"> <span class="icon-stack icon-stack-lg"> <i class="base-14 icon-stack-3x color-primary-400"></i> <i class="base-14 icon-stack-2x color-primary-600 opacity-70"></i> <i class="fal fa-newspaper icon-stack-1x text-white opacity-90"></i> </span> </div> <div class="d-flex flex-fill align-items-center"> <div class="flex-fill"> <span class="h5">To access the full documentation, instructions and/or tutorials please purchase the Responsive WebApp <strong>Full</strong> edition on WrapBootstrap: <a href="https://wrapbootstrap.com/theme/smartadmin-responsive-webapp-WB0573SK0?ref=myorange" rel="nofollow">SmartAdmin Responsive WebApp</a>.</span> </div> </div> </div> </div> } else { <a href="javascript:void(0);" name="top"></a> <ul class="fs-md fw-700 mb-5 list-spaced"> <li> <a href="#introduction">Introduction</a> </li> <li> <a href="#gulp">Gulp Installation For Advanced Usage</a> </li> <li> <a href="#buildjson">Build.json For Gulp Usage</a> </li> <li> <a href="#app.config.js">Configuration JS</a> </li> <li> <a href="#applayouts">Layout Options</a> </li> <li> <a href="#skinsthemes">Skins & Themes</a> </li> <li> <a href="#seedslim">Seed & Slim Projects</a> </li> <li> <a href="#filestructure">File Structure</a> </li> <li> <a href="#pluginreference">Plugin Reference & Dependency</a> </li> <li> <a href="#rtlsupport">RTL Support</a> </li> <li> <a href="#productsupport">Product Support</a> </li> <li> <a href="#changingcomponentcolors">Changing Component Colors</a> </li> <li> <a href="#browsersupport">Browser Support</a> </li> <li> <a href="#components">Components</a> </li> </ul> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Introduction <a href="javascript:void(0);" name="introduction"></a> <small> First of all, thank you so much for supporting our product! We are excited to have you as our customer and as with any Theme and/or Product, good documentation is essential to provide you with that first solid impression and we hope we have delivered on that expectation. </small> </h2> <div class="panel-tag"> You can use the Table of Contents below to quickly jump to each section and find a quick reference to all the pages that we currently have available. However, if you do find something missing, do feel free to voice your opinion and/or requests on our Support Forum! We check these on a near daily basis and many customers have already contributed great ideas that have made it into updates of the Theme and the documentation! 
</div> <p> <strong>Once again it is great to have you onboard and thank you -very- much for supporting this project!</strong> </p> <p> This template and its projects demonstrate the core principles and concepts of how you can integrate SmartAdmin 4 with your favorite backend platform. We aim, and do our best, to create a reference project based on community-adopted guidelines and principles. That said, with our years of experience and our focus on quality we believe that this template will provide you with a great starting point that you can use for your own project purposes and customizations. </p> <p class="mb-g"> What this template and documentation are not, however, is a tutorial and/or introduction to all things HTML. For that we would kindly ask you to check the Resources listed below and go through all the pages for all the components; each component has either a link to its documentation source or examples of usage. Some level of knowledge on these topics will be required when explaining and discussing the theme and project, but we will do our best to ensure that all levels of experience can benefit and read along. </p> <strong>Where to Start?</strong> <p>The best area to start is with this documentation page. If you are looking for a blank slate, a nice bet would be to go with <a href="blank.html" title="blank" target="_blank">Blank Page</a>; it has everything you need to get started with the barebone essentials only.</p> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Gulp Installation For Advanced Usage <a href="javascript:void(0);" name="gulp"></a> <small class="m-0"> This webapp UI toolkit is based on the Bootstrap 4 framework and provides a complete frontend user interface to build any type of web or mobile application. The package includes state of the art build tools to manage and deploy project assets with ease. This documentation will provide the information you need to be able to use @Settings.AppName in your applications, customising it to match your development needs and requirements in a fast-paced environment. <br> <br> This section guides you to install all required software and libraries in order to fully utilize this UI toolkit for your project(s). <br> <a class="h3 mt-5 btn btn-primary" target="_blank" href="https://www.youtube.com/watch?v=LwD-kYlZXtw">Watch the video ►</a> </small> </h2> <hr class="my-5"> <ol class="list-spaced fs-md"> <li> <strong class="mb-1 d-block fs-lg"> Install Git (<span class="fw-500 text-danger">important</span>) </strong> <p> Before you start using Git, you have to make it available on your computer. Even if it’s already installed, it’s probably a good idea to update to the latest version. You can either install it as a package or via another installer, or <a href="https://git-scm.com/book/en/v2/Getting-Started-Installing-Git" target="_blank">download the source code</a> and compile it yourself. </p> </li> <li> <strong class="mb-1 d-block fs-lg"> Install Node.js </strong> <p> Node.js® is a JavaScript runtime built on Chrome's V8 JavaScript engine. Node.js is required in order to run the application build tools. Download the latest version of Node and install it: <a href="https://nodejs.org/en/download/" target="_blank">nodejs.org/en/download/</a> </p> </li> <li> <strong class="mb-1 d-block fs-lg"> Install NPM </strong> <p class="mb-1"> Npm is the package manager for JavaScript and the world’s largest software registry. Npm is a separate project from Node.js, and tends to update more frequently.
As a result, even if you’ve just downloaded Node.js (and therefore npm), you’ll probably need to update your npm. </p> <code class="bg-fusion-500 d-block pt-2 pr-3 pb-2 pl-3 fw-700 mb-g w-100"> npm install npm@@latest -g </code> <p class="mb-1"> Verify that npm is successfully installed; the version of the installed npm will appear. </p> <code class="bg-fusion-500 d-block pt-2 pr-3 pb-2 pl-3 fw-700 mb-g w-100"> npm --version </code> </li> <li> <strong class="mb-1 d-block fs-lg"> Install Gulp </strong> <p class="mb-1"> Gulp is a toolkit that helps you automate time-consuming tasks in your development workflow. To install gulp globally: </p> <code class="bg-fusion-500 d-block pt-2 pr-3 pb-2 pl-3 fw-700 mb-g w-100"> npm install gulp-cli -g </code> <p class="mb-1"> If you have previously installed a version of <code>gulp</code> globally, please remove it to make sure the old version doesn't collide with the new <code>gulp-cli</code> </p> <code class="bg-fusion-500 d-block pt-2 pr-3 pb-2 pl-3 fw-700 mb-g w-100"> npm rm --global gulp </code> <p class="mb-1"> Verify that gulp is successfully installed; the version of the installed gulp will appear. </p> <code class="bg-fusion-500 d-block pt-2 pr-3 pb-2 pl-3 fw-700 mb-g w-100"> gulp --version </code> </li> <li> <strong class="mb-1 d-block fs-lg"> Install NPM Packages </strong> <p class="mb-1"> NPM packages are a great way to ensure your files are up to date and everyone in your development tree is using the same versions of the files. To install the npm packages, simply type: </p> <code class="bg-fusion-500 d-block pt-2 pr-3 pb-2 pl-3 fw-700 mb-1 w-100"> npm install </code> <div class="mb-g help-block"> You may or may not see some messages regarding vulnerabilities; we, and the npm community, are aware of these issues, and you can choose to ignore them or upgrade your jquery to the latest version (warning: doing so may break the IE10 Datatables responsive plugin) </div> <p class="mb-1"> Check outdated files and versions by typing: </p> <code class="bg-fusion-500 d-block pt-2 pr-3 pb-2 pl-3 fw-700 mb-g w-100"> npm outdated </code> <p> If you are updating the npm packages, please be sure to read their changelogs for any breaking changes before you do any major update. To update a package, simply open your package.json file, change the version number, and run the <code>npm i</code> command </p> </li> <li> <strong class="mb-1 d-block fs-lg"> Build project </strong> <p> Once all your NPM packages are installed, you can run the command to build your project. The build will compile your project and create the necessary HTML files, CSS, and JS scripts for each page. Once the compilation is completed, gulp will switch to 'watch' mode and watch for changes in your JS/HBS templates/SCSS files. When you make changes, gulp will automatically recompile the project in seconds.
</p> <code class="bg-fusion-500 d-block pt-2 pr-3 pb-2 pl-3 fw-700 mb-g w-100"> gulp build </code> <p> Once the compilation is complete, you can go to <a href="http://localhost:4000" target="_blank">http://localhost:4000</a> to view your compiled project </p> </li> <li> <strong class="mb-1 d-block fs-lg"> Other commands </strong> <p class="mb-1"> Gulp watch will initialize the file watch process and start the server </p> <code class="bg-fusion-500 d-block pt-2 pr-3 pb-2 pl-3 fw-700 mb-g w-100"> gulp watch </code> <p class="mb-1"> Gulp build-nav will build the _nav.hbs file from your nav.json file </p> <code class="bg-fusion-500 d-block pt-2 pr-3 pb-2 pl-3 fw-700 mb-g w-100"> gulp build-nav </code> </li> </ol> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Build.json For Gulp Usage <a href="javascript:void(0);" name="buildjson"></a> <small> Configure your project's files in <code>build.json</code>. You can completely slim down your project through the build.json file. </small> </h2> <div class="fw-500 mb-1"> File build.json contents </div> <div class="border border-dark mb-g overflow-hidden rounded"> <pre class="prettyprint fs-md mb-0" style="max-height: 250px;"> { "config": { "debug": false, "data" : { "app": "@Settings.App", "app-name": "@Settings.AppName", "app-flavor": "@Settings.AppFlavor", "app-flavor-subscript": "", "user": "@(Settings.Theme.User)", "email": "@(Settings.Theme.Email)", "twitter": "@(Settings.Theme.Twitter)", "avatar": "@(Settings.Theme.Avatar)", "version": "@Settings.Theme.ThemeVersion", "bs4v": "@ViewBag.Bs4v", "logo": "@(Settings.Theme.Logo)", "logo-m": "@ViewBag.LogoM", "copyright": "@ViewBag.Copyright", "copyright-inverse": "@ViewBag.CopyrightInverse", "iconPrefix": "@(Settings.Theme.IconPrefix)", "layoutSettings": true, "chatInterface": true, "shortcutMenu": true, "appLayoutShortcut": true, "appShortcutModal": true, "appHeader": true, "appFooter": true, "appSidebar": true }, "compile": { "jsUglify": false, "cssMinify": false, "jsSourcemaps": false, "cssSourcemaps": true, "autoprefixer": true, "seedOnly": false, "rtl": false }, "path": { "src": "./src", "node_modules": "./node_modules", "exportPath": "./src/template/include/_nav.hbs", "outputPath": "./src/template/include" }, "dist": { "theme": { "path": "./dist" } } }, "build": { "vendors": { "base": { "src": { "required": { "pace-js": { "scripts": [ "{$config.path.node_modules}/pace-js/pace.js" ] }, "jquery": { "scripts": [ "{$config.path.node_modules}/jquery/dist/jquery.js" ] }, "jquery-ui": { "scripts": [ "{$config.path.src}/custom/plugins/jquery-ui-cust/jquery-ui-cust.js" ] }, "bootstrap": { "styles": [ "{$config.path.src}/scss/bootstrap.scss" ], "scripts": [ "{$config.path.node_modules}/popper.js/dist/umd/popper.js", "{$config.path.node_modules}/bootstrap/dist/js/bootstrap.js" ] }, "bootbox": { "scripts": [ "{$config.path.src}/custom/plugins/bootbox/bootbox-cust.js", "{$config.path.src}/custom/plugins/bootbox/bootbox-config.js" ] }, "jquery-snippets": { "scripts": [ "{$config.path.src}/custom/plugins/jquery-snippets/jquery-snippets.js" ] }, "throttle-debounce": { "scripts": [ "{$config.path.node_modules}/jquery-throttle-debounce/jquery.ba-throttle-debounce.js" ] }, "webfonts": { "styles": [ "{$config.path.src}/scss/app.icons.scss" ], "fonts": [ "{$config.path.src}/custom/webfonts/fontawesome-pro-master/webfonts/**", "{$config.path.src}/custom/webfonts/nextgen-fonts-master/webfonts/**", "{$config.path.src}/custom/webfonts/plugins/summernote/**" ] } }, "optional": {
"jquery-slimscroll": { "scripts": [ "{$config.path.node_modules}/jquery-slimscroll/jquery.slimscroll.js" ] }, "waves": { "scripts": [ "{$config.path.node_modules}/node-waves/dist/waves.js" ] }, "smartpanels": { "scripts": [ "{$config.path.src}/custom/plugins/smartpanels/smartpanels.js" ] }, "app-images" : { "images" : [ "{$config.path.src}/img/**" ] }, "app-data": { "json" : [ "{$config.path.src}/custom/webfonts/fontawesome-pro-master/used-for-demo-pages/*.json", "{$config.path.src}/custom/webfonts/nextgen-fonts-master/used-for-demo-pages/*.json", "{$config.path.src}/custom/plugins/datatables/demo-data/*.json", "{$config.path.src}/custom/docs-data/*.json", "{$config.path.src}/custom/lang/*.json" ] }, "app-media": { "media" : [ "{$config.path.src}/custom/media/**" ] } } }, "bundle": { "styles": "{$config.output}/css/vendors.bundle.css", "scripts": "{$config.output}/js/vendors.bundle.js", "images": "{$config.output}/img", "fonts": "{$config.output}/webfonts", "json": "{$config.output}/media/data/", "media": "{$config.output}/media" } }, "custom": { "i18next": { "src": { "scripts": [ "{$config.path.node_modules}/i18next-client/i18next.js" ] }, "bundle": { "scripts": "{$config.output}/js/i18n/i18n.js" } }, "fontawesome": { "light" : { "src": { "styles":[ "{$config.path.src}/custom/webfonts/fontawesome-pro-master/scss/light.scss" ] }, "bundle": { "styles": "{$config.output}/css/fa-light.scss" } }, "regular" : { "src": { "styles":[ "{$config.path.src}/custom/webfonts/fontawesome-pro-master/scss/regular.scss" ] }, "bundle": { "styles": "{$config.output}/css/fa-regular.scss" } }, "solid" : { "src": { "styles":[ "{$config.path.src}/custom/webfonts/fontawesome-pro-master/scss/solid.scss" ] }, "bundle": { "styles": "{$config.output}/css/fa-solid.scss" } }, "duotone" : { "src": { "styles":[ "{$config.path.src}/custom/webfonts/fontawesome-pro-master/scss/duotone.scss" ] }, "bundle": { "styles": "{$config.output}/css/fa-duotone.scss" } }, "brand" : { "src": { "styles":[ "{$config.path.src}/custom/webfonts/fontawesome-pro-master/scss/brands.scss" ] }, "bundle": { "styles": "{$config.output}/css/fa-brands.scss" } } } }, "addons": { "statistics" : { "dygraph" : { "plugin" : { "src": { "styles":[ "{$config.path.node_modules}/dygraphs/dist/dygraph.css" ], "scripts": [ "{$config.path.node_modules}/dygraphs/dist/dygraph.min.js" ] }, "bundle": { "styles": "{$config.output}/css/statistics/dygraph/dygraph.css", "scripts": "{$config.output}/js/statistics/dygraph/dygraph.js" } }, "demo-data" : { "src": { "scripts": [ "{$config.path.src}/custom/demo-data/demo-data-dygraph.js" ] }, "bundle": { "scripts": "{$config.output}/js/statistics/demo-data/demo-data-dygraph.js" } } }, "flotchart" : { "src": { "scripts": [ "{$config.path.node_modules}/flot/jquery.flot.js", "{$config.path.node_modules}/flot/jquery.colorhelpers.js", "{$config.path.node_modules}/flot/jquery.flot.canvas.js", "{$config.path.node_modules}/flot/jquery.flot.categories.js", "{$config.path.node_modules}/flot/jquery.flot.crosshair.js", "{$config.path.node_modules}/flot/jquery.flot.errorbars.js", "{$config.path.node_modules}/flot/jquery.flot.fillbetween.js", "{$config.path.node_modules}/flot/jquery.flot.image.js", "{$config.path.node_modules}/flot/jquery.flot.navigate.js", "{$config.path.node_modules}/flot/jquery.flot.pie.js", "{$config.path.node_modules}/flot/jquery.flot.resize.js", "{$config.path.node_modules}/flot/jquery.flot.selection.js", "{$config.path.node_modules}/flot/jquery.flot.stack.js", 
"{$config.path.node_modules}/flot-spline/js/jquery.flot.spline.js", "{$config.path.node_modules}/flot/jquery.flot.symbol.js", "{$config.path.node_modules}/flot/jquery.flot.threshold.js", "{$config.path.node_modules}/jquery.flot.tooltip/js/jquery.flot.tooltip.js", "{$config.path.node_modules}/flot/jquery.flot.time.js" ] }, "bundle": {"scripts": "{$config.output}/js/statistics/flot/flot.bundle.js"} }, "chartjs" : { "src": { "styles":[ "{$config.path.node_modules}/chart.js/dist/Chart.css" ], "scripts": [ "{$config.path.node_modules}/chart.js/dist/Chart.bundle.js" ] }, "bundle": { "styles": "{$config.output}/css/statistics/chartjs/chartjs.css", "scripts": "{$config.output}/js/statistics/chartjs/chartjs.bundle.js" } }, "chartist" : { "src": { "styles":[ "{$config.path.src}/custom/plugins/chartist/chartist.scss" ], "scripts": [ "{$config.path.node_modules}/chartist/dist/chartist.js" ] }, "bundle": { "styles": "{$config.output}/css/statistics/chartist/chartist.css", "scripts": "{$config.output}/js/statistics/chartist/chartist.js" } }, "d3" : { "src": { "scripts": [ "{$config.path.node_modules}/d3/dist/d3.js" ] }, "bundle": { "scripts": "{$config.output}/js/statistics/d3/d3.js" } }, "c3" : { "plugin": { "src": { "styles":[ "{$config.path.node_modules}/c3/c3.css" ], "scripts": [ "{$config.path.node_modules}/c3/c3.js" ] }, "bundle": { "styles": "{$config.output}/css/statistics/c3/c3.css", "scripts": "{$config.output}/js/statistics/c3/c3.js" } }, "demo-data": { "src": { "scripts": [ "{$config.path.src}/custom/demo-data/demo-c3.js" ] }, "bundle": { "scripts": "{$config.output}/js/statistics/demo-data/demo-c3.js" } } }, "peity" : { "src": { "scripts": [ "{$config.path.node_modules}/peity/jquery.peity.js", "{$config.path.src}/custom/plugins/peity/jquery.peity.config.js" ] }, "bundle": { "scripts": "{$config.output}/js/statistics/peity/peity.bundle.js" } }, "sparkline" : { "src": { "scripts": [ "{$config.path.node_modules}/jquery-sparkline/jquery.sparkline.js", "{$config.path.src}/custom/plugins/jquery-sparkline/jquery-sparkline.config.js" ] }, "bundle": { "scripts": "{$config.output}/js/statistics/sparkline/sparkline.bundle.js" } }, "easypiechart" : { "src": { "scripts": [ "{$config.path.node_modules}/easy-pie-chart/dist/jquery.easypiechart.js", "{$config.path.src}/custom/plugins/easy-pie-chart/jquery.easypiechart.config.js" ] }, "bundle": { "scripts": "{$config.output}/js/statistics/easypiechart/easypiechart.bundle.js" } } }, "datagrid" : { "datatables" : { "core" : { "src": { "styles":[ "{$config.path.node_modules}/datatables.net-bs4/css/dataTables.bootstrap4.css", "{$config.path.node_modules}/datatables.net-autofill-bs4/css/autoFill.bootstrap4.css", "{$config.path.node_modules}/datatables.net-buttons-bs4/css/buttons.bootstrap4.css", "{$config.path.node_modules}/datatables.net-colreorder-bs4/css/colReorder.bootstrap4.css", "{$config.path.node_modules}/datatables.net-fixedcolumns-bs4/css/fixedColumns.bootstrap4.css", "{$config.path.node_modules}/datatables.net-fixedheader-bs4/css/fixedHeader.bootstrap4.css", "{$config.path.node_modules}/datatables.net-keytable-bs4/css/keyTable.bootstrap4.css", "{$config.path.node_modules}/datatables.net-responsive-bs4/css/responsive.bootstrap4.css", "{$config.path.node_modules}/datatables.net-rowgroup-bs4/css/rowGroup.bootstrap4.css", "{$config.path.node_modules}/datatables.net-rowreorder-bs4/css/rowReorder.bootstrap4.css", "{$config.path.node_modules}/datatables.net-scroller-bs4/css/scroller.bootstrap4.css", 
"{$config.path.node_modules}/datatables.net-select-bs4/css/select.bootstrap4.css", "{$config.path.src}/custom/plugins/datatables/datatables.styles.app.scss" ], "scripts": [ "{$config.path.node_modules}/datatables.net/js/jquery.dataTables.js", "{$config.path.node_modules}/datatables.net-bs4/js/dataTables.bootstrap4.js", "{$config.path.src}/custom/plugins/datatables/datatables.styles.app.js", "{$config.path.node_modules}/datatables.net-autofill/js/dataTables.autoFill.js", "{$config.path.node_modules}/datatables.net-autofill-bs4/js/autoFill.bootstrap4.js", "{$config.path.node_modules}/datatables.net-buttons/js/dataTables.buttons.js", "{$config.path.node_modules}/datatables.net-buttons-bs4/js/buttons.bootstrap4.js", "{$config.path.node_modules}/datatables.net-buttons/js/buttons.html5.js", "{$config.path.node_modules}/datatables.net-buttons/js/buttons.print.js", "{$config.path.node_modules}/datatables.net-buttons/js/buttons.colVis.js", "{$config.path.src}/custom/plugins/datatables/datatables.styles.buttons.app.js", "{$config.path.node_modules}/datatables.net-colreorder/js/dataTables.colReorder.js", "{$config.path.node_modules}/datatables.net-colreorder-bs4/js/colReorder.bootstrap4.js", "{$config.path.node_modules}/datatables.net-fixedcolumns/js/dataTables.fixedColumns.js", "{$config.path.node_modules}/datatables.net-fixedcolumns-bs4/js/fixedColumns.bootstrap4.js", "{$config.path.node_modules}/datatables.net-fixedheader/js/dataTables.fixedHeader.js", "{$config.path.node_modules}/datatables.net-fixedheader-bs4/js/fixedHeader.bootstrap4.js", "{$config.path.node_modules}/datatables.net-keytable/js/dataTables.keyTable.js", "{$config.path.node_modules}/datatables.net-keytable-bs4/js/keyTable.bootstrap4.js", "{$config.path.node_modules}/datatables.net-responsive/js/dataTables.responsive.js", "{$config.path.node_modules}/datatables.net-responsive-bs4/js/responsive.bootstrap4.js", "{$config.path.node_modules}/datatables.net-rowgroup/js/dataTables.rowGroup.js", "{$config.path.node_modules}/datatables.net-rowgroup-bs4/js/rowGroup.bootstrap4.js", "{$config.path.node_modules}/datatables.net-rowreorder/js/dataTables.rowReorder.js", "{$config.path.node_modules}/datatables.net-rowreorder-bs4/js/rowReorder.bootstrap4.js", "{$config.path.node_modules}/datatables.net-scroller/js/dataTables.scroller.js", "{$config.path.node_modules}/datatables.net-scroller-bs4/js/scroller.bootstrap4.js", "{$config.path.node_modules}/datatables.net-select/js/dataTables.select.js", "{$config.path.node_modules}/datatables.net-select-bs4/js/select.bootstrap4.js", "{$config.path.src}/custom/plugins/datatables-alteditor/datatables-alteditor.js" ] }, "bundle": { "styles": "{$config.output}/css/datagrid/datatables/datatables.bundle.css", "scripts": "{$config.output}/js/datagrid/datatables/datatables.bundle.js" } }, "export" : { "src": { "scripts": [ "{$config.path.node_modules}/jszip/dist/jszip.js", "{$config.path.node_modules}/pdfmake/build/pdfmake.js", "{$config.path.node_modules}/pdfmake/build/vfs_fonts.js" ] }, "bundle": { "scripts": "{$config.output}/js/datagrid/datatables/datatables.export.js" } } } }, "notifications" : { "toastr" : { "src": { "styles":[ "{$config.path.node_modules}/toastr/toastr.scss", "{$config.path.src}/custom/plugins/toastr/toastr-custom.scss" ], "scripts": [ "{$config.path.node_modules}/toastr/toastr.js" ] }, "bundle": { "styles": "{$config.output}/css/notifications/toastr/toastr.css", "scripts": "{$config.output}/js/notifications/toastr/toastr.js" } }, "sweetalert2" : { "src": { "styles":[ 
"{$config.path.src}/custom/plugins/sweetalert2/sweetalert2.scss" ], "scripts": [ "{$config.path.node_modules}/es6-promise-polyfill/promise.js", "{$config.path.node_modules}/sweetalert2/dist/sweetalert2.js" ] }, "bundle": { "styles": "{$config.output}/css/notifications/sweetalert2/sweetalert2.bundle.css", "scripts": "{$config.output}/js/notifications/sweetalert2/sweetalert2.bundle.js" } } }, "formplugins" : { "colorpicker" : { "src": { "styles":[ "{$config.path.node_modules}/bootstrap-colorpicker/dist/css/bootstrap-colorpicker.css" ], "scripts": [ "{$config.path.node_modules}/bootstrap-colorpicker/dist/js/bootstrap-colorpicker.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/bootstrap-colorpicker/bootstrap-colorpicker.css", "scripts": "{$config.output}/js/formplugins/bootstrap-colorpicker/bootstrap-colorpicker.js" } }, "datepicker" : { "src": { "styles": [ "{$config.path.node_modules}/bootstrap-datepicker/dist/css/bootstrap-datepicker3.css", "{$config.path.src}/custom/plugins/datepicker/datepicker-custom.scss" ], "scripts": [ "{$config.path.node_modules}/bootstrap-datepicker/dist/js/bootstrap-datepicker.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/bootstrap-datepicker/bootstrap-datepicker.css", "scripts": "{$config.output}/js/formplugins/bootstrap-datepicker/bootstrap-datepicker.js" } }, "daterangepicker" : { "src": { "styles": [ "{$config.path.node_modules}/bootstrap-daterangepicker/daterangepicker.css", "{$config.path.src}/custom/plugins/daterangepicker/daterangepicker-custom.scss" ], "scripts": [ "{$config.path.node_modules}/bootstrap-daterangepicker/daterangepicker.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/bootstrap-daterangepicker/bootstrap-daterangepicker.css", "scripts": "{$config.output}/js/formplugins/bootstrap-daterangepicker/bootstrap-daterangepicker.js" } }, "dropzone" : { "src": { "styles": [ "{$config.path.node_modules}/dropzone/dist/dropzone.css", "{$config.path.src}/custom/plugins/dropzone/dropzone-custom.scss" ], "scripts": [ "{$config.path.node_modules}/dropzone/dist/dropzone.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/dropzone/dropzone.css", "scripts": "{$config.output}/js/formplugins/dropzone/dropzone.js" } }, "ion-rangeslider": { "src": { "styles": [ "{$config.path.node_modules}/ion-rangeslider/css/ion.rangeSlider.css", "{$config.path.src}/custom/plugins/ion-rangeslider/ion.rangeslider-custom.scss" ], "scripts": [ "{$config.path.node_modules}/ion-rangeslider/js/ion.rangeSlider.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/ion-rangeslider/ion-rangeslider.css", "scripts": "{$config.output}/js/formplugins/ion-rangeslider/ion-rangeslider.js" } }, "inputmusk": { "src": { "scripts": [ "{$config.path.node_modules}/inputmask/dist/jquery.inputmask.bundle.js" ] }, "bundle": { "scripts": "{$config.output}/js/formplugins/inputmask/inputmask.bundle.js" } }, "cropper": { "src": { "styles": [ "{$config.path.node_modules}/cropperjs/dist/cropper.css", "{$config.path.src}/custom/plugins/cropperjs/cropper-demo.scss" ], "scripts": [ "{$config.path.node_modules}/cropperjs/dist/cropper.js", "{$config.path.node_modules}/jquery-cropper/dist/jquery-cropper.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/cropperjs/cropper.css", "scripts": "{$config.output}/js/formplugins/cropperjs/cropper.js" } }, "select2": { "src": { "styles": [ "{$config.path.node_modules}/select2/dist/css/select2.css", "{$config.path.src}/custom/plugins/select2/select2-cust.scss" ], "scripts": [ 
"{$config.path.node_modules}/select2/dist/js/select2.full.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/select2/select2.bundle.css", "scripts": "{$config.output}/js/formplugins/select2/select2.bundle.js" } }, "summernote": { "src": { "styles": [ "{$config.path.node_modules}/summernote/dist/summernote-bs4.css", "{$config.path.src}/custom/plugins/summernote/summernote-custom.scss" ], "scripts": [ "{$config.path.node_modules}/summernote/dist/summernote-bs4.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/summernote/summernote.css", "scripts": "{$config.output}/js/formplugins/summernote/summernote.js" } }, "markdown": { "src": { "styles": [ "{$config.path.node_modules}/bootstrap-markdown/css/bootstrap-markdown.min.css" ], "scripts": [ "{$config.path.node_modules}/markdown/lib/markdown.js", "{$config.path.node_modules}/bootstrap-markdown/js/bootstrap-markdown.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/bootstrap-markdown/bootstrap-markdown.css", "scripts": "{$config.output}/js/formplugins/bootstrap-markdown/bootstrap-markdown.js" } }, "nouislider": { "src": { "styles": [ "{$config.path.node_modules}/nouislider/distribute/nouislider.css" ], "scripts": [ "{$config.path.node_modules}/nouislider/distribute/nouislider.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/nouislider/nouislider.css", "scripts": "{$config.output}/js/formplugins/nouislider/nouislider.js" } }, "wizard": { "src": { "styles": [ "{$config.path.node_modules}/smartwizard/dist/css/smart_wizard.css", "{$config.path.node_modules}/smartwizard/dist/css/smart_wizard_theme_arrows.css", "{$config.path.node_modules}/smartwizard/dist/css/smart_wizard_theme_circles.css", "{$config.path.node_modules}/smartwizard/dist/css/smart_wizard_theme_dots.css" ], "scripts": [ "{$config.path.node_modules}/smartwizard/dist/js/jquery.smartWizard.js" ] }, "bundle": { "styles": "{$config.output}/css/formplugins/smartwizard/smartwizard.css", "scripts": "{$config.output}/js/formplugins/smartwizard/smartwizard.js" } } }, "miscellaneous" : { "fullcalendar" : { "src": { "styles":[ "{$config.path.node_modules}/@@fullcalendar/core/main.css", "{$config.path.node_modules}/@@fullcalendar/daygrid/main.css", "{$config.path.node_modules}/@@fullcalendar/list/main.css", "{$config.path.node_modules}/@@fullcalendar/timegrid/main.css", "{$config.path.node_modules}/@@fullcalendar/bootstrap/main.css", "{$config.path.src}/custom/plugins/@@fullcalendar/core-main-override.scss" ], "scripts": [ "{$config.path.node_modules}/@@fullcalendar/core/main.js", "{$config.path.node_modules}/@@fullcalendar/daygrid/main.js", "{$config.path.node_modules}/@@fullcalendar/list/main.js", "{$config.path.node_modules}/@@fullcalendar/timegrid/main.js", "{$config.path.node_modules}/@@fullcalendar/interaction/main.js", "{$config.path.src}/custom/plugins/@@fullcalendar/bootstrap-main-cust.js" ] }, "bundle": { "styles": "{$config.output}/css/miscellaneous/fullcalendar/fullcalendar.bundle.css", "scripts": "{$config.output}/js/miscellaneous/fullcalendar/fullcalendar.bundle.js" } }, "lightgallery" : { "src": { "styles":[ "{$config.path.node_modules}/justifiedGallery/dist/css/justifiedGallery.css", "{$config.path.src}/custom/plugins/lightgallery/lightgallery.scss" ], "scripts": [ "{$config.path.node_modules}/justifiedGallery/dist/js/jquery.justifiedGallery.js", "{$config.path.node_modules}/jquery-mousewheel/jquery.mousewheel.js", "{$config.path.node_modules}/lightgallery/src/js/lightgallery.js", 
"{$config.path.node_modules}/lightgallery/modules/lg-autoplay.js", "{$config.path.node_modules}/lightgallery/modules/lg-fullscreen.js", "{$config.path.node_modules}/lightgallery/modules/lg-hash.js", "{$config.path.node_modules}/lightgallery/modules/lg-pager.js", "{$config.path.node_modules}/lightgallery/modules/lg-thumbnail.js", "{$config.path.node_modules}/lightgallery/modules/lg-zoom.js" ] }, "bundle": { "styles": "{$config.output}/css/miscellaneous/lightgallery/lightgallery.bundle.css", "scripts": "{$config.output}/js/miscellaneous/lightgallery/lightgallery.bundle.js" } }, "smartvoice" : { "src": { "scripts": [ "{$config.path.src}/custom/plugins/smartvoice/smartvoice-config.js", "{$config.path.src}/custom/plugins/smartvoice/smartvoice.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/lightgallery/smartvoice.bundle.js" } }, "reactions" : { "src": { "styles":[ "{$config.path.src}/custom/plugins/reactions/reactions.scss" ] }, "bundle": { "styles": "{$config.output}/css/miscellaneous/reactions/reactions.css" } }, "jqvmap" : { "core" : { "src": { "styles":[ "{$config.path.node_modules}/jqvmap/dist/jqvmap.css", "{$config.path.src}/custom/plugins/jqvmap/jqvmap-cust.scss" ], "scripts": [ "{$config.path.node_modules}/jqvmap/dist/jquery.vmap.js", "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.world.js" ] }, "bundle": { "styles": "{$config.output}/css/miscellaneous/jqvmap/jqvmap.bundle.css", "scripts": "{$config.output}/js/miscellaneous/jqvmap/jqvmap.bundle.js" } }, "maps" : { "algeria" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.algeria.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.algeria.js" } }, "argentina" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.argentina.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.argentina.js" } }, "brazil" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.brazil.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.brazil.js" } }, "canada" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.canada.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.canada.js" } }, "europe" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.europe.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.europe.js" } }, "france" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.france.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.france.js" } }, "germany" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.germany.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.germany.js" } }, "greece" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.greece.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.greece.js" } }, "iran" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.iran.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.iran.js" } }, "iraq" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.iraq.js" ] }, "bundle": { "scripts": 
"{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.iraq.js" } }, "russia" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.russia.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.russia.js" } }, "tunisia" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.tunisia.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.tunisia.js" } }, "turkey" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.turkey.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.turkey.js" } }, "usa" : { "src": { "scripts": [ "{$config.path.node_modules}/jqvmap/dist/maps/jquery.vmap.usa.js" ] }, "bundle": { "scripts": "{$config.output}/js/miscellaneous/jqvmap/maps/jquery.vmap.usa.js" } } } }, "nestable" : { "src": { "styles":[ "{$config.path.src}/custom/plugins/nestable/nestable.scss" ], "scripts": [ "{$config.path.node_modules}/nestable/jquery.nestable.js" ] }, "bundle": { "styles": "{$config.output}/css/miscellaneous/nestable/nestable.css", "scripts": "{$config.output}/js/miscellaneous/nestable/nestable.js" } }, "treeview" : { "src": { "styles":[ "{$config.path.src}/custom/plugins/treeview/treeview.scss" ], "scripts": [ "{$config.path.src}/custom/plugins/treeview/treeview.js" ] }, "bundle": { "styles": "{$config.output}/css/miscellaneous/treeview/treeview.css", "scripts": "{$config.output}/js/miscellaneous/treeview/treeview.js" } } }, "dependencyplugins" : { "moment" : { "src": { "scripts": [ "{$config.path.node_modules}/moment/min/moment.min.js" ] }, "bundle": { "scripts": "{$config.output}/js/dependency/moment/moment.js" } } } } }, "app": { "base": { "src": { "styles": [ "{$config.path.src}/scss/app.core.scss" ], "scripts": [ "{$config.path.src}/js/_config/app.config.js", "{$config.path.src}/js/_modules/app.navigation.js", "{$config.path.src}/js/_modules/app.menu.slider.js", "{$config.path.src}/js/_modules/app.init.js", "{$config.path.src}/js/_modules/app.resize.trigger.js", "{$config.path.src}/js/_modules/app.scroll.trigger.js", "{$config.path.src}/js/_modules/app.domReady.js", "{$config.path.src}/js/_modules/app.orientationchange.js", "{$config.path.src}/js/_modules/app.window.load.js", "{$config.path.src}/js/_config/app.colors.js" ] }, "bundle": { "styles": "{$config.output}/css/app.bundle.css", "scripts": "{$config.output}/js/app.bundle.js" } }, "custom": { "auth" : { "login" : { "src": { "styles":[ "{$config.path.src}/content/pages/page_login_alt/page-login-alt.scss" ] }, "bundle": { "styles": "{$config.output}/css/page-login-alt.css" } } }, "invoice" : { "src": { "styles":[ "{$config.path.src}/content/pages/page_invoice/page-invoice.scss" ] }, "bundle": { "styles": "{$config.output}/css/page-invoice.css" } } }, "demo": { "theme": { "src": { "styles": [ "{$config.path.src}/scss/theme-demo.scss" ] }, "bundle": { "styles": "{$config.output}/css/theme-demo.css" } } }, "themes": { "theme-1": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-1.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-1.css" } }, "theme-2": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-2.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-2.css" } }, "theme-3": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-3.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-3.css" } }, "theme-4": { "src": { 
"styles": [ "{$config.path.src}/scss/_themes/cust-theme-4.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-4.css" } }, "theme-5": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-5.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-5.css" } }, "theme-6": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-6.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-6.css" } }, "theme-7": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-7.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-7.css" } }, "theme-8": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-8.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-8.css" } }, "theme-9": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-9.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-9.css" } }, "theme-10": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-10.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-10.css" } }, "theme-11": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-11.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-11.css" } }, "theme-12": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-12.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-12.css" } }, "theme-13": { "src": { "styles": [ "{$config.path.src}/scss/_themes/cust-theme-13.scss" ] }, "bundle": { "styles": "{$config.output}/css/themes/cust-theme-13.css" } } } } } } </pre> </div> <div class="table-responsive"> <table class="table table-bordered table-hover"> <thead> <tr> <th style="width:260px"> variable </th> <th style="width: 100px"> value </th> <th> description </th> </tr> </thead> <tbody> <tr> <td> config.debug </td> <td> <code>boolean</code> </td> <td> spits out debugging data and error messages on npm log file </td> </tr> <tr> <td> config.data.* </td> <td> <code>string</code> </td> <td> global data for the template, control profile images, user names, etc </td> </tr> <tr> <td> config.compile.jsUglify </td> <td> <code>boolean</code> </td> <td> minifies all javascript files in the project </td> </tr> <tr> <td> config.compile.cssMinify </td> <td> <code>boolean</code> </td> <td> minifies all css files in the project </td> </tr> <tr> <td> config.compile.jsSourcemaps </td> <td> <code>boolean</code> </td> <td> generates js source maps from the scss files for easier debugging options using the browser's inspection tool </td> </tr> <tr> <td> config.compile.cssSourcemaps </td> <td> <code>boolean</code> </td> <td> generates css source maps from the scss files for easier debugging options using the browser's inspection tool </td> </tr> <tr> <td> config.compile.autoprefixer </td> <td> <code>boolean</code> </td> <td> we recommend you leave this set to true. 
This will auto-generate all the necessary CSS browser prefixes for different browser types </td> </tr> <tr> <td> config.compile.seedOnly </td> <td> <code>boolean</code> </td> <td> generates the seed project navigation menu; all other assets stay intact and can be removed manually (but they will not be called into the main project) </td> </tr> <tr> <td> config.path.* </td> <td> <code>string</code> </td> <td> addresses the source and dist paths of your project files; change this if you change your source file path </td> </tr> <tr> <td> build.vendor.* </td> <td> <code>string</code> </td> <td> links all plugin sources from the node_modules directory; you can concatenate files here and also rename them if needed </td> </tr> <tr> <td> build.app.* </td> <td> <code>string</code> </td> <td> concatenates all the main core files for the theme </td> </tr> </tbody> </table> </div> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Configuration JS (app.config.js) <a href="javascript:void(0);" name="app.config.js"></a> <small> Your <code>app.config.js</code> mainly controls the behaviour of your application; you can configure the navigation speed, disable visual effects, and change localStorage settings. To learn more about app configuration and how it works, check out the <a href="/plugin/appcore" class="fw-500" title="app.core.js">app.core.js</a> page </small> </h2> <div class="fw-500 mb-1"> File app.config.js contents </div> <div class="border border-dark mb-g overflow-hidden rounded"> <pre class="prettyprint fs-md mb-0" style="max-height: 250px;">
var myapp_config = {
    VERSION: '@Settings.Theme.ThemeVersion',
    root_: $('body'), // used for core app reference
    root_logo: $('.page-sidebar > .page-logo'), // used for core app reference
    throttleDelay: 450, // for window.scrolling & window.resizing
    filterDelay: 150, // for keyup.functions
    thisDevice: null, // desktop or mobile
    isMobile: (/iphone|ipad|ipod|android|blackberry|mini|windows\sce|palm/i.test(navigator.userAgent.toLowerCase())), // popular device types available on the market
    mobileMenuTrigger: null, // used by pagescrolling and appHeight script, do not change!
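    // NOTE (doc annotation, not part of the shipped file): the references
    // above are consumed by app.core.js at init time; the values below tune
    // breakpoints, debug output and navigation behaviour per project.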
    mobileResolutionTrigger: 992, // the resolution when the mobile activation fires
    isWebkit: ((!!window.chrome && !!window.chrome.webstore) === true || Object.prototype.toString.call(window.HTMLElement).indexOf('Constructor') > 0 === true),
    isChrome: (/chrom(e|ium)/.test(navigator.userAgent.toLowerCase())),
    isIE: ((window.navigator.userAgent.indexOf('Trident/')) > 0 === true),
    debugState: true, // outputs debug information on browser console
    rippleEffect: true, // material design effect that appears on all buttons
    mythemeAnchor: '#mytheme',
    activateLastTab: true,
    navAnchor: $('#js-primary-nav'), // changing this may implicate slimscroll plugin target
    navHooks: $('#js-nav-menu'), // changing this may implicate CSS targets
    navAccordion: true, // nav item when one is expanded the other closes
    navInitalized: 'js-nav-built', // nav finished class
    navFilterInput: $('#nav_filter_input'), // changing this may implicate CSS targets
    navHorizontalWrapperId: 'js-nav-menu-wrapper',
    navSpeed: 500, // ms
    mythemeColorProfileID: $('#js-color-profile'),
    navClosedSign: 'fal fa-angle-down',
    navOpenedSign: 'fal fa-angle-up',
    appIconPrefix: 'fal',
    appDateHook: $('.js-get-date'),
    storeLocally: true,
    /*
     * Used with initApp.loadScripts
     * DOC: Please leave it blank
     */
    jsArray: []
};
</pre> </div> <div class="fw-500 mb-1"> Config file definition </div> <div class="table-responsive"> <table class="table table-bordered table-hover"> <thead> <tr> <th style="width:260px"> variable </th> <th style="width: 100px"> value </th> <th> description </th> </tr> </thead> <tbody> <tr> <td> myapp_config.VERSION </td> <td> <code>string</code> </td> <td> application version number </td> </tr> <tr> <td> myapp_config.root_ </td> <td> <code>object</code> </td> <td> used for core app reference </td> </tr> <tr> <td> myapp_config.root_logo </td> <td> <code>object</code> </td> <td> used for core app reference to detect logo click behaviour </td> </tr> <tr> <td> myapp_config.throttleDelay </td> <td> <code>integer</code> </td> <td> for window.scrolling & window.resizing </td> </tr> <tr> <td> myapp_config.filterDelay </td> <td> <code>integer</code> </td> <td> keyup.functions for the search filter </td> </tr> <tr> <td> myapp_config.mobileResolutionTrigger </td> <td> <code>integer</code> </td> <td> the resolution when the mobile activation fires </td> </tr> <tr> <td> myapp_config.debugState </td> <td> <code>boolean</code> </td> <td> outputs debug information on browser console </td> </tr> <tr> <td> myapp_config.rippleEffect </td> <td> <code>boolean</code> </td> <td> global configuration for material design effect that appears on all buttons </td> </tr> <tr> <td> myapp_config.mythemeAnchor </td> <td> <code>string</code> </td> <td> this anchor is created dynamically and CSS is loaded as an override theme </td> </tr> <tr> <td> myapp_config.navAnchor </td> <td> <code>object</code> </td> <td> this is the root anchor point where the menu script will begin its build </td> </tr> <tr> <td> myapp_config.navAccordion </td> <td> <code>boolean</code> </td> <td> nav item when one is expanded the other closes </td> </tr> <tr> <td> myapp_config.navSpeed </td> <td> <code>integer</code> </td> <td> the rate at which the menu expands to reveal child elements on click; a lower value means faster expansion of nav children </td> </tr> <tr> <td> myapp_config.navClosedSign </td> <td> <code>string</code> </td> <td> main navigation's collapse icon </td> </tr> <tr> <td> myapp_config.navOpenedSign </td> <td> <code>string</code> </td> <td> main navigation's expand icon </td> </tr>
<tr> <td> myapp_config.storeLocally </td> <td> <code>boolean</code> </td> <td> saves settings to localStorage; to store settings in a DB instead of localStorage, use initApp.pushSettings("className1 className2") </td> </tr> </tbody> </table> </div> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Layout Options <a href="javascript:void(0);" name="applayouts"></a> <small> @Settings.AppName has over 285 layout combinations, you can check them out at the <a href="/settings/layoutoptions" class="fw-500">Layout Options</a> page. </small> </h2> <p class="panel-tag"> You can also try them out through the <a href="#" class="fw-500" data-toggle="modal" data-target=".js-modal-settings"><i class="fal fa-cog"></i> Settings Panel</a>. The classes come with 3 unique prefixes, <code>.header-*</code>, <code>.nav-*</code>, and <code>.mod-*</code>. You can also save your state settings to your database using the <code>initApp.pushSettings()</code> and <code>initApp.getSettings()</code> methods; more info can be found on the <a href="/settings/savingdb" class="fw-500">Saving to Database</a> page. </p> <p class="fw-500"> Add the following classes to your <code>body</code> tag in order to see the effect. You may need to clear your <code>localStorage</code> first if you intend to add it manually. </p> <div class="table-responsive"> <table class="table table-bordered table-hover"> <thead> <tr> <th style="width:260px"> class name </th> <th> description </th> </tr> </thead> <tbody> <tr> <td> <code>.header-function-fixed</code> </td> <td> header is in a fixed position at all times, affecting mobile & desktop view </td> </tr> <tr> <td> <code>.nav-function-fixed</code> </td> <td> Left panel position becomes fixed, activates custom scroll plugin </td> </tr> <tr> <td> <code>.nav-function-minify</code> </td> <td> Navigation text is collapsed; only the icons remain visible. Hover the icons to reveal any child elements </td> </tr> <tr> <td> <code>.nav-function-hidden</code> </td> <td> Navigation is revealed upon user hovering the visible portion of the navigation </td> </tr> <tr> <td> <code>.nav-function-top</code> </td> <td> Main navigation shifts to the top (horizontal nav) </td> </tr> <tr> <td> <code>.mod-main-boxed</code> </td> <td> Contains the layout to 1200px max width.
Some classes are not compatible with this setting </td> </tr> <tr> <td> <code>.nav-mobile-push</code> </td> <td> Content panel pushed on menu reveal </td> </tr> <tr> <td> <code>.nav-mobile-no-overlay</code> </td> <td> Removes mesh on menu reveal </td> </tr> <tr> <td> <code>.nav-mobile-slide-out</code> </td> <td> Content overlaps menu </td> </tr> <tr> <td> <code>.mod-bigger-font</code> </td> <td> Fonts are bigger for readability </td> </tr> <tr> <td> <code>.mod-high-contrast</code> </td> <td> 4.5:1 text contrast ratio to meet WCAG 2 AA standards </td> </tr> <tr> <td> <code>.mod-color-blind</code> </td> <td> Color vision deficiency (this is a progressive UI option) </td> </tr> <tr> <td> <code>.mod-pace-custom</code> </td> <td> Preloader will be inside content </td> </tr> <tr> <td> <code>.mod-clean-page-bg</code> </td> <td> A white background for your webapp </td> </tr> <tr> <td> <code>.mod-hide-nav-icons</code> </td> <td> Hides navigation icons for a slick and clean look (some classes will be incompatible) </td> </tr> <tr> <td> <code>.mod-disable-animation</code> </td> <td> Disables css based animations </td> </tr> <tr> <td> <code>.mod-hide-info-card</code> </td> <td> Hides info card from left panel </td> </tr> <tr> <td> <code>.mod-lean-subheader</code> </td> <td> Distinguished page header </td> </tr> <tr> <td> <code>.mod-nav-link</code> </td> <td> Clear breakdown of nav links (some options will be incompatible) </td> </tr> </tbody> </table> </div> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Skins & Themes <a href="javascript:void(0);" name="skinsthemes"></a> <small> @Settings.AppName contains over 10 pre-built CSS skins to choose from. All are located inside <code>dist/css/themes</code>; if you would like to edit the source, go to <code>src/scss/_themes</code>. The skin files generated are very lightweight, ranging from 10-20kb in size, and contain only the color overrides. </small> </h2> <h4> How to change theme colors <small> There is more than one way to change your skin or theme colors. Use any one of the following methods below: </small> </h4> <div class="fw-500 mb-1"> <span class="color-info-500">Method 1:</span> Changing skins programmatically via button action: add the following line to any clickable object. Remove the <code>data-themesave</code> attribute if you do not plan to save the theme to localStorage. </div> <div class="border border-dark mb-g overflow-hidden rounded"> <pre class="prettyprint m-0">data-action="theme-update" data-themesave="" data-theme="/css/themes/cust-theme-1.css"</pre> </div> <div class="fw-500 mb-1"> <span class="color-info-500">Method 2:</span> Changing skins manually: add the skin CSS file to your html <code>head</code>; it must be added after the app's main CSS files </div> <div class="border border-dark mb-g overflow-hidden rounded"> <pre class="prettyprint m-0">
link rel="stylesheet" media="screen, print" href="~/css/vendors.bundle.css"
link rel="stylesheet" media="screen, print" href="~/css/app.bundle.css"
link id="mytheme" rel="stylesheet" href="~/css/themes/cust-theme-1.css" &lt;-- Theme CSS override</pre> </div> <div class="fw-500 mb-1"> <span class="color-info-500">Method 3:</span> Changing skins programmatically via script. <code>themeName</code> is the location of your theme's CSS file, including the full path, file name and extension. <code>themeSave</code> indicates whether you wish to save this change to <code>localStorage</code>.
</div> <div class="border border-dark mb-g overflow-hidden rounded"> <pre class="prettyprint m-0"> //initApp.updateTheme(themeName, themeSave); //saves the theme initApp.updateTheme('css/themes/cust-theme-1.css'); // does not save initApp.updateTheme('css/themes/cust-theme-1.css', false);</pre> </div> <div class="height-1 mb-3"></div> <h4> Theme Color references <small> Add the theme URL by following the steps above </small> </h4> <table class="table table-bordered mb-g"> <thead> <tr> <td> Theme Name </td> <td> Theme URL </td> <td> Theme Colors </td> </tr> </thead> <tbody> <tr> <td> Default </td> <td> <code>base CSS</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 bg-primary-500 rounded-circle"></span> <span class="d-inline-block mx-1 mt-1 p-2 bg-success-500 rounded-circle"></span> <span class="d-inline-block mx-1 mt-1 p-2 bg-danger-500 rounded-circle"></span> <span class="d-inline-block mx-1 mt-1 p-2 bg-warning-500 rounded-circle"></span> <span class="d-inline-block mx-1 mt-1 p-2 bg-info-500 rounded-circle"></span> <span class="d-inline-block mx-1 mt-1 p-2 bg-fusion-500 rounded-circle"></span> </div> </td> </tr> <tr> <td> Tapestry </td> <td> <code>css/themes/cust-theme-1.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#b56a9f"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#1dc958"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#c139fd"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#b8ff41"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#21e2f3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#505050"></span> </div> </td> </tr> <tr> <td> Atlantis </td> <td> <code>css/themes/cust-theme-2.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#9fcb3d"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#1dc9b7"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#fd3995"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#ffc241"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#2196F3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#444444"></span> </div> </td> </tr> <tr> <td> Indigo </td> <td> <code>css/themes/cust-theme-3.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#4679cc"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#1dc9b7"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#fd3995"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#ffc241"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#2196F3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#494949"></span> </div> </td> </tr> <tr> <td> Dodger Blue </td> <td> <code>css/themes/cust-theme-4.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#2198F3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" 
style="background:#69FB13"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#FC1349"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#FF9A13"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#BB1BF4"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#434a51"></span> </div> </td> </tr> <tr> <td> Tradewind </td> <td> <code>css/themes/cust-theme-5.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#6ab5b4"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#85b86c"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#b57d6a"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#e0d07e"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#8f6ab5"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#505050"></span> </div> </td> </tr> <tr> <td> Cranberry </td> <td> <code>css/themes/cust-theme-6.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#dd5293"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#1dc9b7"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#fd3995"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#ffc241"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#2196F3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#585858"></span> </div> </td> </tr> <tr> <td> Oslo Gray </td> <td> <code>css/themes/cust-theme-7.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#868e96"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#1dc9b7"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#b57d6a"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#ffc241"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#2196F3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#4e4e4e"></span> </div> </td> </tr> <tr> <td> Chetwode Blue </td> <td> <code>css/themes/cust-theme-8.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#7c91df"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#93ff87"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#ff8793"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#ffbf87"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#90c7f2"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#6e6e6e"></span> </div> </td> </tr> <tr> <td> Apricot </td> <td> <code>css/themes/cust-theme-9.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#e59c6c"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#6c9be3"></span> <span 
class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#e77070"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#ede267"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#cb6de3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:dimgray"></span> </div> </td> </tr> <tr> <td> Blue Smoke </td> <td> <code>css/themes/cust-theme-10.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#778c85"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#8eff37"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#af37ff"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#37ffa8"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#3787ff"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#424242"></span> </div> </td> </tr> <tr> <td> Green Smoke </td> <td> <code>css/themes/cust-theme-11.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#a2b077"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#7fd894"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#f88c71"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#e892d7"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#d9b5a3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#545454"></span> </div> </td> </tr> <tr> <td> Wild Blue Yonder </td> <td> <code>css/themes/cust-theme-12.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#7976b3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#1dc958"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#c139fd"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#b8ff41"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#21e2f3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#555555"></span> </div> </td> </tr> <tr> <td> Emerald </td> <td> <code>css/themes/cust-theme-13.css</code> </td> <td> <div class="d-flex align-items-center"> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#55ce5f"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#1dc958"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#c139fd"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#b8ff41"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#21e2f3"></span> <span class="d-inline-block mx-1 mt-1 p-2 rounded-circle" style="background:#525252"></span> </div> </td> </tr> </tbody> </table> <div class="height-1 mb-3"></div> <h4> Generate your own unique Themes <small> You can generate any number of themes by following the instructions below. 
</small> </h4> <ol class="list-spaced"> <li> <strong>Go over to <code>smartadmin-html-full\src\scss\_themes</code></strong> </li> <li> <strong>Create an empty file with the <code>.scss</code> extension. Let's name your file <span class="text-success">'mytheme.scss'</span></strong> </li> <li> <strong>Open <code>cust-theme-1.scss</code> to take a look at the example of what we did.</strong> </li> <li> <strong>You can either copy from <code>cust-theme-1.scss</code> to your 'mytheme.scss' file and change the color values, or you may open the <code>variables.scss</code> file and copy over any values you wish to change. </strong> <br><br> <strong>For example: mytheme.scss</strong> <div class="border border-dark mb-g overflow-hidden rounded"> <pre class="prettyprint mb-0">
/* #THEME COLOR (variable overrides)
========================================================================== */
$color-primary: #b56a9f;
$color-success: #1dc958;

/* #GLOBAL IMPORTS (You must not remove this!)
========================================================================== */
@@import './src/scss/_imports/_theme-modules-import';

/* #My Unique Changes (you can also add CSS overrides below)
========================================================================== */
$nav-background: #ed1c34; //&lt;-- my unique change here</pre> </div> </li> <li> <strong>Once done, re-run the <a href="#gulp">gulp build command</a> to generate your unique CSS skin. You can then use the methods above to load the new skin to your application.</strong> </li> </ol> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Seed & Slim Projects <a href="javascript:void(0);" name="seedslim"></a> <small> @Settings.AppName's Slim and Seed projects come with reduced functionality: Seed keeps only the barebone essentials to get the project up and running, while Slim strips the functionality and CSS features down even further. If the Full Project were the size of a potato, the Seed Project would be the size of potato wedges, and the Slim Project would be Lays thinly sliced potato chips.
</small> </h2> <table class="table table-bordered table-hover table-striped m-0"> <thead> <tr> <th>Feature</th> <th class="text-center"><span class="badge badge-danger width-5 fs-lg">Slim</span></th> <th class="text-center"><span class="badge badge-warning width-5 fs-lg">Seed</span></th> <th class="text-center"><span class="badge badge-info width-5 fs-lg">Full</span></th> </tr> </thead> <tbody> <tr> <td>Bootstrap components</td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> </tr> <tr> <td>Dependencies</td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> </tr> <tr> <td>Core plugins</td> <td class="text-center">Partial*</td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> </tr> <tr> <td>Extension plugins</td> <td class="text-center"><i class="fal fa-times-circle fa-2x color-danger-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> </tr> <tr> <td>App Settings and Layouts</td> <td class="text-center"><i class="fal fa-times-circle fa-2x color-danger-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> </tr> <tr> <td>Form Plugins</td> <td class="text-center"><i class="fal fa-times-circle fa-2x color-danger-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> </tr> <tr> <td>Sample Pages</td> <td class="text-center"><i class="fal fa-times-circle fa-2x color-danger-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> </tr> <tr> <td>Datatables</td> <td class="text-center"><i class="fal fa-times-circle fa-2x color-danger-800"></i></td> <td class="text-center"><i class="fal fa-times-circle fa-2x color-danger-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> </tr> <tr> <td>Charts &amp; Statistics</td> <td class="text-center"><i class="fal fa-times-circle fa-2x color-danger-800"></i></td> <td class="text-center"><i class="fal fa-times-circle fa-2x color-danger-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> </tr> <tr> <td>Notifications & Other plugins</td> <td class="text-center"><i class="fal fa-times-circle fa-2x color-danger-800"></i></td> <td class="text-center"><i class="fal fa-times-circle fa-2x color-danger-800"></i></td> <td class="text-center"><i class="fal fa-check-circle fa-2x color-success-800"></i></td> </tr> </tbody> </table> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0" name="filestructure"> File Structure <a 
href="javascript:void(0);" name="filestructure"></a> <small> This webapp toolkit comes with a flexible file structure that can be easily used for small to large scope projects. This section will explains app's file structure and how to adapt it to your project. </small> </h2> <ul class="fs-lg fw-500 list-style-none pl-2"> <li> <i class="fad fa-folder-open color-warning-700"></i> smartadmin-html-full <ul> <li><i class="fad fa-folder color-warning-700"></i> build <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp;all the scripts for gulp</span></li> <li><i class="fad fa-folder color-warning-700"></i> dist <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; compiled HTML and assets</span></li> <li> <i class="fad fa-folder-open color-success-600"></i> src <ul> <li><i class="fad fa-folder color-warning-700"></i> content <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; content for all pages</span></li> <li> <i class="fad fa-folder-open color-success-600"></i> custom <ul> <li><i class="fad fa-folder color-warning-800"></i> demo-data <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; data for demo pages and graphs</span></li> <li><i class="fad fa-folder color-warning-700"></i> docs-data <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; data for plugins and documentation</span></li> <li><i class="fad fa-folder color-warning-600"></i> lang <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; data for language files</span></li> <li><i class="fad fa-folder color-warning-500"></i> media <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; video and sound files</span></li> <li><i class="fad fa-folder color-warning-400"></i> plugins <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; scripts for all custom plugins and non-destructive manipulation</span></li> <li><i class="fad fa-folder color-warning-300"></i> webfonts <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; all font icon files</span></li> </ul> </li> <li><i class="fad fa-folder color-warning-700"></i> image <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; all image contents</span></li> <li><i class="fad fa-folder color-warning-700"></i> js <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; core javascript contents</span></li> <li> <i class="fad fa-folder-open color-success-600"></i> scss <ul> <li><i class="fad fa-folder color-warning-800"></i> _extensions <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; customization for extended plugins</span></li> <li><i class="fad fa-folder color-warning-700"></i> _imports <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; global and theme imports as a master file refernce</span></li> <li><i class="fad fa-folder color-warning-600"></i> _mixins <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; scss mixins and shortclasses</span></li> <li><i class="fad fa-folder color-warning-500"></i> _modules <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; SCSS modular components for the theme</span></li> <li><i class="fad fa-folder color-warning-400"></i> _themes <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; theme files for various theme colors</span></li> <li><i class="fad fa-folder color-warning-300"></i> ...</li> </ul> </li> <li> <i class="fad fa-folder-open color-success-600"></i> template <ul> <li><i class="fad fa-folder color-warning-700"></i> layouts <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; various hbs layout templates </span></li> <li><i class="fad fa-folder color-warning-600"></i> include <span class="text-muted 
fs-sm fw-400">&nbsp; --- &nbsp; hbs includes, such as head, body, nav, content area, etc </span></li> <li><i class="fad fa-folder color-warning-500"></i> _helpers <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; hbs conditions </span></li> </ul> </li> <li><i class="fad fa-file"></i> navigation.json <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; JSON file that auto generates nav.hbs file </span> </li> </ul> </li> <li><i class="fad fa-file"></i> build.json <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; structure of the entire project and file components, use this file to restructure your project files </span></li> <li><i class="fad fa-file"></i> package.json <span class="text-muted fs-sm fw-400">&nbsp; --- &nbsp; list of all npm packages for the app </span></li> </ul> </li> <li><i class="fad fa-folder color-warning-700"></i> smartadmin-html-seed</li> <li><i class="fad fa-folder color-warning-600"></i> smartadmin-html-slim</li> <li><i class="fad fa-folder color-warning-500"></i> tests</li> </ul> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0" name="pluginreference"> Plugin Reference & Dependency <a href="javascript:void(0);" name="pluginreference"></a> <small> Reference for all plugins within @Settings.AppName </small> </h2> <p> All plugins are categorized into five (5) categories. <span class="badge bg-fusion-500 fs-nano my-1">DEPENDENCY</span> and <span class="badge badge-danger fs-nano my-1">CORE</span> are essential plugins, removing any of these plugins can cause the application to crash or become unstable. <span class="badge badge-success fs-nano my-1">EXTENSION</span> are refered to plugins that enhances the user experience, this could range from SmartPanels, SlimScroll, BootBox, etc. You are free to remove these plugins without crashing the application. <span class="badge badge-warning fs-nano my-1">RECOMMENDED</span> increases application's performance, removing this will not crash the application but users may experience delay in animation or delayed overall responses from the application. <span class="badge badge-primary fs-nano my-1">ADDON</span> are plugins added on top as a development need or requirement, you are free to add or delete any addons </p> <br> <p class="panel-tag fw-500"> To see all list of Core plugins for a barebone version please visit the <a href="/plugin/faq" class="fw-500"> Plugin FAQ</a> page. 
</p> <div class="mb-g"> <h4><i class="fal fa-exclamation text-danger"></i> Select a plugin from the list below for full documentation</h4> <p class="text-faded fs-nano">Disclaimer: Third party plugins are left unchanged, all third party plugins have limited support (to design elements only), and you must refer to the documentation via plugin's official website</p> <select class="js-plugins custom-select form-control" style="width:15rem;"> <option value="">-- Select Plugin --</option> </select> </div> <div id="js-display" class="d-none"> <h5 class="fw-700"> <span class="js-plugin-name"></span> </h5> <p> <span class="js-plugin-description"></span> </p> <p> <strong>Documentation:</strong> <br> <a href="" class="js-plugin-url" target="_blank"></a> </p> <p> <strong>License:</strong> <br> <span class="js-plugin-license"></span> </p> </div> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> RTL Support <a href="javascript:void(0);" name="rtlsupport"></a> <small> Right to left text support for @Settings.AppName </small> </h2> <p>You can turn on your RTL support by enabling the RTL CSS, by adding the suffix <code>*-rtl.css</code> you can turn on your RTL support. You can also have RTL on by default through your build.json config file.</p> <div class="fw-500 mb-1"> Change via CSS from your HTML <code>head</code> </div> <div class="border border-dark mb-g overflow-hidden rounded"> <pre class="prettyprint mb-0"> link rel="stylesheet" media="screen, print" href="~/css/vendors.bundle-rtl.css" &lt;-- just add the '-rtl' suffix link rel="stylesheet" media="screen, print" href="~/css/app.bundle-rtl.css"</pre> </div> <div class="fw-500 mb-1"> Change via Gulp and <code>build.json</code> file </div> <div class="border border-dark mb-g overflow-hidden rounded"> <pre class="prettyprint mb-0"> "compile": { "jsUglify": false, "cssMinify": false, "jsSourcemaps": false, "cssSourcemaps": true, "autoprefixer": true, "seedOnly": false, "rtl": true &lt;-- change to 'true' },</pre> </div> <p class="fw-500 fs-md">Once done, save your changes and run your <a href="#gulp">gulp build command</a> to generate the new CSS files.</p> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Changing Component Colors <a href="javascript:void(0);" name="changingcomponentcolors"></a> <small> How to change component colors in @Settings.AppName using Chrome's developer tools and Gulp </small> </h2> <div class="panel-tag"> In this example, we will change the 'left panel' background color. 
For this example to work, you first need to make sure your <a href="#overview">gulp build</a> is working correctly and you have the following enabled from <code>build.json</code> <br><br> <span class="fw-500 font-italic">Inside your build.json file</span> <pre class="prettyprint mb-0 bg-transparent fs-sm">
"compile": {
    "jsUglify": false,
    "cssMinify": false,
    "jsSourcemaps": false,
    "cssSourcemaps": true, &lt;-- change to 'true'
    "autoprefixer": true,
    "seedOnly": false,
    "rtl": false
},
</pre> </div> <ol class="pl-3"> <li> <span class="fw-500 mb-2 d-block"> Load your website on the latest version of <a href="https://www.google.ca/chrome/" target="_blank"> Google Chrome </a> and press F12 to load the <a href="https://developers.google.com/web/tools/chrome-devtools/open" target="_blank"> developer toolkit </a> </span> <div class="row"> <div class="col col-lg-9 col-xl-7"> <div class="demo-window rounded height-sm mb-g"> <div class="demo-window-content"> <img src="~/img/demo/chromedevtools-1.png"> </div> </div> </div> </div> </li> <li> <span class="fw-500 mt-4 mb-2 d-block"> Click on the Elements tab and then locate the DOM element (eg. <code>page-sidebar</code>) you wish to manipulate; once selected, it will reveal the SCSS file from the CSS maps (to the right). Locate the reference for the CSS class (eg. <code>.page-sidebar</code>) to reveal the SCSS file name where these classes reside. In the example below it resides inside <code>_placeholder.scss</code> on line 29. Click on the file name as shown in the image below... </span> <div class="row"> <div class="col col-lg-9 col-xl-7"> <div class="demo-window rounded height-sm mb-g"> <div class="demo-window-content"> <img src="~/img/demo/chromedevtools-2.png"> </div> </div> </div> </div> </li> <li> <span class="fw-500 mt-4 mb-2 d-block"> When you open the <code>_placeholder.scss</code> file from the previous window, you will see the variable you need to change for the left navigation background color. In this example it will be <code>$nav-background</code> </span> <div class="row"> <div class="col col-lg-9 col-xl-7"> <div class="demo-window rounded height-sm mb-g"> <div class="demo-window-content"> <img src="~/img/demo/chromedevtools-3.png"> </div> </div> </div> </div> </li> <li> <span class="fw-500 mt-4 mb-2 d-block"> Open <code>\smartadmin-html-full\src\scss\_modules\variables.scss</code> and locate the key variable you wish to change, in this case <code>$nav-background</code>; make sure to also change <code>$nav-background-shade</code> to your liking. </span> <div class="row"> <div class="col col-lg-9 col-xl-7"> <div class="demo-window rounded height-sm mb-g"> <div class="demo-window-content"> <img src="~/img/demo/chromedevtools-4.png"> </div> </div> </div> </div> </li> <li> <span class="fw-500 mt-4 mb-2 d-block"> Once done, save your changes and run your <a href="#gulp">gulp build command</a> to generate the new CSS files. </span> </li> </ol> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Product Support <a href="javascript:void(0);" name="productsupport"></a> <small> Customer support for @Settings.AppName </small> </h2> <p>All support questions related to HTML and/or CSS will be honored. Issues that are encountered on the Seed versions of specific flavors of SmartAdmin are covered by their <a href="/intel/introduction" target="_blank">respective authors</a>, but will be limited to HTML and/or CSS issues.
If you need assistance with a technical issue that is not currently covered by the FAQ, you will need to have purchased a Full license of that flavor and contact the respective author for further assistance. The Full version links will be added to the <a href="/info/appflavors" target="_blank">Flavors</a> page once they are made available.</p> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Browser Support <a href="javascript:void(0);" name="browsersupport"></a> <small> @Settings.AppName supports all the browsers supported by Bootstrap 4, which includes: </small> </h2> <ul class="list-spaced list-unstyled fs-lg"> <li> <i class="fab fa-internet-explorer mr-2" style="color: #42c0fb"></i> IE10+ </li> <li> <i class="fab fa-firefox mr-2" style="color: #fe820a"></i> Firefox (latest) </li> <li> <i class="fab fa-safari mr-2" style="color: #448aff"></i> Safari (latest) </li> <li> <i class="fab fa-chrome mr-2" style="color: #458bc2"></i> Chrome (latest) </li> <li> <i class="fab fa-opera mr-2" style="color: #ff0000"></i> Opera (latest) </li> </ul> <p> <strong>Note: Certain pages and plugins are not supported in IE10; when this happens, we display a message in the IE10 browser to notify the user.</strong> </p> </div> </div> <div class="card mb-g p-2"> <div class="card-body"> <h2 class="fw-700 m-0"> Components <a href="javascript:void(0);" name="components"></a> <small> @Settings.AppName comes with full documentation for all components </small> </h2> <p> Each component page documents its usage and how the component works. For all Bootstrap components we have demonstrated their usage with examples, so you can either copy from the HTML source or refer to the docs directly on the Bootstrap website. </p> <p class="font-weight-bold"> To see documentation on a particular component, please refer to the component page. For third party plugin documentation, you will need to refer to the third party plugin's website, which can be located in the <a href="#pluginreference">Plugin Reference & Dependency</a> section. </p> </div> </div> } @section ScriptsBlock { <script src="https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js"></script> <script>
    $(document).ready(function () {
        var jsdisplay = $('#js-display');
        var url = "/media/data/plugin-reference.json";
        $.getJSON(url, function (data) {
            $.each(data, function (index, value) {
                $('.js-plugins').append('<option value="' + value.plugin + '" data-description="' + value.description + '" data-url="' + value.url + '" data-license="' + value.license + '">' + value.plugin + '</option>');
            });
        });
        // SHOW SELECTED VALUE.
        $('.js-plugins').change(function () {
            var plugin = this.options[this.selectedIndex].text;
            var url = $('select.js-plugins').find(':selected').data('url');
            var license = $('select.js-plugins').find(':selected').data('license');
            var description = $('select.js-plugins').find(':selected').data('description');
            jsdisplay.removeClass().addClass('d-block');
            $('.js-plugin-name').text(plugin);
            $('.js-plugin-url').text(url);
            $('.js-plugin-url').attr('href', url);
            $('.js-plugin-license').text(license);
            $('.js-plugin-description').text(description);
        });
    });
</script> }
@model Xms.Web.Customize.Models.SolutionComponentDialogModel
@{ Layout = null; }
<!-- (Modal) -->
<div class="modal fade" id="solutionComponentModal" tabindex="-1" role="dialog" aria-labelledby="solutionComponentModalLabel" aria-hidden="true">
    <link href="~/content/js/jquery-ui-1.10.3/themes/base/jquery.ui.all.css?v=@app.PlatformSettings.VersionNumber" rel="stylesheet">
    <link href="~/content/js/grid/pqgrid.dev.css?v=@app.PlatformSettings.VersionNumber" rel="stylesheet">
    <div class="modal-dialog">
        <div class="modal-content">
            <div class="modal-header">
                <button type="button" class="close" data-dismiss="modal" aria-hidden="true"> × </button>
                <h4 class="modal-title" id="solutionComponentModalLabel"> <span class="glyphicon glyphicon-th"></span> Components </h4>
            </div>
            <div class="modal-body">
                <div class="dialog-datagrid-view" id="datagriddialogview"></div>
            </div>
            <div class="modal-footer">
                <button type="button" class="btn btn-default" data-dismiss="modal"> <span class="glyphicon glyphicon-remove"></span> Close </button>
                <button type="button" class="btn btn-primary" onclick="SolutionComponentModel.dialog_return()"> <span class="glyphicon glyphicon-ok"></span> OK </button>
            </div>
            <script src="/content/js/jquery-ui-1.10.3/ui/jquery.ui.button.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script src="/content/js/jquery-ui-1.10.3/ui/jquery.ui.mouse.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script src="/content/js/jquery-ui-1.10.3/ui/jquery.ui.autocomplete.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script src="/content/js/jquery-ui-1.10.3/ui/jquery.ui.draggable.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script src="/content/js/jquery-ui-1.10.3/ui/jquery.ui.resizable.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script src="/content/js/jquery-ui-1.10.3/ui/jquery.ui.tooltip.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script src="/content/js/grid/pqgrid.dev.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script src="/content/js/grid/localize/pq-localize-zh.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script src="/content/js/cdatagrid.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script src="/content/js/jquery.bootpag.min.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script src="/content/js/jquery.form.js?v=@app.PlatformSettings.VersionNumber"></script>
            <script>
                var SolutionComponentModel = {
                    dialogid: '#solutionComponentModal',
                    dialog: $('#solutionComponentModal'),
                    datatable: $("#solutionComponentModal .datatableload"),
                    gridviewid: "#solutionComponentModal #gridview",
                    gridview: $("#solutionComponentModal #gridview"),
                    searchform: $('#solutionComponentModal #searchForm'),
                    pagesection: $('#page-selection-Dialog'),
                    pageUrl: '@app.Url',
                    callback: @Html.Raw(Model.CallBack),
                    inputid: '@Html.Raw(Model.InputId)',
                    solutionid: '@Html.Raw(Model.SolutionId)',
                    serviceuri: '@Html.Raw(Model.ComponentDescriptor.ComponentsEndpoint)',
                    ajaxgrid_reset: function () {
                        SolutionComponentModel.pag_init();
                        Xms.Web.DataTable($("#solutionComponentModal .datatableload"));
                    },
                    pag_init: function () {
                        var page_section = $('#page-selection-Dialog');
                        page_section.bootpag({
                            total: page_section.attr('data-total'),
                            maxVisible: 5,
                            page: page_section.attr('data-page'),
                            leaps: false,
                            prev: '&lsaquo;',
                            next: '&rsaquo;',
                            firstLastUse: true,
                            first: '&laquo;',
                            last: '&raquo;'
                        }).on("page", function (event, /* page number here */ num) {
                            event.preventDefault();
                            var url = $.setUrlParam(SolutionComponentModel.pageUrl, 'page', num);
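                            // (doc annotation) ajaxLoad pulls in the grid partial for the
                            // requested page; once rendered, ajaxgrid_reset re-binds the
                            // pager and datatable behaviours defined above.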
SolutionComponentModel.gridview.ajaxLoad(url, SolutionComponentModel.gridviewid, function (response) { SolutionComponentModel.ajaxgrid_reset(); });
                return false;
            });
        },
        dialog_return: function () {
            var result = new Array();
            var $datagriddialogview = $('#datagriddialogview');
            var $Grid = $datagriddialogview.cDatagrid('getGrid');
            $datagriddialogview.find('.pq-grid-cont-inner:first').find("input[name=recordid]:checked").each(function (i, n) {
                var obj = new Object();
                obj.id = $(n).val();
                var data = $Grid.pqGrid('getRowData', { rowIndxPage: i });
                obj.name = data.localizedname;
                result.push(obj);
            });
            var dialog = $(SolutionComponentModel.dialogid);
            if (dialog.data().OpenDialogCallback) {
                dialog.data().OpenDialogCallback(result, SolutionComponentModel.inputid, SolutionComponentModel);
            } else {
                SolutionComponentModel.callback && SolutionComponentModel.callback(result, SolutionComponentModel.inputid);
            }
            SolutionComponentModel.dialog.modal('hide');
        }
    };
    $(function () {
        SolutionComponentModel.ajaxgrid_reset();
        SolutionComponentModel.searchform.ajaxSearch(SolutionComponentModel.gridviewid, SolutionComponentModel.ajaxgrid_reset);
        SolutionComponentModel.datatable.ajaxTable();
        SolutionComponentModel.dialog.modal({ backdrop: 'static' });
        SolutionComponentModel.dialog.on('hidden.bs.modal', function () { Xms.Web.CloseDialog(SolutionComponentModel.dialogid); });
        $(SolutionComponentModel.dialogid + ' button[name=createBtn]').off('click').on('click', null, function (e) { SolutionComponentModel.CreateRecord(); });
        //Xms.Ajax.GetJson(SolutionComponentModel.serviceuri + '?page=1&pagesize=10&insolution=false&solutionid=' + SolutionComponentModel.solutionid, null, function (response) {
        //    console.log('components', response);
        //});
        // Sample response: "{"currentpage":1,"items":[{"objectid":"4cdb41ab-cc63-4b86-b611-9b8b457bb6cb","name":"Customer.js","localizedname":"Customer.js","componenttypename":"WebResource"}],"itemsperpage":10,"totalitems":1,"totalpages":1}"
        var columnConfigs = [
            { "dataIndx": "name", "title": "名称", "dataType": "string", "width": 100, "isrequired": true, "isloged": false, "iscustomfield": false, "iscustomizable": false, "issecured": false, "isprimaryfield": false, "attributetypename": "string" },
            { "dataIndx": "localizedname", "title": "显示名称", "dataType": "string", "width": 100, "isrequired": true, "isprimaryfield": false, "attributetypename": "string" },
            { "dataIndx": "componenttypename", "title": "类型", "dataType": "string", "width": 100, "isrequired": true, "isprimaryfield": false, "attributetypename": "string" },
            { title: "选择", editable: false, minWidth: 165, sortable: false, render: function (ui) { return '<a class="btn btn-link btn-xs rowclickok" href="javascript:;" ><span class="glyphicon glyphicon-ok"></span></a>'; } }
        ];
        var datagridconfig = {
            height: 300,
            // How the grid fetches its data
            getDataUrl: function () { return ORG_SERVERURL + SolutionComponentModel.serviceuri + '?page=1&pagesize=10&insolution=false&solutionid=' + SolutionComponentModel.solutionid; },
            filterColModel: function (opts) { opts.colModel = columnConfigs; return opts; },
            rowDblClick: function (event, ui) {
                console.log($(event.target));
                console.log(ui);
                var $tr = ui.$tr;
                $tr.find('input[name="recordid"]').prop('checked', true);
                SolutionComponentModel.dialog_return();
            },
            columnFilter: function (items) {
                console.log(items);
                $.each(items, function (key, item) { item.editable = false; });
                // Prepend the checkbox selection column
                items.unshift({ title: "", dataIndx: "recordid", maxWidth: 48, minWidth: 48, align: "center", resizable: false, type: 'checkBoxSelection', cls: 'ui-state-default', sortable: false, editable: false, render: function (ui) { console.log(ui); return '<input type="checkbox" value="' + ui.rowData.objectid + '" name="recordid" class="">'; }, cb: { all: true, header: true } });
                return items;
            }
        };
        datagridconfig.height = 300;
        // console.log(itemtmpl);
        setTimeout(function () { $('.dialog-datagrid-view').xmsDataTable(datagridconfig); }, 100);
        $('.dialog-datagrid-view').off('click').on('click', '.rowclickok', function (e) {
            var $tr = $(this).parents('tr:first');
            $tr.find('input[name="recordid"]').prop('checked', true);
            SolutionComponentModel.dialog_return();
        });
    });
    </script> </div><!-- /.modal-content --> </div><!-- /.modal-dialog --> </div><!-- /.modal -->
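A host page consumes this dialog either through an OpenDialogCallback attached to the modal's data, or through the Model.CallBack function; a minimal sketch of the first route, assuming a hypothetical renderSelection helper (result items carry {id, name}, as built in dialog_return above):

// Minimal consumer sketch (not from the original sources): register a callback
// on the modal's data before the selection is confirmed; dialog_return invokes
// it with the checked rows as [{ id, name }, ...].
$('#solutionComponentModal').data('OpenDialogCallback', function (result, inputId, model) {
    result.forEach(function (item) {
        renderSelection(inputId, item.id, item.name); // hypothetical rendering helper
    });
});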
@using NewLife.Common;
@using NewLife.Cube.WebMiddleware;
@using NewLife.Cube.Charts;
@{
    var set = NewLife.Cube.Setting.Current;
    var set2 = ViewBag.PageSetting as PageSetting;
    var title = ViewBag.Title + "";
    if (title != "" && !title.Contains(" - ")) { title += " - " + NewLife.Common.SysConfig.Current.DisplayName; }
    var runInfo = !set.ShowRunTime ? "" : RunTimeMiddleware.GetInfo(this.Context);
    var res = set.ResourceUrl;
    if (String.IsNullOrEmpty(res)) res = "/Content";
    res = res.TrimEnd('/');
    var charts = ViewBag.Charts as ECharts[];
    var charts2 = ViewBag.Charts2 as ECharts[];
    var ver = typeof(ControllerBaseX).Assembly.GetName().Version;
}
<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
    <meta name="description" content="新生命团队,NewLife,魔方,Cube,XCode,XIoT" />
    @*The three meta tags above *must* come first; any other content *must* follow them!*@
    <title>@title</title>
    <link href="~/favicon.ico" rel="shortcut icon" type="image/x-icon">
    <script src="~/metronic/plugins/global/plugins.bundle.js"></script>
    <!-- Page inline styles -->
    @RenderSection("css", required: false)
    @if (set != null && set.BootstrapSelect)
    {
        <link rel="stylesheet" href="@res/bootstrap/css/bootstrap-multiselect.css">
    }
    <!-- Metronic styles -->
    <!-- Web fonts -->
    <script src="https://cdn.bootcss.com/webfont/1.6.16/webfontloader.js"></script>
    <script>
        WebFont.load({ google: { "families": ["Poppins:300,400,500,600,700", "Roboto:300,400,500,600,700"] }, active: function () { sessionStorage.fonts = true; } });
    </script>
    <link href="~/metronic/plugins/global/plugins.bundle.css" rel="stylesheet" type="text/css" />
    <link href="~/metronic/plugins/custom/prismjs/prismjs.bundle.css" rel="stylesheet" type="text/css" />
    <link href="~/metronic/css/style.bundle.min.css" rel="stylesheet" type="text/css" />
    <link href="~/Content/Cube.css" rel="stylesheet" type="text/css" />
    <link href="@res/artDialog/css/ui-dialog.css" rel="stylesheet" />
    <!-- Custom styles -->
    <link rel="stylesheet" href="@res/Cube.css?v=@ver">
    @if (charts != null && charts.Length > 0 || charts2 != null && charts2.Length > 0)
    {
        <script src="@res/echarts.min.js"></script>
    }
</head>
<body id="kt_body" class="header-mobile-fixed subheader-enabled aside-enabled aside-fixed aside-secondary-enabled page-loading">
    <!-- Begin layout -->
    @await Html.PartialAsync("_Layout_Header_Mobile")
    <div class="d-flex flex-column flex-root">
        <div class="d-flex flex-row flex-column-fluid page">
            @await Html.PartialAsync("_Layout_Aside")
            <div class="d-flex flex-column flex-row-fluid wrapper" id="kt_wrapper">
                <div class="content d-flex flex-column flex-column-fluid" id="kt_content">
                    @await Html.PartialAsync("_Layout_Header")
                    <div class="d-flex flex-column-fluid">
                        <div class="container-fluid">
                            @RenderBody()
                            @if (set.ShowRunTime)
                            {
                                <div class="alert alert-custom alert-default alert-shadow gutter-t"> <div class="alert-text"> @Html.Raw(runInfo) </div> </div>
                            }
                        </div>
                    </div>
                    <!-- footer -->
                </div>
            </div>
        </div>
    </div>
    <!-- End layout -->
    @await Html.PartialAsync("_Layout_LoginUser")
    <div id="kt_scrolltop" class="scrolltop">
        <span class="svg-icon">
            <!--begin::Svg Icon | path:assets/media/svg/icons/Navigation/Up-2.svg-->
            <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="24px" height="24px" viewBox="0 0 24 24" version="1.1"> <g stroke="none" stroke-width="1" fill="none" fill-rule="evenodd"> <polygon points="0 0 24 0 24 24 0 24"></polygon> <rect fill="#000000" opacity="0.3" x="11" y="10" width="2" height="10" rx="1"></rect> <path d="M6.70710678,12.7071068 C6.31658249,13.0976311 5.68341751,13.0976311 5.29289322,12.7071068 C4.90236893,12.3165825 4.90236893,11.6834175 5.29289322,11.2928932 L11.2928932,5.29289322 C11.6714722,4.91431428 12.2810586,4.90106866 12.6757246,5.26284586 L18.6757246,10.7628459 C19.0828436,11.1360383 19.1103465,11.7686056 18.7371541,12.1757246 C18.3639617,12.5828436 17.7313944,12.6103465 17.3242754,12.2371541 L12.0300757,7.38413782 L6.70710678,12.7071068 Z" fill="#000000" fill-rule="nonzero"></path> </g> </svg>
            <!--end::Svg Icon-->
        </span>
    </div>
    <!-- UI configuration variables -->
    <script>
        var KTAppSettings = { "breakpoints": { "sm": 576, "md": 768, "lg": 992, "xl": 1200, "xxl": 1200 }, "colors": { "theme": { "base": { "white": "#ffffff", "primary": "#1BC5BD", "secondary": "#E5EAEE", "success": "#1BC5BD", "info": "#6993FF", "warning": "#FFA800", "danger": "#F64E60", "light": "#F3F6F9", "dark": "#212121" }, "light": { "white": "#ffffff", "primary": "#1BC5BD", "secondary": "#ECF0F3", "success": "#C9F7F5", "info": "#E1E9FF", "warning": "#FFF4DE", "danger": "#FFE2E5", "light": "#F3F6F9", "dark": "#D6D6E0" }, "inverse": { "white": "#ffffff", "primary": "#ffffff", "secondary": "#212121", "success": "#ffffff", "info": "#ffffff", "warning": "#ffffff", "danger": "#ffffff", "light": "#464E5F", "dark": "#ffffff" } }, "gray": { "gray-100": "#F3F6F9", "gray-200": "#ECF0F3", "gray-300": "#E5EAEE", "gray-400": "#D6D6E0", "gray-500": "#B5B5C3", "gray-600": "#80808F", "gray-700": "#464E5F", "gray-800": "#1B283F", "gray-900": "#212121" } }, "font-family": "Poppins" };
    </script>
    <!-- Metronic template scripts -->
    <script src="~/metronic/plugins/custom/prismjs/prismjs.bundle.js"></script>
    <script src="~/metronic/js/scripts.bundle.min.js"></script>
    <!-- Base scripts loaded first [legacy Cube scripts; remove once the migration is complete] -->
    @if (set != null && set.BootstrapSelect)
    {
        <script src="@res/bootstrap/js/bootstrap-multiselect.min.js"></script>
    }
    @RenderSection("scripts", required: false)
    <!-- Enable list header selection -->
    @if (set2.EnableSelect)
    {
        <script>
            $(function () {
                var $toolbarContext = $('.toolbar-batch'),
                    $batchButtons = $('button[data-action="action"], input[data-action="action"]'), // button, input[type=button], and a elements all work
                    $table = $('.table'),
                    $keys = $('input[name="keys"]', $table);
                $table.on('click', '#chkAll', function () {
                    // Select all rows
                    $keys.prop('checked', this.checked);
                    // Enable or disable the batch action buttons
                    $batchButtons.prop('disabled', !this.checked);
                });
                $table.on('click.checked', 'tbody input[name="keys"]', function (e) {
                    // All row checkboxes on the page
                    var allbox = $table.find('tbody :checkbox[name="keys"]');
                    // Row checkboxes currently checked
                    var selecteds = $table.find('tbody :checkbox:checked[name="keys"]');
                    if (selecteds.length > 0) {
                        // Enable or disable the batch action buttons
                        $batchButtons.prop('disabled', false);
                        // Decide the state of #chkAll from whether every row checkbox is selected
                        if (allbox.length == selecteds.length) { $table.find('#chkAll').prop('checked', true); } else { $table.find('#chkAll').prop('checked', false); }
                    } else {
                        $batchButtons.prop('disabled', true);
                        $table.find('#chkAll').prop('checked', false);
                    }
                });
            })
        </script>
    }
    <script>
        // Datetime picker
        $(function () {
            $('.form_datetime').each(function () {
                var df = $(this).attr('dateformat');
                if (!df) {
                    $(this).attr('dateformat', 'yyyy-mm-dd hh:ii:ss');
                    $(this).width(140);
                } else {
                    // Map the C# standard format to the picker's format
                    df = df.replace('mm', 'ii').replace('MM', 'mm').replace('HH', 'hh');
                    $(this).attr('dateformat', df);
                    // Lock the input width according to the date/time format
                    if (df.indexOf('hh') >= 0) $(this).width(140); else $(this).width(80);
                }
                // Infer the picker's visible view range from the date format
                var sv = 2;
                var mv = 2;
                var autoView = $(this).attr('autoView');
                if (autoView != 'false') {
                    df = $(this).attr('dateformat');
                    if (df.indexOf('ii') >= 0) { mv = 0; if (df.indexOf('dd') >= 0) { sv = 2; } else { sv = 1; } }
                    else if (df.indexOf('hh') >= 0) { mv = 1; if (df.indexOf('dd') >= 0) { sv = 2; } else { sv = 1; } }
                    else if (df.indexOf('dd') >= 0) { mv = 2; sv = 2; }
                    else if (df.indexOf('mm') >= 0) { mv = 3; sv = 3; }
                    else if (df.indexOf('yyyy') >= 0) { mv = 4; sv = 4; }
                }
                $(this).attr('startview', sv);
                $(this).attr('minview', mv);
            });
            $("[dateformat]").each(function () {
                $(this).datetimepicker({ format: $(this).attr("dateformat"), autoclose: true, language: "zh-CN", forceParse: false, startView: parseInt($(this).attr('startview')), minView: parseInt($(this).attr('minview')) });
            });
            @if (set != null && set.BootstrapSelect)
            {<text>
            $('.multiselect').each(function () {
                $(this).multiselect({
                    // Only show the filter box when the list has many items
                    enableFiltering: $(this).children().length > 10,
                    buttonClass: 'btn btn-white btn-primary',
                    templates: { button: '<button type="button" class="multiselect dropdown-toggle" data-toggle="dropdown"></button>', ul: '<ul class="multiselect-container dropdown-menu"></ul>', filter: '<li class="multiselect-item filter"><div class="input-group"><span class="input-group-addon"><i class="fa fa-search"></i></span><input class="form-control multiselect-search" type="text"></div></li>', filterClearBtn: '<span class="input-group-btn"><button class="btn btn-default btn-white btn-grey multiselect-clear-filter" type="button"><i class="fa fa-times-circle red2"></i></button></span>', li: '<li><a href="javascript:void(0);"><label></label></a></li>', divider: '<li class="multiselect-item divider"></li>', liGroup: '<li class="multiselect-item group"><label class="multiselect-group"></label></li>' },
                    filterPlaceholder: '搜索',
                    nonSelectedText: '无',
                    allSelectedText: '全选'
                });
            });
            </text>}
            @*Double-click jumps to the edit form; changed in _List_Data_Action.cshtml: @Html.ActionLink("编辑", "Edit", new { id = @Model }, new { @class = "editcell" })*@
            $('tr').dblclick(function () {
                var $this = $(this);
                var row = $this.closest("tr");
                var findcell = row.find('.editcell');
                if (findcell.length > 0) window.location.href = findcell.attr("href");
            });
        });
        // Date-range picker plugin
        $("input[name='dtRange']").daterangepicker(
            {
                // autoApply: true,
                // autoUpdateInput: false,
                // alwaysShowCalendars: true,
                buttonClasses: 'btn', applyClass: 'btn-primary', cancelClass: 'btn-secondary',
                ranges: { '今天': [moment(), moment()], '昨天': [moment().subtract(1, 'days'), moment().subtract(1, 'days')], '近7天': [moment().subtract(7, 'days'), moment()], '近30天': [moment().subtract(29, 'days'), moment()], '这个月': [moment().startOf('month'), moment().endOf('month')], '上个月': [moment().subtract(1, 'month').startOf('month'), moment().subtract(1, 'month').endOf('month')] },
                locale: { format: "YYYY/MM/DD HH:mm", separator: " - ", applyLabel: "确认", cancelLabel: "清空", fromLabel: "开始时间", toLabel: "结束时间", customRangeLabel: "自定义日期范围", daysOfWeek: ["日", "一", "二", "三", "四", "五", "六"], monthNames: ["一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"] }
            }
        ).on('cancel.daterangepicker', function (ev, picker) {
            $("#dtRange").val("请选择日期范围");
            $("#dtStart").val("");
            $("#dtEnd").val("");
        }).on('apply.daterangepicker', function (ev, picker) {
            $("#dtStart").val(picker.startDate.format('YYYY-MM-DD'));
            $("#dtEnd").val(picker.endDate.format('YYYY-MM-DD'));
            $("#dtRange").val(picker.startDate.format('YYYY-MM-DD') + " 至 " + picker.endDate.format('YYYY-MM-DD'));
        });
        // Handle form label width
        window.onresize = function () {
            var ilabel = $("div.item-label:first");
            $("div.bigitem-label").width(ilabel.width()); // Match the label width of all large remark-type fields
        }
        $(function () {
            var ilabel = $("div.item-label:first");
            $("div.bigitem-label").width(ilabel.width()); // Match the label width of all large remark-type fields
        });
    </script>
    <!-- Switch plugin -->
    <script src="@res/bootstrap_switch/bootstrap-switch.min.js"></script>
    <script> $(function () { $("input.chkSwitch").bootstrapSwitch(); }); </script>
    @*artDialog*@
    <script src="@res/artDialog/dist/dialog-plus-min.js"></script>
    <script src="@res/artDialog/dist/dialog.new.life.js"></script>
    @*Modal Ajax delete confirmation*@
    <script src="@res/Cube.js?v=@ver"></script>
</body>
</html>
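Any input carrying the form_datetime class is picked up by the script above on page load; a hedged usage sketch (the field name is illustrative, not from an actual view) showing how a C#-style format attribute is rewritten for the picker:

<!-- Illustrative usage only: the layout script rewrites the C#-style format
     "yyyy-MM-dd HH:mm" into the picker's "yyyy-mm-dd hh:ii" (mm to ii, MM to mm,
     HH to hh) and derives startView/minView from which parts the format contains. -->
<input type="text" class="form_datetime" dateformat="yyyy-MM-dd HH:mm" name="createTime" />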
@page
@model C3Model
@{
    ViewData["Title"] = "C3 Charts";
    ViewData["PageName"] = "statistics_c3";
    ViewData["Category1"] = "Statistics";
    ViewData["Heading"] = "<i class='subheader-icon fal fa-chart-pie'></i> C3 Charts<sup class='badge badge-primary fw-500'>ADDON</sup>";
    ViewData["PageDescription"] = "C3 is a D3-based reusable chart library that makes it easy to generate charts.";
}
@section HeadBlock {
    <link rel="stylesheet" media="screen, print" href="~/css/statistics/c3/c3.css">
}
<div class="alert alert-primary">
    <div class="d-flex flex-start w-100">
        <div class="mr-2 hidden-md-down">
            <span class="icon-stack icon-stack-lg"> <i class="base base-6 icon-stack-3x opacity-100 color-primary-500"></i> <i class="base base-10 icon-stack-2x opacity-100 color-primary-300 fa-flip-vertical"></i> <i class="ni ni-blog-read icon-stack-1x opacity-100 color-white"></i> </span>
        </div>
        <div class="d-flex flex-fill">
            <div class="flex-fill">
                <span class="h5">Features:</span>
                <p> C3 makes it easy to generate D3-based charts by wrapping the code required to construct the entire chart, so you don't need to write D3 code yourself. The plugin adds classes to each element it generates, so you can define custom styles by class, and you can extend the structure directly with D3. It also provides a variety of APIs and callbacks for accessing the state of the chart. </p>
                <p class="m-0"> Find more examples and guidelines in C3's <a href="https://c3js.org/examples.html" target="_blank">official documentation</a> and <a href="https://groups.google.com/forum/#!forum/c3js" target="_blank">community support</a> forum. </p>
            </div>
        </div>
    </div>
</div>
<div class="row"> <div class="col-xl-12"> <div class="card m-auto border mb-g"> <div class="card-body position-relative"> <h3 class="mb-4"> Kitchen Sink Interactive Demo <small> See how easy it is to manipulate and transform graphs using the c3 engine!
</small> </h3> <div class="position-absolute pos-top pos-right mt-5 mr-5"> <div class="border-faded border-left-0 border-top-0 border-bottom-0" id="playDemo"> <button class="btn btn-success" onclick="startDemo();"> <i class="@(Settings.Theme.IconPrefix) fa-play mr-sm-2"></i><span class="hidden-sm-down">Play Demo</span> </button> </div> <div class="border-faded border-left-0 border-top-0 border-bottom-0" id="pauseDemo" style="display:none;"> <button class="btn btn-danger" onclick="pauseDemo();"> <i class="@(Settings.Theme.IconPrefix) fa-pause mr-sm-2"></i> <span class="hidden-sm-down">Pause Demo</span> </button> </div> </div> <div class="d-flex position-absolute pos-left pos-right mt-5 align-items-center justify-content-center" style="z-index:1"> <div id="message" class="bg-fusion-100 rounded"></div> </div> <div id='chart' class="mt-2"></div> </div> <div class="card-footer p-0"> <div class="progress rounded-0" style="height: 3px;"> <div id="demo-progress" class="progress-bar bg-primary-500" role="progressbar" style="width:100%; opacity: 0.5"></div> </div> </div> </div> </div> <div class="col-xl-12"> <div class="row"> <div class="col-xl-6"> <div id="panel-1" class="panel"> <div class="panel-hdr"> <h2> Simple <span class="fw-300"><i>Line</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> Line chart with sequential data </div> <div id="simpleLine" style="width:100%; height:300px;"></div> </div> </div> </div> </div> <div class="col-xl-6"> <div id="panel-2" class="panel"> <div class="panel-hdr"> <h2> Line <span class="fw-300"><i>Regions</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> Set regions for each data with style using the <code>regions</code> option </div> <div id="linleRegions" style="width:100%; height:300px;"></div> </div> </div> </div> </div> </div> </div> <div class="col-xl-12"> <div class="row"> <div class="col-xl-6"> <div id="panel-3" class="panel"> <div class="panel-hdr"> <h2> Time <span class="fw-300"><i>Series</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div 
class="panel-tag"> Simple line chart with timeseries data using <code>xFormat</code> and <code>axis</code> options </div> <div id="timeSeries" style="width:100%; height:300px;"></div> </div> </div> </div> </div> <div class="col-xl-6"> <div id="panel-4" class="panel"> <div class="panel-hdr"> <h2> Spline <span class="fw-300"><i>Line</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> Display as Spline Chart </div> <div id="splilneLine" style="width:100%; height:300px;"></div> </div> </div> </div> </div> </div> </div> <div class="col-xl-12"> <div class="row"> <div class="col-xl-6"> <div id="panel-5" class="panel"> <div class="panel-hdr"> <h2> Scatter <span class="fw-300"><i>Chart</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag d-flex"> Display as Scatter Plot </div> <div id="scatterChart" style="width:100%; height:300px;"></div> <div class="text-right"> <button id="scatterChartLoad" onclick="scatterChartLoad();" class="btn btn-sm btn-dark ml-auto">Load New Data</button> </div> </div> </div> </div> </div> <div class="col-xl-6"> <div id="panel-6" class="panel"> <div class="panel-hdr"> <h2> Bar <span class="fw-300"><i>Chart</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> Display as Bar Chart </div> <div id="barChart" style="width:100%; height:300px;"></div> <div class="text-right"> <button id="barChartLoad" onclick="barChartLoad();" class="btn btn-sm btn-dark ml-auto">Load New Data</button> </div> </div> </div> </div> </div> </div> </div> <div class="col-xl-12"> <div class="row"> <div class="col-xl-6"> <div id="panel-7" class="panel"> <div class="panel-hdr"> <h2> Stacked <span class="fw-300"><i>Bar</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" 
data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> Display as Stacked Bar Chart </div> <div id="stackedBar" style="width:100%; height:300px;"></div> <div class="text-right"> <button id="stackedBarLoad" onclick="stackedBarLoad();" class="btn btn-sm btn-dark ml-auto">Load New Data</button> </div> </div> </div> </div> </div> <div class="col-xl-6"> <div id="panel-8" class="panel"> <div class="panel-hdr"> <h2> Step <span class="fw-300"><i>Chart</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> An example of area and line Step Charts </div> <div id="stepChart" style="width:100%; height:300px;"></div> <div class="text-right"> <button id="stepChartLoad" onclick="stepChartLoad();" class="btn btn-sm btn-dark ml-auto">Load New Data</button> </div> </div> </div> </div> </div> </div> </div> <div class="col-xl-12"> <div class="row"> <div class="col-xl-6"> <div id="panel-9" class="panel"> <div class="panel-hdr"> <h2> Pie <span class="fw-300"><i>Chart</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> Display as Pie Chart </div> <div id="pieChart" style="width:100%; height:300px;"></div> <div class="text-right"> <button id="pieChartUnload" onclick="pieChartUnload();" class="btn btn-sm btn-dark ml-auto">Unload Data</button> </div> </div> </div> </div> </div> <div class="col-xl-6"> <div id="panel-10" class="panel"> <div class="panel-hdr"> <h2> Donut <span class="fw-300"><i>Chart</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> Donut chart example </div> <div id="donutChart" style="width:100%; height:300px;"></div> <div class="text-right"> <button id="donutChartUnload" onclick="donutChartUnload();" class="btn btn-sm btn-dark ml-auto">Unload Data</button> </div> </div> </div> </div> </div> </div> </div> <div class="col-xl-12"> <div class="row"> <div class="col-xl-6"> <div id="panel-11" class="panel"> <div class="panel-hdr"> <h2> 
Combination <span class="fw-300"><i>Chart</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> An example of multiple charts in one canvas - "all in one" </div> <div id="combinationChart" style="width:100%; height:300px;"></div> </div> </div> </div> </div> <div class="col-xl-6"> <div id="panel-12" class="panel"> <div class="panel-hdr"> <h2> Interactive <span class="fw-300"><i>Mouse wheel</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> Zoom by mouse wheel event and slide by drag </div> <div id="interactiveWheel" style="width:100%; height:300px;"></div> </div> </div> </div> </div> </div> </div> <div class="col-xl-12"> <div id="panel-13" class="panel"> <div class="panel-hdr"> <h2> Interactive <span class="fw-300"><i>Example</i></span> </h2> <div class="panel-toolbar"> <button class="btn btn-panel" data-action="panel-collapse" data-toggle="tooltip" data-offset="0,10" data-original-title="Collapse"></button> <button class="btn btn-panel" data-action="panel-fullscreen" data-toggle="tooltip" data-offset="0,10" data-original-title="Fullscreen"></button> <button class="btn btn-panel" data-action="panel-close" data-toggle="tooltip" data-offset="0,10" data-original-title="Close"></button> </div> </div> <div class="panel-container show"> <div class="panel-content"> <div class="panel-tag"> Show sub chart for zoom and selection range </div> <div id="interactiveExample" style="width:100%; height:350px;"></div> </div> </div> </div> </div> </div> @section ScriptsBlock { <script src="~/js/statistics/d3/d3.js"></script> <script src="~/js/statistics/c3/c3.js"></script> <script src="~/js/statistics/demo-data/demo-c3.js"></script> <script> var colors = [color.success._500, color.danger._500, color.info._500, color.primary._500, color.warning._500]; var simpleLine = c3.generate({ bindto: "#simpleLine", data: { columns: [ ["data1", 100, 165, 140, 270, 200, 140, 220], ["data2", 110, 80, 100, 85, 125, 90, 100] ] }, color: { pattern: colors } }); var linleRegions = c3.generate({ bindto: "#linleRegions", data: { columns: [ ['data1', 30, 200, 100, 400, 150, 250], ['data2', 50, 20, 10, 40, 15, 25] ], regions: { 'data1': [{'start':1, 'end':2, 'style':'dashed'},{'start':3}], // currently 'dashed' style only 'data2': [{'end':3}] } }, color: { pattern: colors } }); var timeSeries = c3.generate({ bindto: "#timeSeries", data: { x: 'x', xFormat: '%Y', columns: [ ['x', '2010', '2011', '2012', '2013', '2014', '2015'], ['data1', 30, 200, 100, 400, 150, 250], ['data2', 130, 340, 200, 500, 250, 350] ] }, color: { 
pattern: colors }, axis: { x: { type: 'timeseries', // if true, treat x value as localtime (Default) // if false, convert to UTC internally localtime: false, tick: { format: '%Y-%m-%d %H:%M:%S' } } } }); var splilneLine = c3.generate({ bindto: "#splilneLine", data: { columns: [ ['data1', 30, 200, 100, 400, 150, 250], ['data2', 130, 100, 140, 200, 150, 50] ], type: 'spline' }, color: { pattern: colors } }); var scatterChart = c3.generate({ bindto: "#scatterChart", data: { xs: { setosa: 'setosa_x', versicolor: 'versicolor_x', }, // iris data from R columns: [ ["setosa_x", 3.5, 3.0, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3.0, 3.0, 4.0, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3.0, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3.0, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3.0, 3.8, 3.2, 3.7, 3.3], ["versicolor_x", 3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2.0, 3.0, 2.2, 2.9, 2.9, 3.1, 3.0, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3.0, 2.8, 3.0, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3.0, 3.4, 3.1, 2.3, 3.0, 2.5, 2.6, 3.0, 2.6, 2.3, 2.7, 3.0, 2.9, 2.9, 2.5, 2.8], ["setosa", 0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2], ["versicolor", 1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1.0, 1.3, 1.4, 1.0, 1.5, 1.0, 1.4, 1.3, 1.4, 1.5, 1.0, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1.0, 1.1, 1.0, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1.0, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3], ], type: 'scatter' }, color: { pattern: colors }, axis: { x: { label: 'Sepal.Width', tick: { fit: false } }, y: { label: 'Petal.Width' } } }); var scatterChartLoad = function(){ $("#scatterChartLoad").attr("disabled", true); setTimeout(function () { scatterChart.load({ xs: { virginica: 'virginica_x' }, columns: [ ["virginica_x", 3.3, 2.7, 3.0, 2.9, 3.0, 3.0, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3.0, 2.5, 2.8, 3.2, 3.0, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3.0, 2.8, 3.0, 2.8, 3.8, 2.8, 2.8, 2.6, 3.0, 3.4, 3.1, 3.0, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3.0, 2.5, 3.0, 3.4, 3.0], ["virginica", 2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2.0, 1.9, 2.1, 2.0, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2.0, 2.0, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2.0, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2.0, 2.3, 1.8], ] }); }, 1000); setTimeout(function () { scatterChart.unload({ ids: 'setosa' }); }, 2000); setTimeout(function () { scatterChart.load({ columns: [ ["virginica", 0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2], ] }); }, 3000); setTimeout(function () { scatterChart.load({ columns: [ ["setosa_x", 3.5, 3.0, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3.0, 3.0, 4.0, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3.0, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3.0, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3.0, 3.8, 3.2, 3.7, 3.3], ["versicolor_x", 3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2.0, 3.0, 2.2, 2.9, 2.9, 3.1, 3.0, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3.0, 2.8, 3.0, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3.0, 3.4, 3.1, 2.3, 3.0, 2.5, 2.6, 3.0, 2.6, 2.3, 2.7, 3.0, 2.9, 2.9, 2.5, 2.8], ["setosa", 0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 
0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2], ["versicolor", 1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1.0, 1.3, 1.4, 1.0, 1.5, 1.0, 1.4, 1.3, 1.4, 1.5, 1.0, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1.0, 1.1, 1.0, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1.0, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3], ] }); }, 4000); setTimeout(function(){ $("#scatterChartLoad").attr("disabled", false); }, 5000) } var barChart = c3.generate({ bindto: "#barChart", data: { columns: [ ['data1', 30, 200, 100, 400, 150, 250], ['data2', 130, 100, 140, 200, 150, 50] ], type: 'bar' }, color: { pattern: colors }, bar: { width: { ratio: 0.8 // this makes bar width 50% of length between ticks } // or //width: 100 // this makes bar width 100px } }); var barChartLoad = function(){ $("#barChartLoad").attr("disabled", true); $("#barChartLoad").text("loading...") setTimeout(function () { barChart.load({ columns: [ ['data3', 130, -150, 200, 300, -200, 100] ] }); }, 1000); setTimeout(function () { barChart.load({ columns: [ ['data4', 50, -70, 130, 170, 100, 50] ] }); }, 2000); setTimeout(function () { barChart.load({ columns: [ ['data5', 70, -50, -110, 70, 40, -70] ] }); }, 3000); setTimeout(function(){ $("#barChartLoad").text("load complete") }, 4000) }; var stackedBar = c3.generate({ bindto: "#stackedBar", data: { columns: [ ['data1', -30, 200, 200, 400, -150, 250], ['data2', 130, 100, -100, 200, -150, 50], ['data3', -230, 200, 200, -300, 250, 250] ], type: 'bar', groups: [ ['data1', 'data2'] ] }, color: { pattern: colors } }); var stackedBarLoad = function(){ $("#stackedBarLoad").attr("disabled", true); $("#stackedBarLoad").text("loading..."); setTimeout(function () { $("#stackedBarLoad").text("grouping...") stackedBar.groups([['data1', 'data2', 'data3']]) }, 1000); setTimeout(function () { $("#stackedBarLoad").text("grouping...") stackedBar.load({ columns: [['data4', 100, -50, 150, 200, -300, -100]] }); }, 2000); setTimeout(function () { $("#stackedBarLoad").text("grouping...") stackedBar.groups([['data1', 'data2', 'data3', 'data4']]) }, 3000); setTimeout(function(){ $("#stackedBarLoad").text("load complete") }, 4000) }; var stepChart = c3.generate({ bindto: "#stepChart", data: { columns: [ ['data1', 300, 350, 300, 0, 0, 100], ['data2', 130, 100, 140, 200, 150, 50] ], types: { data1: 'step', data2: 'area-step' } }, color: { pattern: [color.info._500, color.warning._500, color.success._500, color.danger._500, color.fusion._300] } }); var stepChartLoad = function(){ $("#stepChartLoad").attr("disabled", true); $("#stepChartLoad").text("loading..."); setTimeout(function () { stepChart.load({ columns: [ ['data3', 130, -50, 200, 300, -200, 100] ], type: 'area-step', }); }, 1000); setTimeout(function () { stepChart.load({ columns: [ ['data4', 50, 70, 130, 170, 100, 50] ], type: 'area-step' }); }, 2000); setTimeout(function () { stepChart.load({ columns: [ ['data5', 70, -50, -110, 70, 40, -70] ], type: 'step' }); }, 3000); setTimeout(function(){ $("#stepChartLoad").text("load complete") }, 4000) } var combinationChart = c3.generate({ bindto: "#combinationChart", data: { columns: [ ['data1', 30, 20, 50, 40, 60, 50], ['data2', 200, 130, 90, 240, 130, 220], ['data3', 300, 200, 160, 400, 250, 250], ['data4', 200, 130, 90, 240, 130, 220], ['data5', 130, 120, 150, 140, 160, 150], ['data6', 90, 70, 20, 50, 60, 120], ], type: 'bar', types: { data3: 'spline', data4: 'line', data6: 
'area', }, groups: [ ['data1','data2'] ] }, color: { pattern: colors } }); var pieChart = c3.generate({ bindto: "#pieChart", data: { // iris data from R columns: [ ['virtigo', 30], ['clarfy', 120], ["setosa", 0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2], ["versicolor", 1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1.0, 1.3, 1.4, 1.0, 1.5, 1.0, 1.4, 1.3, 1.4, 1.5, 1.0, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1.0, 1.1, 1.0, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1.0, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3], ["virginica", 2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2.0, 1.9, 2.1, 2.0, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2.0, 2.0, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2.0, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2.0, 2.3, 1.8], ], type : 'pie'//, /*onclick: function (d, i) { console.log("onclick", d, i); }, onmouseover: function (d, i) { console.log("onmouseover", d, i); }, onmouseout: function (d, i) { console.log("onmouseout", d, i); }*/ }, color: { pattern: colors } }); var pieChartUnload = function(){ $("#pieChartUnload").attr("disabled", true); $("#pieChartUnload").text("unloading datasets...") setTimeout(function () { pieChart.unload({ ids: 'virtigo' }); pieChart.unload({ ids: 'clarfy' }); }, 1000); setTimeout(function () { $("#pieChartUnload").text("unload complete") }, 2000); }; var donutChart = c3.generate({ bindto: "#donutChart", data: { // iris data from R columns: [ ['IE', 30], ['Firefox', 120], ["Opera", 0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2], ["Safari", 1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1.0, 1.3, 1.4, 1.0, 1.5, 1.0, 1.4, 1.3, 1.4, 1.5, 1.0, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1.0, 1.1, 1.0, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1.0, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3], ["Chrome", 2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2.0, 1.9, 2.1, 2.0, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2.0, 2.0, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2.0, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2.0, 2.3, 1.8], ], type : 'donut'//, /*onclick: function (d, i) { console.log("onclick", d, i); }, onmouseover: function (d, i) { console.log("onmouseover", d, i); }, onmouseout: function (d, i) { console.log("onmouseout", d, i); }*/ }, donut: { title: "SmartAdmin Browsers" }, color: { pattern: colors } }); var donutChartUnload = function(){ $("#donutChartUnload").attr("disabled", true); $("#donutChartUnload").text("unloading datasets...") setTimeout(function () { donutChart.unload({ ids: 'Chrome' }); donutChart.unload({ ids: 'Opera' }); }, 1000); setTimeout(function () { $("#donutChartUnload").text("unload complete") }, 2000); }; var interactiveExample = c3.generate({ bindto: "#interactiveExample", data: { columns: [ ["data1", 100, 165, 140, 170, 200, 140, 220, 210, 190, 200, 170, 250], ["data2", 110, 80, 100, 85, 125, 110, 100, 130, 120, 100, 130, 145], ["data3", 75, 60, 70, 65, 85, 80, 70, 100, 100, 70, 90, 100] ], type: "area-spline" }, subchart: { show: true }, color: { pattern: colors } }); var interactiveWheel = c3.generate({ bindto: "#interactiveWheel", data: { columns: [ 
['sample', 30, 200, 100, 400, 150, 250, 150, 200, 170, 240, 350, 150, 100, 400, 150, 250, 150, 200, 170, 240, 100, 150, 250, 150, 200, 170, 240, 30, 200, 100, 400, 150, 250, 150, 200, 170, 240, 350, 150, 100, 400, 350, 220, 250, 300, 270, 140, 150, 90, 150, 50, 120, 70, 40] ] }, zoom: { enabled: true }, color: { pattern: [color.primary._500, color.danger._500, color.info._500, color.success._500, color.warning._500] }, grid: { x: { show: true }, y: { show: false } } }); </script> }
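The startDemo/pauseDemo handlers for the kitchen-sink card live in demo-c3.js, which is not shown here. As a rough illustration of the kind of in-place manipulation that demo performs, c3 charts expose transform() and load(); a minimal sketch against the simpleLine chart above:

// Rough sketch only (not taken from demo-c3.js): mutate an existing c3 chart in place.
setTimeout(function () {
    simpleLine.transform('bar'); // switch the line series to bars
}, 1000);
setTimeout(function () {
    // stream in an extra series alongside data1/data2 (illustrative values)
    simpleLine.load({ columns: [['data3', 90, 120, 80, 160, 140, 100, 180]] });
}, 2000);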
@using lsc.Common
@model lsc.Model.EnterCustContacts
@{
    ViewData["Title"] = "客户联系人";
    Layout = "~/Pages/_LayoutNone.cshtml";
    int type = ViewBag.type;
    if (type != 0)
    {
        Layout = "~/Pages/_Layout.cshtml";
    }
    //if (Model!=null)
    //{
    //    Layout = "~/Pages/_LayoutNone.cshtml";
    //}
}
<blockquote class="layui-elem-quote"> 添加客户联系人信息 </blockquote>
<div class="manage-form-container">
    <form class="layui-form" method="post" id="addform" action="/EnterCustom/SaveEnterCustContacts">
        <div class="layui-form-item"> <label class="layui-form-label">姓名</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" value="@(Model!=null ? Model.Name:"")" name="Name" placeholder="请输入名称" autocomplete="off" /> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">性别</label> <div class="layui-input-block"> <input type="radio" name="sex" value="1" title="男" @(Model!=null && Model.Sex == lsc.Model.Enume.SexEnum.Man?"checked":"")> <input type="radio" name="sex" value="2" title="女" @(Model!=null && Model.Sex == lsc.Model.Enume.SexEnum.Woman?"checked":"")> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">负责业务</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" value="@(Model!=null ? Model.Business:"")" name="Business" placeholder="请输入负责业务" autocomplete="off" /> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">部门</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" value="@(Model!=null ? Model.Department : "")" name="Department" placeholder="请输入联系人所在部门" autocomplete="off" /> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">职务</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" value="@(Model!=null ? Model.Duties:"")" name="Duties" placeholder="请输入联系人职务" autocomplete="off" /> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">手机号</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" onchange="telonchanged()" @(Model!=null&& !Model.Telephone.IsNull() ? "readonly" : "") value="@(Model!=null ? Model.Telephone:"")" name="Telephone" id="Telephone" placeholder="请输入手机号" autocomplete="off" /> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">固定电话</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" onchange="phonechanged()" @(Model!=null&& !Model.Landline.IsNull() ? "readonly" : "") value="@(Model!=null ? Model.Landline:"")" name="Landline" placeholder="请输入固定电话" autocomplete="off" /> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">邮箱</label> <div class="layui-input-block"> <input type="email" class="layui-input layui-form-text" onchange="emailchanged()" value="@(Model!=null ? Model.Email:"")" name="Email" id="Email" placeholder="请输入邮箱" autocomplete="off" /> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">QQ</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" onchange="qqchanged()" value="@(Model!=null ? Model.QQ:"")" name="QQ" placeholder="请输入QQ号" autocomplete="off" /> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">微信号</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" onchange="wechartchanged()" value="@(Model!=null ? Model.WeChart:"")" name="WeChart" placeholder="请输入微信号" autocomplete="off" /> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">住址</label> <div class="layui-input-block"> <input type="text" class="layui-input layui-form-text" value="@(Model!=null ? Model.Address:"")" name="Address" placeholder="请输入住址" autocomplete="off" /> </div> </div>
        <div class="layui-form-item"> <label class="layui-form-label">备注</label> <div class="layui-input-block"> <textarea name="Rem" class="layui-form-text layui-input" rows="4">@(Model!=null ? Model.Rem:"")</textarea> </div> </div>
        <div class="layui-form-label">
            <div class="layui-input-block"><button class="layui-btn" lay-submit lay-filter="*">提交</button></div>
            <input type="hidden" name="ID" value="@(Model!=null ? Model.ID.ToString() :"")" />
            <input type="hidden" name="EnterCustID" value="@(Model!=null ? Model.EnterCustID.ToString():ViewBag.EnterCustID)" />
            <input type="hidden" name="mobile" id="mobile" value="@(Model!=null ? Model.Telephone+Model.Landline:"")" />
            <input type="hidden" name="emails" id="emails" value="@(Model!=null ? Model.QQ+Model.Email+Model.WeChart:"")" />
            <input type="hidden" name="t" value="@(ViewBag.type)" />
        </div>
    </form>
</div>
@section Scripts{
    <script src="~/layui/jquery.validate.js"></script>
    <script src="~/layui/jquery.form.js"></script>
    <script type="text/javascript">
        var form, layer;
        layui.use(['form', 'element', 'layer'], function () {
            form = layui.form;
            layer = layui.layer;
        });
        jQuery.validator.addMethod("isPhone", function (value, element) {
            var length = value.length;
            var mobile = /^(((13[0-9]{1})|(15[0-9]{1})|(18[0-9]{1})|(17[0-9]{1}))+\d{8})$/;
            return this.optional(element) || (length == 11 && mobile.test(value));
        }, "请填写正确的手机号码");
        jQuery.validator.addMethod("isTel", function (value, element) {
            var length = value.length;
            var phone = /^\d{3,4}-\d{7,8}$/;
            return this.optional(element) || (phone.test(value));
        }, "请填写正确的固定电话");
        jQuery.validator.addMethod("isQQ", function (value, element) {
            var length = value.length;
            var qq = /^[1-9][0-9]{4,9}$/;
            return this.optional(element) || (qq.test(value));
        }, "请填写正确的QQ号");
        $('#addform').validate({
            ignore: "",
            rules: { Name: { required: true, maxlength: 64 }, sex: { required: true }, Telephone: { isPhone: true }, Landline: { isTel: true }, mobile: { required: true }, Email: { email: true }, QQ: { isQQ: true }, emails: { required: true } },
            messages: { Name: { required: "请输入联系人名称", maxlength: "联系人名称最多64个字" }, sex: { required: "请选择性别" }, Telephone: { isPhone: "请输入正确格式的手机号" }, Landline: { isTel: "请输入正确格式的固定电话号" }, mobile: { required: "手机号和固话号至少填一个" }, Email: { email: "请输入正确格式的邮箱" }, QQ: { isQQ: "请输入正确的QQ号" }, emails: { required: "邮箱、QQ、微信号至少填写一个" } },
            errorPlacement: function (error, element) {
                console.log(element);
                if (element.attr("name") == "mobile") { error.insertAfter("#Telephone"); }
                else if (element.attr("name") == "emails") { error.insertAfter("#Email"); }
                else { error.insertAfter(element); }
            },
            submitHandler: function (form) {
                //layer.load(0, { shade: false })
                $(form).ajaxSubmit(function (res) {
                    if (res.code === 1) {
                        layer.msg('保存成功', { icon: 6 });
                        var t = Number($("input[name='t']").val());
                        if (t === 0) {
                            // First get the index of the current iframe layer
                            var index = parent.layer.getFrameIndex(window.name);
                            // Then close it
                            parent.layer.close(index);
                        } else {
                            window.location = '/EnterCustom/AddEnterCustPhaseLog?types=1&id=@(Model!=null ? Model.EnterCustID.ToString():ViewBag.EnterCustID)';
                        }
                    } else {
                        layer.msg('保存失败', { icon: 5 });
                    }
                });
            }
        });
        var telonchanged = function () {
            console.log($("input[name='Telephone']").val());
            $("#mobile").val($("input[name='Telephone']").val() + $("input[name='Landline']").val());
        }
        var phonechanged = function () {
            $("#mobile").val($("input[name='Telephone']").val() + $("input[name='Landline']").val());
        }
        var emailchanged = function () {
            var values = $("input[name='Email']").val() + $("input[name='QQ']").val() + $("input[name='WeChart']").val();
            $("#emails").val(values);
        }
        var qqchanged = function () {
            var values = $("input[name='Email']").val() + $("input[name='QQ']").val() + $("input[name='WeChart']").val();
            $("#emails").val(values);
        }
        var wechartchanged = function () {
            var values = $("input[name='Email']").val() + $("input[name='QQ']").val() + $("input[name='WeChart']").val();
            $("#emails").val(values);
        }
    </script>
}
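The submit handler above only inspects res.code in the JSON returned by /EnterCustom/SaveEnterCustContacts; a sketch of the implied contract, inferred from the client code rather than the server source:

// Inferred from the client handler above, not from the server source:
var successResponse = { code: 1 }; // shows the success toast, then closes the layer or redirects
var failureResponse = { code: 0 }; // any other code shows the failure toast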