#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <assert.h>
#include <string.h>

#include <cuda_runtime.h>

#include <unistd.h>
#include <pthread.h>
#include <sched.h>

#include "util.h"
#include "system_typ.h"
#include "dvmh.h"
#include "dvmgpu_red.h"
#include "dvmgpu_distribution.h"

// Device == memory space
// Device types
enum DeviceType {dtHost = 0, dtCuda = 1, DEVICE_TYPES};

// Handler type flags
enum HandlerType {htParallel = 0, htMaster = 1};

// One worker thread bound to a slot of some device.
typedef struct {
    int deviceIndex; // index into the global 'devices' array
    pthread_t thread;
} DvmhPerformer;

// Capacity of each device's portion queue.
#define MAX_PORTIONS 100

/* Fields shared by every device kind. Kept as a macro so that CommonDevice,
   HostDevice and CudaDevice have identical leading layout and a pointer to
   any of them can be safely cast to CommonDevice *. Only block comments are
   used here: a line comment before the continuation backslash would break
   the macro (line splicing happens before comment removal). */
#define DEVICE_COMMON \
enum DeviceType deviceType; \
int slotCount; /*Number of parallel execution slots (worker threads)*/ \
int queueSize; /*Capacity of portionsQueue (set to MAX_PORTIONS)*/ \
DvmhPerformer **performers; /*Array of pointers to DvmhPerformer (length=slotCount)*/ \
void *defaultBase; \
\
pthread_mutex_t mut; /*Presumably protects the queue/counter fields below*/ \
volatile int slotsLeft; \
volatile int queueStart; /*Queue head index*/ \
struct tag_DvmhLoopPortion *portionsQueue[MAX_PORTIONS]; \
volatile int queueEnd; /*Queue tail index*/ \
volatile int sleeperCount; \
pthread_cond_t performerWakeup; \
volatile int performersCount; /*Performers that completed their initialization (see dvmh_init_)*/

typedef struct {
    DEVICE_COMMON
} CommonDevice;

typedef struct {
    DEVICE_COMMON
} HostDevice;

typedef struct {
    DEVICE_COMMON
    int index; // CUDA device number as seen by the CUDA runtime
} CudaDevice;

#undef DEVICE_COMMON

static CommonDevice **devices; //Array of pointers to Devices (length=devicesCount)
static int devicesCount;

// Entry point of the performer (worker) threads; defined later in this file.
static void *performerFunc(void *arg);

// Usage-direction flags for region registration; combinable as a bitmask
// (3 = inout, 5 = inlocal).
enum Intent {INTENT_IN = 1, INTENT_OUT = 2, INTENT_LOCAL = 4};

// An index space onto which arrays are distributed (mirrors a DVM s_AMVIEW).
typedef struct {
    void *key; //s_AMVIEW
    int rank;
    Interval *space; //Array of Intervals (length=rank)
    int *distribFlags; //Array of ints (length=rank)
    int arrayCount; // number of arrays currently mapped onto this space
} DvmhDistribSpace;

// Linear rule mapping one distribution-space axis to an array axis;
// axisNumber == -1 marks a replicated dimension (see
// region_register_subarray_, where these are built from DVM Align entries).
typedef struct {
    int axisNumber;
    long multiplier;
    long summand;
} DvmhAxisDistribRule;

// Full distribution rule of an array: one axis rule per dspace dimension.
typedef struct {
    DvmhDistribSpace *dspace;
    DvmhAxisDistribRule *map; //Array of structures (length=dspace->rank)
} DvmhDistribRule;

// One copy ("representative") of a variable in a particular device's memory.
typedef struct {
    void *deviceAddr; // address of the held portion in that memory space
    Interval *havePortion; //Array of Intervals (length=data->rank)
    DvmhPieces *actualState; // pieces of the portion currently up to date here
} DvmhRepresentative;

// Shadow widths of one dimension: [0] = low-side width, [1] = high-side width.
typedef long ShdWidth[2];
// Everything the runtime tracks about one distributed array or scalar
// (rank == 0 for scalars; the pointer members are then 0).
typedef struct {
    int typeSize;
    int rank;
    Interval *space; //Array of intervals (length=rank)
    Interval *localPart; //Array of intervals (length=rank)
    ShdWidth *shdWidths; //Array of ShdWidths (length=rank)
    DvmhDistribRule *distribRule;
    DvmhRepresentative **representatives; //Array of pointers to DvmhRepresentatives (length=devicesCount)
} DvmhData;

// Logging state, set up by dvmh_init_ and used by dvmh_log_ex.
static FILE *logFile;
enum LogLevel logLevel;
static char hostName[200];
static int myRank; // rank within the node (globalRank % ppn, see dvmh_init_)
static int myGlobalRank; // rank within the whole DVM run

static Dictionary *dataDict; //(void * of scalar | s_DISARRAY *) => DvmhData *
static Dictionary *amViewDict; //s_AMVIEW * => DvmhDistribSpace *

// Printable names indexed by enum LogLevel values.
static const char *logMessages[] = {"FATAL", "ERROR", "WARNING", "INFO", "DEBUG", "TRACE"};

// Writes one formatted message to the log when 'level' passes the current
// logLevel filter. Each line is prefixed with the level name, host name,
// node-local rank and the source location; a trailing newline is appended
// unless the format string already ends with one. Returns 1 when the
// message was emitted, 0 when it was filtered out.
int dvmh_log_ex(enum LogLevel level, const char *fileName, int lineNumber, const char *fmt, ...) {
    if (level < 0 || level >= LOG_LEVELS || level > logLevel)
        return 0;
    fprintf(logFile, "[%s]\t%s[%2d]\t%s:%d\t", logMessages[level], hostName, myRank, fileName, lineNumber);
    va_list args;
    va_start(args, fmt);
    vfprintf(logFile, fmt, args);
    va_end(args);
    size_t fmtLen = strlen(fmt);
    if (fmtLen == 0 || fmt[fmtLen - 1] != '\n')
        fprintf(logFile, "\n");
    fflush(logFile);
    return 1;
}

// Provided by the DVM runtime (Fortran linkage): query this process's rank
// and the processor (host) name for a given rank.
extern void dvmcommrank_(int *);
extern void dvmgetprocessorname_(int *, char *, long);

// Determines the number of logical processors on this machine.
// On Linux it parses /proc/cpuinfo: each "processor : N" line stores N and
// the final result is lastIndex + 1. Elsewhere it reads the
// NUMBER_OF_PROCESSORS environment variable. Always returns at least 1.
//
// Fixes vs. the previous version: fopen() failure no longer dereferences a
// NULL FILE *, the sscanf %s conversions are width-limited so a long
// cpuinfo token cannot overflow 'ignore', and the result is clamped to >= 1
// (a bogus NUMBER_OF_PROCESSORS value could previously return 0 or less).
static int getProcessorCount() {
    int res = 1;
#ifdef linux
    FILE *f = fopen("/proc/cpuinfo", "rt");
    if (f) {
        char buf[1000];
        while (fgets(buf, sizeof(buf) - 1, f)) {
            if (strstr(buf, "processor") == buf) {
                char ignore[100];
                // Line looks like "processor : N"; keep the last N + 1.
                sscanf(buf, "%99s%99s%d", ignore, ignore, &res);
                res++;
            }
        }
        fclose(f);
    }
#else
    const char *env = getenv("NUMBER_OF_PROCESSORS");
    if (env)
        res = atoi(env);
#endif
    if (res < 1)
        res = 1;
    return res;
}

// Initialization of the DVMH runtime: reads configuration from environment
// variables, sets up logging, enumerates devices (host + CUDA), pins the
// process to CPUs, starts performer threads and creates the global
// dictionaries.
void dvmh_init_() {
    // Cluster stuff: DVMH_PPN (or legacy 'procpernode') is the number of
    // processes per node; myRank becomes this process's index within its node.
    int ppn = 1;
    const char *sett = getenv("DVMH_PPN");
    if (!sett)
        sett = getenv("procpernode");
    if (sett)
        ppn = atoi(sett);
    if (ppn < 1)
        ppn = 1;
    dvmcommrank_(&myGlobalRank);
    dvmgetprocessorname_(&myGlobalRank, hostName, sizeof(hostName));
    myRank = myGlobalRank % ppn;

    // Logging: level from DVMH_LOGLEVEL (clamped from below to FATAL),
    // destination from DVMH_LOGFILE (append mode), stderr as the fallback.
    logLevel = INFO;
    sett = getenv("DVMH_LOGLEVEL");
    if (sett)
        logLevel = (enum LogLevel)atoi(sett);
    if (logLevel < FATAL)
        logLevel = FATAL;
    logFile = 0;
    sett = getenv("DVMH_LOGFILE");
    if (sett)
        logFile = fopen(sett, "at");
    if (!logFile)
        logFile = stderr;
    if (!myGlobalRank)
        dvmh_log(DEBUG, "Starting log. <date-time>. Logging level is %d(%s)", logLevel, logMessages[logLevel]);

    // Devices
    int totalProcessors = getProcessorCount();
    int totalRequestedProcessors = 0;
    // Host device: slot count from DVMH_NUM_THREADS, otherwise the node's
    // CPUs are divided among its ppn processes.
    HostDevice *newDevice = (HostDevice *)malloc(sizeof(HostDevice));
    newDevice->deviceType = dtHost;
    sett = getenv("DVMH_NUM_THREADS");
    if (sett)
        newDevice->slotCount = atoi(sett);
    else {
        int count = totalProcessors;
        newDevice->slotCount = count / gcd(ppn, count);
    }
    if (newDevice->slotCount < 0)
        newDevice->slotCount = 0;
    totalRequestedProcessors += newDevice->slotCount;

    devicesCount = 1;
    devices = (CommonDevice **)malloc(sizeof(CommonDevice *));
    devices[0] = (CommonDevice *)newDevice;

    // CUDA devices: DVMH_NUM_CUDAS devices per process; when it is unset,
    // the detected devices are divided among the node's processes.
    int count = 0;
    int dpp = 0;
    sett = getenv("DVMH_NUM_CUDAS");
    if (sett)
        dpp = atoi(sett);
    if (!sett || dpp > 0) {
        count = 0;
        cudaError_t err = cudaGetDeviceCount(&count);
        if (err != cudaSuccess) {
            dvmh_log(WARNING, "Cannot request CUDA device count - not using CUDA at all");
            count = 0;
        } else
            dvmh_log(INFO, "Found %d CUDA devices", count);
        if (!sett)
            dpp = count / gcd(ppn, count);
    }
    if (dpp < 0)
        dpp = 0;
    // NOTE(review): the first disjunct is redundant — 'dpp >= 0 && dpp <= count'
    // already covers dpp == 0.
    assert(dpp == 0 || dpp >= 0 && dpp <= count);
    if (dpp > 0) {
        // All CUDA devices of this process together count as one extra
        // processor for the affinity computation below.
        totalRequestedProcessors++;
        // Processes on the same node take round-robin runs of dpp devices.
        int lowIndex = (dpp * myRank) % count;
        int i;
        for (i = 0; i < dpp; i++) {
            CudaDevice *newDevice = (CudaDevice *)malloc(sizeof(CudaDevice));
            newDevice->deviceType = dtCuda;
            newDevice->slotCount = 1;
            newDevice->index = (lowIndex + i) % count;
            devicesCount++;
            devices = (CommonDevice **)realloc(devices, sizeof(CommonDevice *) * devicesCount);
            devices[devicesCount - 1] = (CommonDevice *)newDevice;
        }
    }

    // Set affinity: claim a contiguous run of totalRequestedProcessors CPUs,
    // rotated by node-local rank and wrapped around the node's CPU count.
    {
        if (totalRequestedProcessors > totalProcessors)
            totalRequestedProcessors = totalProcessors;
        if (totalRequestedProcessors < 1)
            totalRequestedProcessors = 1;
        // NOTE(review): shifting '1l' by totalProcessors is undefined when the
        // node has as many CPUs as bits in long — confirm that cannot happen.
        unsigned long mask = ((1l << totalRequestedProcessors) - 1) << (totalRequestedProcessors * myRank % totalProcessors);
        mask = (mask & ((1l << totalProcessors) - 1)) | (mask >> totalProcessors);
        // NOTE(review): 'mask' is unsigned long but the format is %X — should
        // be %lX to avoid truncation on LP64 platforms.
        dvmh_log(INFO, "Using processors affinity 0x%X", mask);
        // Passes a raw bitmask instead of a cpu_set_t; the return value of
        // sched_setaffinity is deliberately ignored (best effort).
        sched_setaffinity((pid_t)0, sizeof(mask), &mask);
    }

    // Initialize devices and performers
    int i;
    for (i = 0; i < devicesCount; i++) {
        CommonDevice *device = devices[i];
        device->defaultBase = 0;
        eassert(pthread_mutex_init(&device->mut, 0) == 0);
        device->slotsLeft = device->slotCount;
        device->queueEnd = 0;
        device->queueStart = 0;
        device->queueSize = MAX_PORTIONS;
        device->performersCount = 0;
        device->sleeperCount = 0;
        eassert(pthread_cond_init(&device->performerWakeup, 0) == 0);
        device->performers = (DvmhPerformer **)calloc(1, sizeof(DvmhPerformer *) * device->slotCount);
        int j;
        for (j = 0; j < device->slotCount; j++) {
            device->performers[j] = (DvmhPerformer *)malloc(sizeof(DvmhPerformer));
            device->performers[j]->deviceIndex = i;
            eassert(pthread_create(&device->performers[j]->thread, 0, performerFunc, device->performers[j]) == 0);
        }

        // Wait for performers to complete their initialization
        while (device->performersCount < device->slotCount) usleep(10);
    }

    // Print info on devices in use
    {
        char cudas[100];
        char *cudaptr = cudas;
        // CUDA devices occupy indexes 1..devicesCount-1 (index 0 is the host).
        for (i = 1; i < devicesCount && devices[i]->deviceType == dtCuda; i++) {
            cudaptr += sprintf(cudaptr, " %d", ((CudaDevice *)devices[i])->index);
        }
        *cudaptr = 0;
        int totalSlotCount = 0;
        for (i = 0; i < devicesCount; i++)
            totalSlotCount += devices[i]->slotCount;
        dvmh_log(INFO, "Set of devices in use: HOST (%d slots), CUDA (device numbers:%s). Total device count = %d. Total slot count = %d",
                devices[0]->slotCount, cudas, devicesCount, totalSlotCount);
    }

    // Global variables initialization
    dataDict = dictNew();
    amViewDict = dictNew();
}

#undef MAX_PORTIONS

// Copies the 'cutting' block of 'data' between the representatives on
// devices dev1 (source) and dev2 (destination) via performCopyCuda. A CUDA
// device index of -1 denotes host memory. When neither side is CUDA, or
// both sides are the same CUDA device, only a warning is logged.
static void performCopy(DvmhData *data, int dev1, int dev2, Interval *cutting) {
    int cudaIdx1 = -1;
    int cudaIdx2 = -1;
    if (devices[dev1]->deviceType == dtCuda)
        cudaIdx1 = ((CudaDevice *)devices[dev1])->index;
    if (devices[dev2]->deviceType == dtCuda)
        cudaIdx2 = ((CudaDevice *)devices[dev2])->index;
    DvmhRepresentative *srcRepr = data->representatives[dev1];
    DvmhRepresentative *dstRepr = data->representatives[dev2];
    long srcHeader[data->rank + 3];
    long dstHeader[data->rank + 3];
    srcHeader[data->rank + 2] = (long)srcRepr->deviceAddr;
    dstHeader[data->rank + 2] = (long)dstRepr->deviceAddr;
    fillHeader(data->rank, data->typeSize, srcRepr->deviceAddr, srcRepr->havePortion, srcHeader);
    fillHeader(data->rank, data->typeSize, dstRepr->deviceAddr, dstRepr->havePortion, dstHeader);
    if ((cudaIdx1 >= 0 || cudaIdx2 >= 0) && cudaIdx1 != cudaIdx2)
        performCopyCuda(data->rank, data->typeSize, cudaIdx1, srcHeader, cudaIdx2, dstHeader, cutting);
    else
        dvmh_log(WARNING, "Called copy to copy self to self");
}

// Brings the 'realBlock' region of 'data' up to date on device 'dev' by
// copying the missing pieces from other devices' representatives that hold
// them actual. For rank-0 (scalar) data realBlock must be 0. Afterwards the
// whole requested block is recorded as actual on 'dev'.
static void performGetActual(DvmhData *data, int dev, Interval *realBlock) {
    // realBlock may be 0 only for zero-rank data
    assert((data->rank == 0) >= (realBlock == 0));
    DvmhPieces *pieces1 = piecesNew(1, data->rank);
    if (data->rank)
        memcpy(pieces1->pieces[0], realBlock, sizeof(Interval) * data->rank);
    // pieces2 = requested block minus what 'dev' already holds actual
    DvmhPieces *pieces2 = piecesSubtract(data->rank, pieces1, data->representatives[dev]->actualState);
    piecesDelete(pieces1);
    int i;
    for (i = 0; i < devicesCount; i++)
        if (data->representatives[i] && i != dev) {
            if (pieces2->piecesCount == 0)
                break; // nothing left to fetch
            dvmh_log(TRACE, "pieces rest to actualize");
            custom_log(TRACE, piecesOut, data->rank, pieces2);
            dvmh_log(TRACE, "pieces has device #%d", i);
            custom_log(TRACE, piecesOut, data->rank, data->representatives[i]->actualState);
            // pieces1 = part of the remainder that device i can supply
            pieces1 = piecesIntersect(data->rank, pieces2, data->representatives[i]->actualState);
            dvmh_log(TRACE, "pieces that can be actualized");
            custom_log(TRACE, piecesOut, data->rank, pieces1);
            int j;
            for (j = 0; j < pieces1->piecesCount; j++)
                performCopy(data, i, dev, pieces1->pieces[j]);
            // shrink the remainder by what was just copied
            DvmhPieces *pieces3 = piecesSubtract(data->rank, pieces2, pieces1);
            piecesDelete(pieces1);
            piecesDelete(pieces2);
            pieces2 = pieces3;
        }
    if (pieces2->piecesCount > 0)
        dvmh_log(WARNING, "Can not actualize whole requested block to representative on device %d", dev);
    piecesDelete(pieces2);
    // mark the full requested block actual on 'dev' (even if some pieces
    // could not be found anywhere — the warning above covers that case)
    piecesUniteOne(data->rank, data->representatives[dev]->actualState, realBlock);
}

// Request to make a subarray actual (up to date) on the host. First
// parameter is the DVM header array; second and third are arrays of low and
// high indexes. An index value of -2147483648 means an open bound, i.e. up
// to the edge of the portion of the array space this process holds (local
// part + shadow edges + whatever else).
void dvmh_get_actual_subarray_(long dvmDesc[], long lowIndex[], long highIndex[]) {
    s_DISARRAY *dvmHead = (s_DISARRAY *)((SysHandle *)dvmDesc[0])->pP;
    assert(dvmHead != 0);
    DvmhData *data = dictFind(dataDict, dvmHead);
    if (!data)
        return;
    DvmhRepresentative *hostRepr = data->representatives[0];
    assert(hostRepr != 0);
    Interval block[data->rank];
    fillRealBlock(data->rank, lowIndex, highIndex, hostRepr->havePortion, block);
    performGetActual(data, 0, block);
}

// Request to make a scalar variable actual (up to date) on the host.
void dvmh_get_actual_scalar_(void *addr) {
    DvmhData *data = dictFind(dataDict, addr);
    if (!data)
        return;
    assert(data->representatives[0] != 0);
    performGetActual(data, 0, 0);
}

// Result of dvmh_get_actual_edges_: the arrays of a shadow group that this
// runtime knows about; consumed and freed by dvmh_shadow_renew_.
typedef struct {
    DvmhData **datas; //Array of pointers to DvmhData (length=datasCount)
    int datasCount;
} DvmhShadow;

// Request to make the boundary (edge) elements of every known array of the
// shadow group actual on the host. Returns an opaque DvmhShadow reference
// that must later be passed to dvmh_shadow_renew_, which frees it.
DvmhShadowRef dvmh_get_actual_edges_(ShadowGroupRef *group) {
    s_BOUNDGROUP *bg = (s_BOUNDGROUP *)((SysHandle *)*group)->pP;
    assert(bg != 0);
    // Collect the arrays of the group registered in dataDict.
    DvmhShadow *shadow = (DvmhShadow *)malloc(sizeof(DvmhShadow));
    shadow->datasCount = 0;
    shadow->datas = (DvmhData **)malloc(sizeof(DvmhData *) * bg->NewArrayColl.Count);
    int i;
    for (i = 0; i < bg->NewArrayColl.Count; i++) {
        s_DISARRAY *dvmHead = bg->NewArrayColl.List[i];
        DvmhData *data = dictFind(dataDict, dvmHead);
        if (data) {
            shadow->datasCount++;
            shadow->datas[shadow->datasCount - 1] = data;
        }
    }

    for (i = 0; i < shadow->datasCount; i++) {
        DvmhData *data = shadow->datas[i];
        assert(data != 0);
        // realBlock starts as the local part; one dimension at a time it is
        // narrowed to an edge strip and restored before the next dimension.
        Interval realBlock[data->rank];
        memcpy(realBlock, data->localPart, sizeof(Interval) * data->rank);
        int j;
        for (j = 0; j < data->rank; j++) {
            if (data->localPart[j][0] > data->space[j][0]) {
                // Strip at the low edge of the local part.
                // NOTE(review): its width comes from shdWidths[j][1] (the
                // high-side width) while the high strip below uses
                // shdWidths[j][0] — the opposite of dvmh_shadow_renew_.
                // Possibly intentional (edges exported to neighbours vs
                // shadow imported), but confirm.
                realBlock[j][0] = data->localPart[j][0];
                realBlock[j][1] = realBlock[j][0] + data->shdWidths[j][1] - 1;
                performGetActual(data, 0, realBlock);
            }

            if (data->localPart[j][1] < data->space[j][1]) {
                // Strip at the high edge of the local part.
                realBlock[j][1] = data->localPart[j][1];
                realBlock[j][0] = realBlock[j][1] - (data->shdWidths[j][0] - 1);
                performGetActual(data, 0, realBlock);
            }

            // restore dimension j to the full local part
            memcpy(realBlock + j, data->localPart + j, sizeof(Interval));
        }
    }
    return (DvmhShadowRef)shadow;
}

// Removes the pieces of 'p' from the actual state of every existing
// representative of 'data', on all devices.
static void clearActual(DvmhData *data, DvmhPieces *p) {
    int dev;
    for (dev = 0; dev < devicesCount; dev++) {
        DvmhRepresentative *repr = data->representatives[dev];
        if (!repr)
            continue;
        DvmhPieces *remaining = piecesSubtract(data->rank, repr->actualState, p);
        assert(repr->actualState != 0);
        piecesDelete(repr->actualState);
        repr->actualState = remaining;
    }
}

// Declares 'realBlock' actual exclusively on device 'dev': the block is
// removed from every representative's actual state and then added to the
// state of 'dev'. For rank-0 data realBlock must be 0.
static void performSetActual(DvmhData *data, int dev, Interval *realBlock) {
    // realBlock may be 0 only for zero-rank data
    assert((data->rank == 0) >= (realBlock == 0));
    DvmhPieces *blockPieces = piecesNew(0, data->rank);
    piecesAppendOne(data->rank, blockPieces, realBlock);
    clearActual(data, blockPieces);
    piecesDelete(blockPieces);
    piecesAppendOne(data->rank, data->representatives[dev]->actualState, realBlock);
}

// Declares a subarray to be actual (up to date) on the host. First
// parameter is the DVM header array; second and third are arrays of low and
// high indexes. An index value of -2147483648 means an open bound, i.e. up
// to the edge of this process's local part of the array in that dimension.
void dvmh_actual_subarray_(long dvmDesc[], long lowIndex[], long highIndex[]) {
    s_DISARRAY *dvmHead = (s_DISARRAY *)((SysHandle *)dvmDesc[0])->pP;
    DvmhData *data = dictFind(dataDict, dvmHead);
    if (!data)
        return;
    assert(data->representatives[0] != 0);
    Interval block[data->rank];
    fillRealBlock(data->rank, lowIndex, highIndex, data->localPart, block);
    performSetActual(data, 0, block);
}

// Declares a scalar variable to be actual (up to date) on the host.
void dvmh_actual_scalar_(void *addr) {
    DvmhData *data = dictFind(dataDict, addr);
    if (!data)
        return;
    assert(data->representatives[0] != 0);
    performSetActual(data, 0, 0);
}

// Per-region view of one distribution space: where its local parts were
// placed on each device for this region.
typedef struct {
    DvmhDistribSpace *dspace;

    Interval **localParts; //Array of array of Intervals (length1=devicesCount, length2=dspace->rank)
} DvmhRegionDistribSpace;
// Per-region bookkeeping for one variable: the pieces the region uses as
// input, output and local scratch, plus its per-device local parts.
typedef struct {
    DvmhData *data;
    DvmhPieces *inPieces;
    DvmhPieces *outPieces;
    DvmhPieces *localPieces;

    Interval **localParts; //Array of array of Intervals (length1=devicesCount, length2=data->rank)
} DvmhRegionData;
// A computation region: the variables and distribution spaces it touches
// and the devices it runs on.
typedef struct {
    int async; // nonzero when created with the asynchrony flag
    long usesDevices; // device usage mask (presumably a bitmask of device indexes — see mapSpacesOnDevices)
    Dictionary *datas; //DvmhData * => DvmhRegionData *
    Dictionary *dspaces; //DvmhDistribSpace * => DvmhRegionDistribSpace *
} DvmhRegion;

// Returns the region-local bookkeeping record for 'data', creating and
// registering an empty one on first use.
static DvmhRegionData *getRdata(DvmhRegion *region, DvmhData *data) {
    DvmhRegionData *found = dictFind(region->datas, data);
    if (found)
        return found;
    DvmhRegionData *created = (DvmhRegionData *)malloc(sizeof(DvmhRegionData));
    created->data = data;
    created->inPieces = piecesNew(0, data->rank);
    created->outPieces = piecesNew(0, data->rank);
    created->localPieces = piecesNew(0, data->rank);
    created->localParts = (Interval **)calloc(1, sizeof(Interval *) * devicesCount);
    dictAdd(region->datas, data, created);
    return created;
}

// Initiates renewal of shadow edges on the region's computing devices.
// First parameter is the region reference returned by region_create_();
// second is the value returned by the corresponding earlier
// dvmh_get_actual_edges_() call. The shadow object is freed here.
void dvmh_shadow_renew_(DvmhRegionRef *regionRef, DvmhShadowRef *group) {
    DvmhRegion *region = (DvmhRegion *)*regionRef;
    DvmhShadow *shadow = (DvmhShadow *)*group;

    int i;
    for (i = 0; i < shadow->datasCount; i++) {
        DvmhData *data = shadow->datas[i];
        assert(data != 0);
        // Pass 1: mark the shadow strips around the host's local part as
        // actual on the host (device 0). realBlock is narrowed to a strip
        // one dimension at a time and restored afterwards.
        Interval realBlock[data->rank];
        memcpy(realBlock, data->localPart, sizeof(Interval) * data->rank);
        int j;
        for (j = 0; j < data->rank; j++) {
            if (data->localPart[j][0] > data->space[j][0]) {
                // low shadow strip: [localLow - lowWidth, localLow - 1]
                realBlock[j][0] = data->localPart[j][0] - data->shdWidths[j][0];
                realBlock[j][1] = realBlock[j][0] + data->shdWidths[j][0] - 1;
                performSetActual(data, 0, realBlock);
            }

            if (data->localPart[j][1] < data->space[j][1]) {
                // high shadow strip: [localHigh + 1, localHigh + highWidth]
                realBlock[j][1] = data->localPart[j][1] + data->shdWidths[j][1];
                realBlock[j][0] = realBlock[j][1] - (data->shdWidths[j][1] - 1);
                performSetActual(data, 0, realBlock);
            }

            // restore dimension j to the full local part
            memcpy(realBlock + j, data->localPart + j, sizeof(Interval));
        }

        DvmhRegionData *rdata = getRdata(region, data);
        int k;
        // Pass 2: invalidate those shadow strips around each device's
        // region-local part, so stale copies are not considered actual.
        for (k = 0; k < devicesCount; k++) {
            Interval *localPart = rdata->localParts[k];
            if (localPart) {
                DvmhRepresentative *repr = data->representatives[k];
                assert(repr != 0);
                memcpy(realBlock, localPart, sizeof(Interval) * data->rank);

                int j;
                for (j = 0; j < data->rank; j++) {
                    if (localPart[j][0] > data->space[j][0]) {
                        realBlock[j][0] = localPart[j][0] - data->shdWidths[j][0];
                        realBlock[j][1] = realBlock[j][0] + data->shdWidths[j][0] - 1;
                        piecesSubtractOne(data->rank, repr->actualState, realBlock);
                    }

                    if (localPart[j][1] < data->space[j][1]) {
                        realBlock[j][1] = localPart[j][1] + data->shdWidths[j][1];
                        realBlock[j][0] = realBlock[j][1] - (data->shdWidths[j][1] - 1);
                        piecesSubtractOne(data->rank, repr->actualState, realBlock);
                    }

                    memcpy(realBlock + j, localPart + j, sizeof(Interval));
                }
            }
        }
        // Pass 3: pull fresh shadow-strip contents into every device's
        // representative from whichever device holds them actual.
        for (k = 0; k < devicesCount; k++) {
            Interval *localPart = rdata->localParts[k];
            if (localPart) {
                DvmhRepresentative *repr = data->representatives[k];
                assert(repr != 0);
                memcpy(realBlock, localPart, sizeof(Interval) * data->rank);

                int j;
                for (j = 0; j < data->rank; j++) {
                    if (localPart[j][0] > data->space[j][0]) {
                        realBlock[j][0] = localPart[j][0] - data->shdWidths[j][0];
                        realBlock[j][1] = realBlock[j][0] + data->shdWidths[j][0] - 1;
                        performGetActual(data, k, realBlock);
                    }

                    if (localPart[j][1] < data->space[j][1]) {
                        realBlock[j][1] = localPart[j][1] + data->shdWidths[j][1];
                        realBlock[j][0] = realBlock[j][1] - (data->shdWidths[j][1] - 1);
                        performGetActual(data, k, realBlock);
                    }

                    memcpy(realBlock + j, localPart + j, sizeof(Interval));
                }
            }
        }
    }

    // The shadow reference is single-use: release it here.
    if (shadow->datas)
        free(shadow->datas);
    free(shadow);
}

// Computes the "natural" base for an array on the given device: the base
// against which the representative's offset is zero.
void *dvmh_get_natural_base_(long *deviceRef, long dvmDesc[]) {
    s_DISARRAY *dvmHead = (s_DISARRAY *)((SysHandle *)dvmDesc[0])->pP;
    DvmhData *data = dictFind(dataDict, dvmHead);
    assert(data != 0);
    long dev = *deviceRef;
    assert(dev >= 0 && dev < devicesCount);
    DvmhRepresentative *repr = data->representatives[dev];
    assert(repr != 0);
    // Build a throwaway header with a zero base, then read back the offset
    // it produced and convert it to bytes.
    long header[data->rank + 3];
    header[data->rank + 2] = 0;
    fillHeader(data->rank, data->typeSize, repr->deviceAddr, repr->havePortion, header);
    return (void *)(header[data->rank + 1] * data->typeSize);
}

// Computes the offset (in elements) of a scalar variable's representative
// on the given device relative to the given base. Parameters: the device
// (passed to handlers by the support system), the base, the scalar variable.
long dvmh_calculate_offset_(long *deviceRef, void *base, void *variable) {
    DvmhData *data = dictFind(dataDict, variable);
    assert(data != 0);
    long dev = *deviceRef;
    assert(dev >= 0 && dev < devicesCount);
    DvmhRepresentative *repr = data->representatives[dev];
    assert(repr != 0);
    long byteDistance = (long)repr->deviceAddr - (long)base;
    return byteDistance / data->typeSize;
}

// Fills a reduced DVM header for the representative on the given device
// using the given base. Parameters: the device (passed to handlers by the
// support system), the base, the DVM header array, and the output array
// that receives the header rebuilt for that device.
void dvmh_fill_header_(long *deviceRef, void *base, long dvmDesc[], long dvmhDesc[]) {
    s_DISARRAY *dvmHead = (s_DISARRAY *)((SysHandle *)dvmDesc[0])->pP;
    DvmhData *data = dictFind(dataDict, dvmHead);
    assert(data != 0);
    long dev = *deviceRef;
    assert(dev >= 0 && dev < devicesCount);
    DvmhRepresentative *repr = data->representatives[dev];
    assert(repr != 0);
    dvmhDesc[data->rank + 2] = (long)base;
    fillHeader(data->rank, data->typeSize, repr->deviceAddr, repr->havePortion, dvmhDesc);
}

// Frees the representative of 'data' on 'device' if one exists, releasing
// its CUDA buffer when the device is a CUDA one. Does not clear the slot in
// data->representatives (the caller owns that array).
static void reprDelete(DvmhData *data, int device) {
    dvmh_log(TRACE, "Deleting representation on device %d", device);
    DvmhRepresentative *repr = data->representatives[device];
    if (!repr)
        return;
    if (repr->havePortion)
        free(repr->havePortion);
    piecesDelete(repr->actualState);
    if (repr->deviceAddr && devices[device]->deviceType == dtCuda) {
#ifndef DONT_HAVE_CUDA4
        assertCuda(cudaSetDevice(((CudaDevice *)devices[device])->index));
        assertCuda(cudaFree(repr->deviceAddr));
#endif
    }
    free(repr);
}

// Frees everything a DvmhData owns: all device representatives, the
// distribution rule and — when this was the last array mapped onto it — the
// shared distribution space (which is then unregistered from amViewDict).
// Does NOT remove 'data' from dataDict; callers do that themselves.
static void dataDelete(DvmhData *data) {
    if (data) {
        int i;
        for (i = 0; i < devicesCount; i++) {
            reprDelete(data, i);
        }
        if (data->representatives)
            free(data->representatives);
        if (data->distribRule) {
            assert(data->distribRule->map);
            free(data->distribRule->map);
            assert(data->distribRule->dspace);
            // Drop this array's reference to the shared distribution space.
            data->distribRule->dspace->arrayCount--;
            if (data->distribRule->dspace->arrayCount == 0) {
                // Last user gone: release the space itself.
                DvmhDistribSpace *dspace = data->distribRule->dspace;
                if (dspace->rank > 0) {
                    assert(dspace->distribFlags);
                    free(dspace->distribFlags);
                    assert(dspace->space);
                    free(dspace->space);
                }
                dictErase(amViewDict, dspace->key);
                free(dspace);
            }
            free(data->distribRule);
        }
        // For scalars (rank 0) these pointers are 0 and the checks skip them.
        if (data->shdWidths)
            free(data->shdWidths);
        if (data->space)
            free(data->space);
        if (data->localPart)
            free(data->localPart);
        free(data);
    }
}

// Drops the runtime's knowledge of the variable keyed by 'addr': frees its
// DvmhData (with all representatives) and unregisters it from dataDict.
// A no-op for unknown variables.
static void destroyVariable(void *addr) {
    DvmhData *found = dictFind(dataDict, addr);
    if (!found)
        return;
    dataDelete(found);
    dictErase(dataDict, addr);
}

// Оповещение о фактическом прекращении существования переменной-массива. Вызывать непосредственно перед выходом из подпрограммы для локальных переменных, а также перед deallocate и redistribute.
void dvmh_destroy_array_(long dvmDesc[]) {
    destroyVariable(((SysHandle *)dvmDesc[0])->pP);
}

// Notification that a scalar variable actually ceases to exist. Call right
// before leaving a subroutine for local variables, and also before
// deallocate.
void dvmh_destroy_scalar_(void *addr) {
    destroyVariable(addr);
}

// Регионы
// Создание региона. Параметр – флаг асинхронности
DvmhRegionRef region_create_(long *asyncFlagRef) {
    DvmhRegion *region = (DvmhRegion *)malloc(sizeof(DvmhRegion));
    region->async = *asyncFlagRef;
    region->datas = dictNew();
    region->dspaces = dictNew();
    return (DvmhRegionRef)region;
}

// Registers a subarray in a region. First parameter is the region reference
// returned by region_create_(). Second is the usage direction (1 - in,
// 2 - out, 4 - local, 3 - inout, 5 - inlocal). Third is the DVM header
// array. Fourth and fifth are arrays of low/high indexes. An index value of
// -2147483648 means an open bound (up to the end of the dimension from that
// side: for in - local part + shadow edges, for out and local - local part
// only).
void region_register_subarray_(DvmhRegionRef *regionRef, long *intentRef, long dvmDesc[], long lowIndex[], long highIndex[]) {
    DvmhData *data = dictFind(dataDict, (s_DISARRAY *)((SysHandle *)dvmDesc[0])->pP);
    if (!data) {
        // First time this array is seen: build its DvmhData from the DVM
        // header and create the host representative (device 0).
        s_DISARRAY *dvmHead = (s_DISARRAY *)((SysHandle *)dvmDesc[0])->pP;
        data = (DvmhData *)malloc(sizeof(DvmhData));
        data->typeSize = dvmHead->TLen;
        data->rank = dvmHead->Space.Rank;
        data->representatives = (DvmhRepresentative **)calloc(1, sizeof(DvmhRepresentative *) * devicesCount);
        data->representatives[0] = (DvmhRepresentative *)malloc(sizeof(DvmhRepresentative));
        DvmhRepresentative *repr = data->representatives[0];
        long *dvmheader = (long *)dvmHead->HandlePtr->HeaderPtr;
        data->space = (Interval *)malloc(sizeof(Interval) * data->rank);
        data->shdWidths = (ShdWidth *)malloc(sizeof(ShdWidth) * data->rank);
        data->localPart = (Interval *)malloc(sizeof(Interval) * data->rank);
        repr->havePortion = (Interval *)malloc(sizeof(Interval) * data->rank);
        // NOTE(review): deviceAddr starts at base + header offset and is
        // shifted per dimension in the loop below — presumably so that it
        // matches the address fillHeader expects for havePortion; confirm
        // against the DVM header layout.
        repr->deviceAddr = (char *)dvmHead->BasePtr + data->typeSize * dvmheader[data->rank + 1];
        int i;
        for (i = 0; i < data->rank; i++) {
            // dvmheader[rank + i + 2] appears to hold the start index of
            // dimension i — TODO confirm.
            long starti = dvmheader[data->rank + i + 2];
            data->space[i][0] = starti;
            data->space[i][1] = data->space[i][0] + dvmHead->Space.Size[i] - 1;
            dvmh_log(DEBUG, "space [%ld..%ld]", data->space[i][0], data->space[i][1]);

            data->localPart[i][0] = dvmHead->Block.Set[i].Lower + starti;
            data->localPart[i][1] = dvmHead->Block.Set[i].Upper + starti;
            dvmh_log(DEBUG, "local [%ld..%ld]", data->localPart[i][0], data->localPart[i][1]);

            data->shdWidths[i][0] = dvmHead->InitLowShdWidth[i];
            data->shdWidths[i][1] = dvmHead->InitHighShdWidth[i];
            dvmh_log(DEBUG, "shdWidths %ld, %ld", data->shdWidths[i][0], data->shdWidths[i][1]);

            repr->havePortion[i][0] = dvmHead->ArrBlock.Block.Set[i].Lower + starti;
            repr->havePortion[i][1] = dvmHead->ArrBlock.Block.Set[i].Upper + starti;
            dvmh_log(DEBUG, "have [%ld..%ld]", repr->havePortion[i][0], repr->havePortion[i][1]);

            repr->deviceAddr = (char *)repr->deviceAddr + data->typeSize * repr->havePortion[i][0] * (i < data->rank - 1 ? dvmheader[i + 1] : 1);
        }
        repr->actualState = piecesNew(0, data->rank);
        // Everything the host holds is initially actual on the host.
        piecesAppendOne(data->rank, repr->actualState, repr->havePortion);
        DvmhDistribSpace *dspace = dictFind(amViewDict, dvmHead->AMView);
        if (dspace == 0) {
            // First array on this AM view: create its distribution space.
            dspace = (DvmhDistribSpace *)malloc(sizeof(DvmhDistribSpace));
            dspace->key = dvmHead->AMView;
            dspace->arrayCount = 0;
            dspace->rank = dvmHead->AMView->Space.Rank;
            dspace->space = (Interval *)malloc(sizeof(Interval) * dspace->rank);
            dspace->distribFlags = (int *)malloc(sizeof(int) * dspace->rank);
            for (i = 0; i < dspace->rank; i++) {
                dspace->space[i][0] = dvmHead->AMView->Local.Set[i].Lower;
                dspace->space[i][1] = dvmHead->AMView->Local.Set[i].Upper;
                dspace->distribFlags[i] = 1;
                dvmh_log(DEBUG, "distribSpace [%ld..%ld]", dspace->space[i][0], dspace->space[i][1]);
            }
            dictAdd(amViewDict, dspace->key, dspace);
        }
        assert(dspace != 0);
        // Build the alignment rule of this array onto the space from the
        // DVM Align entries (rows [rank .. rank + dspace->rank - 1]).
        DvmhDistribRule *rule = (DvmhDistribRule *)malloc(sizeof(DvmhDistribRule));
        dspace->arrayCount++;
        rule->dspace = dspace;
        rule->map = (typeof(rule->map))malloc(sizeof(*rule->map) * dspace->rank);
        for (i = 0; i < dspace->rank; i++) {
            long starti = dvmheader[data->rank + dvmHead->Align[data->rank + i].Axis - 1 + 2];
            // axisNumber == -1 marks a replicated dimension.
            rule->map[i].axisNumber = dvmHead->Align[data->rank + i].Attr == align_REPLICATE ? -1 : dvmHead->Align[data->rank + i].Axis;
            rule->map[i].multiplier = dvmHead->Align[data->rank + i].A;
            // Rebase the summand by the dimension's start index.
            rule->map[i].summand = dvmHead->Align[data->rank + i].B - starti * dvmHead->Align[data->rank + i].A;
            dvmh_log(DEBUG, "array mapping Axis=%d A=%ld B=%ld", rule->map[i].axisNumber, rule->map[i].multiplier, rule->map[i].summand);
        }
        data->distribRule = rule;
        dictAdd(dataDict, dvmHead, data);
    }
    assert(data != 0);
    DvmhRepresentative *repr = data->representatives[0];
    assert(repr != 0);
    DvmhRegion *region = (DvmhRegion *)*regionRef;
    DvmhRegionData *rdata = getRdata(region, data);
    assert(rdata != 0);
    // Resolve open bounds against the host's havePortion, then record the
    // block under each requested intent.
    Interval realBlock[data->rank];
    fillRealBlock(data->rank, lowIndex, highIndex, repr->havePortion, realBlock);
    if (*intentRef & INTENT_IN) {
        piecesUniteOne(data->rank, rdata->inPieces, realBlock);
    }
    if (*intentRef & INTENT_OUT) {
        piecesUniteOne(data->rank, rdata->outPieces, realBlock);
    }
    if (*intentRef & INTENT_LOCAL) {
        piecesUniteOne(data->rank, rdata->localPieces, realBlock);
    }
    // Make sure the region also tracks this array's distribution space.
    DvmhDistribSpace *dspace = data->distribRule->dspace;
    DvmhRegionDistribSpace *rdspace = dictFind(region->dspaces, dspace);
    if (!rdspace) {
        rdspace = (DvmhRegionDistribSpace *)malloc(sizeof(DvmhRegionDistribSpace));
        rdspace->dspace = dspace;
        rdspace->localParts = (Interval **)calloc(1, sizeof(Interval *) * devicesCount);
        dictAdd(region->dspaces, dspace, rdspace);
    }
    dvmh_log(TRACE, "register_subarray OK");
}

// Registers a scalar in a region. First parameter - region reference returned
// by region_create_(). Second parameter - usage direction (1 - in, 2 - out,
// 4 - local, 3 - inout, 5 - inlocal). Third parameter - the scalar variable.
// Fourth parameter - size of the scalar in bytes (4 for float, 8 for double, etc.).
void region_register_scalar_(DvmhRegionRef *regionRef, long *intentRef, void *addr, long *sizeRef) {
    // Look up the descriptor for this scalar, lazily creating a rank-0
    // DvmhData with a host representative on first registration.
    DvmhData *data = dictFind(dataDict, addr);
    if (data == 0) {
        data = (DvmhData *)malloc(sizeof(DvmhData));
        data->rank = 0;
        data->typeSize = *sizeRef;
        data->representatives = (DvmhRepresentative **)calloc(1, sizeof(DvmhRepresentative *) * devicesCount);
        DvmhRepresentative *hostRepr = (DvmhRepresentative *)malloc(sizeof(DvmhRepresentative));
        data->representatives[0] = hostRepr;
        hostRepr->deviceAddr = addr;
        hostRepr->actualState = piecesNew(1, 0);
        hostRepr->havePortion = 0;
        data->shdWidths = 0;
        data->space = 0;
        data->localPart = 0;
        data->distribRule = 0;
        dictAdd(dataDict, addr, data);
    }
    assert(data != 0);
    DvmhRegion *region = (DvmhRegion *)*regionRef;
    DvmhRegionData *rdata = getRdata(region, data);
    assert(rdata != 0);
    // Record the requested usage directions; a rank-0 piece is appended at
    // most once per direction.
    long intent = *intentRef;
    if ((intent & INTENT_IN) && rdata->inPieces->piecesCount == 0)
        piecesAppendOne(0, rdata->inPieces, 0);
    if ((intent & INTENT_OUT) && rdata->outPieces->piecesCount == 0)
        piecesAppendOne(0, rdata->outPieces, 0);
    if ((intent & INTENT_LOCAL) && rdata->localPieces->piecesCount == 0)
        piecesAppendOne(0, rdata->localPieces, 0);
}

// dictForEach callback: splits the first dimension of a distribution space
// among the devices selected for the region (region->usesDevices bitmask),
// proportionally to per-device weights. Fix: removed the unused local
// variable deviceCount that was computed but never read.
static void mapSpacesOnDevices(DvmhRegion *region, void *key, void *value) {
    long devicesMask = region->usesDevices;
    DvmhRegionDistribSpace *rdspace = value;
    assert(rdspace != 0);
    DvmhDistribSpace *dspace = rdspace->dspace;
    assert(dspace != 0);
    double deviceWeights[devicesCount];
// TODO: determine from launches
    // Fixed weight ratio for now: device 0 (host) weighs 1, every other
    // device weighs 3, normalized so the weights sum to 1.
    deviceWeights[0] = 1.0 / ((devicesCount - 1) * 3 + 1);
    int i;
    for (i = 1; i < devicesCount; i++)
        deviceWeights[i] = (1.0 - deviceWeights[0]) / (devicesCount - 1);
    // Total weight of the devices actually participating in this region.
    double totalWeight = 0;
    for (i = 0; i < devicesCount; i++)
        if (devicesMask & (1l << i))
            totalWeight += deviceWeights[i];
    long wholeSize = dspace->space[0][1] - dspace->space[0][0] + 1;
    long prevIdx = dspace->space[0][0] - 1;
    // Give each participating device a contiguous chunk of dimension 0
    // proportional to its share of totalWeight; remaining dimensions are
    // copied whole from dspace->space.
    for (i = 0; i < devicesCount; i++)
        if (devicesMask & (1l << i)) {
            assert(rdspace->localParts[i] == 0);
            if (prevIdx < dspace->space[0][1]) {
                rdspace->localParts[i] = (Interval *)malloc(sizeof(Interval) * dspace->rank);
                memcpy(rdspace->localParts[i], dspace->space, sizeof(Interval) * dspace->rank);
                rdspace->localParts[i][0][0] = prevIdx + 1;
                rdspace->localParts[i][0][1] = min(dspace->space[0][1], prevIdx + (long)(deviceWeights[i] / totalWeight * wholeSize + 1.0));
                prevIdx = rdspace->localParts[i][0][1];
                dvmh_log(DEBUG, "distribSpace part on device %d: [%ld..%ld]", i, rdspace->localParts[i][0][0], rdspace->localParts[i][0][1]);
            }
        }
}

// dictForEach callback: maps one registered data object (array or scalar)
// onto the devices the region uses. For every participating device it
// derives the device-local part of the array from the distribution rule,
// creates a device representative (allocating CUDA memory when needed),
// brings the needed IN pieces (extended by shadow widths) up to date on the
// device, and finally updates actual-state bookkeeping for OUT/LOCAL pieces.
static void mapDatasOnDevices(DvmhRegion *region, void *key, void *value) {
    long devicesMask = region->usesDevices;
    DvmhRegionData *rdata = value;
    assert(rdata != 0);
    DvmhData *data = rdata->data;
    assert(data != 0);
    int j;
    for (j = 0; j < devicesCount; j++)
        if (devicesMask & (1l << j)) {
            assert(rdata->localParts[j] == 0);
            int hasLocal = 1;
            if (data->rank > 0) {
                // Start from the whole local part and narrow it per axis by
                // this device's part of the distribution space.
                Interval part[data->rank];
                memcpy(part, data->localPart, sizeof(Interval) * data->rank);
                DvmhDistribRule *rule = data->distribRule;
                if (rule != 0) {
                    DvmhDistribSpace *dspace = rule->dspace;
                    assert(dspace != 0);
                    DvmhRegionDistribSpace *rdspace = dictFind(region->dspaces, dspace);
                    assert(rdspace != 0);
                    hasLocal = hasLocal && rdspace->localParts[j] != 0;
                    if (hasLocal) {
                        int i;
                        for (i = 0; i < dspace->rank; i++) {
                            int ax = rule->map[i].axisNumber;
                            if (ax > -1) {
                                if (rule->map[i].multiplier == 0) {
                                    // Constant alignment: the fixed dspace index must
                                    // fall inside the device's part of this axis.
                                    if (!(rule->map[i].summand >= rdspace->localParts[j][i][0] && rule->map[i].summand <= rdspace->localParts[j][i][1]))
                                        hasLocal = 0;
                                } else {
                                    // Linear alignment (dspace = multiplier*data + summand,
                                    // per the rule construction): invert it, rounding the
                                    // lower bound up and the upper bound down.
                                    part[ax - 1][0] = max(part[ax - 1][0], (rdspace->localParts[j][i][0] - rule->map[i].summand + rule->map[i].multiplier - 1) /
                                            rule->map[i].multiplier);
                                    part[ax - 1][1] = min(part[ax - 1][1], (rdspace->localParts[j][i][1] - rule->map[i].summand) / rule->map[i].multiplier);
                                    if (part[ax - 1][0] > part[ax - 1][1])
                                        hasLocal = 0;
                                }
                            }
                        }
                    }
                }
                if (hasLocal) {
                    rdata->localParts[j] = (Interval *)malloc(sizeof(Interval) * data->rank);
                    memcpy(rdata->localParts[j], part, sizeof(Interval) * data->rank);
                }
            }
            if (hasLocal) {
                if (data->rank > 0) {
                    int i;
                    for (i = 0; i < data->rank; i++)
                        dvmh_log(DEBUG, "array localPart [%ld..%ld]", rdata->localParts[j][i][0], rdata->localParts[j][i][1]);
                }
                DvmhRepresentative *repr = data->representatives[j];
// TODO: not only absent repr, but also not equitant
                if (!repr) {
                    repr = (DvmhRepresentative *)malloc(sizeof(DvmhRepresentative));
                    data->representatives[j] = repr;
                    repr->actualState = piecesNew(0, data->rank);

// TODO: determine havePortion in accordance with distribution
                    // For now the device representative mirrors the host
                    // representative's portion.
                    if (data->rank > 0) {
                        DvmhRepresentative *hrepr = data->representatives[0];
                        assert(hrepr != 0);
                        repr->havePortion = (Interval *)malloc(sizeof(Interval) * data->rank);
                        memcpy(repr->havePortion, hrepr->havePortion, sizeof(Interval) * data->rank);
                    } else
                        repr->havePortion = 0;

                    long memNeeded = data->typeSize;
                    int k;
                    for (k = 0; k < data->rank; k++)
                        memNeeded *= repr->havePortion[k][1] - repr->havePortion[k][0] + 1;
                    // NOTE(review): for a non-CUDA device (j > 0) deviceAddr is left
                    // uninitialized here -- presumably such devices never take this
                    // path or set it elsewhere; confirm.
                    if (devices[j]->deviceType == dtCuda) {
                        int deviceNum = ((CudaDevice *)devices[j])->index;
                        assertCuda(cudaSetDevice(deviceNum));
                        assertCuda(cudaMalloc(&repr->deviceAddr, memNeeded));
                    }
                }
                assert(repr != 0);
                if (data->rank > 0) {
                    // Extend the device-local part by shadow widths, clip it to
                    // the array space, and refresh the intersection with the IN
                    // pieces on this device.
                    DvmhPieces *p1 = piecesNew(0, data->rank);
                    piecesAppendOne(data->rank, p1, rdata->localParts[j]);
                    int k;
                    for (k = 0; k < data->rank; k++) {
                        p1->pieces[0][k][0] -= data->shdWidths[k][0];
                        p1->pieces[0][k][1] += data->shdWidths[k][1];

                        if (p1->pieces[0][k][0] < data->space[k][0])
                            p1->pieces[0][k][0] = data->space[k][0];
                        if (p1->pieces[0][k][1] > data->space[k][1])
                            p1->pieces[0][k][1] = data->space[k][1];
                    }
                    DvmhPieces *p2 = piecesIntersect(data->rank, p1, rdata->inPieces);
                    for (k = 0; k < p2->piecesCount; k++)
                        performGetActual(data, j, p2->pieces[k]);
                    piecesDelete(p2);
                    piecesDelete(p1);
                }
            }
        }
    if (data->rank > 0) {
        // NOTE(review): by the names, the OUT/LOCAL portions of localPart are
        // invalidated everywhere (clearActual) and then marked actual on the
        // device that will produce them (piecesAppend) -- confirm the helpers'
        // semantics.
        DvmhPieces *p1 = piecesNew(0, data->rank);
        piecesAppendOne(data->rank, p1, data->localPart);
        DvmhPieces *p2 = piecesIntersect(data->rank, p1, rdata->outPieces);
        clearActual(data, p2);
        piecesDelete(p2);
        p2 = piecesIntersect(data->rank, p1, rdata->localPieces);
        clearActual(data, p2);
        piecesDelete(p2);
        piecesDelete(p1);
        for (j = 0; j < devicesCount; j++)
            if ((devicesMask & (1l << j)) && rdata->localParts[j]) {
                DvmhRepresentative *repr = data->representatives[j];
                assert(repr != 0);
                p1 = piecesNew(0, data->rank);
                piecesAppendOne(data->rank, p1, rdata->localParts[j]);
                p2 = piecesIntersect(data->rank, p1, rdata->outPieces);
                piecesAppend(data->rank, repr->actualState, p2);
                piecesDelete(p2);
                p2 = piecesIntersect(data->rank, p1, rdata->localPieces);
                piecesAppend(data->rank, repr->actualState, p2);
                piecesDelete(p2);
                piecesDelete(p1);
            }
    }
    dvmh_log(TRACE, "Data is mapped");
}

// Notification of the set of device types the region has been prepared for.
// First parameter - region reference returned by region_create_(). Second
// parameter - bitwise union of device types (DEVICE_TYPE_HOST = 1,
// DEVICE_TYPE_CUDA = 2).
void region_prepared_for_devices_(DvmhRegionRef *regionRef, long *devicesRef) {
    DvmhRegion *region = (DvmhRegion *)*regionRef;
    long devicesMask = 0;
    int i;
    // A device participates when it has execution slots and its type bit is
    // present in *devicesRef.
    for (i = 0; i < devicesCount; i++)
        if (devices[i]->slotCount > 0)
            devicesMask |= (long)(((1l << devices[i]->deviceType) & *devicesRef) != 0) << i;

    // map onto devices
    region->usesDevices = devicesMask;
    // Fix: usesDevices is a long, so it must be printed with %ld, not %d.
    dvmh_log(DEBUG, "devices choosen for mapping = %ld", region->usesDevices);
    dictForEach(region->dspaces, mapSpacesOnDevices, 1, region);
    dictForEach(region->datas, mapDatasOnDevices, 1, region);
}

// Start of the computational part of a region. If the region is empty, this
// function need not be called. Currently a no-op placeholder.
void region_inner_start_(DvmhRegionRef *regionRef) {
    // Nothing to do here
}

// Structure describing a reduction variable (or reduction array) of a loop.
typedef struct {
    char *array; // pointer to the actual storage of the reduction variable
    int arrayLength; // number of elements in the reduction array
    int arrayElementType; // type of a reduction-array element
    int arrayElementSize; // size in bytes of one reduction-array element
    char *locArray; // pointer to the actual storage of the LOC array
    int locElementSize; // size in bytes of one LOC-array element
    unsigned char funcNumber; // reduction function number

    char *arrayBackup; // temporary storage of the initial value (so it is not accumulated a huge number of times)
} DvmhReduction;

// Per-CUDA-device state of one reduction.
typedef struct {
    DvmhReduction *reduction; // the reduction this state belongs to
    char *gpuMem; // pointer to memory allocated on the device
    char *gpuLocMem;
    char **gpuMemPtr;
    char **gpuLocMemPtr;
    void *arrayBaseAddr; // base for the array of intermediate reduction values
    void *locBaseAddr; // base for the array of intermediate LOC values
    CudaOffsetType *gpuOffsetPtr; // variable that receives the offset of the intermediate reduction-values array
    CudaOffsetType *gpuLocOffsetPtr; // variable that receives the offset of the intermediate LOC-values array
} DvmhReductionCuda;

// A registered callback for executing a loop on one device type.
typedef struct {
    int isParallel; // HANDLER_TYPE_PARALLEL flag: may receive several execution slots
    int isMaster; // HANDLER_TYPE_MASTER flag: must run in the main host thread
    void (*f)(); // the handler function itself
    void **params; // user parameters forwarded to the handler (length=paramsCount)
    int paramsCount;
    int basesCount; // number of base-array pointers prepended to the handler call
} DvmhLoopHandler;
// One unit of loop work scheduled for execution on a single device.
typedef struct tag_DvmhLoopPortion {
    struct tag_DvmhLoop *loop; // the loop this portion belongs to
    LoopBounds *loopBounds; //Array of LoopBounds (length=loop->dimension)
    int deviceNum; // index into the devices[] array
    int slotsToUse; // execution slots consumed while this portion runs
    void *loopRef; // device-specific descriptor (DvmhSpecLoop/DvmhLoopCuda), set by loopPortionInit
    DvmhLoopHandler *handler; // handler chosen to execute this portion
} DvmhLoopPortion;

// Structure describing a DVMH loop:
typedef struct tag_DvmhLoop {
    DvmhRegion *region; // the computational region the loop belongs to
    int dimension; // number of dimensions
    LoopBounds *loopBounds; // sub-parallelepiped of DVM-loop iterations that came from outside and wants to be executed
    DvmhDistribRule *distribRule;
    pthread_mutex_t endedMut; // protects signaling of 'ended'
    pthread_cond_t ended; // signaled when the loop's last portion finishes
    pthread_mutex_t mut; // protects portionsCount
    int portionsCount;
    DvmhReduction **reductions; //Array of pointers to DvmhReduction (length=reductionsCount)
    int reductionsCount;
    DvmhLoopHandler **handlers[DEVICE_TYPES]; //Array of array of pointers to DvmhLoopHandler (length1=DEVICE_TYPES, length2=handlersCounts[i])
    int handlersCounts[DEVICE_TYPES];
    int cudaBlock[3]; // CUDA thread-block size (x, y, z)
} DvmhLoop;

// Common head of the device-specific loop descriptors: a back-pointer to
// the portion being executed.
#define SPEC_LOOP_COMMON \
DvmhLoopPortion *portion;

// Descriptor used for host execution.
typedef struct {
    SPEC_LOOP_COMMON
} DvmhSpecLoop;

// Descriptor used for CUDA execution.
typedef struct {
    SPEC_LOOP_COMMON
    int counter; // Temporary solution for driving loop execution - 0 - just started, 1 - in processing, 2 - finished
    int restBlocks; // Number of blocks not yet processed
    int latestBlocks; // Number of blocks last sent for processing
    int overallBlocks; // Total number of blocks (for the whole input loop)
    DvmhReductionCuda **reductions;
    int cudaDeviceNum;
    cudaStream_t cudaStream;
} DvmhLoopCuda;

#undef SPEC_LOOP_COMMON

// Creates the control structure for a parallel DVMH loop. First parameter -
// region reference returned by region_create_(). Second parameter - DVM
// descriptor of the parallel loop.
DvmhLoopRef loop_create_(DvmhRegionRef *regionRef, LoopRef *InDvmLoop) {
    DvmhRegion *region = (DvmhRegion *)*regionRef;
    assert(region != 0);
    DvmhLoop *loop = (DvmhLoop *)malloc(sizeof(DvmhLoop));
    loop->region = region;
    loop->reductions = 0;
    loop->reductionsCount = 0;
    s_PARLOOP *dvmLoop = (s_PARLOOP *)((SysHandle *)(*InDvmLoop))->pP;
    loop->dimension = dvmLoop->Rank;
    loop->loopBounds = (LoopBounds *)malloc(sizeof(LoopBounds) * loop->dimension);
    int i;
    for (i = 0; i < loop->dimension; i++) {
        loop->loopBounds[i][0] = (*dvmLoop->MapList[i].InitIndexPtr);
        loop->loopBounds[i][1] = (*dvmLoop->MapList[i].LastIndexPtr);
        loop->loopBounds[i][2] = (*dvmLoop->MapList[i].StepPtr);
        // Normalize to a positive step by swapping the bounds.
        if (loop->loopBounds[i][2] < 0) {
            loop->loopBounds[i][2] *= -1;
            long tmp = loop->loopBounds[i][0];
            loop->loopBounds[i][0] = loop->loopBounds[i][1];
            loop->loopBounds[i][1] = tmp;
        }
    }
    // Build the loop-to-distribution-space mapping rule, mirroring the
    // array-registration code.
    DvmhDistribSpace *dspace = dictFind(amViewDict, dvmLoop->AMView);
    assert(dspace != 0);
    DvmhDistribRule *rule = (DvmhDistribRule *)malloc(sizeof(DvmhDistribRule));
    rule->dspace = dspace;
    rule->map = (typeof(rule->map))malloc(sizeof(*rule->map) * dspace->rank);
    for (i = 0; i < dspace->rank; i++) {
        // Fix: index the alignment entry of the CURRENT dspace axis
        // ('+ i', consistent with the three lines below and with the array
        // registration code), instead of the constant '+ 1'.
        long starti = dvmLoop->InitIndex[dvmLoop->Align[loop->dimension + i].Axis > 0 ? dvmLoop->Align[loop->dimension + i].Axis - 1 : 0];
        rule->map[i].axisNumber = dvmLoop->Align[loop->dimension + i].Attr == align_REPLICATE ? -1 : dvmLoop->Align[loop->dimension + i].Axis;
        rule->map[i].multiplier = dvmLoop->Align[loop->dimension + i].A;
        rule->map[i].summand = dvmLoop->Align[loop->dimension + i].B - starti * dvmLoop->Align[loop->dimension + i].A;
        dvmh_log(DEBUG, "loop mapping Axis=%d A=%ld B=%ld", rule->map[i].axisNumber, rule->map[i].multiplier, rule->map[i].summand);
    }
    loop->distribRule = rule;
    memset(loop->handlers, 0, sizeof(loop->handlers));
    memset(loop->handlersCounts, 0, sizeof(loop->handlersCounts));
    eassert(pthread_mutex_init(&loop->mut, 0) == 0);
    loop->portionsCount = 0;
    eassert(pthread_mutex_init(&loop->endedMut, 0) == 0);
    eassert(pthread_cond_init(&loop->ended, 0) == 0);

    // Default CUDA thread-block shape chosen by loop dimensionality;
    // overridable via loop_set_cuda_block_().
    if (loop->dimension >= 3) {
        loop->cudaBlock[0] = 16;
        loop->cudaBlock[1] = 4;
        loop->cudaBlock[2] = 7;
    } else if (loop->dimension >= 2) {
        loop->cudaBlock[0] = 32;
        loop->cudaBlock[1] = 14;
        loop->cudaBlock[2] = 1;
    } else {
        loop->cudaBlock[0] = 480;
        loop->cudaBlock[1] = 1;
        loop->cudaBlock[2] = 1;
    }

    dvmh_log(TRACE, "loop created");
    return (DvmhLoopRef)loop;
}

// Adds a reduction to a parallel DVMH loop. First parameter - reference to
// the DVMH loop control structure. Second parameter - reference to the DVM
// reduction descriptor.
void loop_insred_(DvmhLoopRef *InDvmhLoop, RedRef *InRedRefPtr) {
    DvmhLoop *loop = (DvmhLoop *)*InDvmhLoop;
    // realloc(NULL, n) behaves as malloc(n), so a single growth path covers
    // both the first insertion (reductions == 0, reductionsCount == 0) and
    // all subsequent ones.
    loop->reductionsCount++;
    loop->reductions = (DvmhReduction **)realloc(loop->reductions, sizeof(DvmhReduction *) * loop->reductionsCount);
    DvmhReduction *reduction = (DvmhReduction *)malloc(sizeof(DvmhReduction));
    loop->reductions[loop->reductionsCount - 1] = reduction;
    s_REDVAR *dvmRVar = (s_REDVAR *)((SysHandle *)(*InRedRefPtr))->pP;
    reduction->array = dvmRVar->Mem;
    reduction->arrayLength = dvmRVar->VLength;
    reduction->arrayElementType = dvmRVar->VType;
    reduction->arrayElementSize = dvmRVar->RedElmLength;
    reduction->locArray = dvmRVar->LocMem;
    reduction->locElementSize = dvmRVar->LocElmLength;
    // MAXLOC/MINLOC are stored with the plain MAX/MIN function numbers.
    reduction->funcNumber = dvmRVar->Func == rf_MAXLOC ? rf_MAX : (dvmRVar->Func == rf_MINLOC ? rf_MIN : dvmRVar->Func);
    reduction->arrayBackup = 0;
}

// Sets the CUDA thread-block size. First parameter - reference to the DVMH
// loop control structure. Second parameter - block size along X. Third
// parameter - block size along Y. Fourth parameter - block size along Z. If
// a size along some axis was not given by the user, 1 should be passed for it.
void loop_set_cuda_block_(DvmhLoopRef *InDvmhLoop, long *InXRef, long *InYRef, long *InZRef) {
    DvmhLoop *loop = (DvmhLoop *)*InDvmhLoop;
    assert(loop != 0);
    long *axisRefs[3] = {InXRef, InYRef, InZRef};
    int axis;
    for (axis = 0; axis < 3; axis++)
        loop->cudaBlock[axis] = *axisRefs[axis];
    dvmh_log(TRACE, "Overriden CUDA block: (%d, %d, %d)", loop->cudaBlock[0], loop->cudaBlock[1], loop->cudaBlock[2]);
}

// Registers a handler for a parallel loop. First parameter - reference to the DVMH loop control structure. Second parameter - device type this handler suits (DEVICE_TYPE_HOST, ...). Third parameter - set of flags describing the handler (HANDLER_TYPE_PARALLEL = 1, HANDLER_TYPE_MASTER = 2). Fourth parameter - pointer to the handler function. Fifth parameter - number of base arrays passed to the handler. Sixth parameter - number of "user" parameters to pass to the handler. Seventh and further - those parameters (passed strictly by address).
// HANDLER_TYPE_PARALLEL means the handler is parallel, and the support system will at its discretion grant it several (1 or more) execution slots of the device. Such a handler must itself ask the support system for its slot count (loop_get_slot_number_()).
// HANDLER_TYPE_MASTER means the handler may execute only in the main thread of the host system (in particular, this implies no other instances of it will be run in parallel). This flag is set when technologies not ready for outer parallelism are used.
void loop_register_handler_(DvmhLoopRef *InDvmhLoop, long *deviceTypeRef, long *flagsRef, void (*f)(), long *basesCount, long *paramCount, ...) {
    DvmhLoop *loop = (DvmhLoop *)*InDvmhLoop;
    DvmhLoopHandler *handler = (DvmhLoopHandler *)malloc(sizeof(DvmhLoopHandler));
    handler->isParallel = (*flagsRef & (1l << htParallel)) != 0;
    handler->isMaster = (*flagsRef & (1l << htMaster)) != 0;
    // *deviceTypeRef is a one-bit device-type mask; ilog presumably yields
    // the bit position, i.e. the DeviceType enum value.
    enum DeviceType deviceType = (enum DeviceType)ilog(*deviceTypeRef);
    assert(deviceType >= 0 && deviceType < DEVICE_TYPES);
    handler->f = f;
    assert(*basesCount >= 0);
    handler->basesCount = *basesCount;
    assert(*paramCount >= 0);
    handler->paramsCount = *paramCount;
    if (*paramCount > 0) {
        // Collect the trailing variadic user parameters (addresses only).
        handler->params = (void **)malloc(sizeof(void *) * handler->paramsCount);
        va_list ap;
        va_start(ap, paramCount);
        int i;
        for (i = 0; i < *paramCount; i++)
            handler->params[i] = va_arg(ap, void *);
        va_end(ap);
    } else
        handler->params = 0;
    // Grow the handler table; realloc(NULL, n) acts as malloc(n), which also
    // covers the first registration for this device type.
    loop->handlersCounts[deviceType]++;
    loop->handlers[deviceType] = (DvmhLoopHandler **)realloc(loop->handlers[deviceType], sizeof(DvmhLoopHandler *) * loop->handlersCounts[deviceType]);
    loop->handlers[deviceType][loop->handlersCounts[deviceType] - 1] = handler;
    dvmh_log(TRACE, "registered handler #%d for device type %d", loop->handlersCounts[deviceType] - 1, deviceType);
}

// Restricts the loop's iteration bounds to the part that must execute on
// 'device', according to the loop's distribution rule and the device's part
// of the distribution space (rdspace->localParts[device]). Writes the
// resulting bounds to res (length = loop->dimension) and returns nonzero
// iff the device has any iterations to run.
static int mapLoopOnDevice(DvmhLoop *loop, int device, LoopBounds *res) {
    int hasLocal = 1;
    if (loop->dimension > 0) {
        memcpy(res, loop->loopBounds, sizeof(LoopBounds) * loop->dimension);
        DvmhDistribRule *rule = loop->distribRule;
        if (rule != 0) {
            DvmhDistribSpace *dspace = rule->dspace;
            assert(dspace != 0);
            DvmhRegionDistribSpace *rdspace = dictFind(loop->region->dspaces, dspace);
            assert(rdspace != 0);
            hasLocal = hasLocal && rdspace->localParts[device] != 0;
            if (hasLocal) {
                int i;
                for (i = 0; i < dspace->rank; i++) {
                    int ax = rule->map[i].axisNumber;
                    if (ax > -1) {
                        if (rule->map[i].multiplier == 0) {
                            // Constant alignment: the fixed index must fall inside
                            // the device's part of this dspace axis.
                            if (!(rule->map[i].summand >= rdspace->localParts[device][i][0] && rule->map[i].summand <= rdspace->localParts[device][i][1]))
                                hasLocal = 0;
                        } else {
                            // Raise the lower bound to the first iteration on the
                            // loop's step grid (res[..][2]) whose image falls into
                            // the device part.
                            long cand = (rdspace->localParts[device][i][0] - rule->map[i].summand + rule->map[i].multiplier - 1) /
                                    rule->map[i].multiplier;
                            if (cand > res[ax - 1][0])
                                res[ax - 1][0] = res[ax - 1][0] + (cand - res[ax - 1][0] + res[ax - 1][2] - 1) / res[ax - 1][2] * res[ax - 1][2];
                            // Symmetrically lower the upper bound along the same grid.
                            cand = (rdspace->localParts[device][i][1] - rule->map[i].summand) / rule->map[i].multiplier;
                            if (cand < res[ax - 1][1])
                                res[ax - 1][1] = res[ax - 1][1] - (res[ax - 1][1] - cand + res[ax - 1][2] - 1) / res[ax - 1][2] * res[ax - 1][2];
                            // Empty range on any axis means nothing to run here.
                            if (res[ax - 1][0] > res[ax - 1][1])
                                hasLocal = 0;
                        }
                    }
                }
            }
        }
    }
    if (hasLocal && loop->dimension > 0) {
        int i;
        for (i = 0; i < loop->dimension; i++)
            dvmh_log(DEBUG, "loop localPart [%ld..%ld] step %ld", res[i][0], res[i][1], res[i][2]);
    }
    return hasLocal;
}

// Invokes the portion's handler with the call convention:
// params[0] = &deviceNum, params[1] = &loopRef, then basesCount copies of
// the target device's default base pointer, then the user parameters.
static void performPortion(DvmhLoopPortion *portion) {
    DvmhLoopHandler *handler = portion->handler;
    int realParamsCount = handler->paramsCount + handler->basesCount + 2;
    void *realParams[realParamsCount];
    long deviceNum = portion->deviceNum;
    realParams[0] = &deviceNum;
    realParams[1] = &portion->loopRef;
    int i;
    for (i = 0; i < handler->basesCount; i++)
        realParams[2 + i] = devices[portion->deviceNum]->defaultBase;
    // Fix: handler->params is NULL when paramsCount == 0 (see
    // loop_register_handler_), and memcpy with a NULL source pointer is
    // undefined behavior even for a zero-byte copy, so guard it.
    if (handler->paramsCount > 0)
        memcpy(realParams + realParamsCount - handler->paramsCount, handler->params, sizeof(void *) * handler->paramsCount);
    executeFunction(handler->f, realParams, realParamsCount);
}

// Allocates and attaches the device-type-specific loop descriptor
// (DvmhLoopCuda for CUDA devices, DvmhSpecLoop for the host) to the portion
// via portion->loopRef.
static void loopPortionInit(DvmhLoopPortion *portion) {
    DvmhLoop *loop = portion->loop;
    if (devices[portion->deviceNum]->deviceType == dtCuda) {
        DvmhLoopCuda *cloop = (DvmhLoopCuda *)malloc(sizeof(DvmhLoopCuda));
        cloop->portion = portion;
        cloop->counter = 0;
        // NOTE(review): restBlocks, latestBlocks and overallBlocks are left
        // uninitialized here -- presumably filled in by the CUDA handler;
        // confirm.
        if (loop->reductionsCount > 0)
            cloop->reductions = (DvmhReductionCuda **)calloc(1, sizeof(DvmhReductionCuda *) * loop->reductionsCount);
        else
            cloop->reductions = 0;
        cloop->cudaDeviceNum = ((CudaDevice *)devices[portion->deviceNum])->index;
        cloop->cudaStream = 0;
        portion->loopRef = cloop;
    } else if (devices[portion->deviceNum]->deviceType == dtHost) {
        DvmhSpecLoop *sloop = (DvmhSpecLoop *)malloc(sizeof(DvmhSpecLoop));
        sloop->portion = portion;
        portion->loopRef = sloop;
    }
}

// Releases the device-specific loop descriptor attached by loopPortionInit().
static void loopPortionFinish(DvmhLoopPortion *portion) {
    DvmhSpecLoop *specLoop = portion->loopRef;
    assert(specLoop);
    int onCuda = devices[portion->deviceNum]->deviceType == dtCuda;
    if (onCuda) {
        // By now the CUDA reduction array must already have been released.
        DvmhLoopCuda *cudaLoop = (DvmhLoopCuda *)specLoop;
        assert(cudaLoop->reductions == 0);
    }
    portion->loopRef = 0;
    free(specLoop);
}

// Loop-completion hook; currently an empty placeholder (invoked for
// asynchronous regions right before loopDelete in performOnePortion).
static void loopFinish(DvmhLoop *loop) {
}

// Frees a loop descriptor together with everything it owns: the reduction
// descriptors, the loop bounds, the per-device-type handler tables and the
// synchronization primitives.
static void loopDelete(DvmhLoop *loop) {
    assert(loop != 0);
    // Count/pointer pairs must be consistent before tearing anything down.
    assert((loop->reductionsCount <= 0) == (loop->reductions == 0));
    int r;
    for (r = 0; r < loop->reductionsCount; r++) {
        assert(loop->reductions[r] != 0);
        free(loop->reductions[r]);
    }
    if (loop->reductions)
        free(loop->reductions);
    assert(loop->loopBounds != 0);
    free(loop->loopBounds);
    int dt;
    for (dt = 0; dt < DEVICE_TYPES; dt++) {
        assert((loop->handlersCounts[dt] <= 0) == (loop->handlers[dt] == 0));
        if (loop->handlers[dt]) {
            int h;
            for (h = 0; h < loop->handlersCounts[dt]; h++) {
                DvmhLoopHandler *handler = loop->handlers[dt][h];
                assert(handler != 0);
                assert((handler->paramsCount <= 0) == (handler->params == 0));
                if (handler->params)
                    free(handler->params);
                free(handler);
            }
            free(loop->handlers[dt]);
        }
    }
    eassert(pthread_mutex_destroy(&loop->mut) == 0);
    eassert(pthread_mutex_destroy(&loop->endedMut) == 0);
    eassert(pthread_cond_destroy(&loop->ended) == 0);
    free(loop);
}

// Executes one loop portion on its device in the calling performer thread:
// runs the handler, returns the consumed slots, frees the portion, and if
// it was the loop's last portion either finishes and deletes the loop
// (asynchronous region) or signals loop->ended (synchronous region).
static void performOnePortion(DvmhLoopPortion *portion) {
    CommonDevice *device = devices[portion->deviceNum];
    int cudaFlag = device->deviceType == dtCuda;
    if (cudaFlag) {
        // Bind this thread to the portion's CUDA device.
        assertCuda(cudaSetDevice(((CudaDevice *)device)->index));
    }

    DvmhLoop *loop = portion->loop; // saved: portion is freed below
    loopPortionInit(portion);
    performPortion(portion);
    loopPortionFinish(portion);
    if (cudaFlag) {
        // Drain the default stream and surface any asynchronous errors
        // raised by the handler's kernels.
        assertCuda(cudaStreamSynchronize(0));
        assertCuda(cudaGetLastError());
    }

    // Give the execution slots taken by this portion back to the device.
    eassert(pthread_mutex_lock(&device->mut) == 0);
    device->slotsLeft += portion->slotsToUse;
    eassert(pthread_mutex_unlock(&device->mut) == 0);

    free(portion->loopBounds);
    free(portion);

    int lastPortion = 0;
    eassert(pthread_mutex_lock(&loop->mut) == 0);
    loop->portionsCount--;
    lastPortion = loop->portionsCount == 0;
    eassert(pthread_mutex_unlock(&loop->mut) == 0);

    if (lastPortion) {
        if (loop->region->async) {
            loopFinish(loop);
            loopDelete(loop);
// TODO: Also do something else for asynchronous regions
        } else {
            // NOTE(review): a bare pthread_cond_signal with no predicate
            // visible here -- the waiting side must re-check a condition
            // under endedMut, otherwise this wakeup can be lost if the
            // signal happens before the wait; confirm the waiter.
            eassert(pthread_mutex_lock(&loop->endedMut) == 0);
            eassert(pthread_cond_signal(&loop->ended) == 0);
            eassert(pthread_mutex_unlock(&loop->endedMut) == 0);
        }
    }
}

// Thread body of one execution slot ("performer") of a device. After the
// device-specific one-time initialization, it loops forever: takes portions
// from the device's circular queue (sleeping on performerWakeup while the
// queue is empty or not enough slots are free) and executes them.
static void *performerFunc(void *arg) {
    DvmhPerformer *performer = arg;
    CommonDevice *device = devices[performer->deviceIndex];
    int cudaFlag = device->deviceType == dtCuda;
    int selfIndex = 0;
    int i;
    for (i = 0; i < device->slotCount; i++)
        if (performer == device->performers[i]) {
            selfIndex = i;
        }
    // The performer occupying slot 0 does the per-device setup below.
    int masterFlag = device->performers[0] == performer;
    dvmh_log(DEBUG, "Performer %d on device %d started cudaFlag=%d", selfIndex, performer->deviceIndex, cudaFlag);
    if (cudaFlag) {
        CudaDevice *cudaDevice = (CudaDevice *)device;
        assertCuda(cudaSetDevice(cudaDevice->index));
        if (masterFlag) {
            assertCuda(cudaSetDeviceFlags(cudaDeviceScheduleYield | cudaDeviceScheduleBlockingSync));
            // Small device allocation used as this device's default base
            // pointer (passed for handler base-array arguments).
            assertCuda(cudaMalloc(&device->defaultBase, 256));
            int i;
            for (i = 0; i < devicesCount; i++)
                if (devices[i] != device && devices[i]->deviceType == dtCuda) {
                    int canFlag;
                    int dev = ((CudaDevice *)devices[i])->index;
                    assertCuda(cudaDeviceCanAccessPeer(&canFlag, cudaDevice->index, dev));
                    // Peer access is deliberately disabled ("0 &&"): enabling
                    // it hangs, see the XXX note below.
                    if (0 && canFlag) {
// XXX: hangs here
                        assertCuda(cudaDeviceEnablePeerAccess(dev, 0));
                    } else
                        dvmh_log(INFO, "Not enabling Peer Access from device %d to device %d due to %s", cudaDevice->index, dev, (canFlag ? "system hanging" :
                                "impossibility"));
                }
        }
    }

    // Notify about readiness
    eassert(pthread_mutex_lock(&device->mut) == 0);
    device->performersCount++;
    eassert(pthread_mutex_unlock(&device->mut) == 0);

    for (;;) {
        DvmhLoopPortion *portion = 0;
        eassert(pthread_mutex_lock(&device->mut) == 0);
        while (!portion) {
            // Take the head portion if the queue is non-empty and enough
            // slots are free; otherwise sleep on the condition variable.
            if (device->queueEnd != device->queueStart && device->portionsQueue[device->queueStart]->slotsToUse <= device->slotsLeft) {
                portion = device->portionsQueue[device->queueStart];
                device->queueStart = (device->queueStart + 1) % device->queueSize;
                device->slotsLeft -= portion->slotsToUse;
            } else {
                device->sleeperCount++;
                dvmh_log(TRACE, "Performer %d on device %d is going to sleep", selfIndex, performer->deviceIndex);
                eassert(pthread_cond_wait(&device->performerWakeup, &device->mut) == 0);
                dvmh_log(TRACE, "Performer %d on device %d is awaken", selfIndex, performer->deviceIndex);
                device->sleeperCount--;
            }
        }
        // If more runnable work remains and someone is asleep, pass the
        // baton to another performer before releasing the mutex.
        if (device->sleeperCount > 0 && device->queueEnd != device->queueStart && device->portionsQueue[device->queueStart]->slotsToUse <= device->slotsLeft)
            eassert(pthread_cond_signal(&device->performerWakeup) == 0);
        eassert(pthread_mutex_unlock(&device->mut) == 0);

        assert(portion != 0);
        dvmh_log(TRACE, "Performing portion [%ld..%ld] on device %d by performer #%d. Time %lf", portion->loopBounds[0][0], portion->loopBounds[0][1],
                performer->deviceIndex, selfIndex, MPI_Wtime());
        performOnePortion(portion);
        dvmh_log(TRACE, "Performing finished on device %d by performer #%d. Time %lf", performer->deviceIndex, selfIndex, MPI_Wtime());
    }
}

// Marks the start of loop execution. Parameter - a reference to an object
// controlling the parallel DVMH loop.
//
// Builds the list of loop portions for every device the region uses,
// pushes non-master portions onto the devices' portion queues (performer
// threads pick them up), wakes sleeping performers where useful, and then
// runs the master portions synchronously on the calling thread.
void loop_start_(DvmhLoopRef *InDvmhLoop) {
    DvmhLoop *loop = (DvmhLoop *)*InDvmhLoop;
    assert(loop != 0);
    DvmhRegion *region = loop->region;
    assert(region != 0);
    DvmhLoopPortion **portions;
    int s = 0;
    int i;
    // Worst-case number of portions: one per device slot.
    for (i = 0; i < devicesCount; i++)
        s += devices[i]->slotCount;
    portions = (DvmhLoopPortion **)malloc(sizeof(DvmhLoopPortion *) * s);
    for (i = 0; i < devicesCount; i++)
        if (region->usesDevices & (1l << i)) {
            CommonDevice *device = devices[i];
            enum DeviceType devType = device->deviceType;
            assert(loop->handlers[devType] != 0 && loop->handlersCounts[devType] > 0);

            LoopBounds partialBounds[loop->dimension];
            if (mapLoopOnDevice(loop, i, partialBounds)) {
// TODO: choose optimal handler for particular loop for particular device
                DvmhLoopHandler *handler = loop->handlers[devType][0];
                if (handler->isMaster || handler->isParallel) {
// TODO: several parallel handlers on one device simultaneously
                    // One portion on device
                    loop->portionsCount++;
                    DvmhLoopPortion *portion = (DvmhLoopPortion *)malloc(sizeof(DvmhLoopPortion));
                    portion->loop = loop;
                    portion->deviceNum = i;
                    // A parallel handler occupies every slot of the device;
                    // a master handler runs on the calling thread (one slot).
                    portion->slotsToUse = handler->isParallel ? device->slotCount : 1;
                    portion->loopBounds = (LoopBounds *)malloc(sizeof(LoopBounds) * loop->dimension);
                    portion->handler = handler;
                    memcpy(portion->loopBounds, partialBounds, sizeof(LoopBounds) * loop->dimension);
                    portion->loopRef = 0;
                    portions[loop->portionsCount - 1] = portion;
                } else {
                    // Serial handler: split the outermost dimension of the
                    // device's part evenly between the device's slots.
                    long turns = loop->dimension ? (partialBounds[0][1] - partialBounds[0][0]) / partialBounds[0][2] + 1 : 1;
                    int j;
                    for (j = 0; j < device->slotCount; j++) {
                        loop->portionsCount++;
                        DvmhLoopPortion *portion = (DvmhLoopPortion *)malloc(sizeof(DvmhLoopPortion));
                        portion->loop = loop;
                        portion->deviceNum = i;
                        portion->slotsToUse = 1;
                        portion->loopBounds = (LoopBounds *)malloc(sizeof(LoopBounds) * loop->dimension);
                        portion->handler = handler;
                        memcpy(portion->loopBounds, partialBounds, sizeof(LoopBounds) * loop->dimension);
                        // Slot j covers iterations [j*turns/slots, (j+1)*turns/slots)
                        // of the outermost dimension, scaled by the step.
                        portion->loopBounds[0][0] += (j * turns / device->slotCount) * partialBounds[0][2];
                        portion->loopBounds[0][1] = partialBounds[0][0] + ((j + 1) * turns / device->slotCount - 1) * partialBounds[0][2];
                        portion->loopRef = 0;
                        portions[loop->portionsCount - 1] = portion;
                    }
// TODO: cut proper portion
                }
            }
        }
    int localPortionsCount = loop->portionsCount;
    int masterPortionsCount = 0;
    // Compact master portions to the front of 'portions' (they are executed
    // by this thread at the end); enqueue everything else on its device's
    // ring buffer, spinning (with a short sleep) while the queue is full.
    for (i = 0; i < localPortionsCount; i++) {
        DvmhLoopPortion *portion = portions[i];
        if (portion->handler->isMaster) {
            if (i > masterPortionsCount)
                portions[masterPortionsCount] = portions[i];
            masterPortionsCount++;
        } else {
            CommonDevice *device = devices[portion->deviceNum];
            int okFlag = 0;
            while (!okFlag) {
                // Cheap unlocked check first; recheck under the mutex before
                // actually inserting into the ring buffer.
                if ((device->queueEnd + 1) % device->queueSize != device->queueStart) {
                    eassert(pthread_mutex_lock(&device->mut) == 0);
                    if ((device->queueEnd + 1) % device->queueSize != device->queueStart) {
                        okFlag = 1;
                        device->portionsQueue[device->queueEnd] = portion;
                        device->queueEnd = (device->queueEnd + 1) % device->queueSize;
                    }
                    eassert(pthread_mutex_unlock(&device->mut) == 0);
                }
                if (!okFlag) {
                    dvmh_log(DEBUG, "Portion queue on device %d is full. Sleeping", portion->deviceNum);
                    usleep(500);
                }
            }
        }
    }
    // Wake a sleeping performer on each used device whose queue head can run
    // with the slots currently free (same unlocked-precheck / locked-recheck
    // pattern as above).
    for (i = 0; i < devicesCount; i++)
        if (region->usesDevices & (1l << i)) {
            CommonDevice *device = devices[i];
            if (device->sleeperCount > 0 && device->queueEnd != device->queueStart &&
                    device->portionsQueue[device->queueStart]->slotsToUse <= device->slotsLeft) {
                eassert(pthread_mutex_lock(&device->mut) == 0);
                if (device->sleeperCount > 0 && device->queueEnd != device->queueStart &&
                        device->portionsQueue[device->queueStart]->slotsToUse <= device->slotsLeft)
                    eassert(pthread_cond_signal(&device->performerWakeup) == 0);
                eassert(pthread_mutex_unlock(&device->mut) == 0);
            }
        }
    // Master portions execute synchronously on the calling thread.
    for (i = 0; i < masterPortionsCount; i++) {
        DvmhLoopPortion *portion = portions[i];
        dvmh_log(TRACE, "Performing portion [%ld..%ld] on device %d by master thread. Time %lf", portion->loopBounds[0][0], portion->loopBounds[0][1],
                portion->deviceNum, MPI_Wtime());
        performOnePortion(portion);
        dvmh_log(TRACE, "Performing finished on device %d by master thread. Time %lf", portion->deviceNum, MPI_Wtime());
    }
    free(portions);
}

// These functions are called from separate subroutines (handlers); the main
// program has no bases other than the DVM ones and no traces of CUDA at all.
// Fills in the loop bounds (and steps) for executing a loop portion by a
// handler. First parameter - the new local handle of the parallel DVMH loop
// that is passed as an argument to the handler.
void loop_fill_bounds_(DvmhLoopRef *InDvmhLoop, IndexType lowIndex[], IndexType highIndex[], IndexType stepIndex[]) {
    DvmhSpecLoop *sloop = (DvmhSpecLoop *)*InDvmhLoop;
    assert(sloop);
    assert(sloop->portion);
    DvmhLoop *loop = sloop->portion->loop;
    assert(loop);
    // Copy the portion's per-dimension triplets (low, high, step) into the
    // three caller-provided arrays.
    int dim = 0;
    while (dim < loop->dimension) {
        lowIndex[dim] = sloop->portion->loopBounds[dim][0];
        highIndex[dim] = sloop->portion->loopBounds[dim][1];
        stepIndex[dim] = sloop->portion->loopBounds[dim][2];
        dim++;
    }
}

// Fills a handler-local reduction array and its companion loc array with the
// initial values held by the RTS. First parameter - the new local handle of
// the parallel DVMH loop passed to the handler. Second parameter - the
// ordinal number of the reduction function of the loop. Third parameter -
// pointer to the handler-local instance of the reduction array. Fourth
// parameter - pointer to the handler-local instance of the loc array.
void loop_red_init(DvmhLoopRef *InDvmhLoop, int InRedNum, void *arrayPtr, void *locPtr) {
    DvmhSpecLoop *sloop = (DvmhSpecLoop *)*InDvmhLoop;
    assert(sloop);
    assert(sloop->portion);
    assert(sloop->portion->loop);
    DvmhReduction *reduction = sloop->portion->loop->reductions[InRedNum];
    // Either destination may be absent; copy only what both sides provide.
    if (arrayPtr != 0 && reduction->array != 0)
        memcpy(arrayPtr, reduction->array, reduction->arrayElementSize * reduction->arrayLength);
    if (locPtr != 0 && reduction->locArray != 0)
        memcpy(locPtr, reduction->locArray, reduction->locElementSize * reduction->arrayLength);
}

// Fortran version of loop_red_init(): the reduction number arrives by
// reference as a long.
void loop_red_init_(DvmhLoopRef *InDvmhLoop, long *InRedNumRef, void *arrayPtr, void *locPtr) {
    int redNum = (int)*InRedNumRef;
    loop_red_init(InDvmhLoop, redNum, arrayPtr, locPtr);
}

// Registers the variables that will receive the offsets for a reduction
// variable and its LOC counterpart. First parameter - the new local handle
// of the parallel DVMH loop passed to the handler. Second parameter - the
// ordinal number of the reduction function of the loop (zero-based). Third
// parameter - device base for the reduction variable. Fourth parameter -
// device base for the LOC variable. Fifth parameter - variable that will
// receive the offset of the reduction variable. Sixth parameter - address of
// the variable that will receive the LOC offset; pass zero (dvm0c0) when
// there is no LOC.
void loop_register_red_cuda_(DvmhLoopRef *InDvmhLoop, long *InRedNumRef, void *InDeviceArrayBaseAddr, void *InDeviceLocBaseAddr, CudaOffsetTypeRef *ArrayOffsetPtr,
        CudaOffsetTypeRef *LocOffsetPtr) {
    DvmhLoopCuda *cloop = (DvmhLoopCuda *)*InDvmhLoop;
    assert(cloop);
    assert(cloop->portion);
    DvmhLoop *loop = cloop->portion->loop;
    assert(loop);
    long redNum = *InRedNumRef;
    assert(redNum < loop->reductionsCount && redNum >= 0);
    assert(cloop->reductions[redNum] == 0);
    DvmhReductionCuda *reduction = (DvmhReductionCuda *)malloc(sizeof(DvmhReductionCuda));
    // Base-and-offset addressing mode: record the device bases and the
    // destinations for the offsets; the direct-pointer fields stay unset.
    reduction->reduction = loop->reductions[redNum];
    reduction->arrayBaseAddr = InDeviceArrayBaseAddr;
    reduction->locBaseAddr = InDeviceLocBaseAddr;
    reduction->gpuOffsetPtr = (CudaOffsetType *)*ArrayOffsetPtr;
    reduction->gpuLocOffsetPtr = (CudaOffsetType *)*LocOffsetPtr;
    reduction->gpuMem = 0;
    reduction->gpuLocMem = 0;
    reduction->gpuMemPtr = 0;
    reduction->gpuLocMemPtr = 0;
    cloop->reductions[redNum] = reduction;
    dvmh_log(TRACE, "Reduction #%ld registered for CUDA", redNum);
}

// C flavor without bases or offsets. During loop iteration, *ArrayPtr and
// *LocPtr will be filled with device-memory addresses into which the kernel
// writes its per-block information.
void loop_register_red_cuda(DvmhLoopRef *InDvmhLoop, int InRedNum, void **ArrayPtr, void **LocPtr) {
    DvmhLoopCuda *cloop = (DvmhLoopCuda *)*InDvmhLoop;
    assert(cloop);
    assert(cloop->portion);
    DvmhLoop *loop = cloop->portion->loop;
    assert(loop);
    assert(InRedNum < loop->reductionsCount && InRedNum >= 0);
    assert(cloop->reductions[InRedNum] == 0);
    DvmhReductionCuda *reduction = (DvmhReductionCuda *)malloc(sizeof(DvmhReductionCuda));
    // Direct-pointer addressing mode: the base/offset fields stay unset and
    // the caller-provided pointer slots get filled when device memory is
    // allocated for the loop.
    reduction->reduction = loop->reductions[InRedNum];
    reduction->arrayBaseAddr = 0;
    reduction->locBaseAddr = 0;
    reduction->gpuOffsetPtr = 0;
    reduction->gpuLocOffsetPtr = 0;
    reduction->gpuMem = 0;
    reduction->gpuLocMem = 0;
    reduction->gpuMemPtr = (char **)ArrayPtr;
    reduction->gpuLocMemPtr = (char **)LocPtr;
    cloop->reductions[InRedNum] = reduction;
    dvmh_log(TRACE, "Reduction #%d registered for CUDA", InRedNum);
}

// Expands to a call of the type/function-specific helper
// red_<func>_<type>(), which merges 'items' per-block partial results from
// reduction->gpuMem (plus the accompanying loc data, if any) into the
// host-side result array, and marks the combination as handled via
// 'catched'. Relies on 'items', 'reduction' and 'catched' being in scope at
// the expansion site.
#define CALL_REDFUNC(func, type) { red_##func##_##type(items, (type *)reduction->gpuMem, reduction->reduction->arrayLength, reduction->gpuLocMem, \
 reduction->reduction->locElementSize, (type *)reduction->reduction->array, reduction->reduction->locArray); catched = 1; }
// Completes a CUDA reduction on the host for one registered reduction
// operation. Only the (float, MAX) combination is implemented so far; any
// other element-type/function pair is reported with a warning and skipped.
static void finishReductionCuda(int items, DvmhReductionCuda *reduction) {
    int catched = 0;
    switch(reduction->reduction->arrayElementType) {
    case rt_FLOAT:
        switch (reduction->reduction->funcNumber) {
        case rf_MAX: CALL_REDFUNC(max, float); break;
        }
        break;
    }
    if (!catched)
        dvmh_log(WARNING, "Reduction for this combination (%d, %d) of type and function is not implemented", reduction->reduction->arrayElementType,
                reduction->reduction->funcNumber);
}
#undef CALL_REDFUNC

// C flavor without bases and offsets. *InOutBlocks receives the device
// address where the per-block iteration info resides.
//
// Iterates kernel launches for one loop portion: returns 1 when a kernel
// must be launched after this call and 0 when the portion is finished.
// Driven by cloop->counter as a small state machine:
//   counter == 0: first call - compute the block distribution, allocate
//                 device buffers for reduction partial results and publish
//                 their addresses/offsets to the handler;
//   counter == 1: launch phase - hand out at most 'maxBlocks' blocks per
//                 call, advancing the blocks-info pointer and the reduction
//                 buffers past already-executed blocks;
//   counter == 2: finish - merge partial reductions on the host, free the
//                 device buffers and report completion.
long loop_do_cuda(DvmhLoopRef *InDvmhLoop, dim3 *OutBlocks, dim3 *OutThreads, cudaStream_t *OutStream, IndexType **InOutBlocks) {
    DvmhLoopCuda *cloop = (DvmhLoopCuda *)*InDvmhLoop;
    DvmhLoop *loop = cloop->portion->loop;

    OutThreads->x = loop->cudaBlock[0];
    OutThreads->y = loop->cudaBlock[1];
    OutThreads->z = loop->cudaBlock[2];
    *OutStream = cloop->cudaStream;
    // Catch launch-configuration errors from the previous kernel invocation.
    if (cloop->counter > 0)
        assertCuda(cudaGetLastError());
    if (cloop->counter == 0) {
        int resBlocks;
        IndexType *deviceBlocksInfo;
        get_distribution(loop->dimension, cloop->portion->loopBounds, loop->cudaBlock, &resBlocks, &deviceBlocksInfo);
        *InOutBlocks = deviceBlocksInfo;
        assert((cloop->reductions == 0) == (loop->reductions == 0));
        int i;
        for (i = 0; i < loop->reductionsCount; i++)
            assert(cloop->reductions[i] != 0 && loop->reductions[i] != 0);
        if (cloop->reductions) {
            int i;
            for (i = 0; i < loop->reductionsCount; i++) {
                // One partial-result slot per block for every reduction.
                // Check the allocation: an unchecked failure here would
                // publish a garbage device pointer to the handler.
                assertCuda(cudaMalloc((void **)&cloop->reductions[i]->gpuMem,
                        loop->reductions[i]->arrayLength * loop->reductions[i]->arrayElementSize * resBlocks));
                if (cloop->reductions[i]->gpuOffsetPtr)
                    // Fortran flavor: publish a base-relative element offset
                    // (the trailing -1 presumably adjusts for Fortran
                    // 1-based indexing - kept as is).
                    *cloop->reductions[i]->gpuOffsetPtr = ((long)cloop->reductions[i]->gpuMem - (long)cloop->reductions[i]->arrayBaseAddr) /
                            loop->reductions[i]->arrayElementSize - 1;
                else
                    // C flavor: publish the device pointer directly.
                    *cloop->reductions[i]->gpuMemPtr = cloop->reductions[i]->gpuMem;
                if (cloop->reductions[i]->gpuLocOffsetPtr || cloop->reductions[i]->gpuLocMemPtr) {
                    assertCuda(cudaMalloc((void **)&cloop->reductions[i]->gpuLocMem,
                            loop->reductions[i]->arrayLength * loop->reductions[i]->locElementSize * resBlocks));
                    if (cloop->reductions[i]->gpuLocOffsetPtr)
                        *cloop->reductions[i]->gpuLocOffsetPtr = ((long)cloop->reductions[i]->gpuLocMem - (long)cloop->reductions[i]->locBaseAddr) /
                                loop->reductions[i]->locElementSize - 1;
                    else
                        *cloop->reductions[i]->gpuLocMemPtr = cloop->reductions[i]->gpuLocMem;
                }
                /*if (loop->reductions[i]->funcNumber == rf_SUM || loop->reductions[i]->funcNumber == rf_MULT || loop->reductions[i]->funcNumber == rf_XOR) {
                    loop->reductions[i]->arrayBackup = (char *)malloc(loop->reductions[i]->arrayLength * loop->reductions[i]->arrayElementSize);
                    memcpy(loop->reductions[i]->arrayBackup, loop->reductions[i]->array,
                            loop->reductions[i]->arrayLength * loop->reductions[i]->arrayElementSize);
                    if (loop->reductions[i]->funcNumber == rf_MULT) {
                        int j;
                        for (j = 0; j < loop->reductions[i]->arrayLength; j++) {
                            char *ptr = loop->reductions[i]->array + j * loop->reductions[i]->arrayElementSize;
                            switch (loop->reductions[i]->arrayElementType) {
                            case rt_CHAR: *(char *)ptr = 1; break;
                            case rt_INT: *(int *)ptr = 1; break;
                            case rt_LONG: *(long *)ptr = 1; break;
                            case rt_FLOAT: *(float *)ptr = 1.0f; break;
                            case rt_DOUBLE: *(double *)ptr = 1.0; break;
                            case rt_FLOAT_COMPLEX: *(float *)ptr = 1.0f; *((float *)ptr + 1) = 0.0f; break;
                            case rt_DOUBLE_COMPLEX: *(double *)ptr = 1.0; *((double *)ptr + 1) = 0.0; break;
                            }
                        }
                    } else {
                        int j;
                        for (j = 0; j < loop->reductions[i]->arrayLength; j++) {
                            char *ptr = loop->reductions[i]->array + j * loop->reductions[i]->arrayElementSize;
                            switch (loop->reductions[i]->arrayElementType) {
                            case rt_CHAR: *(char *)ptr = 0; break;
                            case rt_INT: *(int *)ptr = 0; break;
                            case rt_LONG: *(long *)ptr = 0; break;
                            case rt_FLOAT: *(float *)ptr = 0.0f; break;
                            case rt_DOUBLE: *(double *)ptr = 0.0; break;
                            case rt_FLOAT_COMPLEX: *(float *)ptr = 0.0f; *((float *)ptr + 1) = 0.0f; break;
                            case rt_DOUBLE_COMPLEX: *(double *)ptr = 0.0; *((double *)ptr + 1) = 0.0; break;
                            }
                        }
                    }
                }*/
            }
        }
        cloop->overallBlocks = resBlocks;
        cloop->restBlocks = resBlocks;
        cloop->latestBlocks = 0;
        cloop->counter++;
    }
    if (cloop->counter == 1) {
        if (cloop->restBlocks <= 0) {
            cloop->counter++;
        } else {
// TODO: cudaProps
            int maxBlocks = 65535;
            // Either everything that is left, half of it, or the hard cap -
            // halving spreads the remainder over two comparable launches.
            int toExec = cloop->restBlocks <= maxBlocks ? cloop->restBlocks : (cloop->restBlocks / 2 <= maxBlocks ? cloop->restBlocks / 2 : maxBlocks);
            // Skip the blocks-info entries (dimension*2 indices per block)
            // of the blocks already executed by the previous launch.
            *InOutBlocks += cloop->latestBlocks * loop->dimension * 2;
            if (cloop->reductions) {
                int i;
                // Advance each reduction's partial-result area past the
                // slots already written by the previous launch.
                for (i = 0; i < loop->reductionsCount; i++) {
                    if (cloop->reductions[i]->gpuOffsetPtr)
                        *cloop->reductions[i]->gpuOffsetPtr += cloop->latestBlocks * loop->reductions[i]->arrayLength;
                    else
                        *cloop->reductions[i]->gpuMemPtr += cloop->latestBlocks * loop->reductions[i]->arrayLength * loop->reductions[i]->arrayElementSize;
                    if (cloop->reductions[i]->gpuLocOffsetPtr)
                        *cloop->reductions[i]->gpuLocOffsetPtr += cloop->latestBlocks * loop->reductions[i]->arrayLength;
                    else if (cloop->reductions[i]->gpuLocMemPtr)
                        *cloop->reductions[i]->gpuLocMemPtr += cloop->latestBlocks * loop->reductions[i]->arrayLength * loop->reductions[i]->locElementSize;
                }
            }
            OutBlocks->x = toExec;
            OutBlocks->y = 1;
            OutBlocks->z = 1;
            cloop->latestBlocks = toExec;
            cloop->restBlocks -= toExec;
            dvmh_log(TRACE, "loop_do_cuda_ block=(%d,%d,%d) grid=(%d,%d,%d)", OutThreads->x, OutThreads->y, OutThreads->z, OutBlocks->x, OutBlocks->y,
                    OutBlocks->z);
            return 1;
        }
    }
    if (cloop->counter == 2) {
        *InOutBlocks = 0;
        if (cloop->reductions) {
            int i;
            for (i = 0; i < loop->reductionsCount; i++) {
// TODO: finish reduction
                finishReductionCuda(cloop->overallBlocks, cloop->reductions[i]);
                if (cloop->reductions[i]->gpuMem)
                    assertCuda(cudaFree(cloop->reductions[i]->gpuMem));
                if (cloop->reductions[i]->gpuLocMem)
                    assertCuda(cudaFree(cloop->reductions[i]->gpuLocMem));
                free(cloop->reductions[i]);
            }
            free(cloop->reductions);
            cloop->reductions = 0;
        }
        //assertCuda(cudaThreadSynchronize());
        size_t freeMemSize, totalMemSize;
        assertCuda(cudaMemGetInfo(&freeMemSize, &totalMemSize));
        dvmh_log(TRACE, "loop finished tot=%zu free=%zu", totalMemSize, freeMemSize);
        return 0;
    }
    dvmh_log(WARNING, "loop_do_cuda_ unusual branch");
    return 0;
}

// Call for iterating kernel launches on the GPU. Returns 1 if a kernel must
// be launched after this call and 0 when it is time to finish. First
// parameter - the new local handle of the parallel DVMH loop passed to the
// handler. Second parameter - dim3 variable that receives the block-grid
// dimensions for the kernel launch. Third parameter - dim3 variable that
// receives the thread-block dimensions. Fourth parameter - cudaStream_t
// variable that receives the CUDA stream for the launch. Fifth parameter -
// base address for addressing the blocks info (its type is IndexType).
// Sixth parameter - variable that receives the blocks-info offset relative
// to the base.
long loop_do_cuda_(DvmhLoopRef *InDvmhLoop, dim3 *OutBlocks, dim3 *OutThreads, cudaStream_t *OutStream, void *InDeviceBaseAddr, CudaOffsetType *InOutBlocksOffs) {
    // Rebuild the device pointer from the base and the InOut offset carried
    // over from the previous call: at counter == 1 loop_do_cuda() advances
    // the pointer in place (*InOutBlocks += ...), so starting from an
    // uninitialized local here would compute a garbage offset on the second
    // and later launches. On the very first call the value is overwritten
    // unconditionally, so a not-yet-meaningful incoming offset is harmless.
    IndexType *deviceBlocksInfo = (IndexType *)InDeviceBaseAddr + *InOutBlocksOffs;
    long res = loop_do_cuda(InDvmhLoop, OutBlocks, OutThreads, OutStream, &deviceBlocksInfo);
    *InOutBlocksOffs = ((long)deviceBlocksInfo - (long)InDeviceBaseAddr) / sizeof(IndexType);
    return res;
}

// Returns the results of a partial reduction back to the RTS. First
// parameter - the new local handle of the parallel DVMH loop passed to the
// handler. Second parameter - the ordinal number of the reduction function
// of the loop. Third and fourth - the results produced by the loop-portion
// handler.
void loop_red_post_(DvmhLoopRef *InDvmhLoop, long *InRedNumRef, void *arrayPtr, void *locPtr) {
    DvmhSpecLoop *sloop = (DvmhSpecLoop *)*InDvmhLoop;
    assert(sloop);
    assert(sloop->portion);
    assert(sloop->portion->loop);
    long redNum = *InRedNumRef;
    assert(redNum >= 0 && redNum < sloop->portion->loop->reductionsCount);
    DvmhReduction *reduction = sloop->portion->loop->reductions[redNum];
    // Only the (float, MAX) combination is merged so far; all other
    // type/function pairs fall through unchanged.
    if (reduction->arrayElementType == rt_FLOAT) {
        if (reduction->funcNumber == rf_MAX)
            *((float *)reduction->array) = max(*((float *)reduction->array), *(float *)arrayPtr);
    }
}

// End of a loop. Parameter - a reference to an object controlling the
// parallel DVMH loop.
void loop_end_(DvmhLoopRef *loopRef) {
    DvmhLoop *loop = (DvmhLoop *)*loopRef;
    assert(loop != 0);
    DvmhRegion *region = loop->region;
    assert(region != 0);
    // For an asynchronous region the loop is left running; otherwise block
    // until every portion has been performed, then finalize the loop.
    if (!region->async) {
        dvmh_log(TRACE, "Signal wait");
        eassert(pthread_mutex_lock(&loop->endedMut) == 0);
        for (;;) {
            if (loop->portionsCount <= 0)
                break;
            eassert(pthread_cond_wait(&loop->ended, &loop->endedMut) == 0);
        }
        eassert(pthread_mutex_unlock(&loop->endedMut) == 0);
        loopFinish(loop);
        loopDelete(loop);
    }
    dvmh_log(TRACE, "loop ended async=%d", region->async);
}

// Dictionary callback: releases one DvmhRegionData record when a region
// ends - deletes the in/out piece sets, clears and deletes the local piece
// set, then frees the per-device local parts and the record itself.
static void regionDataFree(void *key, void *value) {
    DvmhRegionData *rdata = value;
    int dev;
    assert(rdata != 0);
    assert(rdata->inPieces != 0);
    assert(rdata->outPieces != 0);
    assert(rdata->localPieces != 0);
    assert(rdata->localParts != 0);
    piecesDelete(rdata->inPieces);
    piecesDelete(rdata->outPieces);
    clearActual(rdata->data, rdata->localPieces);
    piecesDelete(rdata->localPieces);
    for (dev = 0; dev < devicesCount; dev++)
        if (rdata->localParts[dev] != 0)
            free(rdata->localParts[dev]);
    free(rdata->localParts);
    free(rdata);
}

// Dictionary callback: releases one DvmhRegionDistribSpace record when a
// region ends - frees every per-device local part and the record itself.
static void regionDistribSpaceFree(void *key, void *value) {
    DvmhRegionDistribSpace *rdspace = value;
    int dev;
    assert(rdspace != 0);
    assert(rdspace->localParts != 0);
    for (dev = 0; dev < devicesCount; dev++) {
        void *part = rdspace->localParts[dev];
        if (part != 0)
            free(part);
    }
    free(rdspace->localParts);
    free(rdspace);
}

// End of a region. Parameter - the region reference returned by
// region_create_().
void region_end_(DvmhRegionRef *regionRef) {
    DvmhRegion *region = (DvmhRegion *)*regionRef;
    assert(region != 0);
    assert(region->datas != 0);
    // Release per-array bookkeeping first, then per-distrib-space
    // bookkeeping, then the region record itself.
    dictForEach(region->datas, regionDataFree, 0);
    dictDelete(region->datas);
    dictForEach(region->dspaces, regionDistribSpaceFree, 0);
    dictDelete(region->dspaces);
    free(region);
    dvmh_log(TRACE, "region_end");
}
