/*
 * Copyright 2012 INRIA Paris-Rocquencourt
 * Copyright 2012 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Tobias Grosser, INRIA Paris-Rocquencourt,
 * Domaine de Voluceau, Rocquenqourt, B.P. 105,
 * 78153 Le Chesnay Cedex France
 * and Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <isl/aff.h>
#include <isl/ctx.h>
#include <isl/flow.h>
#include <isl/map.h>
#include <isl/ast_build.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <pet.h>

#include "ppcg.h"
#include "ppcg_options.h"
#include "cpu.h"
#include "print.h"
#include "schedule.h"
#include "util.h"
#include "amp.h"
#include "amp_utilities.h"

/* Dump an amp_stmt_access (and, recursively, the rest of its list)
 * to stderr for debugging.  A NULL pointer prints a warning instead.
 */
void amp_stmt_access_dump(struct amp_stmt_access *access) {
    if (!access) {
        fprintf(stderr, "    @WARN:          sorry, the amp_stmt_access is NULL !!! \n");
        return;
    }

    fprintf(stderr, "       the   amp_stmt_access  is on the below :\n");
    fprintf(stderr, "          the       read      is %d \n", access->read);
    fprintf(stderr, "          the       write     is %d \n", access->write);
    fprintf(stderr, "          the    exact_write  is %d \n", access->exact_write);
    fprintf(stderr, "          the   fixed_element is %d \n", access->fixed_element);
    fprintf(stderr, "          the      n_index    is %d \n", access->n_index);
    /* Note: a line that printed access->read a second time under the
     * (wrong) label "exact_write" has been removed. */
    fprintf(stderr, "          the     access      is:   \n");
    isl_map_dump(access->access);
    fprintf(stderr, "          the  tagged_access  is: \n");
    isl_map_dump(access->tagged_access);
    fprintf(stderr, "          the     ref_id      is: \n");
    isl_id_dump(access->ref_id);
    fprintf(stderr, "          the      next       is: \n");
    if (access->next) {
        amp_stmt_access_dump(access->next);
    } else
        fprintf(stderr, "NULL\n");
}

/* Dump an amp_group_data to stderr for debugging; warn on NULL. */
void amp_group_data_dump(struct amp_group_data *data) {
    if (!data) {
        fprintf(stderr, "    @WARN:\n       sorry, the amp_group_data is NULL !!! \n");
        return;
    }

    fprintf(stderr, "       the     amp_group_data    is on the below : \n");
    fprintf(stderr, "          the   kernel_depth     is %d \n", data->kernel_depth);
    fprintf(stderr, "          the   shared_depth     is %d \n", data->shared_depth);
    fprintf(stderr, "          the   thread_depth     is %d \n", data->thread_depth);
    fprintf(stderr, "          the     n_thread       is %d \n", data->n_thread);
    fprintf(stderr, "          the    host_sched     is:   \n");
    isl_union_map_dump(data->host_sched);
    fprintf(stderr, "          the   shared_sched     is:   \n");
    isl_union_map_dump(data->shared_sched);
    fprintf(stderr, "          the    copy_sched      is:   \n");
    isl_union_map_dump(data->copy_sched);
    fprintf(stderr, "          the   thread_sched     is:   \n");
    isl_union_map_dump(data->thread_sched);
    fprintf(stderr, "          the    full_sched      is:   \n");
    isl_union_map_dump(data->full_sched);
}

/* Dump an amp_array_bound to stderr for debugging; warn on NULL.
 * Each optional field is only dumped when present. */
void amp_array_bound_dump(struct amp_array_bound *bound) {
    if (!bound) {
        fprintf(stderr, "   @WARN: \n       sorry, the amp_array_bound is NULL !!! \n");
        return;
    }

    fprintf(stderr, "       the amp_array_bound is on the below :\n");
    fprintf(stderr, "          the      size    is: \n");
    if (bound->size)
        isl_val_dump(bound->size);
    fprintf(stderr, "          the      lb      is: \n");
    if (bound->lb)
        isl_aff_dump(bound->lb);
    fprintf(stderr, "          the    stride    is: \n");
    if (bound->stride)
        isl_val_dump(bound->stride);
    fprintf(stderr, "          the    shift     is: \n");
    if (bound->shift)
        isl_aff_dump(bound->shift);
}

/* Dump an amp_array_tile to stderr for debugging; warn on NULL. */
void amp_array_tile_dump(struct amp_array_tile *tile) {
    if (!tile) {
        fprintf(stderr, "    @WARN: \n       sorry, the amp_array_tile is NULL !!! \n");
        return;
    }

    fprintf(stderr, "       the amp_array_tile is on the below :\n");
    fprintf(stderr, "          the   depth     is %d \n", tile->depth);
    fprintf(stderr, "          the     n       is %d \n", tile->n);
    fprintf(stderr, "          the   tiling    is:   \n");
    if (tile->tiling)
        isl_multi_aff_dump(tile->tiling);
    fprintf(stderr, "          the   bound     is:   \n");
    amp_array_bound_dump(tile->bound);
}

/* Dump an amp_array_info (including all references) to stderr; warn on NULL. */
void amp_array_info_dump(struct amp_array_info *info) {
    if (!info) {
        fprintf(stderr, "    @WARN: \n       sorry, the amp_array_info is NULL !!! \n");
        return;
    }

    fprintf(stderr, "       the       amp_array_info     is on the below :\n");
    fprintf(stderr, "          the         type          is %s \n", info->type);
    fprintf(stderr, "          the         name          is %s \n", info->name);
    fprintf(stderr, "          the         size          is %d \n", info->size);
    fprintf(stderr, "          the        space          is :  \n");
    isl_space_dump(info->space);
    fprintf(stderr, "          the    declared_extent    is :  \n");
    isl_set_dump(info->declared_extent);
    fprintf(stderr, "          the     declared_size     is :  \n");
    isl_ast_expr_dump(info->declared_size);
    fprintf(stderr, "          the        extent         is :  \n");
    isl_set_dump(info->extent);
    fprintf(stderr, "          the        bound          is :  \n");
    isl_multi_pw_aff_dump(info->bound);
    fprintf(stderr, "          the      bound_expr       is :  \n");
    isl_ast_expr_dump(info->bound_expr);
    fprintf(stderr, "          the         n_ref         is %d \n", info->n_ref);
    for (int i = 0; i < info->n_ref; i++) {
        fprintf(stderr, "          the         refs[%d]      is:   \n", i);
        amp_stmt_access_dump(info->refs[i]);
    }
    fprintf(stderr, "          the        n_index        is %d \n", info->n_index);
    fprintf(stderr, "          the       accessed        is %d \n", info->accessed);
    fprintf(stderr, "          the   read_only_scalar    is %d \n", info->read_only_scalar);
    fprintf(stderr, "          the has_compound_element  is %d \n", info->has_compound_element);
    fprintf(stderr, "          the  only_fixed_element   is %d \n", info->only_fixed_element);
    fprintf(stderr, "          the         local         is %d \n", info->local);
    fprintf(stderr, "          the    declare_local      is %d \n", info->declare_local);
    fprintf(stderr, "          the         global        is %d \n", info->global);
    fprintf(stderr, "          the      linearize        is %d \n", info->linearize);
    fprintf(stderr, "          the      dep_order        is :  \n");
    isl_union_map_dump(info->dep_order);
}

/* Dump an amp_array_ref_group to stderr for debugging; warn on NULL. */
void amp_array_ref_group_dump(struct amp_array_ref_group *group) {
    if (!group) {
        fprintf(stderr, "    @WARN: \n       sorry, the amp_array_ref_group is NULL !!! \n");
        return;
    }

    fprintf(stderr, "       the amp_array_ref_group is on the below :\n");
    fprintf(stderr, "          the      nr          is %d \n", group->nr);
    fprintf(stderr, "          the    write         is %d \n", group->write);
    fprintf(stderr, "          the  exact_write     is %d \n", group->exact_write);
    fprintf(stderr, "          the    slice         is %d \n", group->slice);
    fprintf(stderr, "          the   min_depth      is %d \n", group->min_depth);
    fprintf(stderr, "          the     n_ref        is %d \n", group->n_ref);
    fprintf(stderr, "          the    access        is: \n");
    isl_map_dump(group->access);
    fprintf(stderr, "          the  shared_tile     is: \n");
    amp_array_tile_dump(group->shared_tile);
    fprintf(stderr, "          the     array        is: \n");
    amp_array_info_dump(group->array);
}

/* Dump an amp_local_array_info (including all groups) to stderr; warn on NULL. */
void amp_local_array_info_dump(struct amp_local_array_info *info) {
    if (!info) {
        fprintf(stderr, "    @WARN: \n       sorry, the amp_local_array_info is NULL !!! \n");
        return;
    }

    fprintf(stderr, "       the amp_local_array_info is on the below :\n");
    fprintf(stderr, "          the     global        is %d \n", info->global);
    fprintf(stderr, "          the     n_index       is %d \n", info->n_index);
    fprintf(stderr, "          the      bound        is :  \n");
    isl_multi_pw_aff_dump(info->bound);
    fprintf(stderr, "          the     bound_expr    is :  \n");
    isl_ast_expr_dump(info->bound_expr);
    fprintf(stderr, "          the     n_group       is %d \n", info->n_group);
    for (int g = 0; g < info->n_group; g++) {
        fprintf(stderr, "          the    groups[%d]     is :  \n", g);
        amp_array_ref_group_dump(info->groups[g]);
    }
    fprintf(stderr, "          the     array         is :  \n");
    amp_array_info_dump(info->array);
}

/* Dump an amp_stmt to stderr for debugging; warn on NULL. */
void amp_stmt_dump(struct amp_stmt *stmts) {
    if (!stmts) {
        fprintf(stderr, "    @WARN: \n       sorry, the amp_stmt is NULL !!! \n");
        return;
    }

    fprintf(stderr, "       the      amp_stmt   is on the below :\n");
    fprintf(stderr, "          the      id      is: \n");
    isl_id_dump(stmts->id);
    fprintf(stderr, "          the   accesses   is: \n");
    amp_stmt_access_dump(stmts->accesses);
}

/* Dump an amp_prog (all relations, statements and arrays) to stderr; warn on NULL. */
void amp_prog_dump(struct amp_prog *prog) {
    if (!prog) {
        fprintf(stderr, "    @WARN: \n       sorry, the amp_prog is NULL !!! \n");
        return;
    }

    fprintf(stderr, "       the      amp_prog       is on the below :\n");
    fprintf(stderr, "          the      context     is: \n");
    isl_set_dump(prog->context);
    fprintf(stderr, "          the      read        is: \n");
    isl_union_map_dump(prog->read);
    fprintf(stderr, "          the    may_write     is: \n");
    isl_union_map_dump(prog->may_write);
    fprintf(stderr, "          the    must_write    is: \n");
    isl_union_map_dump(prog->must_write);
    fprintf(stderr, "          the tagged_must_kill is: \n");
    isl_union_map_dump(prog->tagged_must_kill);
    fprintf(stderr, "          the   may_persist   is: \n");
    isl_union_set_dump(prog->may_persist);
    fprintf(stderr, "          the      to_outer   is: \n");
    isl_union_map_dump(prog->to_outer);
    fprintf(stderr, "          the      to_inner   is: \n");
    isl_union_map_dump(prog->to_inner);
    fprintf(stderr, "          the  any_to_outer   is: \n");
    isl_union_map_dump(prog->any_to_outer);
    fprintf(stderr, "          the    array_order  is: \n");
    isl_union_map_dump(prog->array_order);
    fprintf(stderr, "          the   n_stmts   is: %d \n", prog->n_stmts);
    for (int i = 0; i < prog->n_stmts; i++) {
        fprintf(stderr, "          the  stmts[%d]  is: \n", i);
        amp_stmt_dump(&prog->stmts[i]);
    }
    fprintf(stderr, "          the   n_array   is: %d \n", prog->n_array);
    for (int i = 0; i < prog->n_array; i++) {
        fprintf(stderr, "          the  array[%d]  is: \n", i);
        amp_array_info_dump(&prog->array[i]);
    }
}

/* Dump an amp_ppcg_kernel to stderr for debugging; warn on NULL. */
void amp_ppcg_kernel_dump(struct amp_ppcg_kernel *kernel) {
    if (!kernel) {
        fprintf(stderr, "    @WARN: \n       sorry, the amp_ppcg_kernel is NULL !!! \n");
        return;
    }

    fprintf(stderr, "       the   amp_ppcg_kernel  is on the below :\n");
    fprintf(stderr, "          the       id        is: %d \n", kernel->id);
    fprintf(stderr, "          the      prog       is: \n");
    amp_prog_dump(kernel->prog);
    fprintf(stderr, "          the   size_expr     is: \n");
    isl_ast_expr_dump(kernel->size_expr);
    fprintf(stderr, "          the    context      is: \n");
    isl_set_dump(kernel->context);
    fprintf(stderr, "          the     core        is: \n");
    isl_union_set_dump(kernel->core);
    fprintf(stderr, "          the    arrays       is: \n");
    isl_union_set_dump(kernel->arrays);
    fprintf(stderr, "          the    contraction  is: \n");
    isl_union_pw_multi_aff_dump(kernel->contraction);
    fprintf(stderr, "          the expanded_domain is: \n");
    isl_union_set_dump(kernel->expanded_domain);
    fprintf(stderr, "          the     space       is: \n");
    isl_space_dump(kernel->space);
    fprintf(stderr, "          the    n_array      is: %d \n", kernel->n_array);
    for (int r = 0; r < kernel->n_array; r++) {
        fprintf(stderr, "          the     array[%d]       is: \n", r);
        /* Bug fix: the loop used to dump kernel->array (element 0)
         * on every iteration instead of the r-th element. */
        amp_local_array_info_dump(&kernel->array[r]);
    }
    fprintf(stderr, "          the      n_var      is: %d \n", kernel->n_var);
    fprintf(stderr, "          the  thread_filter  is: \n");
    isl_union_set_dump(kernel->thread_filter);
    fprintf(stderr, "          the  copy_schedule  is: \n");
    isl_union_pw_multi_aff_dump(kernel->copy_schedule);
    fprintf(stderr, "          the copy_schedule_dim is: %d \n", kernel->copy_schedule_dim);
    fprintf(stderr, "          the       tree       is: \n");
    isl_ast_node_dump(kernel->tree);
}

/* Representation of a statement inside a generated AST.
 *
 * "stmt" refers to the original statement.
 * "ref2expr" maps the reference identifier of each access in
 * the statement to an AST expression that should be printed
 * at the place of the access.
 */
struct ppcg_stmt {
	struct pet_stmt *stmt;		/* original pet statement */

	isl_id_to_ast_expr *ref2expr;	/* reference id -> printable AST expression */
};

/* Internal data structure for at_domain.
 *
 * "prog" represents the entire scop.
 * "kernel" points to the kernel to which the current schedule node
 * belongs.  It is set by before_mark and reset by after_mark.
 * It may be NULL if we are outside any kernel.
 */
struct ppcg_at_domain_data
{
	struct amp_prog *prog;		/* the entire scop */
	struct amp_ppcg_kernel *kernel;	/* current kernel or NULL */
};

/* Internal data structure for extract_access.
 * "next_access" points to the end of a linked list that is extended
 * by extract_access.
 * "single_expression" is set if the access expressions belong to
 * an expression statement (i.e., a statement without internal control).
 * "any_to_outer" maps all intermediate arrays to their outer arrays.
 */
struct ppcg_extract_access_data
{
    struct amp_stmt_access **next_access;	/* tail of the list being built */
    int single_expression;			/* statement without internal control? */
    isl_union_map *any_to_outer;		/* intermediate -> outer arrays */
};

/* Internal data structure for the index and AST expression transformation
 * callbacks for pet_stmt_build_ast_exprs.
 *
 * "kernel" is the kernel for which we are computing AST expressions and
 * may be NULL if we are not inside a kernel.
 * "accesses" is the list of amp_stmt_access in the statement.
 * "iterator_map" expresses the statement iterators in terms of
 * the AST loop iterators.
 * "sched2copy" expresses the outer copy_schedule_dim dimensions of
 * the kernel schedule in terms of the AST loop iterators and
 * may be NULL if we are not inside a kernel.
 *
 * The following fields are set in transform_index and used in transform_expr.
 * "array" is the array that is being accessed.
 * "global" is set if the global array is accessed (rather than
 * shared/private memory).
 * "local_array" refers to information on the array specialized
 * to the current kernel.
 */
struct ppcg_transform_data
{
	struct amp_ppcg_kernel *kernel;
	struct amp_stmt_access *accesses;
	isl_pw_multi_aff *iterator_map;
	isl_pw_multi_aff *sched2copy;

	/* Set in transform_index, consumed in transform_expr. */
	struct amp_array_info *array;
	int global;
	struct amp_local_array_info *local_array;
};

/* The kind of a special kernel statement: a copy between memories
 * or an original statement from the input program. */
enum ppcg_kernel_stmt_type
{
	ppcg_kernel_copy,	/* data-movement statement */
	ppcg_kernel_domain	/* original computation statement */
};

/* Representation of special statements, in particular copy statements
 * and __syncthreads statements, inside a kernel.
 *
 * type represents the kind of statement
 *
 *
 * for ppcg_kernel_copy statements we have
 *
 * read is set if the statement should copy data from global memory
 * to shared memory or registers.
 *
 * index expresses an access to the array element that needs to be copied
 * local_index expresses the corresponding element in the tile
 *
 * array refers to the original array being copied
 * local_array is a pointer to the appropriate element in the "array"
 *	array of the ppcg_kernel to which this copy access belongs
 *
 *
 * for ppcg_kernel_domain statements we have
 *
 * stmt is the corresponding input statement
 *
 * n_access is the number of accesses in stmt
 * access is an array of local information about the accesses
 */
struct ppcg_kernel_stmt
{
	enum ppcg_kernel_stmt_type type;

	union
	{
		struct
		{	/* valid when type == ppcg_kernel_copy */
			int read;
			isl_ast_expr *index;
			isl_ast_expr *local_index;
			struct amp_array_info *array;
			struct amp_local_array_info *local_array;
		} c;
		struct
		{	/* valid when type == ppcg_kernel_domain */
			struct amp_stmt *stmt;
			isl_id_to_ast_expr *ref2expr;
		} d;
	} u;
};

/* Free a ppcg_stmt attached as isl_id user data.
 * A NULL "user" pointer is silently ignored.
 */
static void ppcg_stmt_free(void *user)
{
	struct ppcg_stmt *stmt = user;

	if (stmt) {
		isl_id_to_ast_expr_free(stmt->ref2expr);
		free(stmt);
	}
}

/* Derive the output file name from the input file name.
 * 'input' is the entire path of the input file. The output
 * is the file name plus the additional extension.
 *
 * We will basically replace everything after the last point
 * with '.ppcg.c'. This means file.c becomes file.ppcg.c
 *
 * Return NULL if the constructed name would not fit in PATH_MAX
 * or if the file cannot be opened for writing.
 */
static FILE *get_output_file(const char *input, const char *output)
{
	char name[PATH_MAX];
	const char *ext;
	int len;
	FILE *file;

	len = ppcg_extract_base_name(name, input);

	ext = strrchr(input, '.');
	/* Append the ".ppcg" marker and the extension with an explicit
	 * bound instead of unchecked strcpy calls, so that an overlong
	 * input path cannot overflow "name". */
	if (snprintf(name + len, sizeof(name) - len, ".ppcg%s",
			ext ? ext : ".c") >= (int) (sizeof(name) - len)) {
		fprintf(stderr, "Output file name too long\n");
		return NULL;
	}

	if (!output)
		output = name;

	file = fopen(output, "w");
	if (!file) {
		fprintf(stderr, "Unable to open '%s' for writing\n", output);
		return NULL;
	}

	return file;
}

/* Derive the output file name from the input file name.
 * 'input' is the entire path of the input file. The output
 * is the file name plus the additional extension.
 *
 * We will basically replace everything after the last point
 * with '.ppcg.c' (or '.amp_ppcg.c' when automatic mixed precision
 * is enabled).  This means file.c becomes file.ppcg.c.
 *
 * Return NULL if the constructed name would not fit in PATH_MAX
 * or if the file cannot be opened for writing.
 */
static FILE *get_output_file_with_amp(const char *input, const char *output, struct ppcg_options *options)
{
	char name[PATH_MAX];
	const char *ext;
	const char *marker;
	int len;
	FILE *file;

	len = ppcg_extract_base_name(name, input);

	/* The two original branches only differed in the marker string. */
	marker = options->automatic_mixed_precision ? ".amp_ppcg" : ".ppcg";
	ext = strrchr(input, '.');
	/* Bounded append: an overlong input path cannot overflow "name". */
	if (snprintf(name + len, sizeof(name) - len, "%s%s",
			marker, ext ? ext : ".c") >= (int) (sizeof(name) - len)) {
		fprintf(stderr, "Output file name too long\n");
		return NULL;
	}

	if (!output)
		output = name;

	file = fopen(output, "w");
	if (!file)
	{
		fprintf(stderr, "Unable to open '%s' for writing\n", output);
		return NULL;
	}

	return file;
}

/* Data used to annotate for nodes in the ast.
 */
struct ast_node_userinfo {
	/* The for node is an openmp parallel for node. */
	int is_openmp;
};

/* Information used while building the ast.
 */
struct ast_build_userinfo {
	/* The current ppcg scop. */
	struct ppcg_scop *scop;

	/* Are we currently in a parallel for loop?
	 * Used to mark only the outermost parallel loop as OpenMP. */
	int in_parallel_for;

	/* The contraction of the entire schedule tree. */
	isl_union_pw_multi_aff *contraction;
};

/* Check if the current scheduling dimension is parallel.
 *
 * We check for parallelism by verifying that the loop does not carry any
 * dependences.
 *
 * If any expansion nodes are present in the schedule tree,
 * then they are assumed to be situated near the leaves of the schedule tree,
 * underneath any node that may result in a for loop.
 * In particular, these expansions may have been introduced
 * by the call to isl_schedule_expand inside ppcg_compute_grouping_schedule.
 * The dependence relations are formulated in terms of the expanded
 * domains, while, by assumption, the partial schedule returned
 * by isl_ast_build_get_schedule refers to the contracted domains.
 * Plug in the contraction such that the schedule would also
 * refer to the expanded domains.
 * Note that if the schedule tree does not contain any expansions,
 * then the contraction is an identity function.
 *
 * If the live_range_reordering option is set, then this currently
 * includes the order dependences.  In principle, non-zero order dependences
 * could be allowed, but this would require privatization and/or expansion.
 *
 * Parallelism test: if the distance is zero in all outer dimensions, then it
 * has to be zero in the current dimension as well.
 * Implementation: first, translate dependences into time space, then force
 * outer dimensions to be equal.  If the distance is zero in the current
 * dimension, then the loop is parallel.
 * The distance is zero in the current dimension if it is a subset of a map
 * with equal values for the current dimension.
 */
static int ast_schedule_dim_is_parallel(__isl_keep isl_ast_build *build,
	struct ast_build_userinfo *build_info)
{
	struct ppcg_scop *scop = build_info->scop;
	isl_union_map *schedule, *deps;
	isl_map *schedule_deps, *test;
	isl_space *schedule_space;
	unsigned i, dimension, is_parallel;

	schedule = isl_ast_build_get_schedule(build);
	/* Plug in the contraction so that the schedule refers to
	 * the expanded domains (see the comment above). */
	schedule = isl_union_map_preimage_domain_union_pw_multi_aff(schedule,
		isl_union_pw_multi_aff_copy(build_info->contraction));
	schedule_space = isl_ast_build_get_schedule_space(build);

	/* The innermost (current) schedule dimension. */
	dimension = isl_space_dim(schedule_space, isl_dim_out) - 1;

	deps = isl_union_map_copy(scop->dep_flow);
	deps = isl_union_map_union(deps, isl_union_map_copy(scop->dep_false));
	if (scop->options->live_range_reordering) {
		isl_union_map *order = isl_union_map_copy(scop->dep_order);
		deps = isl_union_map_union(deps, order);
	}
	/* Translate the dependences into the schedule (time) space.
	 * The second call consumes "schedule". */
	deps = isl_union_map_apply_range(deps, isl_union_map_copy(schedule));
	deps = isl_union_map_apply_domain(deps, schedule);

	/* No dependences at all: trivially parallel. */
	if (isl_union_map_is_empty(deps)) {
		isl_union_map_free(deps);
		isl_space_free(schedule_space);
		return 1;
	}

	schedule_deps = isl_map_from_union_map(deps);

	/* Force the distance to be zero in all outer dimensions. */
	for (i = 0; i < dimension; i++)
		schedule_deps = isl_map_equate(schedule_deps, isl_dim_out, i,
					       isl_dim_in, i);

	/* Parallel iff the remaining dependences have zero distance
	 * in the current dimension as well. */
	test = isl_map_universe(isl_map_get_space(schedule_deps));
	test = isl_map_equate(test, isl_dim_out, dimension, isl_dim_in,
			      dimension);
	is_parallel = isl_map_is_subset(schedule_deps, test);

	isl_space_free(schedule_space);
	isl_map_free(test);
	isl_map_free(schedule_deps);

	return is_parallel;
}

/* Mark a for node openmp parallel, if it is the outermost parallel for node.
 */
/* Mark a for node openmp parallel, if it is the outermost parallel for node.
 *
 * "node_info" may be NULL if its allocation failed; in that case there is
 * nothing to annotate, so do nothing.
 */
static void mark_openmp_parallel(__isl_keep isl_ast_build *build,
	struct ast_build_userinfo *build_info,
	struct ast_node_userinfo *node_info)
{
	if (!node_info || build_info->in_parallel_for)
		return;

	if (ast_schedule_dim_is_parallel(build, build_info)) {
		build_info->in_parallel_for = 1;
		node_info->is_openmp = 1;
	}
}

/* Allocate an ast_node_info structure and initialize it with default values.
 */
/* Allocate an ast_node_userinfo structure and initialize it with default
 * values.  Return NULL if the allocation fails (the original code
 * dereferenced the unchecked malloc result, which is undefined behavior
 * on out-of-memory).  Callers hand the result to isl_id_alloc, which
 * accepts a NULL user pointer.
 */
static struct ast_node_userinfo *allocate_ast_node_userinfo()
{
	struct ast_node_userinfo *node_info;

	node_info = malloc(sizeof *node_info);
	if (!node_info)
		return NULL;
	node_info->is_openmp = 0;
	return node_info;
}

/* Free an ast_node_info structure.
 */
/* Free an ast_node_userinfo structure; used as the isl_id free callback.
 * free() accepts NULL, so no guard is needed.
 */
static void free_ast_node_userinfo(void *ptr)
{
	free((struct ast_node_userinfo *) ptr);
}

/* This method is executed before the construction of a for node. It creates
 * an isl_id that is used to annotate the subsequently generated ast for nodes.
 *
 * In this function we also run the following analyses:
 *
 * 	- Detection of openmp parallel loops
 */
/* This method is executed before the construction of a for node.  It creates
 * an isl_id that is used to annotate the subsequently generated ast for nodes.
 *
 * It also detects whether the loop can be marked openmp parallel.
 */
static __isl_give isl_id *ast_build_before_for(
	__isl_keep isl_ast_build *build, void *user)
{
	struct ast_build_userinfo *build_info = user;
	struct ast_node_userinfo *node_info = allocate_ast_node_userinfo();
	isl_id *id;

	id = isl_id_alloc(isl_ast_build_get_ctx(build), "", node_info);
	id = isl_id_set_free_user(id, free_ast_node_userinfo);

	mark_openmp_parallel(build, build_info, node_info);

	return id;
}

/* This method is executed after the construction of a for node.
 *
 * It performs the following actions:
 *
 * 	- Reset the 'in_parallel_for' flag, as soon as we leave a for node,
 * 	  that is marked as openmp parallel.
 *
 */
/* This method is executed after the construction of a for node.
 *
 * As soon as we leave a for node that is marked openmp parallel,
 * reset the 'in_parallel_for' flag so that a later sibling loop
 * can be considered again.
 */
static __isl_give isl_ast_node *ast_build_after_for(
	__isl_take isl_ast_node *node, __isl_keep isl_ast_build *build,
	void *user)
{
	isl_id *id = isl_ast_node_get_annotation(node);
	struct ast_node_userinfo *info = isl_id_get_user(id);

	if (info && info->is_openmp) {
		struct ast_build_userinfo *build_info = user;

		build_info->in_parallel_for = 0;
	}

	isl_id_free(id);

	return node;
}

/* Find the element in scop->stmts that has the given "id".
 */
static struct pet_stmt *find_stmt(struct ppcg_scop *scop, __isl_keep isl_id *id)
{
	int i;

	for (i = 0; i < scop->pet->n_stmt; ++i) {
		struct pet_stmt *stmt = scop->pet->stmts[i];
		isl_id *id_i;

		id_i = isl_set_get_tuple_id(stmt->domain);
		/* Drop the reference immediately; only the pointer value
		 * is compared below, and the statement's domain keeps
		 * the id alive. */
		isl_id_free(id_i);

		if (id_i == id)
			return stmt;
	}

	/* No statement carries "id": internal inconsistency. */
	isl_die(isl_id_get_ctx(id), isl_error_internal,
		"statement not found", return NULL);
}

/* Find the element in gen->stmt that has the given "id".
 * Return NULL if no such gpu_stmt can be found.
 */
/* Find the element in prog->stmts that has the given "id".
 * Return NULL if no such amp_stmt can be found.
 */
static struct amp_stmt *find_amp_stmt(struct amp_prog *prog, __isl_keep isl_id *id)
{
	int i;

	for (i = 0; i < prog->n_stmts; ++i)
		if (prog->stmts[i].id == id)
			return &prog->stmts[i];

	return NULL;
}

/* Print a user statement in the generated AST.
 * The ppcg_stmt has been attached to the node in at_each_domain.
 */
/* Print a user statement in the generated AST.
 * The ppcg_stmt has been attached to the node in at_each_domain.
 */
static __isl_give isl_printer *print_user(__isl_take isl_printer *p,
	__isl_take isl_ast_print_options *print_options,
	__isl_keep isl_ast_node *node, void *user)
{
	isl_id *id = isl_ast_node_get_annotation(node);
	struct ppcg_stmt *stmt = isl_id_get_user(id);

	isl_id_free(id);
	isl_ast_print_options_free(print_options);

	return pet_stmt_print_body(stmt->stmt, p, stmt->ref2expr);
}


/* Print a for loop node as an openmp parallel loop.
 *
 * To print an openmp parallel loop we print a normal for loop, but add
 * "#pragma openmp parallel for" in front.
 *
 * Variables that are declared within the body of this for loop are
 * automatically openmp 'private'. Iterators declared outside of the
 * for loop are automatically openmp 'shared'. As ppcg declares all iterators
 * at the position where they are assigned, there is no need to explicitly mark
 * variables. Their automatically assigned type is already correct.
 *
 * This function only generates valid OpenMP code, if the ast was generated
 * with the 'atomic-bounds' option enabled.
 *
 */
/* Print a for loop node as an openmp parallel loop: emit
 * "#pragma omp parallel for" on its own line, then the normal for loop.
 * See the comment above for the variable-scoping rationale.
 */
static __isl_give isl_printer *print_for_with_openmp(
	__isl_keep isl_ast_node *node, __isl_take isl_printer *p,
	__isl_take isl_ast_print_options *print_options)
{
	p = isl_printer_start_line(p);
	p = isl_printer_print_str(p, "#pragma omp parallel for");
	p = isl_printer_end_line(p);

	return isl_ast_node_for_print(node, p, print_options);
}

/* Print a for node.
 *
 * Depending on how the node is annotated, we either print a normal
 * for node or an openmp parallel for node.
 */
/* Print a for node.
 *
 * Depending on how the node is annotated, we either print a normal
 * for node or an openmp parallel for node.
 */
static __isl_give isl_printer *print_for(__isl_take isl_printer *p,
	__isl_take isl_ast_print_options *print_options,
	__isl_keep isl_ast_node *node, void *user)
{
	isl_id *id = isl_ast_node_get_annotation(node);
	int openmp = 0;

	if (id) {
		struct ast_node_userinfo *info;

		info = (struct ast_node_userinfo *) isl_id_get_user(id);
		openmp = info && info->is_openmp;
	}

	if (openmp)
		p = print_for_with_openmp(node, p, print_options);
	else
		p = isl_ast_node_for_print(node, p, print_options);

	isl_id_free(id);

	return p;
}

/* Index transformation callback for pet_stmt_build_ast_exprs.
 *
 * "index" expresses the array indices in terms of statement iterators
 * "iterator_map" expresses the statement iterators in terms of
 * AST loop iterators.
 *
 * The result expresses the array indices in terms of
 * AST loop iterators.
 */
/* Index transformation callback for pet_stmt_build_ast_exprs.
 *
 * "index" expresses the array indices in terms of statement iterators;
 * "user" carries the iterator_map from statement iterators to AST loop
 * iterators.  Pull "index" back so it refers to the AST loop iterators.
 */
static __isl_give isl_multi_pw_aff *pullback_index(
	__isl_take isl_multi_pw_aff *index, __isl_keep isl_id *id, void *user)
{
	isl_pw_multi_aff *iterators;

	iterators = isl_pw_multi_aff_copy((isl_pw_multi_aff *) user);
	return isl_multi_pw_aff_pullback_pw_multi_aff(index, iterators);
}

/* Transform the accesses in the statement associated to the domain
 * called by "node" to refer to the AST loop iterators, construct
 * corresponding AST expressions using "build",
 * collect them in a ppcg_stmt and annotate the node with the ppcg_stmt.
 */
static __isl_give isl_ast_node *at_each_domain(__isl_take isl_ast_node *node,
	__isl_keep isl_ast_build *build, void *user)
{
	struct ppcg_scop *scop = user;
	isl_ast_expr *expr, *arg;
	isl_ctx *ctx;
	isl_id *id;
	isl_map *map;
	isl_pw_multi_aff *iterator_map;
	struct ppcg_stmt *stmt;

	ctx = isl_ast_node_get_ctx(node);
	stmt = isl_calloc_type(ctx, struct ppcg_stmt);
	if (!stmt)
		goto error;

	/* The user node's expression is a call; its first argument
	 * carries the id of the statement being executed. */
	expr = isl_ast_node_user_get_expr(node);
	arg = isl_ast_expr_get_op_arg(expr, 0);
	isl_ast_expr_free(expr);
	id = isl_ast_expr_get_id(arg);
	isl_ast_expr_free(arg);
	stmt->stmt = find_stmt(scop, id);
	isl_id_free(id);
	if (!stmt->stmt)
		goto error;

	/* Invert the schedule to express the statement iterators
	 * in terms of the AST loop iterators. */
	map = isl_map_from_union_map(isl_ast_build_get_schedule(build));
	map = isl_map_reverse(map);
	iterator_map = isl_pw_multi_aff_from_map(map);
	stmt->ref2expr = pet_stmt_build_ast_exprs(stmt->stmt, build,
				    &pullback_index, iterator_map, NULL, NULL);
	isl_pw_multi_aff_free(iterator_map);

	/* Attach "stmt" to the node; ppcg_stmt_free releases it
	 * when the annotation id is freed. */
	id = isl_id_alloc(isl_ast_node_get_ctx(node), NULL, stmt);
	id = isl_id_set_free_user(id, &ppcg_stmt_free);
	return isl_ast_node_set_annotation(node, id);
error:
	ppcg_stmt_free(stmt);
	return isl_ast_node_free(node);
}

/* Given a mapping "iterator_map" from the AST schedule to a domain,
 * return the corresponding mapping from the AST schedule
 * to the outer kernel->copy_schedule_dim dimensions of
 * the schedule computed by PPCG for this kernel.
 *
 * Note that kernel->copy_schedule_dim is at least as large as
 * the largest depth of any array reference group associated to the kernel.
 * This is needed as the returned schedule is used to extract a mapping
 * to the outer tile->depth dimensions in transform_index.
 */
static __isl_give isl_pw_multi_aff *compute_sched_to_copy(
	struct amp_ppcg_kernel *kernel, __isl_take isl_pw_multi_aff *iterator_map)
{
	isl_space *copy_space;
	isl_union_pw_multi_aff *copy_sched;
	isl_pw_multi_aff *sched2copy;

	/* Construct the space D -> [copy_schedule_dim dims], where D is
	 * the domain reached by "iterator_map".
	 */
	copy_space = isl_pw_multi_aff_get_space(iterator_map);
	copy_space = isl_space_from_domain(isl_space_range(copy_space));
	copy_space = isl_space_add_dims(copy_space, isl_dim_out,
							   kernel->copy_schedule_dim);

	copy_sched = isl_union_pw_multi_aff_copy(kernel->copy_schedule);
	sched2copy = isl_union_pw_multi_aff_extract_pw_multi_aff(copy_sched,
							   copy_space);
	isl_union_pw_multi_aff_free(copy_sched);

	return isl_pw_multi_aff_pullback_pw_multi_aff(sched2copy, iterator_map);
}

/* Return the amp_stmt_access in the linked list "accesses"
 * whose reference identifier is "ref_id",
 * or NULL if there is no such access.
 */
static struct amp_stmt_access *find_access(struct amp_stmt_access *accesses,
										   __isl_keep isl_id *ref_id)
{
	while (accesses) {
		if (accesses->ref_id == ref_id)
			return accesses;
		accesses = accesses->next;
	}

	return NULL;
}

/* Return the name of the outer array (of structs) accessed by "access".
 *
 * Member accesses have a wrapped range; peel off the wrapping
 * until the outermost array space is reached.
 */
static const char *get_outer_array_name(__isl_keep isl_map *access)
{
	isl_space *range;
	const char *outer_name;

	range = isl_space_range(isl_map_get_space(access));
	for (;;) {
		if (!range || !isl_space_is_wrapping(range))
			break;
		range = isl_space_domain(isl_space_unwrap(range));
	}
	outer_name = isl_space_get_tuple_name(range, isl_dim_set);
	isl_space_free(range);

	return outer_name;
}

/* Return the index of the array called "name" in the list of arrays
 * of "kernel", or -1 if there is no array with that name.
 */
static int find_array_index(struct amp_ppcg_kernel *kernel, const char *name)
{
	int idx;

	for (idx = 0; idx < kernel->n_array; ++idx) {
		if (strcmp(kernel->array[idx].array->name, name) == 0)
			return idx;
	}

	return -1;
}

/* Return a pointer to the amp_array_ref_group in "local"
 * that contains the reference "access".
 * Return NULL if no such group can be found.
 */
static struct amp_array_ref_group *find_ref_group(
	struct amp_local_array_info *local, struct amp_stmt_access *access)
{
	int g, r;

	for (g = 0; g < local->n_group; ++g) {
		struct amp_array_ref_group *candidate = local->groups[g];

		for (r = 0; r < candidate->n_ref; ++r) {
			if (candidate->refs[r] == access)
				return candidate;
		}
	}

	return NULL;
}

/* Is "node" a mark node with an identifier called "name"?
 * Return 1 if so, 0 if not, and -1 on error (NULL node or id).
 */
static int is_marked(__isl_keep isl_schedule_node *node, const char *name) {
    isl_id *id;
    int match;

    if (!node)
        return -1;
    if (isl_schedule_node_get_type(node) != isl_schedule_node_mark)
        return 0;

    id = isl_schedule_node_mark_get_id(node);
    if (!id)
        return -1;

    match = strcmp(isl_id_get_name(id), name) == 0;
    isl_id_free(id);

    return match;
}

/* Is "node" a mark node with an identifier called "amp_kernel"?
 * Returns 1 if so, 0 if not, and -1 on error.
 */
int amp_tree_node_is_kernel(__isl_keep isl_schedule_node *node) {
    return is_marked(node, "amp_kernel");
}

/* Is "node" a mark node with an identifier called "amp_higher"?
 * Returns 1 if so, 0 if not, and -1 on error.
 */
static int node_is_amp_higher(__isl_keep isl_schedule_node *node) {
    return is_marked(node, "amp_higher");
}

/* Is "node" a mark node with an identifier called "amp_lower"?
 * Returns 1 if so, 0 if not, and -1 on error.
 */
static int node_is_amp_lower(__isl_keep isl_schedule_node *node) {
    return is_marked(node, "amp_lower");
}

/* Is "node" a mark node with an identifier called "shared"?
 * Returns 1 if so, 0 if not, and -1 on error.
 */
static int node_is_shared(__isl_keep isl_schedule_node *node) {
    return is_marked(node, "shared");
}

/* Is "node" a mark node with an identifier called "thread"?
 * In the amp_kernel, a 'thread' mark means an atomic calculation,
 * which is left without modification.
 * Returns 1 if so, 0 if not, and -1 on error.
 */
static int node_is_thread(__isl_keep isl_schedule_node *node) {
    return is_marked(node, "thread");
}

/* Should this array reference group be mapped to shared or global memory?
 * A group for which a shared tile has been computed is accessed through
 * that tile; all other groups are accessed directly in global memory.
 * (Unlike the GPU backend, no private tiles are computed here.)
 */
enum ppcg_group_access_type amp_array_ref_group_type(struct amp_array_ref_group *group) {
    return group->shared_tile ? ppcg_access_shared : ppcg_access_global;
}

/* Return the effective amp_array_tile associated to "group" or
 * NULL if there is no such amp_array_tile.
 *
 * The original switch had no default case, so the function could
 * fall off the end without returning a value (undefined behavior)
 * for any enum value other than the two handled here.
 */
struct amp_array_tile *amp_array_ref_group_tile(struct amp_array_ref_group *group)
{
    switch (amp_array_ref_group_type(group))
    {
    case ppcg_access_shared:
        return group->shared_tile;
    case ppcg_access_global:
    default:
        return NULL;
    }
}

/* Given an index expression "index" of the form
 *
 *	L -> F(A),
 *
 * with F(A) either A or some subfield of A and L the AST loop iterators,
 * and a tiling "tiling" of the form
 *
 *	[L -> A] -> T
 *
 * apply the tiling to the outer array in the index expression to obtain
 *
 *	L -> T(A)
 *
 * If F(A) is some subfield of A, then separate the member access
 * into the base index expression and the field index expression,
 * apply the tiling to the base index expression and combine the result
 * with the field index expression.
 *
 * If F(A) is A, then modify index to keep track of the iterators
 *
 *	L -> [L -> A]
 *
 * and combine the result with the tiling to obtain a tiled index expression
 * in terms of the AST loop iterators
 *
 *	L -> T
 */
static __isl_give isl_multi_pw_aff *tile_outer(
	__isl_take isl_multi_pw_aff *index, __isl_take isl_multi_pw_aff *tiling)
{
	isl_bool is_wrapping;
	isl_space *space;
	isl_multi_pw_aff *mpa;

	is_wrapping = isl_multi_pw_aff_range_is_wrapping(index);
	if (is_wrapping < 0)
		goto error;
	if (is_wrapping)
	{
		/* Member access F(A): split off the field expression,
		 * recursively tile the base expression and recombine.
		 */
		isl_multi_pw_aff *field;

		field = isl_multi_pw_aff_copy(index);
		field = isl_multi_pw_aff_range_factor_range(field);
		index = isl_multi_pw_aff_range_factor_domain(index);
		index = tile_outer(index, tiling);
		return isl_multi_pw_aff_range_product(index, field);
	}

	/* Plain access: turn L -> A into L -> [L -> A] and compose with
	 * the tiling [L -> A] -> T to obtain L -> T.
	 */
	space = isl_space_domain(isl_multi_pw_aff_get_space(index));
	space = isl_space_map_from_set(space);
	mpa = isl_multi_pw_aff_identity(space);
	index = isl_multi_pw_aff_range_product(mpa, index);
	index = isl_multi_pw_aff_pullback_multi_pw_aff(tiling, index);

	return index;
error:
	/* Both arguments are __isl_take; release them on error. */
	isl_multi_pw_aff_free(index);
	isl_multi_pw_aff_free(tiling);
	return NULL;
}

/* Index transformation callback for pet_stmt_build_ast_exprs.
 *
 * "index" expresses the array indices in terms of statement iterators
 *
 * We first reformulate "index" in terms of the AST loop iterators.
 * Then we check if we are accessing the global array or
 * a shared/private copy.  In particular, if we are not inside a kernel
 * then we must be accessing a global array.
 * In the former case, we simply return
 * the updated index.  If "index" is an affine expression rather
 * than an array access, then we also return the updated index here.
 *
 * If no reference groups have been computed for the array,
 * then we can only be accessing the global array.
 *
 * Otherwise, we apply the tiling to the index.
 * This tiling is of the form
 *
 *	[D -> A] -> T
 *
 * where D corresponds to the outer tile->depth dimensions of
 * the kernel schedule.
 * The index is of the form
 *
 *	L -> A
 *
 * We update the tiling to refer to the AST loop iterators
 *
 *	[L -> A] -> T
 *
 * and combine it with the index to obtain a tiled index expression in terms
 * of the AST loop iterators
 *
 *	L -> T
 *
 * Note that while the tiling applies directly to an outer array.
 * the index may refer to some subfield of this outer array.
 * In such cases, the result will refer to the same subfield of the tile.
 * That is, an index expression of the form  L -> F(A) will be transformed
 * into an index expression of the form L -> F(T).
 */
static __isl_give isl_multi_pw_aff *transform_index(
	__isl_take isl_multi_pw_aff *index, __isl_keep isl_id *ref_id,
	void *user)
{
	struct ppcg_transform_data *data = user;
	struct amp_stmt_access *access;
	struct amp_array_ref_group *group;
	struct amp_array_tile *tile;
	isl_pw_multi_aff *iterator_map;
	int i;
	int dim;
	const char *name;
	isl_space *space;
	isl_multi_pw_aff *tiling;
	isl_pw_multi_aff *pma;
	isl_pw_multi_aff *sched2depth;

	/* Reset the per-reference state; transform_expr reads it later. */
	data->array = NULL;

	/* Reformulate "index" in terms of the AST loop iterators. */
	iterator_map = isl_pw_multi_aff_copy(data->iterator_map);
	index = isl_multi_pw_aff_pullback_pw_multi_aff(index, iterator_map);

	/* Outside a kernel, only the global array can be accessed. */
	if (!data->kernel)
		return index;

	access = find_access(data->accesses, ref_id);
	if (!access)
		return index;
	/* No output tuple name means "index" is an affine expression
	 * rather than an array access; leave it untouched.
	 */
	if (!isl_map_has_tuple_name(access->access, isl_dim_out))
		return index;

	name = get_outer_array_name(access->access);
	if (!name)
		return isl_multi_pw_aff_free(index);
	i = find_array_index(data->kernel, name);
	if (i < 0)
		isl_die(isl_multi_pw_aff_get_ctx(index), isl_error_internal,
				"cannot find array",
				return isl_multi_pw_aff_free(index));
	data->local_array = &data->kernel->array[i];
	data->array = data->local_array->array;

	/* Without a reference group, the access is necessarily global. */
	group = find_ref_group(data->local_array, access);
	if (!group)
	{
		data->global = 1;
		return index;
	}

	/* A group without an effective tile is also accessed globally. */
	tile = amp_array_ref_group_tile(group);
	data->global = !tile;
	if (!tile)
		return index;

	/* Build the tiling [L -> A] -> T by composing the group tiling
	 * [D -> A] -> T with (sched2depth x id_A), where sched2depth is
	 * data->sched2copy restricted to the outer tile->depth dimensions.
	 */
	space = isl_space_domain(isl_multi_aff_get_space(tile->tiling));
	space = isl_space_range(isl_space_unwrap(space));
	space = isl_space_map_from_set(space);
	pma = isl_pw_multi_aff_identity(space);
	sched2depth = isl_pw_multi_aff_copy(data->sched2copy);
	dim = isl_pw_multi_aff_dim(sched2depth, isl_dim_out);
	sched2depth = isl_pw_multi_aff_drop_dims(sched2depth, isl_dim_out, tile->depth, dim - tile->depth);
	pma = isl_pw_multi_aff_product(sched2depth, pma);
	tiling = isl_multi_pw_aff_from_multi_aff(isl_multi_aff_copy(tile->tiling));
	tiling = isl_multi_pw_aff_pullback_pw_multi_aff(tiling, pma);

	/* Apply the tiling to the outer array of the index expression. */
	index = tile_outer(index, tiling);

	return index;
}

/* Dereference "expr" by adding an index [0].
 * The original "expr" is assumed not to have any indices.
 *
 * If "expr" is a member access, then the dereferencing needs
 * to be applied to the structure argument of this member access.
 */
static __isl_give isl_ast_expr *dereference(__isl_take isl_ast_expr *expr)
{
	isl_ctx *ctx;
	isl_ast_expr *arg0, *res;
	isl_ast_expr_list *list;

	arg0 = isl_ast_expr_get_op_arg(expr, 0);
	if (!arg0)
		return isl_ast_expr_free(expr);
	/* For a member access, dereference the structure argument instead. */
	if (isl_ast_expr_get_type(arg0) == isl_ast_expr_op &&
		isl_ast_expr_get_op_type(arg0) == isl_ast_op_member)
	{
		isl_ast_expr *arg;

		arg = isl_ast_expr_get_op_arg(arg0, 0);
		arg = dereference(arg);
		arg0 = isl_ast_expr_set_op_arg(arg0, 0, arg);
		expr = isl_ast_expr_set_op_arg(expr, 0, arg0);

		return expr;
	}
	isl_ast_expr_free(arg0);

	/* Otherwise, build the access expression A[0]. */
	ctx = isl_ast_expr_get_ctx(expr);
	res = isl_ast_expr_from_val(isl_val_zero(ctx));
	list = isl_ast_expr_list_from_ast_expr(res);
	res = isl_ast_expr_get_op_arg(expr, 0);
	res = isl_ast_expr_access(res, list);
	isl_ast_expr_free(expr);

	return res;
}

/* Linearize the index expression "expr" based on the array bounds
 * of "array".
 *
 * That is, transform expression
 *
 *	A[i_0][i_1]...[i_n]
 *
 * to
 *
 *	A[(..((i_0 * b_1 + i_1) ... ) * b_n + i_n]
 *
 * where b_0, b_1, ..., b_n are the bounds on the array.
 *
 * If the base of "expr" is a member access, then the linearization needs
 * to be applied to the structure argument of this member access.
 *
 * In the base case, if "expr" has no arguments (other than the name of
 * the array), then we are passing an entire array to a function.
 * In this case, there is nothing to linearize.
 * Note that at this point an expression with no arguments can
 * only be an entire array because the scalar case and
 * the case of single struct are handled by the caller.
 *
 * If the number of specified index expressions in "expr"
 * is smaller than the dimension of the accessed array,
 * then the missing i_j also do not appear in the linearized expression.
 * Furthermore, since such an expression does not refer to a single
 * element while the default linearized expression would refer to
 * a single element, we return the expression
 *
 *	A + (..((i_0 * b_1 + i_1) ... ) * b_l + i_l)
 *
 * instead.  Note that because of the special case handling above,
 * we can assume here that there is at least one index expression.
 */
__isl_give isl_ast_expr *amp_local_array_info_linearize_index(
	struct amp_local_array_info *array, __isl_take isl_ast_expr *expr)
{
	int i, n;
	isl_ast_expr *arg0;
	isl_ast_expr *res;
	isl_ast_expr_list *list;

	/* For a member access, linearize the structure argument instead. */
	arg0 = isl_ast_expr_get_op_arg(expr, 0);
	if (isl_ast_expr_get_type(arg0) == isl_ast_expr_op &&
		isl_ast_expr_get_op_type(arg0) == isl_ast_op_member)
	{
		isl_ast_expr *arg;

		arg = isl_ast_expr_get_op_arg(arg0, 0);
		arg = amp_local_array_info_linearize_index(array, arg);
		arg0 = isl_ast_expr_set_op_arg(arg0, 0, arg);
		expr = isl_ast_expr_set_op_arg(expr, 0, arg0);

		return expr;
	}
	isl_ast_expr_free(arg0);

	/* Only the array name, no indices: an entire array is passed
	 * along and there is nothing to linearize.
	 */
	if (isl_ast_expr_get_op_n_arg(expr) == 1)
		return expr;

	/* Fold the indices into one expression with Horner's scheme,
	 * multiplying by the bounds from array->bound_expr.
	 */
	n = isl_ast_expr_get_op_n_arg(expr);
	res = isl_ast_expr_get_op_arg(expr, 1);
	for (i = 1; i < array->n_index; ++i)
	{
		isl_ast_expr *expr_i;

		expr_i = isl_ast_expr_get_op_arg(array->bound_expr, 1 + i);
		res = isl_ast_expr_mul(res, expr_i);

		/* Indices beyond those specified in "expr" contribute nothing. */
		if (i + 1 >= n)
			continue;
		expr_i = isl_ast_expr_get_op_arg(expr, i + 1);
		res = isl_ast_expr_add(res, expr_i);
	}

	if (1 + array->n_index > n)
	{
		/* Fewer indices than dimensions: return A + offset. */
		res = isl_ast_expr_add(isl_ast_expr_get_op_arg(expr, 0), res);
	}
	else
	{
		/* Full index: return the access A[offset]. */
		list = isl_ast_expr_list_from_ast_expr(res);
		res = isl_ast_expr_get_op_arg(expr, 0);
		res = isl_ast_expr_access(res, list);
	}

	isl_ast_expr_free(expr);

	return res;
}

/* Is "array" a read-only scalar?
 * This is just an accessor for the precomputed read_only_scalar flag
 * (presumably set when the amp_array_info is extracted — the setter
 * is not visible in this file).
 */
int amp_array_is_read_only_scalar(struct amp_array_info *array)
{
    return array->read_only_scalar;
}

/* AST expression transformation callback for pet_stmt_build_ast_exprs.
 *
 * If the AST expression refers to an array that is not accessed
 * at all, then this means the value of the expression is not used,
 * so we might as well print zero (NULL pointer) instead.
 *
 * If the AST expression refers to a global scalar that is not
 * a read-only scalar, then its address was passed to the kernel and
 * we need to dereference it.
 *
 * If the AST expression refers to an access to a global array,
 * then we linearize the access exploiting the bounds in data->local_array.
 */
static __isl_give isl_ast_expr *transform_expr(__isl_take isl_ast_expr *expr,
											   __isl_keep isl_id *id, void *user)
{
	struct ppcg_transform_data *data = user;

	/* "id" is unused; the relevant state was recorded in "data"
	 * by transform_index for this same reference.
	 */
	if (!data->array)
		return expr;
	/* Unused array: its value is irrelevant, print zero instead. */
	if (!data->array->accessed)
	{
		isl_ctx *ctx;

		ctx = isl_ast_expr_get_ctx(expr);
		isl_ast_expr_free(expr);
		return isl_ast_expr_from_val(isl_val_zero(ctx));
	}
	if (amp_array_is_read_only_scalar(data->array))
		return expr;
	if (!data->global)
		return expr;
	/* A global non-read-only scalar was passed by address;
	 * dereference it.
	 */
	if (data->array->n_index == 0)
		return dereference(expr);
	if (!data->array->linearize)
		return expr;

	return amp_local_array_info_linearize_index(data->local_array, expr);
}

/* Free a ppcg_kernel_stmt and the AST expressions it owns.
 * Installed as the free_user callback of annotation ids,
 * hence the void* signature.  A NULL argument is a no-op.
 */
static void ppcg_kernel_stmt_free(void *user)
{
	struct ppcg_kernel_stmt *stmt = user;

	if (!stmt)
		return;

	if (stmt->type == ppcg_kernel_copy) {
		isl_ast_expr_free(stmt->u.c.index);
		isl_ast_expr_free(stmt->u.c.local_index);
	} else if (stmt->type == ppcg_kernel_domain) {
		isl_id_to_ast_expr_free(stmt->u.d.ref2expr);
	}

	free(stmt);
}

/* This function is called for each instance of a user statement
 * in the kernel "kernel", identified by "gpu_stmt".
 * "kernel" may be NULL if we are not inside a kernel.
 *
 * We attach a struct ppcg_kernel_stmt to the "node", containing
 * a computed AST expression for each access, through an annotation
 * with name "user".
 * These AST expressions are computed from iterator_map,
 * which expresses the domain
 * elements in terms of the generated loops, and sched2copy,
 * which expresses the outer copy_schedule_dim dimensions of
 * the kernel schedule computed by PPCG in terms of the generated loops.
 */
static __isl_give isl_ast_node *create_domain_leaf(
	struct amp_ppcg_kernel *kernel, __isl_take isl_ast_node *node,
	__isl_keep isl_ast_build *build, struct amp_stmt *amp_stmt)
{
	struct ppcg_transform_data data;
	struct ppcg_kernel_stmt *stmt;
	isl_ctx *ctx;
	isl_id *id;
	isl_pw_multi_aff *sched2copy;
	isl_map *map;
	isl_pw_multi_aff *iterator_map;
	isl_union_map *schedule;

	if (!node)
		return NULL;
	ctx = isl_ast_node_get_ctx(node);

	stmt = isl_calloc_type(ctx, struct ppcg_kernel_stmt);
	if (!stmt)
		return isl_ast_node_free(node);

	/* iterator_map expresses the domain elements in terms of the
	 * generated AST loops; sched2copy additionally expresses the
	 * outer copy_schedule_dim dimensions (only inside a kernel).
	 */
	schedule = isl_ast_build_get_schedule(build);
	map = isl_map_reverse(isl_map_from_union_map(schedule));
	iterator_map = isl_pw_multi_aff_from_map(map);
	if (kernel)
		sched2copy = compute_sched_to_copy(kernel, isl_pw_multi_aff_copy(iterator_map));
	else
		sched2copy = NULL;

	stmt->type = ppcg_kernel_domain;
	stmt->u.d.stmt = amp_stmt;

	/* NOTE(review): data.array, data.local_array and data.global are
	 * left uninitialized here; transform_index (re)sets them before
	 * transform_expr reads them for the same reference.
	 */
	data.kernel = kernel;
	data.accesses = stmt->u.d.stmt->accesses;
	data.iterator_map = iterator_map;
	data.sched2copy = sched2copy;
	stmt->u.d.ref2expr = pet_stmt_build_ast_exprs(stmt->u.d.stmt->stmt,
												  build, &transform_index, &data,
												  &transform_expr, &data);

	isl_pw_multi_aff_free(iterator_map);
	isl_pw_multi_aff_free(sched2copy);

	/* The annotation id takes ownership of "stmt". */
	id = isl_id_alloc(ctx, "user", stmt);
	id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free);
	if (!id)
		ppcg_kernel_stmt_free(stmt);
	return isl_ast_node_set_annotation(node, id);
}

/* Construct an identity multi_aff on the wrapped map space derived
 * from the array space of "group".  The input tuple is named "read"
 * or "write" (according to "read") and carries "group" as its user
 * pointer, so the statement can later be recognized by name.
 */
static __isl_give isl_multi_aff *create_from_access(isl_ctx *ctx,
													struct amp_array_ref_group *group, int read)
{
	isl_id *tuple_id;
	isl_space *space;

	space = isl_space_from_range(isl_space_copy(group->array->space));
	space = isl_space_map_from_set(isl_space_wrap(space));

	tuple_id = isl_id_alloc(ctx, read ? "read" : "write", group);
	space = isl_space_set_tuple_id(space, isl_dim_in, tuple_id);

	return isl_multi_aff_identity(space);
}

/* Does "array" need to be allocated on the device?
 * A read-only scalar is passed as a kernel argument and therefore
 * requires no allocation; every other array does.
 */
int amp_array_requires_allocation(struct amp_array_info *array)
{
    return !amp_array_is_read_only_scalar(array);
}

/* Build AST expressions for the amp array sizes of all arrays in "prog"
 * that require allocation on the device using "build", as well as
 * for the original array sizes of all arrays that need to be declared
 * on the host.
 * "node" is freed in case of error.
 */
__isl_give isl_ast_node *amp_build_array_bounds(__isl_take isl_ast_node *node, amp_prog *prog, __isl_keep isl_ast_build *build)
{
    int i;

    /* Build a size expression for every array.
     * NOTE(review): the filter on amp_array_requires_allocation is
     * deliberately disabled below, so read-only scalars also get
     * a bound expression here.
     */
    for (i = 0; i < prog->n_array; ++i)
    {
        struct amp_array_info *array = &prog->array[i];
        isl_multi_pw_aff *size;
        isl_ast_expr *expr;

        // if (!amp_array_requires_allocation(array))
        //     continue;

        size = isl_multi_pw_aff_copy(array->bound);
        expr = ppcg_build_size_expr(size, build);
        array->bound_expr = expr;
        if (!expr)
            return isl_ast_node_free(node);
    }

    /* Build size expressions, from the declared extent, for arrays
     * that need to be declared on the host.
     */
    for (i = 0; i < prog->n_array; ++i)
    {
        struct amp_array_info *array = &prog->array[i];
        isl_set *extent;
        isl_multi_pw_aff *size;
        isl_ast_expr *expr;

        if (!array->declare_local)
            continue;
        extent = isl_set_copy(array->declared_extent);
        size = ppcg_size_from_extent(extent);
        expr = ppcg_build_size_expr(size, build);
        array->declared_size = expr;
        if (!expr)
            return isl_ast_node_free(node);
    }

    return node;
}

/* This function is called for each statement node in the AST
 * for copying to or from shared/private memory.
 * Attach a pointer to a ppcg_kernel_stmt representing the copy
 * statement to the node.
 * The statement name is "read" or "write", depending on whether we are
 * reading from global memory or writing to global memory.
 *
 * The schedule is of the form
 *
 *	type[D -> A] -> L
 *
 * where D corresponds to the outer tile->depth dimensions of
 * the kernel schedule, A to the global array and L to the outer
 * generated AST schedule.
 * We compute the inverse and strip off the type, resulting in
 *
 *	L -> [D -> A]
 *
 * We combine this mapping with on the one hand the projection
 *
 *	[D -> A] -> A
 *
 * and on the other hand the group tiling
 *
 *	[D -> A] -> T
 *
 * resulting in
 *
 *	L -> A		and 	L -> T
 *
 * and store the corresponding expressions in stmt->index and stmt->local_index,
 * where stmt points to the ppcg_kernel_stmt that is attached to the node.
 * stmt->index is linearized if the global memory array is linearized.
 *
 * Fixes over the original version:
 * - a NULL "kernel" is now an error that frees "node" instead of
 *   being merely reported and then dereferenced further down;
 * - a missing tile (or tile without tiling) no longer leaks the
 *   multi_aff constructed by create_from_access.
 */
static __isl_give isl_ast_node *create_access_leaf(struct amp_ppcg_kernel *kernel,
												   struct amp_array_ref_group *group, __isl_take isl_ast_node *node,
												   __isl_keep isl_ast_build *build)
{
	struct ppcg_kernel_stmt *stmt;
	struct amp_array_tile *tile;
	isl_id *id;
	isl_ast_expr *expr;
	isl_space *space;
	isl_map *access;
	isl_pw_multi_aff *pma, *pma2;
	const char *type;
	isl_ctx *ctx;

	if (!node)
		return NULL;
	if (!kernel)
	{
		/* kernel->ctx is needed below, so we cannot continue. */
		printf("\n\033[31m@ERROR:\n       the amp_ppcg_kernel in create_access_leaf,is NULL!!!  \033[0m\n\n");
		return isl_ast_node_free(node);
	}
	ctx = isl_ast_node_get_ctx(node);

	stmt = isl_calloc_type(ctx, struct ppcg_kernel_stmt);
	if (!stmt)
		return isl_ast_node_free(node);

	/* Invert the schedule type[D -> A] -> L and strip the type. */
	access = isl_map_from_union_map(isl_ast_build_get_schedule(build));
	type = isl_map_get_tuple_name(access, isl_dim_in);
	stmt->u.c.read = type && !strcmp(type, "read");
	access = isl_map_reverse(access);
	pma = isl_pw_multi_aff_from_map(access);
	pma = isl_pw_multi_aff_reset_tuple_id(pma, isl_dim_out);

	/* Compose with the projection [D -> A] -> A to obtain L -> A. */
	space = isl_space_range(isl_pw_multi_aff_get_space(pma));
	space = isl_space_unwrap(space);
	pma2 = isl_pw_multi_aff_range_map(space);
	pma2 = isl_pw_multi_aff_pullback_pw_multi_aff(pma2, isl_pw_multi_aff_copy(pma));
	expr = isl_ast_build_access_from_pw_multi_aff(build, pma2);
	if (group->array->linearize)
		expr = amp_local_array_info_linearize_index(group->local_array, expr);
	stmt->u.c.index = expr;

	/* Compose with the group tiling [D -> A] -> T to obtain L -> T.
	 * If no tiling is available, fall back to an identity mapping
	 * on the array space constructed by create_from_access.
	 */
	tile = amp_array_ref_group_tile(group);
	if (!tile || !tile->tiling)
	{
		printf("\n\033[31m@ERROR:\n       the tile->tiling in create_access_leaf,is NULL!!!  \033[0m\n\n");
		pma2 = isl_pw_multi_aff_from_multi_aff(
			create_from_access(kernel->ctx, group, 1));
	}
	else
	{
		pma2 = isl_pw_multi_aff_from_multi_aff(
			isl_multi_aff_copy(tile->tiling));
	}

	pma2 = isl_pw_multi_aff_pullback_pw_multi_aff(pma2, pma);
	expr = isl_ast_build_access_from_pw_multi_aff(build, pma2);
	stmt->u.c.local_index = expr;

	stmt->u.c.array = group->array;
	stmt->u.c.local_array = group->local_array;
	stmt->type = ppcg_kernel_copy;

	/* The annotation id takes ownership of "stmt". */
	id = isl_id_alloc(kernel->ctx, "copy", stmt);
	id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free);
	if (!id)
		ppcg_kernel_stmt_free(stmt);
	return isl_ast_node_set_annotation(node, id);
}

/* Transform the accesses in the statement associated to the domain
 * called by "node" to refer to the AST loop iterators, construct
 * corresponding AST expressions using "build",
 * and annotate the node accordingly.
 *
 * A user node either corresponds to an original statement of the scop
 * (handled by create_domain_leaf) or to a "read"/"write" copy statement
 * introduced by AMP (handled by create_access_leaf).
 *
 * Fixes over the original version: the isl_calloc_type'd amp_stmt that
 * was immediately overwritten by find_amp_stmt is gone, "id" is freed
 * on every path that does not hand it off, and unused locals and dead
 * commented-out code have been removed.
 */
static __isl_give isl_ast_node *at_each_domain_with_amp(__isl_take isl_ast_node *node, __isl_keep isl_ast_build *build, void *user)
{
	struct ppcg_at_domain_data *data = user;
	amp_prog *prog = data->prog;
	isl_ast_expr *expr, *arg;
	isl_id *id;
	struct amp_stmt *amp_stmt;
	const char *name;
	void *p;

	/* The user node is a call expression; its first argument carries
	 * the identifier of the statement.
	 */
	expr = isl_ast_node_user_get_expr(node);
	arg = isl_ast_expr_get_op_arg(expr, 0);
	isl_ast_expr_free(expr);
	id = isl_ast_expr_get_id(arg);
	isl_ast_expr_free(arg);
	if (!id)
		return isl_ast_node_free(node);
	name = isl_id_get_name(id);
	p = isl_id_get_user(id);

	/* An original statement of the scop. */
	amp_stmt = find_amp_stmt(data->prog, id);
	if (amp_stmt)
	{
		isl_id_free(id);
		return create_domain_leaf(data->kernel, node, build, amp_stmt);
	}

	/* A copy statement; its user pointer is the array reference group. */
	if (!strcmp(name, "read") || !strcmp(name, "write"))
	{
		struct amp_array_ref_group *group = p;

		isl_id_free(id);
		/** Build AST expressions for the amp array sizes of all arrays in "prog" **/
		node = amp_build_array_bounds(node, prog, build);
		return create_access_leaf(data->kernel, group, node, build);
	}

	/* Unexpected statement name: keep the previous behavior of
	 * attaching the raw id (set_annotation takes "id").
	 */
	fprintf(stderr, "\n\033[31m@ERROR:\n       the at_each_domain_with_amp function meets an unexpected errors.  \033[0m\n\n");
	return isl_ast_node_set_annotation(node, id);
}

/* Set *user (an int initialized to 0 by the caller) to the maximum
 * of the schedule depths of the leaf nodes for which this callback
 * is invoked.  Non-leaf nodes are descended into; leaves are not.
 */
static isl_bool update_depth(__isl_keep isl_schedule_node *node, void *user)
{
	int *max_depth = user;
	int leaf_depth;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
		return isl_bool_true;

	leaf_depth = isl_schedule_node_get_schedule_depth(node);
	if (leaf_depth > *max_depth)
		*max_depth = leaf_depth;

	return isl_bool_false;
}

/* This function is called for each node in a CPU AST.
 * In case of a user node, print the macro definitions required
 * for printing the AST expressions in the annotation, if any.
 * For other nodes, return true such that descendants are also
 * visited.
 *
 * In particular, print the macro definitions needed for the substitutions
 * of the original user statements.
 */
/* Callback for each node of a CPU AST.
 *
 * Non-user nodes are simply descended into.  For a user node, print
 * the macro definitions required by the AST expressions stored in its
 * annotation and stop descending; a missing annotation is an error.
 */
static isl_bool at_node(__isl_keep isl_ast_node *node, void *user)
{
	isl_printer **p = user;
	struct ppcg_stmt *stmt;
	isl_id *annotation;

	if (isl_ast_node_get_type(node) != isl_ast_node_user)
		return isl_bool_true;

	annotation = isl_ast_node_get_annotation(node);
	stmt = annotation ? isl_id_get_user(annotation) : NULL;
	isl_id_free(annotation);

	if (!stmt)
		return isl_bool_error;

	*p = ppcg_print_body_macros(*p, stmt->ref2expr);
	return *p ? isl_bool_false : isl_bool_error;
}

/* Print the required macros for the CPU AST "node" to "p",
 * including those needed for the user statements inside the AST.
 * On failure, "p" is freed and NULL is returned.
 */
static __isl_give isl_printer *cpu_print_macros(__isl_take isl_printer *p,
	__isl_keep isl_ast_node *node)
{
	if (isl_ast_node_foreach_descendant_top_down(node, &at_node, &p) >= 0)
		return ppcg_print_macros(p, node);

	return isl_printer_free(p);
}

/* Initialize the fields of "build_info".
 *
 * AST generation starts outside of any parallel for loop.
 * The contraction of the entire schedule tree is taken from
 * the node directly below the root.
 */
static isl_stat init_build_info(struct ast_build_userinfo *build_info,
	struct ppcg_scop *scop, __isl_keep isl_schedule *schedule)
{
	isl_schedule_node *root;

	root = isl_schedule_node_child(isl_schedule_get_root(schedule), 0);

	build_info->scop = scop;
	build_info->in_parallel_for = 0;
	build_info->contraction = isl_schedule_node_get_subtree_contraction(root);

	isl_schedule_node_free(root);

	return isl_stat_non_null(build_info->contraction);
}

/* Clear all memory allocated by "build_info".
 * Only the contraction is owned by "build_info";
 * the scop pointer is borrowed and is not freed here.
 */
static void clear_build_info(struct ast_build_userinfo *build_info)
{
	isl_union_pw_multi_aff_free(build_info->contraction);
}

/* Code generate the scop 'scop' using "schedule"
 * and print the corresponding C code to 'p'.
 */
static __isl_give isl_printer *print_scop(struct ppcg_scop *scop,
	__isl_take isl_schedule *schedule, __isl_take isl_printer *p,
	struct ppcg_options *options)
{
	isl_ctx *ctx = isl_printer_get_ctx(p);
	isl_ast_build *build;
	isl_ast_print_options *print_options;
	isl_ast_node *tree;
	isl_id_list *iterators;
	struct ast_build_userinfo build_info;
	int depth;

	/* Determine the maximum schedule depth so that enough
	 * AST iterator names can be generated.
	 */
	depth = 0;
	if (isl_schedule_foreach_schedule_node_top_down(schedule, &update_depth,
						&depth) < 0)
		goto error;

	build = isl_ast_build_alloc(ctx);
	iterators = ppcg_scop_generate_names(scop, depth, "c");
	build = isl_ast_build_set_iterators(build, iterators);

	build = isl_ast_build_set_at_each_domain(build, &at_each_domain, scop);

	/* For OpenMP output, track parallel for loops during generation. */
	if (options->openmp) {
		if (init_build_info(&build_info, scop, schedule) < 0)
			build = isl_ast_build_free(build);

		build = isl_ast_build_set_before_each_for(build,
							&ast_build_before_for,
							&build_info);
		build = isl_ast_build_set_after_each_for(build,
							&ast_build_after_for,
							&build_info);
	}

	/* Generate the AST; this consumes "schedule". */
	tree = isl_ast_build_node_from_schedule(build, schedule);
	isl_ast_build_free(build);

	if (options->openmp)
		clear_build_info(&build_info);

	print_options = isl_ast_print_options_alloc(ctx);
	print_options = isl_ast_print_options_set_print_user(print_options,
							&print_user, NULL);

	print_options = isl_ast_print_options_set_print_for(print_options,
							&print_for, NULL);

	/* Print the macros needed by the AST, then the AST itself. */
	p = cpu_print_macros(p, tree);
	p = isl_ast_node_print(tree, p, print_options);

	isl_ast_node_free(tree);

	return p;
error:
	isl_schedule_free(schedule);
	isl_printer_free(p);
	return NULL;
}

/* Build access AST expressions for the localized array sizes using "build".
 * Store the result in local->bound_expr (and the most recent one
 * in kernel->size_expr).
 * Only do this for arrays for which localized bounds have been computed.
 *
 * Fix over the original version: ppcg_build_size_expr takes (consumes)
 * its isl_multi_pw_aff argument, so passing the same "size" to two
 * calls used an already-freed object.  Each call now gets its own copy
 * of the bound.  Any previously stored kernel->size_expr is freed
 * before being overwritten (assumes it starts out NULL in a fresh,
 * zero-initialized kernel — isl_ast_expr_free(NULL) is a no-op).
 */
static isl_stat build_local_array_sizes(struct amp_ppcg_kernel *kernel,
										__isl_keep isl_ast_build *build)
{
	int i;

	for (i = 0; i < kernel->n_array; ++i)
	{
		struct amp_local_array_info *local = &kernel->array[i];

		if (local->n_group == 0)
			continue;
		local->bound_expr = ppcg_build_size_expr(
			isl_multi_pw_aff_copy(local->bound), build);
		isl_ast_expr_free(kernel->size_expr);
		kernel->size_expr = ppcg_build_size_expr(
			isl_multi_pw_aff_copy(local->bound), build);
		if (!local->bound_expr || !kernel->size_expr)
			return isl_stat_error;
	}

	return isl_stat_ok;
}

/* Build access AST expressions for the localized array sizes of
 * "kernel" using "build".  Currently this just delegates to
 * build_local_array_sizes and propagates its status.
 */
static isl_stat build_amp_array_sizes(struct amp_ppcg_kernel *kernel,
									  __isl_keep isl_ast_build *build)
{
	return build_local_array_sizes(kernel, build);
}

/* This function is called before the AST generator starts traversing
 * the schedule subtree of a node with mark "mark".
 *
 * On an "amp_kernel" mark, record the kernel (stored as the user
 * pointer of the mark) in data->kernel for use in the domain callback
 * and build AST expressions for the localized array sizes.
 */
static isl_stat before_mark_with_amp(__isl_keep isl_id *mark,
									 __isl_keep isl_ast_build *build, void *user)
{
	struct ppcg_at_domain_data *data = user;

	if (!mark)
		return isl_stat_error;
	if (strcmp(isl_id_get_name(mark), "amp_kernel") != 0)
		return isl_stat_ok;

	data->kernel = isl_id_get_user(mark);
	if (build_amp_array_sizes(data->kernel, build) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}
/* This function is called after the AST generator has finished traversing
 * the schedule subtree of a mark node.  "node" points to the corresponding
 * mark AST node.
 *
 * The replacement of an "amp_kernel" mark by a kernel-launch user node
 * (as done in the GPU backend) is deliberately disabled here, so this
 * callback only validates that the mark carries an identifier and
 * otherwise returns the node unchanged.  The unused locals and the
 * dead commented-out launch code of the original version have been
 * removed; "build" and "user" are kept for the callback signature.
 */
static __isl_give isl_ast_node *after_mark_with_amp(__isl_take isl_ast_node *node,
													__isl_keep isl_ast_build *build, void *user)
{
	isl_id *id;

	id = isl_ast_node_mark_get_id(node);
	if (!id)
		return isl_ast_node_free(node);

	isl_id_free(id);
	return node;
}

/* Print an access to the element in the private/shared memory copy
 * described by "stmt".  The index of the copy is recorded in
 * stmt->local_index as an access to the array.
 */
static __isl_give isl_printer *stmt_print_local_index(__isl_take isl_printer *p,
													  struct ppcg_kernel_stmt *stmt)
{
	/* The local index was precomputed as an AST expression. */
	isl_ast_expr *local = stmt->u.c.local_index;

	return isl_printer_print_ast_expr(p, local);
}

/* Check if a amp array is a scalar.  A scalar is a value that is not stored
 * as an array or through a pointer reference, but as a single data element.
 * At the moment, scalars are represented as zero-dimensional arrays.
 * Note that the single data element may be an entire structure.
 */
int amp_array_is_scalar(struct amp_array_info *array)
{
    /* Scalars are represented as zero-dimensional arrays. */
    return !array->n_index;
}

// Release every per-array resource held by "prog", then the array list itself.
static void free_array_info(amp_prog *prog)
{
    int i;

    for (i = 0; i < prog->n_array; ++i)
    {
        struct amp_array_info *array = &prog->array[i];

        free(array->type);
        free(array->name);
        isl_multi_pw_aff_free(array->bound);
        isl_ast_expr_free(array->bound_expr);
        isl_space_free(array->space);
        isl_set_free(array->declared_extent);
        isl_set_free(array->extent);
        isl_ast_expr_free(array->declared_size);
        free(array->refs);
        isl_union_map_free(array->dep_order);
    }
    free(prog->array);
}

/* Print an access to the element in the global memory copy
 * described by "stmt".  The index of the copy is recorded in
 * stmt->index as an access to the array.
 */
static __isl_give isl_printer *stmt_print_global_index(
	__isl_take isl_printer *p, struct ppcg_kernel_stmt *stmt)
{
	struct amp_array_info *array = stmt->u.c.array;

	if (amp_array_is_scalar(array))
	{
		/* A writable scalar is passed by pointer and needs
		 * to be dereferenced. */
		if (!amp_array_is_read_only_scalar(array))
			p = isl_printer_print_str(p, "*");
		return isl_printer_print_str(p, array->name);
	}

	/* isl_printer_print_ast_expr only keeps a reference to its
	 * argument, so the index expression can be printed directly;
	 * the copy/free pair in the original was redundant. */
	return isl_printer_print_ast_expr(p, stmt->u.c.index);
}

/* Print a copy statement.
 *
 * A read copy statement is printed as
 *
 *	local = global;
 *
 * while a write copy statement is printed as
 *
 *	global = local;
 */
static __isl_give isl_printer *ppcg_kernel_print_copy(__isl_take isl_printer *p,
													  struct ppcg_kernel_stmt *stmt)
{
	int is_read = stmt->u.c.read;

	p = isl_printer_start_line(p);
	if (is_read)
	{
		/* local = (float)global; — demote on the way in. */
		p = stmt_print_local_index(p, stmt);
		p = isl_printer_print_str(p, " = (float)");
		p = stmt_print_global_index(p, stmt);
	}
	else
	{
		/* global = (double)local; — promote on the way out. */
		p = stmt_print_global_index(p, stmt);
		p = isl_printer_print_str(p, " = (double)");
		p = stmt_print_local_index(p, stmt);
	}
	p = isl_printer_print_str(p, ";");
	p = isl_printer_end_line(p);

	return p;
}

/* Print the body of the user statement attached to "stmt",
 * substituting the original array accesses through stmt->u.d.ref2expr.
 */
static __isl_give isl_printer *ppcg_kernel_print_domain(__isl_take isl_printer *p,
														struct ppcg_kernel_stmt *stmt)
{
	return pet_stmt_print_body(stmt->u.d.stmt->stmt, p, stmt->u.d.ref2expr);
}

/* Print a user statement in the generated AST.
 * The annotation was attached to the node in at_each_domain.
 *
 * Nodes annotated "user" carry a ppcg_kernel_stmt whose domain body is
 * printed directly.  Any other annotated node also carries a
 * ppcg_kernel_stmt and is dispatched on its type (copy or domain
 * statement).  The unused "prog"/"kernel" locals and the disabled
 * amp_kernel branch of the original have been removed.
 */
static __isl_give isl_printer *print_user_with_amp(__isl_take isl_printer *p,
												   __isl_take isl_ast_print_options *print_options,
												   __isl_keep isl_ast_node *node, void *user)
{
	isl_id *id;
	int is_user;
	struct ppcg_kernel_stmt *stmt;

	isl_ast_print_options_free(print_options);

	id = isl_ast_node_get_annotation(node);
	is_user = !strcmp(isl_id_get_name(id), "user");
	stmt = isl_id_get_user(id);
	isl_id_free(id);

	if (is_user)
		return ppcg_kernel_print_domain(p, stmt);

	if (stmt)
	{
		switch (stmt->type)
		{
		case ppcg_kernel_copy:
			return ppcg_kernel_print_copy(p, stmt);
		case ppcg_kernel_domain:
			return ppcg_kernel_print_domain(p, stmt);
		}
	}

	return p;
}

/* This function is called for each node in a CPU AST.
 * In case of a user node, print the macro definitions required
 * for printing the AST expressions in the annotation, if any.
 * For other nodes, return true such that descendants are also
 * visited.
 *
 * User nodes annotated "amp_kernel" carry an amp_ppcg_kernel and are
 * currently skipped (their subtree is still visited); all other user
 * nodes carry a ppcg_kernel_stmt whose index/body macros are printed.
 * The unreachable commented-out code after the return in the original
 * has been removed.
 */
static isl_bool at_node_with_amp(__isl_keep isl_ast_node *node, void *user)
{
	const char *name;
	int is_kernel;
	struct amp_ppcg_kernel *kernel;
	struct ppcg_kernel_stmt *stmt;
	isl_id *id;
	isl_printer **p = user;

	if (isl_ast_node_get_type(node) != isl_ast_node_user)
		return isl_bool_true;

	id = isl_ast_node_get_annotation(node);
	if (!id)
		return isl_bool_false;

	name = isl_id_get_name(id);
	if (!name)
		return isl_bool_error;

	is_kernel = !strcmp(name, "amp_kernel");
	kernel = is_kernel ? isl_id_get_user(id) : NULL;
	stmt = is_kernel ? NULL : isl_id_get_user(id);
	isl_id_free(id);

	if ((is_kernel && !kernel) || (!is_kernel && !stmt))
		return isl_bool_error;

	if (is_kernel)
	{
		/* Debug trace retained from the original implementation. */
		printf("\n\n\n the is kernel is true! but return directly! \n\n\n");
		return isl_bool_true;
	}
	if (stmt->type == ppcg_kernel_copy)
	{
		*p = ppcg_ast_expr_print_macros(stmt->u.c.index, *p);
		*p = ppcg_ast_expr_print_macros(stmt->u.c.local_index, *p);
	}
	else if (stmt->type == ppcg_kernel_domain)
	{
		*p = ppcg_print_body_macros(*p, stmt->u.d.ref2expr);
	}
	if (!*p)
		return isl_bool_error;

	return isl_bool_false;
}

/** Return the next-lower-precision C type for "type":
 *  double -> float -> int -> "short int".
 *  Any other type is returned unchanged. **/
char *amp_get_lower_precision_type(char *type) {
    static const char *lowering[][2] = {
        {"double", "float"},
        {"float", "int"},
        {"int", "short int"},
    };
    size_t k;

    for (k = 0; k < sizeof(lowering) / sizeof(lowering[0]); ++k)
        if (strcmp(lowering[k][0], type) == 0)
            return (char *)lowering[k][1];
    return type;
}

/* Return the C type used to declare the AST iterator named "s"
 * (e.g. "c0", "c1", ...).  All generated iterators are plain ints,
 * so "int" is returned unconditionally.
 *
 * The original only handled "c0".."c2" and fell off the end of the
 * function for any other name, which is undefined behavior in a
 * non-void function whose result is used.
 */
char *amp_get_int_precision_name(const char *s)
{
        (void)s;	/* name does not affect the iterator type */
        return "int";
}

/* Return the precision-type name used for array "s":
 * the "C" array keeps full "double" precision, every other
 * array is demoted to "float".
 */
char *amp_get_lower_precision_name(const char *s)
{
    return strcmp("C", s) == 0 ? "double" : "float";
}

/* Print the required macros for the CPU AST "node" to "p",
 * including those needed for the user statements inside the AST.
 */
static __isl_give isl_printer *cpu_print_macros_with_amp(__isl_take isl_printer *p,
														 __isl_keep isl_ast_node *node)
{
	int visited;

	/* First the per-statement macros, then the generic AST macros. */
	visited = isl_ast_node_foreach_descendant_top_down(node,
						&at_node_with_amp, &p);
	if (visited < 0)
		return isl_printer_free(p);

	return ppcg_print_macros(p, node);
}

/* Print the declaration of a single kernel-local array variable "var"
 * at the lowered precision type.  The "C" array is skipped: it is not
 * redeclared at lower precision.
 *
 * For each dimension, the extent printed is the value recorded in
 * var->size, except when that value is one, in which case the array's
 * bound expression for that dimension is printed instead.
 */
static __isl_give isl_printer *print_kernel_var(__isl_take isl_printer *p,
												struct amp_ppcg_kernel_var *var)
{
	int j;

	if (strcmp("C", var->name) == 0)
		return p;

	p = isl_printer_start_line(p);
	p = isl_printer_print_str(p, amp_get_lower_precision_type(var->array->type));
	p = isl_printer_print_str(p, " ");
	p = isl_printer_print_str(p, var->name);
	for (j = 0; j < var->array->n_index; ++j)
	{
		isl_val *v;

		p = isl_printer_print_str(p, "[");
		v = isl_vec_get_element_val(var->size, j);
		if (isl_val_is_one(v))
		{
			isl_ast_expr *bound;

			bound = isl_ast_expr_get_op_arg(var->array->bound_expr, 1 + j);
			p = isl_printer_print_ast_expr(p, bound);
			/* isl_ast_expr_get_op_arg returns a fresh reference;
			 * release it (the original leaked it). */
			isl_ast_expr_free(bound);
		}
		else
			p = isl_printer_print_val(p, v);
		isl_val_free(v);
		p = isl_printer_print_str(p, "]");
	}
	p = isl_printer_print_str(p, ";");
	p = isl_printer_end_line(p);

	return p;
}

/* Print declarations for all local variables of "kernel".
 *
 * "kernel" may be NULL when no "amp_kernel" mark was encountered
 * during AST generation (see print_scop_with_amp, which passes
 * data.kernel); in that case there is nothing to print and "p" is
 * returned unchanged instead of dereferencing a NULL pointer.
 */
static __isl_give isl_printer *print_kernel_vars(__isl_take isl_printer *p,
												 struct amp_ppcg_kernel *kernel)
{
	int i;

	if (!kernel)
		return p;

	for (i = 0; i < kernel->n_var; ++i)
		p = print_kernel_var(p, &kernel->var[i]);

	return p;
}

/* Print a declaration for the amp array corresponding to "array" on "p".
 */
// __isl_give isl_printer *declare_amp_lower_precision_array(__isl_take isl_printer *p, struct amp_array_info *array)
// {
//     int i;

//     p = isl_printer_start_line(p);
// 	// p = isl_printer_print_str(p, array->type);
//     p = isl_printer_print_str(p, amp_get_lower_precision_type(array->type)); // 换成更低精度的类型
//     p = isl_printer_print_str(p, " ");
//     if (array->n_index > 1)
//         p = isl_printer_print_str(p, "(");
//     p = isl_printer_print_str(p, "*amp_lower_");
//     p = isl_printer_print_str(p, array->name);
//     if (array->n_index > 1)
//     {
//         p = isl_printer_print_str(p, ")");
//         for (i = 1; i < array->n_index; i++)
//         {
//             isl_ast_expr *bound;
//             bound = isl_ast_expr_get_op_arg(array->bound_expr, 1 + i);
//             p = isl_printer_print_str(p, "[");
//             p = isl_printer_print_ast_expr(p, bound);
//             p = isl_printer_print_str(p, "]");
//             isl_ast_expr_free(bound);
//         }
//     }
//     p = isl_printer_print_str(p, ";");
//     p = isl_printer_end_line(p);

//     return p;
// }

// __isl_give isl_printer *declare_amp_lower_precision_arrays(__isl_take isl_printer *p, amp_prog *prog)
// {
//     int i;

//     for (i = 0; i < prog->n_array; ++i)
//     {
//         if (!amp_array_requires_allocation(&prog->array[i]))
//             continue;

//         p = declare_amp_lower_precision_array(p, &prog->array[i]);
//     }
//     p = isl_printer_start_line(p);
//     p = isl_printer_end_line(p);

//     return p;
// }

/* Print an expression for the size of "array" in bytes.
 */
// __isl_give isl_printer *amp_array_info_print_size(__isl_take isl_printer *prn, struct amp_array_info *array)
// {
//     int i;

//     for (i = 0; i < array->n_index; ++i)
//     {
//         isl_ast_expr *bound;

//         prn = isl_printer_print_str(prn, "(");
//         bound = isl_ast_expr_get_op_arg(array->bound_expr, 1 + i);
//         prn = isl_printer_print_ast_expr(prn, bound);
//         isl_ast_expr_free(bound);
//         prn = isl_printer_print_str(prn, ") * ");
//     }
//     prn = isl_printer_print_str(prn, "sizeof(");
//     prn = isl_printer_print_str(prn, amp_get_lower_precision_type(array->type)); // 换成更低精度的
//     prn = isl_printer_print_str(prn, ")");

//     return prn;
// }

// __isl_give isl_printer *allocate_amp_lower_precision_arrays(__isl_take isl_printer *p, amp_prog *prog)
// {
//     int i;

//     for (i = 0; i < prog->n_array; ++i)
//     {
//         struct amp_array_info *array = &prog->array[i];

//         if (!amp_array_requires_allocation(&prog->array[i]))
//             continue;
//         p = ppcg_ast_expr_print_macros(array->bound_expr, p);
//         p = isl_printer_start_line(p);
//         p = isl_printer_print_str(p, "ampCheckReturn(ampMalloc((void **) &amp_lower_");
//         p = isl_printer_print_str(p, prog->array[i].name);
//         p = isl_printer_print_str(p, ", ");
//         p = amp_array_info_print_size(p, &prog->array[i]);
//         p = isl_printer_print_str(p, "));");
//         p = isl_printer_end_line(p);
//     }
//     p = isl_printer_start_line(p);
//     p = isl_printer_end_line(p);

//     return p;
// }

// static __isl_give isl_printer *print_amp_macros(__isl_take isl_printer *p)
// {
//     const char *macros = "\n";
//         "  if (ret != AMP_SUCCESS) {\\\n"
//         "    fprintf(stderr, \"AMP error: %s\\n\", "
//         "amp_error_string(ret)); \\\n"
//         "    fflush(stderr); \\\n"
//         "    assert(ret == AMP_SUCCESS);\\\n  }\n";

//     p = isl_printer_print_str(p, macros);

//     p = isl_printer_start_line(p);
//     p = isl_printer_end_line(p);

//     return p;
// }
/* Print declarations to "p" for arrays that are local to "prog"
 * but that are used on the host and therefore require a declaration.
 */
__isl_give isl_printer *amp_print_local_declarations(__isl_take isl_printer *p,
	struct amp_prog *prog)
{
	int i;

	if (!prog)
		return isl_printer_free(p);

	/* Only arrays explicitly flagged for local declaration are printed. */
	for (i = 0; i < prog->n_array; ++i) {
		struct amp_array_info *array = &prog->array[i];

		if (!array->declare_local)
			continue;
		p = ppcg_print_declaration_with_size(p, array->type,
						array->declared_size);
	}

	return p;
}

/* Return an isl_id called "prefix%d", with "%d" set to "i".
 * If an isl_id with such a name already appears among the variable names
 * of "scop", then adjust the name to "prefix%d_%d".
 */
static __isl_give isl_id *generate_name(struct ppcg_scop *scop,
	const char *prefix, int i)
{
	int j;
	char name[23];	/* buffer for the generated "prefix%d[_%d]" name */
	isl_ctx *ctx;
	isl_id *id;
	int has_name;

	ctx = isl_set_get_ctx(scop->context);
	snprintf(name, sizeof(name), "%s%d", prefix, i);
	id = isl_id_alloc(ctx, name, NULL);

	/* Keep appending "_%d" suffixes until the name no longer clashes
	 * with a variable name already recorded in scop->names. */
	j = 0;
	while ((has_name = isl_id_to_ast_expr_has(scop->names, id)) == 1) {
		isl_id_free(id);
		snprintf(name, sizeof(name), "%s%d_%d", prefix, i, j++);
		id = isl_id_alloc(ctx, name, NULL);
	}

	/* has_name < 0 signals an isl error during the lookup. */
	return has_name < 0 ? isl_id_free(id) : id;
}

/* Print a declaration "int cX;" for each of the "n" iterator names
 * generated from "prefix", using the same clash-avoiding scheme
 * (generate_name) that produces the AST iterator names.
 *
 * The original leaked every generated isl_id as well as an
 * isl_id_list that was allocated but never used; both are fixed here.
 */
static __isl_give isl_printer *print_c0_vars(struct ppcg_scop *scop,
	int n, const char *prefix, __isl_take isl_printer *p)
{
    int i;

	for (i = 0; i < n; ++i) {
		isl_id *id;
		const char *name;

		id = generate_name(scop, prefix, i);
		name = isl_id_get_name(id);

		p = isl_printer_start_line(p);
		p = isl_printer_print_str(p, amp_get_int_precision_name(name));
		p = isl_printer_print_str(p, " ");
		p = isl_printer_print_str(p, name);
		p = isl_printer_print_str(p, ";");
		p = isl_printer_end_line(p);
		/* "name" points into "id", so only release the id afterwards. */
		isl_id_free(id);
	}
	return p;
}

/* Code generate the scop 'scop' using "schedule"
 * and print the corresponding C code to 'p'.
 *
 * When the automatic_mixed_precision option is set, the AMP-specific
 * at-domain and mark callbacks are installed on the AST build, and the
 * iterator and lowered-precision array declarations are printed before
 * the AST itself.
 */
/* TODO: original note marked this function as still to be refined. */
static __isl_give isl_printer *print_scop_with_amp(__isl_take isl_schedule *schedule, 
__isl_take isl_printer *p, struct ppcg_options *options, amp_prog *prog)
{
	// #define DEBUG_PRINT_SCOP_WITH_AMP

	struct ppcg_at_domain_data data;
	struct ppcg_scop *scop = prog->scop;
	isl_ctx *ctx = isl_printer_get_ctx(p);
	isl_ast_build *build;
	isl_ast_print_options *print_options;
	isl_ast_node *tree;
	isl_id_list *iterators;
	struct ast_build_userinfo build_info;
	int depth;

	data.prog = prog;
	data.kernel = NULL;

	/* Determine the maximal schedule depth; it bounds the number of
	 * iterator names that need to be generated. */
	depth = 0;
	if (isl_schedule_foreach_schedule_node_top_down(schedule, &update_depth, &depth) < 0)
		goto error;

	build = isl_ast_build_alloc(ctx);
	iterators = ppcg_scop_generate_names(scop, depth, "c");//iterators: (c0,c1,c2)
    // p = print_c0_vars(scop, depth, "c", p);
	build = isl_ast_build_set_iterators(build, iterators);

	if (options->automatic_mixed_precision)
	{
		/* AMP path: use the AMP-specific at-domain callback. */
		build = isl_ast_build_set_at_each_domain(build, &at_each_domain_with_amp, &data);
		/* Following the GPU code path: also register the mark callbacks. */
		build = isl_ast_build_set_before_each_mark(build, &before_mark_with_amp, &data);
		build = isl_ast_build_set_after_each_mark(build, &after_mark_with_amp, &data);
	}
	else
	{
		build = isl_ast_build_set_at_each_domain(build, &at_each_domain, scop);
	}

	if (options->openmp)
	{
		if (init_build_info(&build_info, scop, schedule) < 0)
			build = isl_ast_build_free(build);

		build = isl_ast_build_set_before_each_for(build, &ast_build_before_for, &build_info);
		build = isl_ast_build_set_after_each_for(build, &ast_build_after_for, &build_info);
	}

	tree = isl_ast_build_node_from_schedule(build, schedule);
	isl_ast_build_free(build);

	if (options->openmp)
		clear_build_info(&build_info);

	print_options = isl_ast_print_options_alloc(ctx);
	print_options = isl_ast_print_options_set_print_user(print_options, &print_user_with_amp, prog);
	print_options = isl_ast_print_options_set_print_for(print_options, &print_for, NULL);

	// p = cpu_print_macros_with_amp(p, tree);
	p = cpu_print_macros_with_amp(p, tree);
	if (options->automatic_mixed_precision)
	{
		// Print the iterator declarations (int c0; int c1; ...).
        p = print_c0_vars(scop, depth, "c", p);
        // Print the lower-precision array declarations.
        // NOTE(review): data.kernel is only set when an "amp_kernel" mark
        // is encountered (see before_mark_with_amp); it looks like it can
        // still be NULL here -- confirm print_kernel_vars tolerates that.
        p = print_kernel_vars(p, data.kernel);
	}

	p = isl_ast_node_print(tree, p, print_options);

	isl_ast_node_free(tree);

	return p;
error:
	isl_schedule_free(schedule);
	isl_printer_free(p);
	return NULL;
}

/* Tile the band node "node" with tile sizes "sizes" and
 * mark all members of the resulting tile node as "atomic".
 */
static __isl_give isl_schedule_node *tile(__isl_take isl_schedule_node *node,
	__isl_take isl_multi_val *sizes)
{
	return ppcg_set_schedule_node_type(ppcg_tile(node, sizes),
					isl_ast_loop_atomic);
}

/* Tile "node", if it is a band node with at least 2 members.
 * The tile sizes are set from the "tile_size" option.
 */
static __isl_give isl_schedule_node *tile_band(
	__isl_take isl_schedule_node *node, void *user)
{
	struct ppcg_scop *scop = user;
	isl_space *band_space;
	isl_multi_val *tile_sizes;

	/* Leave anything that is not a multi-member band untouched. */
	if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
		return node;
	if (isl_schedule_node_band_n_member(node) <= 1)
		return node;

	band_space = isl_schedule_node_band_get_space(node);
	tile_sizes = ppcg_multi_val_from_int(band_space,
					scop->options->tile_size);

	return tile(node, tile_sizes);
}

/* Construct schedule constraints from the dependences in ps
 * for the purpose of computing a schedule for a CPU.
 *
 * The proximity constraints are set to the flow dependences.
 *
 * If live-range reordering is allowed then the conditional validity
 * constraints are set to the order dependences with the flow dependences
 * as condition.  That is, a live-range (flow dependence) will be either
 * local to an iteration of a band or all adjacent order dependences
 * will be respected by the band.
 * The validity constraints are set to the union of the flow dependences
 * and the forced dependences, while the coincidence constraints
 * are set to the union of the flow dependences, the forced dependences and
 * the order dependences.
 *
 * If live-range reordering is not allowed, then both the validity
 * and the coincidence constraints are set to the union of the flow
 * dependences and the false dependences.
 *
 * Note that the coincidence constraints are only set when the "openmp"
 * options is set.  Even though the way openmp pragmas are introduced
 * does not rely on the coincident property of the schedule band members,
 * the coincidence constraints do affect the way the schedule is constructed,
 * such that more schedule dimensions should be detected as parallel
 * by ast_schedule_dim_is_parallel.
 * Since the order dependences are also taken into account by
 * ast_schedule_dim_is_parallel, they are also added to
 * the coincidence constraints.  If the openmp handling learns
 * how to privatize some memory, then the corresponding order
 * dependences can be removed from the coincidence constraints.
 */
static __isl_give isl_schedule_constraints *construct_cpu_schedule_constraints(
	struct ppcg_scop *ps)
{
	isl_schedule_constraints *sc;
	/* "coincidence" is only computed and consumed when the openmp
	 * option is set; initialize it so the code is robust (and free of
	 * maybe-uninitialized warnings) should that invariant change. */
	isl_union_map *validity, *coincidence = NULL;

	sc = isl_schedule_constraints_on_domain(isl_union_set_copy(ps->domain));
	if (ps->options->live_range_reordering) {
		sc = isl_schedule_constraints_set_conditional_validity(sc,
				isl_union_map_copy(ps->tagged_dep_flow),
				isl_union_map_copy(ps->tagged_dep_order));
		validity = isl_union_map_copy(ps->dep_flow);
		validity = isl_union_map_union(validity,
				isl_union_map_copy(ps->dep_forced));
		if (ps->options->openmp) {
			coincidence = isl_union_map_copy(validity);
			coincidence = isl_union_map_union(coincidence,
					isl_union_map_copy(ps->dep_order));
		}
	} else {
		validity = isl_union_map_copy(ps->dep_flow);
		validity = isl_union_map_union(validity,
				isl_union_map_copy(ps->dep_false));
		if (ps->options->openmp)
			coincidence = isl_union_map_copy(validity);
	}
	if (ps->options->openmp)
		sc = isl_schedule_constraints_set_coincidence(sc, coincidence);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc,
					isl_union_map_copy(ps->dep_flow));

	return sc;
}

/* Compute a schedule for the scop "ps".
 *
 * First derive the appropriate schedule constraints from the dependences
 * in "ps" and then compute a schedule from those schedule constraints,
 * possibly grouping statement instances based on the input schedule.
 */
static __isl_give isl_schedule *compute_cpu_schedule(struct ppcg_scop *ps)
{
	isl_schedule_constraints *sc;

	if (!ps)
		return NULL;

	/* Derive the constraints from the dependences, then schedule. */
	sc = construct_cpu_schedule_constraints(ps);
	return ppcg_compute_schedule(sc, ps->schedule, ps->options);
}

/* Compute a new schedule to the scop "ps" if the reschedule option is set.
 * Otherwise, return a copy of the original schedule.
 */
static __isl_give isl_schedule *optionally_compute_schedule(void *user)
{
	struct ppcg_scop *ps = user;

	if (!ps)
		return NULL;

	/* Recompute only when the user asked for rescheduling. */
	return ps->options->reschedule ? compute_cpu_schedule(ps)
				       : isl_schedule_copy(ps->schedule);
}

/* Compute a schedule based on the dependences in "ps" and
 * tile it if requested by the user.
 */
static __isl_give isl_schedule *get_schedule(struct ppcg_scop *ps,
	struct ppcg_options *options)
{
	isl_schedule *schedule;

	if (!ps)
		return NULL;

	schedule = ppcg_get_schedule(isl_union_set_get_ctx(ps->domain),
				options, &optionally_compute_schedule, ps);
	/* Optionally tile every multi-member band bottom-up. */
	if (ps->options->tile)
		schedule = isl_schedule_map_schedule_node_bottom_up(schedule,
							&tile_band, ps);

	return schedule;
}

/* Generate CPU code for the scop "ps" using "schedule" and
 * print the corresponding C code to "p", including variable declarations.
 */
static __isl_give isl_printer *print_cpu_with_schedule(
	__isl_take isl_printer *p, struct ppcg_scop *ps,
	__isl_take isl_schedule *schedule, struct ppcg_options *options)
{
	int hidden;
	isl_set *context;

	p = isl_printer_start_line(p);
	p = isl_printer_print_str(p, "/* ppcg generated CPU code */");
	p = isl_printer_end_line(p);

	p = isl_printer_start_line(p);
	p = isl_printer_end_line(p);

	p = ppcg_set_macro_names(p);
	p = ppcg_print_exposed_declarations(p, ps);
	/* Hidden declarations are wrapped in an extra block scope. */
	hidden = ppcg_scop_any_hidden_declarations(ps);
	if (hidden) {
		p = ppcg_start_block(p);
		p = ppcg_print_hidden_declarations(p, ps);
	}

	/* Constrain the schedule to the scop's parameter context. */
	context = isl_set_copy(ps->context);
	context = isl_set_from_params(context);
	schedule = isl_schedule_insert_context(schedule, context);
	if (options->debug->dump_final_schedule)
		isl_schedule_dump(schedule);
	p = print_scop(ps, schedule, p, options);
	if (hidden)
		p = ppcg_end_block(p);

	return p;
}

/* Generate CPU code for the scop "ps" using "schedule" and
 * print the corresponding C code to "p", including variable declarations.
 */
/** 自动混合精度 打印代码**/
/** 待修改 ***/
/* Generate CPU code for the scop of "prog" using "schedule" and
 * print the corresponding C code to "p", including variable
 * declarations.  "prog" is consumed: it is freed before returning.
 */
static __isl_give isl_printer *print_cpu_with_amp(__isl_take isl_printer *p, 
__isl_take isl_schedule *schedule, struct ppcg_options *options, amp_prog *prog)
{
	struct ppcg_scop *ps = prog->scop;
	isl_set *context;
	int hidden;

	p = isl_printer_start_line(p);
	p = isl_printer_print_str(p, "/* ppcg generated CPU code with AMP */");
	p = isl_printer_end_line(p);
	p = isl_printer_start_line(p);
	p = isl_printer_end_line(p);

	p = ppcg_set_macro_names(p);
	p = ppcg_print_exposed_declarations(p, ps);

	/* Hidden declarations get their own block scope. */
	hidden = ppcg_scop_any_hidden_declarations(ps);
	if (hidden) {
		p = ppcg_start_block(p);
		p = ppcg_print_hidden_declarations(p, ps);
	}

	/* Constrain the schedule to the scop's parameter context. */
	context = isl_set_from_params(isl_set_copy(ps->context));
	schedule = isl_schedule_insert_context(schedule, context);
	if (options->debug->dump_final_schedule)
		isl_schedule_dump(schedule);

	p = print_scop_with_amp(schedule, p, options, prog);

	if (hidden)
		p = ppcg_end_block(p);

	amp_prog_free(prog);
	return p;
}

/* Generate CPU code for the scop "ps" and print the corresponding C code
 * to "p", including variable declarations.
 */
__isl_give isl_printer *print_cpu(__isl_take isl_printer *p,
	struct ppcg_scop *ps, struct ppcg_options *options)
{
	isl_schedule *schedule;

	/* print_cpu_with_schedule takes the schedule, so pass it a copy. */
	schedule = isl_schedule_copy(ps->schedule);
	return print_cpu_with_schedule(p, ps, schedule, options);
}

/* Given a tagged access relation to a single array "tagged", extract it
 * as a map, taking into account that the input may be empty.
 * If the access relation is empty, then it does not contain
 * any space information, so we try to recover it from the index
 * expression.
 * The space of the index expression is of the form I -> A,
 * with I the statement instances and A the array, or [I -> F] -> A,
 * with F the filters corresponding to arguments.
 * We first drop F, if present, obtaining I -> A.
 * Then we construct I -> R, with R the reference tag,
 * combine the two into I -> [R -> A] and uncurry to obtain
 * the final result [I -> R] -> A.
 * Note that the index expression may have a lower dimension
 * than that of the array, but this dimension is not used
 * if the access relation is empty.
 */
static __isl_give isl_map *extract_single_tagged_access(
    __isl_take isl_union_map *tagged, __isl_keep pet_expr *expr)
{
    int empty;
    isl_id *id;
    isl_space *space, *space2;
    isl_multi_pw_aff *index;

    empty = isl_union_map_is_empty(tagged);
    if (empty < 0)
        goto error;
    if (!empty)
        return isl_map_from_union_map(tagged);
    /* The access relation is empty, so reconstruct its space from the
     * index expression (see the introductory comment). */
    isl_union_map_free(tagged);

    index = pet_expr_access_get_index(expr);
    space = isl_multi_pw_aff_get_space(index);
    isl_multi_pw_aff_free(index);
    /* Drop the filter factor F from [I -> F] -> A, if present. */
    if (isl_space_domain_is_wrapping(space))
        space = isl_space_domain_factor_domain(space);
    /* Build I -> R with R the reference tag, combine into I -> [R -> A]
     * and uncurry to obtain [I -> R] -> A. */
    space2 = isl_space_from_domain(isl_space_domain(isl_space_copy(space)));
    id = pet_expr_access_get_ref_id(expr);
    space2 = isl_space_set_tuple_id(space2, isl_dim_out, id);
    space = isl_space_range_product(space2, space);
    space = isl_space_uncurry(space);

    return isl_map_empty(space);
error:
    isl_union_map_free(tagged);
    return NULL;
}

/* Does the index expression "index" of "expr" represent an access
 * to a single element?
 * That is, is "index" completely specified?
 *
 * If "expr" accesses elements from different spaces (i.e., fields
 * of a structure), then it does not access a single element.
 * Otherwise, if the single space of the access matches the space
 * of "index", then the index expression is completely specified
 * (no pointer to a lower-dimensional slice of the accessed array)
 * and a single element is being accessed.
 */
static isl_bool complete_index(__isl_keep pet_expr *expr, __isl_keep isl_multi_pw_aff *index)
{
    isl_union_map *read, *write, *all;
    isl_map *map;
    isl_space *space1, *space2;
    isl_bool complete;

    /* Gather all (may) accesses of the expression. */
    read = pet_expr_access_get_may_read(expr);
    write = pet_expr_access_get_may_write(expr);
    all = isl_union_map_union(read, write);
    if (!all)
        return isl_bool_error;
    /* More than one map means accesses to different spaces
     * (e.g., structure fields): not a single element. */
    if (isl_union_map_n_map(all) != 1)
    {
        isl_union_map_free(all);
        return isl_bool_false;
    }
    map = isl_map_from_union_map(all);
    space1 = isl_map_get_space(map);
    isl_map_free(map);
    space2 = isl_multi_pw_aff_get_space(index);
    /* The index is complete when its range space matches that of
     * the (single) accessed array. */
    complete = isl_space_tuple_is_equal(space1, isl_dim_out,
                                        space2, isl_dim_out);
    isl_space_free(space1);
    isl_space_free(space2);

    return complete;
}

/* Does "expr" access a single, fixed element?
 * That is, is every output dimension of its index expression a
 * piecewise-constant with a single piece, and is the index complete
 * (see complete_index)?
 *
 * Two fixes over the original: the NULL check on "index" used the
 * invalid pointer comparison "index < 0", and the loop always
 * extracted dimension 0, so only the first index dimension was ever
 * examined.
 */
static isl_bool accesses_fixed_element(__isl_keep pet_expr *expr)
{
    int i, n;
    isl_multi_pw_aff *index;
    isl_bool fixed = isl_bool_true;

    index = pet_expr_access_get_index(expr);
    if (!index)
        return isl_bool_error;
    n = isl_multi_pw_aff_dim(index, isl_dim_out);
    for (i = 0; i < n; ++i)
    {
        isl_pw_aff *pa;

        pa = isl_multi_pw_aff_get_pw_aff(index, i);
        /* Fixed means a single piece that is a constant. */
        fixed = isl_pw_aff_n_piece(pa) == 1;
        if (fixed)
            fixed = isl_pw_aff_is_cst(pa);
        isl_pw_aff_free(pa);
        if (fixed < 0 || !fixed)
            break;
    }
    if (fixed >= 0 && fixed)
        fixed = complete_index(expr, index);
    isl_multi_pw_aff_free(index);

    return fixed;
}

/* Extract a amp_stmt_access from "expr", append it to the list
 * that ends in *data->next_access and update the end of the list.
 * If the access expression performs a write, then it is considered
 * exact only if it appears in a single expression statement and
 * if its may access relation is equal to its must access relation.
 *
 * The combined set of may accesses may be a union if member accesses
 * are involved, but the entire set is derived from a single reference and
 * therefore from a single index expression.  These accesses therefore
 * all map to the same outer array.
 */
static int extract_access(__isl_keep pet_expr *expr, void *user)
{
    struct ppcg_extract_access_data *data = user;
    isl_union_map *tagged;
    struct amp_stmt_access *access;
    isl_ctx *ctx = pet_expr_get_ctx(expr);
    isl_multi_pw_aff *index;

    access = isl_alloc_type(ctx, struct amp_stmt_access);
    if (!access)
        return -1;
    access->next = NULL;
    access->read = pet_expr_access_is_read(expr);
    access->write = pet_expr_access_is_write(expr);
    /* Combined may-read/may-write relation, mapped to outer arrays. */
    tagged = pet_expr_access_get_tagged_may_read(expr);
    tagged = isl_union_map_union(tagged, pet_expr_access_get_tagged_may_write(expr));
    tagged = isl_union_map_apply_range(tagged, isl_union_map_copy(data->any_to_outer));
    if (!access->write)
    {
        /* A pure read is trivially "exact". */
        access->exact_write = 1;
    }
    else if (!data->single_expression)
    {
        /* Writes outside a single expression statement are not exact. */
        access->exact_write = 0;
    }
    else
    {
        /* A write is exact when may-write equals must-write. */
        isl_union_map *must, *may;
        may = isl_union_map_copy(tagged);
        may = isl_union_map_domain_factor_domain(may);
        must = pet_expr_access_get_must_write(expr);
        access->exact_write = isl_union_map_is_equal(must, may);
        isl_union_map_free(must);
        isl_union_map_free(may);
    }
    index = pet_expr_access_get_index(expr);
    access->n_index = isl_multi_pw_aff_dim(index, isl_dim_out);
    isl_multi_pw_aff_free(index);
    access->ref_id = pet_expr_access_get_ref_id(expr);
    /* extract_single_tagged_access consumes "tagged". */
    access->tagged_access = extract_single_tagged_access(tagged, expr);
    access->access = isl_map_copy(access->tagged_access);
    access->access = isl_map_domain_factor_domain(access->access);
    access->fixed_element = accesses_fixed_element(expr);

    /* Append to the list and advance the tail pointer. */
    *data->next_access = access;
    data->next_access = &(*data->next_access)->next;

    if (!access->access || access->fixed_element < 0)
        return -1;

    return 0;
}

/* Build a linked list of amp_stmt_access objects for "stmt",
 * one per access expression in its body, storing the head
 * in stmt->accesses.
 * "any_to_outer" maps all intermediate arrays to their outer arrays.
 */
static int pet_stmt_extract_accesses(struct amp_stmt *stmt, __isl_keep isl_union_map *any_to_outer)
{
    struct ppcg_extract_access_data data;
    pet_tree *body = stmt->stmt->body;

    stmt->accesses = NULL;
    data.any_to_outer = any_to_outer;
    data.single_expression = pet_tree_get_type(body) == pet_tree_expr;
    data.next_access = &stmt->accesses;

    return pet_tree_foreach_access_expr(body, &extract_access, &data);
}

/* Free the first "n" entries of the "stmts" array, including
 * their access lists, then free the array itself.
 * Always returns NULL so callers can use it as an error path.
 */
static void *free_stmts(struct amp_stmt *stmts, int n)
{
    int i;

    if (!stmts)
        return NULL;

    for (i = 0; i < n; ++i)
    {
        struct amp_stmt_access *access = stmts[i].accesses;

        while (access)
        {
            struct amp_stmt_access *next = access->next;

            isl_id_free(access->ref_id);
            isl_map_free(access->access);
            isl_map_free(access->tagged_access);
            free(access);
            access = next;
        }

        isl_id_free(stmts[i].id);
    }
    free(stmts);

    return NULL;
}

/* Check whether statement "stmt" has been killed from "scop",
 * i.e., whether the instance set of "scop" no longer contains
 * any instances of "stmt".
 */
static isl_bool is_stmt_killed(struct ppcg_scop *scop, struct pet_stmt *stmt)
{
    isl_space *space;
    isl_set *remaining;
    isl_bool killed;

    if (!scop || !stmt)
        return isl_bool_error;

    space = isl_set_get_space(stmt->domain);
    remaining = isl_union_set_extract_set(scop->domain, space);
    killed = isl_set_plain_is_empty(remaining);
    isl_set_free(remaining);

    return killed;
}

/* Build an array of amp_stmt, one entry per statement in "scop".
 * Array accesses are not collected for statements that have been
 * killed.  On any error, everything built so far is freed and
 * NULL is returned.
 */
static struct amp_stmt *extract_stmts(isl_ctx *ctx, struct ppcg_scop *scop,
                                      __isl_keep isl_union_map *any_to_outer)
{
    int i;
    struct amp_stmt *stmts;

    stmts = isl_calloc_array(ctx, struct amp_stmt, scop->pet->n_stmt);
    if (!stmts)
        return NULL;

    for (i = 0; i < scop->pet->n_stmt; ++i)
    {
        struct pet_stmt *pet_stmt = scop->pet->stmts[i];
        struct amp_stmt *stmt = &stmts[i];
        isl_bool killed;

        stmt->id = isl_set_get_tuple_id(pet_stmt->domain);
        stmt->stmt = pet_stmt;

        killed = is_stmt_killed(scop, pet_stmt);
        if (killed < 0)
            return free_stmts(stmts, i + 1);
        if (killed)
            continue;
        if (pet_stmt_extract_accesses(stmt, any_to_outer) < 0)
            return free_stmts(stmts, i + 1);
    }

    return stmts;
}

/* Compute the extent of "array", taking the set of actually
 * accessed elements into account.
 *
 * The outermost dimension of the result comes from "accessed",
 * the remaining dimensions from array->extent.  The declared
 * extent cannot be used for the outer dimension because it may be
 * unbounded, or larger than the accessed part of the array.
 */
static __isl_give isl_set *amp_compute_extent(struct pet_array *array, __isl_keep isl_set *accessed)
{
    int n_index;
    isl_id *tuple_id;
    isl_set *outer_accessed;
    isl_set *result;

    result = isl_set_copy(array->extent);

    n_index = isl_set_dim(accessed, isl_dim_set);
    if (n_index == 0)
        return result;

    /* Replace the outer dimension of the declared extent by the
     * outer dimension of the accessed elements. */
    result = isl_set_project_out(result, isl_dim_set, 0, 1);
    outer_accessed = isl_set_copy(accessed);
    outer_accessed = isl_set_project_out(outer_accessed, isl_dim_set, 1, n_index - 1);
    result = isl_set_flat_product(outer_accessed, result);
    tuple_id = isl_set_get_tuple_id(accessed);
    result = isl_set_set_tuple_id(result, tuple_id);

    return result;
}

/* Collect pointers to all references to "array" in "prog" and
 * store them in array->refs (with the count in array->n_ref).
 *
 * A first pass counts the matching references so that the array
 * can be allocated at the right size; a second pass records them.
 */
static isl_stat collect_references(amp_prog *prog, struct amp_array_info *array)
{
    int i;
    int count;
    struct amp_stmt_access *access;

    count = 0;
    for (i = 0; i < prog->n_stmts; ++i)
        for (access = prog->stmts[i].accesses; access; access = access->next)
        {
            const char *name = get_outer_array_name(access->access);

            if (name && strcmp(array->name, name) == 0)
                ++count;
        }

    array->refs = isl_alloc_array(prog->ctx, struct amp_stmt_access *, count);
    if (!array->refs)
        return isl_stat_error;
    array->n_ref = count;

    count = 0;
    for (i = 0; i < prog->n_stmts; ++i)
        for (access = prog->stmts[i].accesses; access; access = access->next)
        {
            const char *name = get_outer_array_name(access->access);

            if (name && strcmp(array->name, name) == 0)
                array->refs[count++] = access;
        }

    return isl_stat_ok;
}

/* Does every reference to "array" access a single, fixed element?
 */
static isl_bool only_fixed_element_accessed(struct amp_array_info *array)
{
    int i;
    isl_bool all_fixed = isl_bool_true;

    for (i = 0; i < array->n_ref && all_fixed; ++i)
        if (!array->refs[i]->fixed_element)
            all_fixed = isl_bool_false;

    return all_fixed;
}

/* Is "array" a read-only scalar?
 *
 * That is, is it a zero-dimensional array, not containing any
 * structures, that is never possibly written by "prog"?
 */
static int is_read_only_scalar(struct amp_array_info *array, amp_prog *prog)
{
    isl_union_map *may_write;
    isl_union_set *elements;
    int never_written;

    if (array->has_compound_element || array->n_index != 0)
        return 0;

    elements = isl_union_set_from_set(isl_set_universe(isl_space_copy(array->space)));
    may_write = isl_union_map_copy(prog->may_write);
    may_write = isl_union_map_intersect_range(may_write, elements);
    never_written = isl_union_map_is_empty(may_write);
    isl_union_map_free(may_write);

    return never_written;
}

/* Compute bounds on the host array "pa" based on the corresponding
 * accessed elements in "arrays"
 * and collect all references to the array.
 * Store the results in "info".
 *
 * If the array is zero-dimensional and does not contain structures,
 * i.e., if the array is a scalar, we check whether it is read-only.
 * We also check whether the array is accessed at all.
 */
static isl_stat extract_array_info(amp_prog *prog, struct amp_array_info *info, struct pet_array *pa, __isl_keep isl_union_set *arrays)
{
    int empty;
    const char *name;
    int n_index;
    isl_multi_pw_aff *bounds;
    isl_set *accessed, *extent;

    n_index = isl_set_dim(pa->extent, isl_dim_set);
    /* NOTE(review): isl_set_get_tuple_name() can return NULL for an
     * unnamed tuple, in which case strdup(name) below would be undefined
     * behavior; presumably pet always names array extents -- confirm. */
    name = isl_set_get_tuple_name(pa->extent);

    info->space = isl_set_get_space(pa->extent);
    info->name = strdup(name);
    info->n_index = n_index;
    info->linearize = 0;

    info->type = strdup(pa->element_type);
    info->size = pa->element_size;
    /* "local" arrays are declared inside the scop and not exposed. */
    info->local = pa->declared && !pa->exposed;
    info->has_compound_element = pa->element_is_record;
    info->read_only_scalar = is_read_only_scalar(info, prog);

    info->declared_extent = isl_set_copy(pa->extent);
    /* Restrict the extent to the elements that are actually accessed. */
    accessed = isl_union_set_extract_set(arrays, isl_space_copy(info->space));
    empty = isl_set_is_empty(accessed);
    extent = amp_compute_extent(pa, accessed);
    isl_set_free(accessed);
    info->extent = extent;
    if (empty < 0)
        return isl_stat_error;
    info->accessed = !empty;
    /* Bounds on the array sizes, simplified with respect to the context. */
    bounds = ppcg_size_from_extent(isl_set_copy(extent));
    bounds = isl_multi_pw_aff_gist(bounds, isl_set_copy(prog->context));
    if (!bounds)
        return isl_stat_error;
    /* NOTE(review): this assignment is a no-op since linearize was
     * already initialized to 0 above; the analogous GPU code sets it
     * to 1 for non-constant bounds -- confirm that 0 is intended here. */
    if (!isl_multi_pw_aff_is_cst(bounds))
        info->linearize = 0;
    info->bound = bounds;

    if (collect_references(prog, info) < 0)
        return isl_stat_error;
    info->only_fixed_element = only_fixed_element_accessed(info);

    return isl_stat_ok;
}

/* Remove independence from the order constraints "order" on array "array".
 * Since the pairs of iterations in the filter relation of an independence
 * are guaranteed to be completely independent by the user, there is
 * no need to ensure that live ranges are ordered along those pairs.
 * We make an exception for local variables, though, as the independence
 * guarantee does not apply to those.
 *
 * The order constraints are used in two places.
 * Those on scalars are used in check_scalar_live_ranges to check if
 * we need to force the scalar to be private.  Any non-local scalar
 * should not be forced scalar if it only appears in independent loops.
 * Those on non-scalars are added to the coincidence constraints
 * in compute_schedule because we do not support any array expansion.
 * Accesses to non-local arrays should not prevent a loop from being
 * considered coincident so we should indeed remove those constraints
 * from the order constraints.
 */
static __isl_give isl_union_map *remove_independences(amp_prog *prog, struct amp_array_info *array, __isl_take isl_union_map *order)
{
    int i, n;

    n = prog->scop->pet->n_independence;
    for (i = 0; i < n; ++i)
    {
        struct pet_independence *indep = prog->scop->pet->independences[i];

        /* Local variables keep their order constraints. */
        if (isl_union_set_contains(indep->local, array->space))
            continue;

        order = isl_union_map_subtract(order, isl_union_map_copy(indep->filter));
    }

    return order;
}

/* Can "array" be mapped to private memory?
 * This is the case exactly when each access to the array touches
 * a single, fixed element.
 */
isl_bool amp_array_can_be_private(struct amp_array_info *array)
{
    return array ? array->only_fixed_element : isl_bool_error;
}

/* For each array in "prog", store the (untagged) order dependences
 * derived from the array in array->dep_order.
 * In particular, consider all references that access the given array
 * and take the order dependences that have one of these references
 * as source.  (Since an order dependence relates two references to
 * the same array, the target of these order dependences will also
 * be one of these references.)
 * Additionally, store the union of these array->dep_order relations
 * for all arrays that cannot be mapped to private memory in prog->array_order.
 */
/* multiple definition with GPU */
void amp_collect_order_dependences(amp_prog *prog)
{
    int i;
    isl_space *space;
    isl_union_map *accesses;

    space = isl_union_map_get_space(prog->read);
    prog->array_order = isl_union_map_empty(space);

    /* Universe relation from tagged references to the (outer) arrays
     * they access, covering both reads and may-writes. */
    accesses = isl_union_map_copy(prog->scop->tagged_reads);
    accesses = isl_union_map_union(accesses, isl_union_map_copy(prog->scop->tagged_may_writes));
    accesses = isl_union_map_universe(accesses);
    accesses = isl_union_map_apply_range(accesses, isl_union_map_copy(prog->to_outer));

    for (i = 0; i < prog->n_array; ++i)
    {
        struct amp_array_info *array = &prog->array[i];
        isl_set *set;
        isl_union_set *uset;
        isl_union_map *order;

        /* The tagged references that access this particular array. */
        set = isl_set_universe(isl_space_copy(array->space));
        uset = isl_union_set_from_set(set);
        uset = isl_union_map_domain(isl_union_map_intersect_range(isl_union_map_copy(accesses), uset));
        /* Keep only the order dependences with such a reference as
         * source, then drop the reference tags via zip/unwrap. */
        order = isl_union_map_copy(prog->scop->tagged_dep_order);
        order = isl_union_map_intersect_domain(order, uset);
        order = isl_union_map_zip(order);
        order = isl_union_set_unwrap(isl_union_map_domain(order));
        order = remove_independences(prog, array, order);
        array->dep_order = order;

        /* Privatizable arrays do not contribute to the global
         * array order constraints. */
        if (amp_array_can_be_private(array))
            continue;

        prog->array_order = isl_union_map_union(prog->array_order, isl_union_map_copy(array->dep_order));
    }

    isl_union_map_free(accesses);
}

/* Construct a amp_array_info for each array referenced by amp->scop and
 * collect them in amp->array.
 *
 * The sizes are based on the extents and the set of possibly accessed
 * elements by "prog".
 * If there are any member accesses involved, then they are first mapped
 * to the outer arrays of structs.
 * Only extract amp_array_info entries for these outer arrays.
 *
 * If we are allowing live range reordering, then also set
 * the dep_order field.  Otherwise leave it NULL.
 */
static isl_stat collect_array_info(amp_prog *prog)
{
    int i;
    isl_stat r = isl_stat_ok;
    isl_union_set *arrays;

    prog->n_array = 0;
    prog->array = isl_calloc_array(prog->ctx, struct amp_array_info, prog->scop->pet->n_array);
    if (!prog->array)
        return isl_stat_error;

    /* The set of all accessed elements, mapped to their outer arrays. */
    arrays = isl_union_map_range(isl_union_map_copy(prog->read));
    arrays = isl_union_set_union(arrays, isl_union_map_range(isl_union_map_copy(prog->may_write)));

    arrays = isl_union_set_apply(arrays, isl_union_map_copy(prog->to_outer));

    arrays = isl_union_set_coalesce(arrays);

    for (i = 0; i < prog->scop->pet->n_array; ++i)
    {
        isl_bool field;

        /* Skip member (field) arrays: only outer arrays get an entry. */
        field = isl_set_is_wrapping(prog->scop->pet->arrays[i]->extent);
        if (field < 0)
            break;
        if (field)
            continue;
        if (extract_array_info(prog, &prog->array[prog->n_array++], prog->scop->pet->arrays[i], arrays) < 0)
            r = isl_stat_error;
    }
    /* An early break above signals an isl error. */
    if (i < prog->scop->pet->n_array)
        r = isl_stat_error;

    isl_union_set_free(arrays);

    if (prog->scop->options->live_range_reordering)
        amp_collect_order_dependences(prog);

    return r;
}

/* Compute the set of inner array elements whose values may need to be
 * preserved by "prog": the elements of all arrays that are not local
 * to "prog", minus those that are definitely killed or definitely
 * written.
 */
static __isl_give isl_union_set *compute_may_persist(amp_prog *prog)
{
    int i;
    isl_union_set *result, *definite;

    result = isl_union_set_empty(isl_set_get_space(prog->context));
    for (i = 0; i < prog->n_array; ++i)
    {
        struct amp_array_info *array = &prog->array[i];

        if (array->local)
            continue;

        result = isl_union_set_add_set(result, isl_set_copy(array->extent));
    }

    result = isl_union_set_intersect_params(result, isl_set_copy(prog->context));
    result = isl_union_set_apply(result, isl_union_map_copy(prog->to_inner));

    /* Elements that are definitely killed or definitely written. */
    definite = isl_union_map_range(isl_union_map_copy(prog->tagged_must_kill));
    definite = isl_union_set_union(definite,
                                   isl_union_map_range(isl_union_map_copy(prog->must_write)));

    return isl_union_set_subtract(result, definite);
}

/* Free "prog" together with everything it owns and return NULL.
 */
void *amp_prog_free(amp_prog *prog)
{
    if (!prog)
        return NULL;

    free_array_info(prog);
    free_stmts(prog->stmts, prog->n_stmts);

    /* Access relations. */
    isl_union_map_free(prog->read);
    isl_union_map_free(prog->may_write);
    isl_union_map_free(prog->must_write);
    isl_union_map_free(prog->tagged_must_kill);

    /* Array mappings and derived information. */
    isl_union_map_free(prog->any_to_outer);
    isl_union_map_free(prog->to_outer);
    isl_union_map_free(prog->to_inner);
    isl_union_map_free(prog->array_order);
    isl_union_set_free(prog->may_persist);
    isl_set_free(prog->context);

    free(prog);
    return NULL;
}

/* Allocate and initialize an amp_prog for "scop".
 * Returns NULL on failure (freeing any partially built program).
 *
 * NOTE(review): "ctx" is annotated __isl_take but is not released on
 * the early NULL returns below -- presumably the caller retains
 * ownership in practice; confirm the annotation.
 */
amp_prog *amp_prog_alloc(__isl_take isl_ctx *ctx, struct ppcg_scop *scop)
{
    amp_prog *prog;
    isl_space *space;
    isl_map *id;

    if (!scop)
        return NULL;

    prog = isl_calloc_type(ctx, amp_prog);
    if (!prog)
        return NULL;

    prog->ctx = ctx;
    prog->scop = scop;
    prog->kernel_id = 0;
    prog->context = isl_set_copy(scop->context);
    prog->n_stmts = scop->pet->n_stmt;
    /* Map outer arrays to any of their (possibly nested) member arrays,
     * extended with an identity map on a one-dimensional anonymous space. */
    prog->any_to_outer = pet_scop_compute_outer_to_any(scop->pet);
    prog->any_to_outer = isl_union_map_reverse(prog->any_to_outer);
    space = isl_union_map_get_space(prog->any_to_outer);
    space = isl_space_set_from_params(space);
    space = isl_space_add_dims(space, isl_dim_set, 1);
    space = isl_space_map_from_set(space);
    id = isl_map_identity(space);
    prog->any_to_outer = isl_union_map_add_map(prog->any_to_outer, id);
    prog->stmts = extract_stmts(ctx, scop, prog->any_to_outer);
    prog->read = isl_union_map_copy(scop->reads);
    prog->may_write = isl_union_map_copy(scop->may_writes);
    prog->must_write = isl_union_map_copy(scop->must_writes);
    prog->tagged_must_kill = isl_union_map_copy(scop->tagged_must_kills);
    prog->to_inner = pet_scop_compute_outer_to_inner(scop->pet);
    prog->to_outer = isl_union_map_copy(prog->to_inner);
    prog->to_outer = isl_union_map_reverse(prog->to_outer);

    if (!prog->stmts)
        return amp_prog_free(prog);

    if (collect_array_info(prog) < 0)
        return amp_prog_free(prog);

    prog->may_persist = compute_may_persist(prog);

    return prog;
}

/* Insert a mark node with identifier "shared" in front of "node".
 */
static __isl_give isl_schedule_node *insert_shared(
    __isl_take isl_schedule_node *node) {
    isl_id *mark;

    mark = isl_id_alloc(isl_schedule_node_get_ctx(node), "shared", NULL);
    return isl_schedule_node_insert_mark(node, mark);
}

/* Insert a mark node with identifier "amp_lower" in front of "node".
 */
static __isl_give isl_schedule_node *insert_amp_lower(
    __isl_take isl_schedule_node *node) {
    isl_id *mark;

    mark = isl_id_alloc(isl_schedule_node_get_ctx(node), "amp_lower", NULL);
    return isl_schedule_node_insert_mark(node, mark);
}


/* Insert a "shared" mark in front of the "thread" mark
 * provided the linear branch between "node" and the "thread" mark
 * does not contain such a "shared" mark already.
 *
 * As a side effect, this function checks that the subtree at "node"
 * actually contains a "thread" mark and that there is no branching
 * in between "node" and this "thread" mark.
 */
__isl_give isl_schedule_node *amp_tree_insert_shared_before_thread(
    __isl_take isl_schedule_node *node) {
    int depth0, depth;
    int any_shared = 0;

    if (!node)
        return NULL;

    /* Remember the starting depth so we can return to it at the end. */
    depth0 = isl_schedule_node_get_tree_depth(node);

    /* Walk down the (necessarily linear) branch until the "thread"
     * mark is found, recording whether a "shared" mark was seen. */
    for (;;) {
        int is_thread;
        int n;

        if (!any_shared) {
            any_shared = node_is_shared(node);
            if (any_shared < 0)
                return isl_schedule_node_free(node);
        }
        is_thread = node_is_thread(node);
        if (is_thread < 0)
            return isl_schedule_node_free(node);
        if (is_thread)
            break;
        /* The branch must be linear: exactly one child until "thread". */
        n = isl_schedule_node_n_children(node);
        if (n == 0)
            isl_die(isl_schedule_node_get_ctx(node),
                    isl_error_invalid,
                    "no thread marker found",
                    return isl_schedule_node_free(node));
        if (n > 1)
            isl_die(isl_schedule_node_get_ctx(node),
                    isl_error_invalid,
                    "expecting single thread marker",
                    return isl_schedule_node_free(node));

        node = isl_schedule_node_child(node, 0);
    }

    /* Only insert a "shared" mark if none was encountered on the way. */
    if (!any_shared)
        node = insert_shared(node);
    /* Move back up to the original position. */
    depth = isl_schedule_node_get_tree_depth(node);
    node = isl_schedule_node_ancestor(node, depth - depth0);

    return node;
}

/* Free "group" and return NULL.
 *
 * A singleton group (n_ref == 1) borrows its refs pointer from
 * group->array->refs (set by populate_array_references), so only
 * joined groups (n_ref > 1) own a separately allocated refs array.
 */
struct amp_array_ref_group *amp_array_ref_group_free(
    struct amp_array_ref_group *group) {
    if (!group)
        return NULL;

    isl_map_free(group->access);
    if (group->n_ref > 1)
        free(group->refs);
    free(group);

    return NULL;
}

/* Free "kernel" together with all data structures it owns
 * and return NULL.
 */
struct amp_ppcg_kernel *amp_ppcg_kernel_free(struct amp_ppcg_kernel *kernel) {
    int i, j;

    if (!kernel)
        return NULL;

    isl_set_free(kernel->context);
    isl_union_set_free(kernel->core);
    isl_union_set_free(kernel->arrays);
    isl_union_pw_multi_aff_free(kernel->contraction);
    isl_union_set_free(kernel->expanded_domain);
    isl_space_free(kernel->space);
    isl_ast_node_free(kernel->tree);
    isl_union_pw_multi_aff_free(kernel->copy_schedule);

    /* Per-array local information, including the reference groups. */
    for (i = 0; i < kernel->n_array; ++i) {
        struct amp_local_array_info *local = &kernel->array[i];

        for (j = 0; j < local->n_group; ++j)
            amp_array_ref_group_free(local->groups[j]);
        free(local->groups);

        isl_multi_pw_aff_free(local->bound);
        isl_ast_expr_free(local->bound_expr);
    }
    free(kernel->array);

    /* Local variables introduced for the kernel. */
    for (i = 0; i < kernel->n_var; ++i) {
        free(kernel->var[i].name);
        isl_vec_free(kernel->var[i].size);
    }
    free(kernel->var);

    free(kernel);
    return NULL;
}

/* Allocate the array of amp_local_array_info structures inside
 * "kernel", one entry per array in "prog", and let each entry
 * point back to the corresponding array in "prog".
 * Frees "kernel" and returns NULL on allocation failure.
 */
static struct amp_ppcg_kernel *amp_ppcg_kernel_create_local_arrays(
    struct amp_ppcg_kernel *kernel, struct amp_prog *prog) {
    isl_ctx *ctx;
    int i;

    if (!kernel)
        return NULL;

    ctx = isl_set_get_ctx(prog->context);
    kernel->array = isl_calloc_array(ctx, struct amp_local_array_info, prog->n_array);
    if (!kernel->array)
        return amp_ppcg_kernel_free(kernel);

    kernel->n_array = prog->n_array;
    for (i = 0; i < prog->n_array; ++i)
        kernel->array[i].array = &prog->array[i];

    return kernel;
}

/* Extract the set of parameter values and outer schedule dimensions
 * for which any statement instance in the kernel inserted at "node"
 * needs to be executed, intersected (on the parameters) with the
 * context of "prog".
 */
static __isl_give isl_set *extract_context(__isl_keep isl_schedule_node *node,
                                           struct amp_prog *prog) {
    isl_union_set *domain;
    isl_set *context;
    int empty;

    domain = isl_union_map_range(
        isl_schedule_node_get_prefix_schedule_relation(node));
    empty = isl_union_set_is_empty(domain);
    if (empty < 0) {
        isl_union_set_free(domain);
        return NULL;
    }

    if (!empty) {
        context = isl_set_from_union_set(domain);
    } else {
        /* No instances: build an empty set of the right dimension. */
        isl_space *space;
        int depth;

        space = isl_union_set_get_space(domain);
        isl_union_set_free(domain);
        space = isl_space_set_from_params(space);
        depth = isl_schedule_node_get_schedule_depth(node);
        space = isl_space_add_dims(space, isl_dim_set, depth);
        context = isl_set_empty(space);
    }

    return isl_set_intersect_params(context, isl_set_copy(prog->context));
}

/* Return the set of outer array elements accessed by the statement
 * instances in "domain" in "prog".
 */
static __isl_give isl_union_set *accessed_by_domain(
    __isl_take isl_union_set *domain, struct amp_prog *prog) {
    isl_union_map *accesses;

    accesses = isl_union_map_copy(prog->read);
    accesses = isl_union_map_union(accesses, isl_union_map_copy(prog->may_write));
    accesses = isl_union_map_intersect_domain(accesses, domain);

    return isl_union_set_apply(isl_union_map_range(accesses),
                               isl_union_map_copy(prog->to_outer));
}

/* Mark all dimensions in the current band node atomic.
 */
static __isl_give isl_schedule_node *atomic(__isl_take isl_schedule_node *node) {
    node = ppcg_set_schedule_node_type(node, isl_ast_loop_atomic);
    return node;
}

/* Mark "node" atomic, if it is a band node, and do the same for
 * all of its ancestors.  Return a pointer to "node" itself
 * (in the updated schedule tree).
 */
static __isl_give isl_schedule_node *atomic_ancestors(
    __isl_take isl_schedule_node *node) {
    int child_pos;

    if (!node || !isl_schedule_node_has_parent(node))
        return node;

    /* Recurse upwards, then come back down to the same position. */
    child_pos = isl_schedule_node_get_child_position(node);
    node = isl_schedule_node_parent(node);
    if (isl_schedule_node_get_type(node) == isl_schedule_node_band)
        node = atomic(node);
    node = atomic_ancestors(node);

    return isl_schedule_node_child(node, child_pos);
}

/* Wrapper around amp_ppcg_kernel_free for use as an
 * isl_id_set_free_user callback.
 */
static void amp_ppcg_kernel_free_wrap(void *user) {
    amp_ppcg_kernel_free((struct amp_ppcg_kernel *) user);
}

/* Assuming "node" is a filter node, does it correspond to the branch
 * that contains the "thread" mark, i.e., does its filter contain
 * any elements of "core"?
 * Returns -1 on error.
 */
static int node_is_core(__isl_keep isl_schedule_node *node,
                        __isl_keep isl_union_set *core) {
    isl_union_set *filter;
    int disjoint;

    filter = isl_schedule_node_filter_get_filter(node);
    disjoint = isl_union_set_is_disjoint(filter, core);
    isl_union_set_free(filter);

    return disjoint < 0 ? -1 : !disjoint;
}

/* Move to the only child of "node" that has the "thread" mark as descendant,
 * where the branch containing this mark is identified by the domain elements
 * in "core".
 *
 * If "node" is not a sequence, then it only has one child and we move
 * to that single child.
 * Otherwise, we check each of the filters in the children, pick
 * the one that corresponds to "core" and return a pointer to the child
 * of the filter node.
 */
static __isl_give isl_schedule_node *core_child(
    __isl_take isl_schedule_node *node, __isl_keep isl_union_set *core) {
    int i, n;

    if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
        return isl_schedule_node_child(node, 0);

    n = isl_schedule_node_n_children(node);
    for (i = 0; i < n; ++i) {
        int is_core;

        /* Descend into filter child i to inspect its filter. */
        node = isl_schedule_node_child(node, i);
        is_core = node_is_core(node, core);

        if (is_core < 0)
            return isl_schedule_node_free(node);
        /* Found the core branch: return the child of the filter node. */
        if (is_core)
            return isl_schedule_node_child(node, 0);

        /* Not the core branch: move back up and try the next child. */
        node = isl_schedule_node_parent(node);
    }

    isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
            "core child not found", return isl_schedule_node_free(node));
}

/* Move up the tree underneath the "kernel" mark until
 * the "kernel" mark is reached.
 */
__isl_give isl_schedule_node *amp_tree_move_up_to_kernel(
    __isl_take isl_schedule_node *node) {
    int is_kernel;

    for (;;) {
        is_kernel = amp_tree_node_is_kernel(node);
        if (is_kernel != 0)
            break;
        node = isl_schedule_node_parent(node);
    }
    if (is_kernel < 0)
        node = isl_schedule_node_free(node);

    return node;
}

/* Move down the branch between "kernel" and "thread" until
 * the "shared" mark is reached, where the branch containing the
 * "shared" mark is identified by the domain elements in "core".
 */
__isl_give isl_schedule_node *amp_tree_move_down_to_shared(
    __isl_take isl_schedule_node *node, __isl_keep isl_union_set *core) {
    int is_shared;

    for (;;) {
        is_shared = node_is_shared(node);
        if (is_shared != 0)
            break;
        node = core_child(node, core);
    }
    if (is_shared < 0)
        node = isl_schedule_node_free(node);

    return node;
}

/* Return the prefix schedule at "node" as a relation between
 * domain elements and schedule dimensions, after detecting
 * equalities in this relation.
 */
static __isl_give isl_union_map *prefix_with_equalities(
    __isl_keep isl_schedule_node *node) {
    return isl_union_map_detect_equalities(
        isl_schedule_node_get_prefix_schedule_relation(node));
}

/* Move down the branch between "kernel" and "thread" until
 * the "thread" mark is reached, where the branch containing the
 * "thread" mark is identified by the domain elements in "core".
 */
__isl_give isl_schedule_node *amp_tree_move_down_to_thread(
    __isl_take isl_schedule_node *node, __isl_keep isl_union_set *core) {
    int is_thread;

    for (;;) {
        is_thread = node_is_thread(node);
        if (is_thread != 0)
            break;
        node = core_child(node, core);
    }
    if (is_thread < 0)
        node = isl_schedule_node_free(node);

    return node;
}

/* Expand the domain of the schedule "s" by plugging in
 * the contraction "contraction" and return the result.
 */
static __isl_give isl_union_map *expand(__isl_take isl_union_map *s,
                                        __isl_keep isl_union_pw_multi_aff *contraction) {
    return isl_union_map_preimage_domain_union_pw_multi_aff(s,
        isl_union_pw_multi_aff_copy(contraction));
}

/* Store the read and write reference groups on "array".
 *
 * Set array->n_read_group/array->groups_read and
 * array->n_write_group/array->groups_write to the given counts and
 * arrays, and assign each group a sequence number "nr": the read
 * groups are numbered 0 .. n_read - 1 and the write groups continue
 * at n_read.
 *
 * Note: the original version read from an uninitialized local
 * "groups" pointer (undefined behavior) and grew its own loop bounds
 * (groups_read[n_read++] inside a loop bounded by n_read); the groups
 * are already split by the caller, so only the counts, pointers and
 * "nr" numbering need to be set here.
 */
static void new_set_array_groups(struct amp_local_array_info *array,
                             int n_write, struct amp_array_ref_group **groups_write, int n_read, struct amp_array_ref_group **groups_read) {
    int i;

    array->n_read_group = n_read;
    array->n_write_group = n_write;

    array->groups_read = groups_read;
    array->groups_write = groups_write;

    for (i = 0; i < n_read; ++i)
        groups_read[i]->nr = i;
    for (i = 0; i < n_write; ++i)
        groups_write[i]->nr = n_read + i;
}


/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write, n_ref and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel, or -1 on allocation failure.
 */
static int populate_array_references(struct amp_local_array_info *local,
                                     struct amp_array_ref_group **groups, struct amp_group_data *data) {
    int i;
    int n;
    isl_ctx *ctx = isl_union_map_get_ctx(data->copy_sched);

    n = 0;
    for (i = 0; i < local->array->n_ref; ++i) {
        isl_union_map *umap;
        isl_map *map;
        struct amp_array_ref_group *group;
        struct amp_stmt_access *access = local->array->refs[i];

        /* Schedule the access relation of this reference. */
        map = isl_map_copy(access->access);
        umap = isl_union_map_from_map(map);
        umap = isl_union_map_apply_domain(umap, isl_union_map_copy(data->copy_sched));

        /* Skip references that are inactive in the current kernel.
         * NOTE(review): isl_union_map_is_empty() returns isl_bool, so
         * an error (-1) is treated like "empty" here -- confirm intended. */
        if (isl_union_map_is_empty(umap)) {
            isl_union_map_free(umap);
            continue;
        }

        map = isl_map_from_union_map(umap);
        map = isl_map_detect_equalities(map);

        group = isl_calloc_type(ctx, struct amp_array_ref_group);
        if (!group) {
            isl_map_free(map);
            return -1;
        }
        group->local_array = local;
        group->array = local->array;
        group->access = map;
        group->write = access->write;
        group->exact_write = access->exact_write;
        /* A "slice" accesses fewer index dimensions than the array has. */
        group->slice = access->n_index < local->array->n_index;
        /* Singleton group: borrow the refs pointer from the array. */
        group->refs = &local->array->refs[i];
        group->n_ref = 1;

        groups[n++] = group;
    }

    return n;
}

// /* Fill up the groups array with singleton groups, i.e., one group
//  * per reference, initializing the array, access, write, n_ref and refs fields.
//  * In particular the access field is initialized to the scheduled
//  * access relation of the array reference.
//  *
//  * Return the number of elements initialized, i.e., the number of
//  * active references in the current kernel.
//  */
// static int populate_array_references(struct amp_local_array_info *local,
//                                      struct amp_array_ref_group **groups, struct amp_group_data *data) {
//     int i;
//     int n, n_write, n_read;
//     isl_ctx *ctx = isl_union_map_get_ctx(data->copy_sched);

//     n = 0;
//     // struct amp_array_ref_group **groups_write;
//     // struct amp_array_ref_group **groups_read;

//     for (i = 0; i < local->array->n_ref; ++i) {
//         isl_union_map *umap;
//         isl_map *map;
//         struct amp_array_ref_group *group;

//         struct amp_stmt_access *access = local->array->refs[i];

//         map = isl_map_copy(access->access);
//         umap = isl_union_map_from_map(map);
//         umap = isl_union_map_apply_domain(umap, isl_union_map_copy(data->copy_sched));

//         if (isl_union_map_is_empty(umap)) {
//             isl_union_map_free(umap);
//             continue;
//         }

//         map = isl_map_from_union_map(umap);
//         map = isl_map_detect_equalities(map);

//         group = isl_calloc_type(ctx, struct amp_array_ref_group);
//         if (!group) {
//             isl_map_free(map);
//             return -1;
//         }
//         group->local_array = local;
//         group->array = local->array;
//         group->access = map;
//         group->slice = access->n_index < local->array->n_index;
//         group->refs = &local->array->refs[i];
//         group->n_ref = 1;

//         // if (access->write) {
//         //     group->write = 1;
//         //     group->exact_write = access->exact_write;
//         //     groups_write[n_write++] = group;
//         // } else {
//         //     groups_read[n_read++] = group;
//         // }
//         if (access->read) {
//             groups[n++] = group;
//         } else {
//             group->write = access->write;
//             group->exact_write = access->exact_write;
//             groups[n++] = group;
//         }
        
//         // group->write = access->write;
//         // group->exact_write = access->exact_write;
//         // group->slice = access->n_index < local->array->n_index;
        

//         // groups[n++] = group;
//     }
//     // n = n_write + n_read;
//     // new_set_array_groups(local, n_write, groups_write, n_read, groups_read);
//     return n;
// }

/* Combine the given two groups into a single group, containing
 * the references of both groups.
 */
static struct amp_array_ref_group *join_groups(
    struct amp_array_ref_group *group1,
    struct amp_array_ref_group *group2) {
    int k;
    isl_ctx *ctx;
    struct amp_array_ref_group *merged;

    if (!group1 || !group2)
        return NULL;

    ctx = isl_map_get_ctx(group1->access);
    merged = isl_calloc_type(ctx, struct amp_array_ref_group);
    if (!merged)
        return NULL;

    /* Array metadata is shared by both inputs; inherit it from group1. */
    merged->local_array = group1->local_array;
    merged->array = group1->array;
    /* The combined access relation is the union of the two. */
    merged->access = isl_map_union(isl_map_copy(group1->access),
                                   isl_map_copy(group2->access));
    merged->write = group1->write || group2->write;
    merged->exact_write = group1->exact_write && group2->exact_write;
    merged->slice = group1->slice || group2->slice;

    /* Concatenate the reference lists of the two groups. */
    merged->n_ref = group1->n_ref + group2->n_ref;
    merged->refs = isl_alloc_array(ctx, struct amp_stmt_access *,
                                   merged->n_ref);
    if (!merged->refs)
        return amp_array_ref_group_free(merged);
    for (k = 0; k < merged->n_ref; ++k)
        merged->refs[k] = k < group1->n_ref
                              ? group1->refs[k]
                              : group2->refs[k - group1->n_ref];

    return merged;
}

/* Combine the given two groups into a single group and free
 * the original two groups.
 */
static struct amp_array_ref_group *join_groups_and_free(
    struct amp_array_ref_group *group1,
    struct amp_array_ref_group *group2) {
    struct amp_array_ref_group *joined = join_groups(group1, group2);

    /* Both inputs are consumed whether or not the join succeeded. */
    amp_array_ref_group_free(group1);
    amp_array_ref_group_free(group2);

    return joined;
}

/* Combine all groups in "groups" into a single group and return
 * the new number of groups (1 or 0 if there were no groups to start with).
 */
static int join_all_groups(int n, struct amp_array_ref_group **groups) {
    /* Repeatedly fold the last remaining group into the first one
     * until at most one group is left.
     */
    while (n > 1) {
        groups[0] = join_groups_and_free(groups[0], groups[n - 1]);
        groups[n - 1] = NULL;
        n--;
    }

    return n;
}

/* Set array->n_group and array->groups to n and groups.
 *
 * Additionally, set the "nr" field of each group.
 */
static void set_array_groups(struct amp_local_array_info *array,
                             int n, struct amp_array_ref_group **groups) {
    int pos;

    array->n_group = n;
    array->groups = groups;

    /* Renumber each group according to its final position. */
    for (pos = 0; pos < n; ++pos)
        groups[pos]->nr = pos;
}


/* Compute the number of outer schedule tile dimensions that affect
 * the offset of "tile".
 * If there is no such dimension, then return the index
 * of the first kernel dimension, i.e., data->kernel_depth.
 */
static int compute_tile_depth(struct amp_group_data *data,
                              struct amp_array_tile *tile) {
    int i, j;

    for (j = tile->depth - 1; j >= data->kernel_depth; --j) {
        for (i = 0; i < tile->n; ++i) {
            isl_aff *lb;
            isl_aff *shift;

            lb = tile->bound[i].lb;
            if (isl_aff_involves_dims(lb, isl_dim_in, j, 1))
                break;

            shift = tile->bound[i].shift;
            if (!shift)
                continue;
            if (isl_aff_involves_dims(shift, isl_dim_in, j, 1))
                break;
        }
        if (i < tile->n)
            break;
    }

    return ++j;
}

/* Adjust the fields of "tile" to reflect the new input dimension "depth".
 * The dimension beyond "depth" are assumed not to affect the tile,
 * so they can simply be dropped.
 */
static int tile_adjust_depth(struct amp_array_tile *tile, int depth) {
    int i;

    if (tile->depth == depth)
        return 0;

    for (i = 0; i < tile->n; ++i) {
        tile->bound[i].lb = isl_aff_drop_dims(tile->bound[i].lb,
                                              isl_dim_in, depth, tile->depth - depth);
        if (!tile->bound[i].lb)
            return -1;
        if (!tile->bound[i].shift)
            continue;
        tile->bound[i].shift = isl_aff_drop_dims(tile->bound[i].shift,
                                                 isl_dim_in, depth, tile->depth - depth);
        if (!tile->bound[i].shift)
            return -1;
    }

    tile->depth = depth;

    return 0;
}

/* Determine the number of schedule dimensions that affect the offset of the
 * shared or private tile "tile" and store the result in tile->depth, with
 * a lower bound of data->kernel_depth.
 * Also adjust the fields of the tile to only refer to the tile->depth
 * outer schedule dimensions.
 */
static isl_stat tile_set_depth(struct amp_group_data *data,
                               struct amp_array_tile *tile) {
    /* Compute the required depth, then shrink the tile to it. */
    int depth = compute_tile_depth(data, tile);

    return tile_adjust_depth(tile, depth) < 0 ? isl_stat_error : isl_stat_ok;
}

/* Determine the number of schedule dimensions that affect the offset of the
 * shared tile and store the minimum of the private and shared tile depth
 * in group->min_depth, with a lower bound of data->kernel_depth.
 * If there is no tile defined on the array reference group,
 * then set group->min_depth to data->thread_depth.
 */
static int set_depth(struct amp_group_data *data,
                     struct amp_array_ref_group *group) {
    /* Default when the group carries no tile. */
    group->min_depth = data->thread_depth;

    if (!group->shared_tile)
        return 0;

    if (tile_set_depth(data, group->shared_tile) < 0)
        return -1;
    if (group->shared_tile->depth < group->min_depth)
        group->min_depth = group->shared_tile->depth;

    return 0;
}

/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
__isl_give isl_union_map *amp_array_ref_group_access_relation(
    struct amp_array_ref_group *group, int read, int write) {
    int i;
    isl_union_map *result;

    /* Start from the empty relation and accumulate the accesses of
     * the selected references.
     */
    result = isl_union_map_empty(isl_map_get_space(group->access));
    for (i = 0; i < group->n_ref; ++i) {
        struct amp_stmt_access *ref = group->refs[i];
        int selected = (read && ref->read) || (write && ref->write);

        if (!selected)
            continue;
        result = isl_union_map_union(result,
                                     isl_union_map_from_map(isl_map_copy(ref->access)));
    }

    return result;
}

/* Replace the host schedule dimensions in the access relation "access"
 * by parameters, so that they are treated as fixed when checking for reuse
 * (within a kernel) or whether two consecutive elements are accessed
 * (within a kernel).
 */
static __isl_give isl_union_map *localize_access(struct amp_group_data *data,
                                                 __isl_take isl_union_map *access) {
    int n_host;
    isl_space *space;
    isl_set *fixed;
    isl_union_map *host;
    isl_id_list *names;

    host = isl_union_map_copy(data->host_sched);
    space = isl_union_map_get_space(host);

    /* Introduce one parameter per host schedule dimension and equate
     * each schedule dimension to its parameter, so the host dimensions
     * are treated as fixed.
     */
    n_host = data->kernel_depth;
    names = ppcg_scop_generate_names(data->scop, n_host, "__ppcg_host_");
    fixed = parametrization(space, n_host, 0, names);
    isl_id_list_free(names);

    host = isl_union_map_intersect_range(host, isl_union_set_from_set(fixed));

    /* Restrict "access" to the statement instances executed under the
     * parametrized host iterations.
     */
    return isl_union_map_intersect_domain(access, isl_union_map_domain(host));
}

/* Construct a map from domain_space to domain_space that increments
 * the dimension at position "pos" and leaves all other dimensions
 * constant.
 */
static __isl_give isl_map *next(__isl_take isl_space *domain_space, int pos) {
    isl_multi_aff *inc;
    isl_aff *coord;

    /* Start from the identity mapping on the domain... */
    inc = isl_multi_aff_identity(isl_space_map_from_set(domain_space));
    /* ...and replace the output at "pos" by itself plus one. */
    coord = isl_multi_aff_get_aff(inc, pos);
    coord = isl_aff_add_constant_si(coord, 1);
    inc = isl_multi_aff_set_aff(inc, pos, coord);

    return isl_map_from_multi_aff(inc);
}

/* Check if the given access is coalesced (or if there is no point
 * in trying to coalesce the access by mapping the array to shared memory).
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * If no two consecutive array elements are ever accessed by "access",
 * then mapping the corresponding array to shared memory will not
 * improve coalescing.  In fact, the copying will likely be performed
 * by a single thread.  Consider the access as coalesced such that
 * the caller will not try and map the array to shared memory just
 * to improve coalescing.
 *
 * This function is only called for access relations without reuse and
 * kernels with at least one thread identifier.
 */
static int access_is_coalesced(struct amp_group_data *data,
                               __isl_keep isl_union_map *access) {
    // #define DEBUG_ACCESS_IS_COALESCED

    int dim;
    isl_space *space;
    isl_set *accessed;
    isl_map *access_map;
    isl_map *next_thread_x;
    isl_map *next_element;
    isl_map *map;
    int coalesced, empty;

    /* Schedule the access relation with the full schedule so that the
     * domain refers to schedule dimensions rather than statement
     * instances.
     */
    access = isl_union_map_copy(access);
    access = isl_union_map_apply_domain(access, isl_union_map_copy(data->full_sched));
    access_map = isl_map_from_union_map(access);

    /* Relation mapping each array element to the next element along
     * the innermost array dimension (empty for 0-dimensional arrays).
     */
    space = isl_map_get_space(access_map);
    space = isl_space_range(space);
    dim = isl_space_dim(space, isl_dim_set);
    if (dim == 0)
        next_element = isl_map_empty(isl_space_map_from_set(space));
    else
        next_element = next(space, dim - 1);

    /* If no two consecutive elements are both accessed, consider the
     * access coalesced (see the function comment above): "empty" then
     * also serves as the return value (it may be negative on error).
     */
    accessed = isl_map_range(isl_map_copy(access_map));
    map = isl_map_copy(next_element);
    map = isl_map_intersect_domain(map, isl_set_copy(accessed));
    map = isl_map_intersect_range(map, accessed);
    empty = isl_map_is_empty(map);
    isl_map_free(map);

    if (empty < 0 || empty) {
        isl_map_free(next_element);
        isl_map_free(access_map);
        return empty;
    }

    /* Relation incrementing the schedule dimension that will get
     * wrapped over the last thread index.
     */
    space = isl_map_get_space(access_map);
    space = isl_space_domain(space);
    next_thread_x = next(space, data->thread_depth + data->n_thread - 1);

    /* Elements accessed by "adjacent" threads... */
    map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
    map = isl_map_apply_range(map, access_map);

    /* ...must be consecutive array elements for the access to be
     * coalesced (may return a negative value on error).
     */
    coalesced = isl_map_is_subset(map, next_element);

    isl_map_free(next_element);
    isl_map_free(map);

    return coalesced;
}

/* Report that the array reference group with the given access relation
 * is not mapped to shared memory in the given kernel because
 * it does not exhibit any reuse and is considered to be coalesced.
 */
static void report_no_reuse_and_coalesced(struct amp_ppcg_kernel *kernel,
                                          __isl_keep isl_union_map *access) {
    isl_printer *p;

    /* Print a diagnostic line to stdout describing the group. */
    p = isl_printer_to_file(isl_union_map_get_ctx(access), stdout);
    p = isl_printer_print_str(p, "Array reference group ");
    p = isl_printer_print_union_map(p, access);
    p = isl_printer_print_str(p,
                              " not considered for mapping to shared memory in kernel");
    p = isl_printer_print_int(p, kernel->id);
    p = isl_printer_print_str(p,
                              " because it exhibits no reuse and is considered to be coalesced");
    p = isl_printer_end_line(p);
    isl_printer_free(p);
}

/* Given an array access "access", check if for any index i there is
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in tile->bound[i]->stride and
 * tile->bound[i]->shift.
 * Otherwise, set tile->bound[i]->stride to 1 (and tile->bound[i]->shift to 0).
 * Return isl_bool_true if any non-trivial stride was found.
 *
 * Note that the stride info returned by isl_map_get_range_stride_info
 * is of the form
 *
 *	i = o(p) + g n
 *
 * a(p) can therefore be taken to be equal to -o(p).
 */
static isl_bool detect_strides(struct amp_array_tile *tile,
                               __isl_keep isl_map *access) {
    int i;
    isl_bool has_strides = isl_bool_false;

    for (i = 0; i < tile->n; ++i) {
        struct amp_array_bound *bound = &tile->bound[i];
        isl_stride_info *si;

        /* The stride info is of the form i = o(p) + g n, so the
         * shift a(p) is taken to be -o(p) (see function comment).
         */
        si = isl_map_get_range_stride_info(access, i);
        bound->stride = isl_stride_info_get_stride(si);
        bound->shift = isl_aff_neg(isl_stride_info_get_offset(si));
        isl_stride_info_free(si);

        /* Report a non-trivial stride as soon as any dimension has
         * stride > 1; once true, later dimensions need not be checked.
         * isl_val_gt_si may return isl_bool_error, handled below.
         */
        if (!has_strides)
            has_strides = isl_val_gt_si(bound->stride, 1);
        if (has_strides < 0)
            return isl_bool_error;
    }

    return has_strides;
}

/* Given an array access "access", remove the strides based
 * on the information in tile->bound[i]->stride and tile->bound[i]->shift.
 *
 * In particular let the access be A[a] and
 * let the shifts s_i(p) and the strides g_i be such that
 *
 *  S(p) + a = 0 mod G
 *
 * Replace the access by
 *
 *  A[(a + S(p))/G]
 *
 * First collect the shifts s_i into an isl_multi_aff and
 * the strides into the scaling function A[i] -> A[G i].
 * Then add the shifts to the original access and
 * take the preimage over the scaling.
 */
static __isl_give isl_map *remove_strides(__isl_take isl_map *access,
                                          struct amp_array_tile *tile) {
    int i;
    isl_space *map_space, *set_space;
    isl_multi_aff *shift, *scale;
    isl_multi_val *stride;

    /* "shift" lives on the map space, "stride" and "scale" on the
     * range (set) space.
     */
    map_space = isl_map_get_space(access);
    shift = isl_multi_aff_zero(isl_space_copy(map_space));
    set_space = isl_space_range(map_space);
    stride = isl_multi_val_zero(isl_space_copy(set_space));
    scale = isl_multi_aff_identity(isl_space_map_from_set(set_space));

    /* Collect the per-dimension shifts s_i and strides g_i. */
    for (i = 0; i < tile->n; ++i) {
        struct amp_array_bound *bound = &tile->bound[i];

        shift = isl_multi_aff_set_aff(shift, i, isl_aff_copy(bound->shift));
        stride = isl_multi_val_set_val(stride, i, isl_val_copy(bound->stride));
    }
    scale = isl_multi_aff_scale_multi_val(scale, stride);

    /* A[a] -> A[(a + S(p)) / G]: add the shifts, then take the
     * preimage over the scaling A[i] -> A[G i].
     */
    access = isl_map_sum(access, isl_map_from_multi_aff(shift));
    return isl_map_preimage_range_multi_aff(access, scale);
}

/* Check if we can find a memory tile for the given array
 * based on the given accesses, and if so, put the results in "tile".
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant, after removing
 * any stride that may appear in the accesses.
 *
 * tile->depth is initialized to the input dimension of the computed bounds.
 */
static isl_bool can_tile(__isl_keep isl_map *access,
                         struct amp_array_tile *tile) {
    int i;
    isl_bool has_strides, valid;
    isl_fixed_box *box;
    isl_multi_aff *offset;
    isl_multi_val *size;

    if (!tile)
        return isl_bool_error;

    /* Record the strides first; they are removed from the access
     * relation below before the box hull is computed.
     */
    has_strides = detect_strides(tile, access);
    if (has_strides < 0)
        return isl_bool_error;

    /* The bounds computed below refer to all input dimensions. */
    tile->depth = isl_map_dim(access, isl_dim_in);

    access = isl_map_copy(access);
    if (has_strides)
        access = remove_strides(access, tile);

    box = isl_map_get_range_simple_fixed_box_hull(access);
    valid = isl_fixed_box_is_valid(box);
    if (valid >= 0 && !valid) {
        /* No fixed box exists; fall back to a lattice tile for the
         * bounds.  The previous box was leaked here before.
         * Note that "valid" (and hence the return value) intentionally
         * stays false in this case, matching the previous behavior.
         */
        isl_fixed_box_free(box);
        box = isl_map_get_range_lattice_tile(access);
    }
    /* The copied access relation was previously leaked (its free was
     * commented out).
     */
    isl_map_free(access);

    if (valid >= 0) {
        /* Record the per-dimension size and lower bound of the box. */
        offset = isl_fixed_box_get_offset(box);
        size = isl_fixed_box_get_size(box);
        for (i = 0; i < tile->n; ++i) {
            tile->bound[i].size = isl_multi_val_get_val(size, i);
            tile->bound[i].lb = isl_multi_aff_get_aff(offset, i);
        }
        isl_multi_aff_free(offset);
        isl_multi_val_free(size);
    }
    isl_fixed_box_free(box);

    return valid;
}

struct amp_array_tile *amp_array_tile_free(struct amp_array_tile *tile) {
    int j;

    if (!tile)
        return NULL;

    for (j = 0; j < tile->n; ++j) {
        isl_val_free(tile->bound[j].size);
        isl_val_free(tile->bound[j].stride);
        isl_aff_free(tile->bound[j].lb);
        isl_aff_free(tile->bound[j].shift);
    }
    free(tile->bound);
    isl_multi_aff_free(tile->tiling);
    free(tile);

    return NULL;
}

/* Create a gpu_array_tile for an array of dimension "n_index".
 */
static struct amp_array_tile *amp_array_tile_create(isl_ctx *ctx, int n_index) {
    // #define DEBUG_AMP_ARRAY_TILE

    int i;
    struct amp_array_tile *tile;

    tile = isl_calloc_type(ctx, struct amp_array_tile);
    if (!tile)
        return NULL;

    tile->ctx = ctx;
    tile->bound = isl_alloc_array(ctx, struct amp_array_bound, n_index);
    if (!tile->bound)
        return amp_array_tile_free(tile);

    tile->n = n_index;

    /* The bound fields are filled in later (e.g. by detect_strides
     * and can_tile); start them out as NULL so amp_array_tile_free
     * is safe on a partially initialized tile.
     */
    for (i = 0; i < n_index; ++i) {
        tile->bound[i].size = NULL;
        tile->bound[i].lb = NULL;
        tile->bound[i].stride = NULL;
        tile->bound[i].shift = NULL;
    }
#ifdef DEBUG_AMP_ARRAY_TILE
    /* NOTE(review): tile->tiling is still NULL at this point, so the
     * isl_multi_aff_dump below is only safe if it tolerates NULL —
     * verify before enabling this debug block.
     */
    fprintf(stderr, "@DEBUG: \n       in amp_array_tile_create over, the tile information are: \n");
    fprintf(stderr, "          the tile->n is %d, the tile->depth is %d . \n\n", tile->n, tile->depth);
    fprintf(stderr, "          the tile->n is %d, the tile->depth is %d, the tile->tiling is:\n", tile->n, tile->depth);
    isl_multi_aff_dump(tile->tiling);
    fprintf(stderr, "\n\n");
#endif // DEBUG_AMP_ARRAY_TILE
    return tile;
}

/* Map the domain of "access" to the outer data->shared_depth
 * schedule dimensions.  When data->shared_depth is equal to
 * data->thread_depth, this result is already available in group->access.
 */
static __isl_give isl_map *shared_access(struct amp_array_ref_group *group,
                                         __isl_keep isl_union_map *access, struct amp_group_data *data) {
    isl_union_map *mapped;

    /* group->access already refers to the thread-depth schedule, so
     * it can be reused directly when the two depths coincide.
     */
    if (data->shared_depth == data->thread_depth)
        return isl_map_copy(group->access);

    mapped = isl_union_map_apply_domain(isl_union_map_copy(access),
                                        isl_union_map_copy(data->shared_sched));

    return isl_map_from_union_map(mapped);
}

/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array".
 * Return isl_stat_ok on success and isl_stat_error on error.
 *
 * If the array is a read-only scalar or if the user requested
 * not to use shared or private memory, then we do not need to do anything.
 *
 * If any reference in the reference group accesses more than one element,
 * then we would have to make sure that the layout in shared memory
 * is the same as that in global memory.  Since we do not handle this yet
 * (and it may not even be possible), we refuse to map to private or
 * shared memory in such cases.
 *
 * If the array group involves any may writes (that are not must writes),
 * then we would have to make sure that we load the data into shared/private
 * memory first in case the data is not written by the kernel
 * (but still written back out to global memory).
 * Since we don't have any such mechanism at the moment, we don't
 * compute shared/private tiles for groups involving may writes.
 *
 * We only try to compute a shared memory tile if there is any reuse
 * or if the access is not coalesced.
 * Reuse and coalescing are checked within the given kernel.
 *
 * For computing a private memory tile, we also require that there is
 * some reuse.  Moreover, we require that the access is private
 * to the thread.  That is, we check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation that maps the outer
 * data->thread_depth + data->n_thread schedule dimensions.
 * The latter data->n_thread will be mapped to thread identifiers.
 * We actually check that those iterators that will be wrapped
 * partition the array space.  This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * For private memory tiles, the number of schedule dimensions that
 * affect the offset is computed and stored in tile->depth, with
 * a lower bound of data->kernel_depth.  If this depth is smaller
 * than the minimal depth that still ensures that every element
 * is accessed by a single thread, then the depth is raised
 * to this minimal depth.
 * The fields of the tile are then adjusted to only refer to the tile->depth
 * outer schedule dimensions.
 *
 * We also check that the index expression only depends on parallel
 * loops.  That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If the use of the private tile requires unrolling, but some
 * of the other arrays are forcibly mapped to private memory,
 * then we do not allow the use of this private tile since
 * we cannot move the schedule dimensions that need to be unrolled down
 * without performing some kind of expansion on those arrays
 * that are forcibly mapped to private memory.
 *
 * If the array is marked force_private, then we bypass all checks
 * and assume we can (and should) use registers only.
 *
 * If it turns out we can (or have to) use registers, we compute
 * the private memory tile size using can_tile, after introducing a dependence
 * on the thread indices.
 */
static isl_stat compute_group_bounds_core(struct amp_ppcg_kernel *kernel,
                                          struct amp_array_ref_group *group, struct amp_group_data *data) {
    isl_ctx *ctx = isl_space_get_ctx(group->array->space);
    isl_union_map *access, *local;
    isl_map *acc;
    /* "coalesced" is only read when no_reuse is set, but initialize it
     * so it can never be read uninitialized.
     */
    int no_reuse, coalesced = 0;
    isl_stat r = isl_stat_ok;
    isl_bool ok;

    /* If the group involves may writes (that are not must writes),
     * we would have to load the data first in case it is not written
     * by the kernel; no such mechanism exists, so skip these groups.
     */
    if (!group->exact_write)
        return isl_stat_ok;
    /* References that access entire subarrays (slices) would require
     * the shared-memory layout to match global memory; not handled.
     */
    if (group->slice)
        return isl_stat_ok;

    /* Check reuse and coalescing within the kernel. */
    access = amp_array_ref_group_access_relation(group, 1, 1);
    local = localize_access(data, isl_union_map_copy(access));
    no_reuse = isl_union_map_is_injective(local);
    if (no_reuse < 0)
        r = isl_stat_error;
    if (no_reuse) {
        coalesced = access_is_coalesced(data, local);
        /* Previously a negative (error) result was silently ignored. */
        if (coalesced < 0)
            r = isl_stat_error;
    }
    isl_union_map_free(local);

    if (r >= 0 && kernel->options->debug->verbose && no_reuse && coalesced)
        report_no_reuse_and_coalesced(kernel, access);

    /* Unconditionally attempt to compute a shared memory tile.
     * (AMP does not use private/register tiles; the previous
     * force_private/use_private machinery was dead code.)
     */
    group->shared_tile = amp_array_tile_create(ctx, group->array->n_index);
    acc = shared_access(group, access, data);
    ok = can_tile(acc, group->shared_tile);
    if (ok < 0)
        r = isl_stat_error;
    isl_map_free(acc);

    isl_union_map_free(access);

    return r;
}

/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array" and set the tile depth.
 * Return 0 on success and -1 on error.
 */
static int compute_group_bounds(struct amp_ppcg_kernel *kernel,
                                struct amp_array_ref_group *group, struct amp_group_data *data) {
    /* Compute the memory tiles first, then derive the tile depth. */
    if (!group ||
        compute_group_bounds_core(kernel, group, data) < 0 ||
        set_depth(data, group) < 0)
        return -1;

    return 0;
}

/* If two groups have overlapping access relations (as determined by
 * the "overlap" function) and if one of them involves a write,
 * then merge the two groups into one.
 * If "compute_bounds" is set, then call compute_group_bounds
 * on the merged groups.
 * If any group is merged into the current group, then its access
 * relation may have changed or it may have been turned into a write.
 * The combined group might therefore overlap with groups that
 * the original group did not overlap with.  The groups therefore
 * need to be checked again.
 *
 * Return the updated number of groups.
 * Return -1 on error.
 */
static int group_writes(struct amp_ppcg_kernel *kernel,
                        int n, struct amp_array_ref_group **groups,
                        int (*overlap)(struct amp_array_ref_group *group1,
                                       struct amp_array_ref_group *group2),
                        int compute_bounds,
                        struct amp_group_data *data) {
    int i, j;
    int any_merge;

    /* Stay on group "i" for another round whenever a merge happened,
     * since the merged access relation may now overlap groups that
     * the original did not (see the function comment above).
     */
    for (i = 0; i < n; i += !any_merge) {
        any_merge = 0;
        for (j = n - 1; j > i; --j) {
            /* Only merge when at least one group involves a write. */
            if (!groups[i]->write && !groups[j]->write)
                continue;
            if (!overlap(groups[i], groups[j]))
                continue;

            any_merge = 1;
            groups[i] = join_groups_and_free(groups[i], groups[j]);
            /* Fill the hole left by "j" with the last group; iterating
             * j downwards guarantees position n-1 was already visited.
             */
            if (j != n - 1)
                groups[j] = groups[n - 1];
            groups[n - 1] = NULL;
            n--;

            if (!groups[i])
                return -1;
            if (compute_bounds &&
                compute_group_bounds(kernel, groups[i], data) < 0)
                return -1;
        }
    }

    return n;
}

/* Check whether the access relations of "group1" and "group2"
 * intersect (within copy_sched).
 *
 * Return 1 if they overlap, 0 if they are disjoint and -1 on error.
 */
static int accesses_overlap(struct amp_array_ref_group *group1,
                            struct amp_array_ref_group *group2) {
    int disjoint = isl_map_is_disjoint(group1->access, group2->access);

    if (disjoint < 0)
        return -1;
    return disjoint ? 0 : 1;
}

/* Merge groups whose access relations overlap (within the innermost
 * loop) when at least one of the two groups involves a write.
 * Tile bounds are not recomputed here (compute_bounds = 0).
 *
 * Return the updated number of groups, or -1 on error.
 */
static int group_overlapping_writes(struct amp_ppcg_kernel *kernel,
                                    int n, struct amp_array_ref_group **groups,
                                    struct amp_group_data *data) {
    return group_writes(kernel, n, groups, accesses_overlap, 0, data);
}

/* Check if the access relations of group1 and group2 overlap within
 * the outermost min(group1->min_depth, group2->min_depth) loops.
 *
 * Both access relations are projected onto those outermost schedule
 * dimensions (the deeper input dimensions are eliminated) and the
 * projections are intersected.
 *
 * Return 1 if they overlap, 0 if they do not and -1 on error,
 * consistent with accesses_overlap.
 */
static int depth_accesses_overlap(struct amp_array_ref_group *group1,
                                  struct amp_array_ref_group *group2) {
    int depth;
    int dim;
    int empty;
    isl_map *map_i, *map_j, *map;

    depth = group1->min_depth;
    if (group2->min_depth < depth)
        depth = group2->min_depth;
    map_i = isl_map_copy(group1->access);
    dim = isl_map_dim(map_i, isl_dim_in);
    map_i = isl_map_eliminate(map_i, isl_dim_in, depth, dim - depth);
    map_j = isl_map_copy(group2->access);
    map_j = isl_map_eliminate(map_j, isl_dim_in, depth, dim - depth);
    map = isl_map_intersect(map_i, map_j);
    empty = isl_map_is_empty(map);
    isl_map_free(map);

    /* Propagate an isl error instead of silently treating it as
     * "no overlap" (previously !(-1) == 0 masked the error). */
    if (empty < 0)
        return -1;

    return !empty;
}

/* Compute the total number of elements in the tile "tile",
 * i.e., the product of the sizes of its individual dimensions.
 * Return NULL if "tile" is NULL.
 */
static __isl_give isl_val *amp_array_tile_size(struct amp_array_tile *tile) {
    isl_val *size;
    int i;

    if (!tile)
        return NULL;

    size = isl_val_one(tile->ctx);
    for (i = 0; i < tile->n; ++i) {
        isl_val *dim_size = isl_val_copy(tile->bound[i].size);
        size = isl_val_mul(size, dim_size);
    }

    return size;
}

/* Merge groups whose access relations overlap within the outer
 * depth loops when at least one of the two groups involves a write.
 * Tile bounds of merged groups are recomputed (compute_bounds = 1).
 *
 * Return the updated number of groups, or -1 on error.
 */
static int group_depth_overlapping_writes(struct amp_ppcg_kernel *kernel,
                                          int n, struct amp_array_ref_group **groups, struct amp_group_data *data) {
    return group_writes(kernel, n, groups, depth_accesses_overlap, 1, data);
}

/* Is the size of the tile specified by "tile" smaller than the sum of
 * the sizes of the tiles specified by "tile1" and "tile2"?
 *
 * Computed as size(tile) - size(tile1) - size(tile2) < 0.
 */
static int smaller_tile(struct amp_array_tile *tile,
                        struct amp_array_tile *tile1, struct amp_array_tile *tile2) {
    isl_val *diff;
    int smaller;

    diff = amp_array_tile_size(tile);
    diff = isl_val_sub(diff, amp_array_tile_size(tile1));
    diff = isl_val_sub(diff, amp_array_tile_size(tile2));
    smaller = isl_val_is_neg(diff);

    isl_val_free(diff);

    return smaller;
}

/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile, the merged group also has
 * a shared memory tile and the size of the tile for the merge group
 * is smaller than the sum of the tile sizes of the individual groups.
 * If any group is merged into the current group, then it may become
 * profitable to combine it with groups that were considered before
 * the merge.  The groups are therefore checked again after a merge.
 *
 * If merging two groups decreases the depth of the tile of
 * one or both of the two groups, then we need to check for overlapping
 * writes again.
 *
 * Return the number of groups after merging.
 * Return -1 on error.
 */
static int group_common_shared_memory_tile(struct amp_ppcg_kernel *kernel,
                                           struct amp_array_info *array, int n,
                                           struct amp_array_ref_group **groups, struct amp_group_data *data) {
    int i, j;
    int recompute_overlap = 0;
    int any_merge;

    for (i = 0; i < n; i += !any_merge) {
        any_merge = 0;
        if (!groups[i]->shared_tile)
            continue;
        for (j = n - 1; j > i; --j) {
            struct amp_array_ref_group *group;

            if (!groups[j]->shared_tile)
                continue;

            if (!depth_accesses_overlap(groups[i], groups[j]))
                continue;

            group = join_groups(groups[i], groups[j]);
            if (compute_group_bounds(kernel, group, data) < 0) {
                amp_array_ref_group_free(group);
                return -1;
            }
            if (!group->shared_tile || !smaller_tile(group->shared_tile, groups[i]->shared_tile, groups[j]->shared_tile)) {
                amp_array_ref_group_free(group);
                continue;
            }

            any_merge = 1;
            if (group->min_depth < groups[i]->min_depth ||
                group->min_depth < groups[j]->min_depth)
                recompute_overlap = 1;
            amp_array_ref_group_free(groups[i]);
            amp_array_ref_group_free(groups[j]);
            groups[i] = group;
            if (j != n - 1)
                groups[j] = groups[n - 1];
            n--;
        }
    }

    if (recompute_overlap)
        n = group_depth_overlapping_writes(kernel, n, groups, data);
    return n;
}

/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 * Return -1 on error.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * We first perform an initial grouping based only on the access relation.
 * After computing shared and private memory tiles, we check for
 * overlapping writes again, but this time taking into account
 * the depth of the effective tile.
 *
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 * the two groups.
 *
 * If the array contains structures, then we compute a single
 * reference group without trying to find any tiles
 * since we do not map such arrays to private or shared
 * memory.  The only exception is when those arrays of structures
 * are required to be mapped to private memory.
 */
static int amp_group_array_references(struct amp_ppcg_kernel *kernel,
                                      struct amp_local_array_info *local, struct amp_group_data *data) {

    int i;
    int n;

    // struct amp_array_ref_group **groups_read;
    // struct amp_array_ref_group **groups_write;
    isl_ctx *ctx = isl_union_map_get_ctx(data->shared_sched);
    struct amp_array_ref_group **groups;

    /* One (initially NULL) slot per array reference. */
    groups = isl_calloc_array(ctx, struct amp_array_ref_group *, local->array->n_ref);
    if (!groups)
        return -1;

    n = populate_array_references(local, groups, data);

    /* Arrays of structures get a single group and no tiles. */
    if (local->array->has_compound_element) {
        n = join_all_groups(n, groups);
        set_array_groups(local, n, groups);
        return 0;
    }

    n = group_overlapping_writes(kernel, n, groups, data);

    /* A failure sets n to -1; the remaining grouping calls then
     * simply pass the error value through. */
    for (i = 0; i < n; ++i)
        if (compute_group_bounds(kernel, groups[i], data) < 0)
            n = -1;

    n = group_depth_overlapping_writes(kernel, n, groups, data);

    n = group_common_shared_memory_tile(kernel, local->array, n, groups, data);

    set_array_groups(local, n, groups);

#ifdef DEBUG_AMP_GROUP_ARRAY_REFERENCES
    fprintf(stderr, "@DEBUG: \n       the group information in in end of amp_group_array_references is on the below:\n");
    for (i = 0; i < n; ++i) {
        fprintf(stderr, "           in amp_group_array_references structs, the index is %d.           the group is:\n", i);
        amp_array_ref_group_dump(groups[i]);
        fprintf(stderr, "\n\n");
    }
#endif // DEBUG_AMP_GROUP_ARRAY_REFERENCES

    if (n >= 0)
        return 0;

    // for (i = 0; i < local->array->n_ref; ++i)
    //     amp_array_ref_group_free(groups[i]);
    /* NOTE(review): the cleanup loop above (present in ppcg's
     * gpu_group.c counterpart) is commented out, so the reference
     * groups leak on this error path — TODO confirm whether it was
     * disabled to work around a double free and restore it if not. */
    return -1;
}

/* Group references of all arrays in "kernel".
 * "node" points to the kernel mark.
 * The mapping to shared memory in computed at the "shared" mark.
 *
 * We first extract all required schedule information into
 * a gpu_group_data structure and then consider each array
 * in turn.
 *
 * Return 0 on success and a negative value on error.
 */
int amp_group_references(struct amp_ppcg_kernel *kernel,
                         __isl_keep isl_schedule_node *node) {
    // #define DEBUG_AMP_GROUP_REFERENCES

    int i;
    int r = 0;
    isl_union_pw_multi_aff *contraction;
    struct amp_group_data data;

#ifdef DEBUG_AMP_GROUP_REFERENCES
    fprintf(stderr, "@DEBUG: \n       in amp_group_references, node is :\n");
    isl_schedule_node_dump(node);
    fprintf(stderr, "\n\n");
#endif // DEBUG_AMP_GROUP_REFERENCES
    // check_can_be_private_live_ranges(kernel, node);

    data.scop = kernel->prog->scop;

    /* Schedule depth and prefix schedule at the kernel mark itself. */
    data.kernel_depth = isl_schedule_node_get_schedule_depth(node);
    data.host_sched = isl_schedule_node_get_prefix_schedule_relation(node);

    /* Work on a private copy; the caller keeps ownership of "node". */
    node = isl_schedule_node_copy(node);
    node = amp_tree_move_down_to_shared(node, kernel->core);
    data.shared_depth = isl_schedule_node_get_schedule_depth(node);
    data.shared_sched = prefix_with_equalities(node);

#ifdef DEBUG_AMP_GROUP_REFERENCES
    fprintf(stderr, "@DEBUG: \n       data.shared_sched is :\n");
    isl_union_map_dump(data.shared_sched);
    fprintf(stderr, "\n\n");
#endif // DEBUG_AMP_GROUP_REFERENCES
    node = amp_tree_move_down_to_thread(node, kernel->core);
    node = isl_schedule_node_child(node, 0);
    data.thread_depth = isl_schedule_node_get_schedule_depth(node);
    data.n_thread = isl_schedule_node_band_n_member(node);
    /* Reuse the shared schedule when no dimensions were added
     * between the "shared" and "thread" marks. */
    if (data.thread_depth == data.shared_depth)
        data.copy_sched = isl_union_map_copy(data.shared_sched);
    else
        data.copy_sched = prefix_with_equalities(node);
    data.thread_sched = isl_union_map_copy(data.copy_sched);
    data.thread_sched = isl_union_map_flat_range_product(data.thread_sched, isl_schedule_node_band_get_partial_schedule_union_map(node));
    data.thread_sched = isl_union_map_detect_equalities(data.thread_sched);

    /* Undo any statement-instance contraction so that the schedules
     * are expressed in terms of the original statement instances. */
    contraction = isl_union_pw_multi_aff_copy(kernel->contraction);
    data.host_sched = expand(data.host_sched, contraction);
    data.shared_sched = expand(data.shared_sched, contraction);
    if (data.thread_depth == data.shared_depth) {
        isl_union_map_free(data.copy_sched);
        data.copy_sched = isl_union_map_copy(data.shared_sched);
    } else {
        data.copy_sched = expand(data.copy_sched, contraction);
    }
    data.thread_sched = expand(data.thread_sched, contraction);
    isl_union_pw_multi_aff_free(contraction);

    node = isl_schedule_node_child(node, 0);
    // data.full_sched = isl_union_map_copy(data.shared_sched);
    data.full_sched = isl_union_map_copy(data.thread_sched);
    data.full_sched = isl_union_map_flat_range_product(data.full_sched, isl_schedule_node_get_subtree_schedule_union_map(node));
    isl_schedule_node_free(node);

    // compute_privatization(&data, kernel);

    /* Group the references of each array in turn; stop at the
     * first failure and propagate it. */
    for (i = 0; i < kernel->n_array; ++i) {
        r = amp_group_array_references(kernel, &kernel->array[i], &data);
        if (r < 0)
            break;
    }

    isl_union_map_free(data.host_sched);
    isl_union_map_free(data.shared_sched);
    isl_union_map_free(data.copy_sched);
    isl_union_map_free(data.thread_sched);
    isl_union_map_free(data.full_sched);
    // isl_set_free(data.privatization);

    return r;
}

/* Replace "pa" by the zero function defined over the universe domain
 * in the space of "pa".  "pa" is consumed.
 */
static __isl_give isl_pw_aff *set_universally_zero(__isl_take isl_pw_aff *pa) {
    isl_space *domain_space;
    isl_local_space *ls;

    domain_space = isl_space_domain(isl_pw_aff_get_space(pa));
    isl_pw_aff_free(pa);
    ls = isl_local_space_from_space(domain_space);

    return isl_pw_aff_from_aff(isl_aff_zero_on_domain(ls));
}

/* The sizes of the arrays on the host that have been computed by
 * extract_array_info may depend on the parameters.  Use the extra
 * constraints on the parameters that are valid at "host_domain"
 * to simplify these expressions and store the results in kernel->array.
 *
 * We only need these localized bounds for arrays that are accessed
 * by the current kernel.  If we have found at least one reference group
 * then the array is accessed by the kernel.
 *
 * The resulting sizes may be functions that are nowhere defined
 * in case the access function cannot possibly access anything inside
 * the kernel for some reason.  If so, they are replaced by the zero
 * function.  Since the access function cannot actually access anything,
 * there is no harm in printing the array sizes as zero.
 */
static void localize_bounds(struct amp_ppcg_kernel *kernel,
                            __isl_keep isl_set *host_domain) {
    int i, j;
    isl_set *params;

    /* The parameter constraints valid at the host domain. */
    params = isl_set_params(isl_set_copy(host_domain));

    for (i = 0; i < kernel->n_array; ++i) {
        struct amp_local_array_info *local = &kernel->array[i];
        isl_multi_pw_aff *bound;
        int n_index;

        /* Arrays without reference groups are not accessed here. */
        if (local->n_group == 0)
            continue;

        n_index = local->array->n_index;
        bound = isl_multi_pw_aff_copy(local->array->bound);

        for (j = 0; j < n_index; ++j) {
            isl_pw_aff *size_j;
            int empty;

            size_j = isl_multi_pw_aff_get_pw_aff(bound, j);
            size_j = isl_pw_aff_gist(size_j, isl_set_copy(params));
            empty = isl_pw_aff_is_empty(size_j);
            if (empty < 0)
                size_j = isl_pw_aff_free(size_j);
            else if (empty)
                size_j = set_universally_zero(size_j);
            bound = isl_multi_pw_aff_set_pw_aff(bound, j, size_j);
        }

        local->n_index = n_index;
        local->bound = bound;
    }
    isl_set_free(params);
}

/* Mark all arrays of "kernel" that have an array reference group
 * that is not mapped to private or shared memory as
 * accessing the corresponding global device memory.
 */
static void mark_data_copy_arrays(struct amp_ppcg_kernel *kernel) {
    int i, j;

    for (i = 0; i < kernel->n_array; ++i) {
        struct amp_local_array_info *local = &kernel->array[i];
        int needs_global = 0;

        if (local->global)
            continue;
        /* A single untiled group forces global memory access. */
        for (j = 0; j < local->n_group && !needs_global; ++j)
            needs_global = !amp_array_ref_group_tile(local->groups[j]);
        if (needs_global) {
            local->global = 1;
            local->array->global = 1;
        }
    }
}

/* Given a description of an array tile "tile" and the "space"
 *
 *	{ D -> A }
 *
 * where D represents the first tile->depth schedule dimensions
 * and A represents the array, construct an isl_multi_aff
 *
 *	{ [D[i] -> A[a]] -> A'[a'] }
 *
 * with A' a scaled down copy of A according to the shifts and strides
 * in "tile".  In particular,
 *
 *	a' = (a + shift(i))/stride
 *
 * "insert_array" represents
 *
 *	{ [D -> A] -> D }
 *
 * and is used to insert A into the domain of functions that only
 * reference D.
 */
static __isl_give isl_multi_aff *strided_tile(
    struct amp_array_tile *tile, __isl_keep isl_space *space,
    __isl_keep isl_multi_aff *insert_array) {
    int i;
    isl_multi_aff *shift;
    isl_multi_val *stride;
    isl_space *space2;
    isl_multi_aff *tiling;

    /* The unused "ctx" and local-space ("ls") computations of the
     * original have been dropped: they were allocated and freed
     * without ever being used. */
    space2 = isl_space_range(isl_space_copy(space));
    stride = isl_multi_val_zero(space2);
    shift = isl_multi_aff_zero(isl_space_copy(space));

    /* Collect the per-dimension shifts and strides from "tile". */
    for (i = 0; i < tile->n; ++i) {
        struct amp_array_bound *bound = &tile->bound[i];

        stride = isl_multi_val_set_val(stride, i,
                                       isl_val_copy(bound->stride));
        shift = isl_multi_aff_set_aff(shift, i,
                                      isl_aff_copy(bound->shift));
    }

    /* Turn { D -> shift } into { [D -> A] -> shift }. */
    shift = isl_multi_aff_pullback_multi_aff(shift,
                                             isl_multi_aff_copy(insert_array));

    /* a' = (a + shift(i)) / stride */
    tiling = isl_multi_aff_range_map(isl_space_copy(space));
    tiling = isl_multi_aff_add(tiling, shift);
    tiling = isl_multi_aff_scale_down_multi_val(tiling, stride);

    return tiling;
}

// /* Print the name of the local copy of a given group of array references.
//  */
// __isl_give isl_printer *amp_array_ref_group_print_name(
//     struct amp_array_ref_group *group, __isl_take isl_printer *p) {
//     int global = 0;
//     enum ppcg_group_access_type type;

//     type = amp_array_ref_group_type(group);
//     if (type == ppcg_access_shared)
//         p = isl_printer_print_str(p, "amp_lower_");
//     else {
//         global = 1;
//         p = isl_printer_print_str(p, "amp_lower_");
//     }

//     p = isl_printer_print_str(p, group->array->name);
//     if (!global && group->local_array->n_group > 1) {
//         p = isl_printer_print_str(p, "_");
//         p = isl_printer_print_int(p, group->nr);
//     }

//     return p;
// }

/* Print the name of the local copy of a given group of array references.
 *
 * Groups mapped to shared memory for the first two arrays ("i" < 2)
 * get the "amp_lower_" prefix; all other groups print the plain
 * array name.  If the local array has more than one group, the
 * group number is appended to disambiguate the copies.
 *
 * The dead "global" flag of the original (its assignment was
 * commented out, so it was always 0) and the no-op empty-string
 * print have been removed; the printed output is unchanged.
 */
__isl_give isl_printer *amp_array_ref_group_print_name(
    struct amp_array_ref_group *group, __isl_take isl_printer *p, int i) {
    enum ppcg_group_access_type type;

    type = amp_array_ref_group_type(group);
    if (type == ppcg_access_shared && i < 2)
        p = isl_printer_print_str(p, "amp_lower_");

    p = isl_printer_print_str(p, group->array->name);
    if (group->local_array->n_group > 1) {
        p = isl_printer_print_str(p, "_");
        p = isl_printer_print_int(p, group->nr);
    }

    return p;
}

/* Compute a tiling for the array reference group "group".
 *
 * The tiling is of the form
 *
 *	{ [D[i] -> A[a]] -> T[t] }
 *
 * where D represents the first tile->depth schedule dimensions,
 * A represents the global array and T represents the shared or
 * private memory tile.  The name of T is the name of the local
 * array.
 *
 * If there is any stride in the accesses, then the mapping is
 *
 *	t = (a + shift(i))/stride - lb(i)
 *
 * otherwise, it is simply
 *
 *	t = a - lb(i)
 *
 * "i" is the index of the array within the kernel and is forwarded
 * to amp_array_ref_group_print_name, which uses it to decide on
 * the name of T.  The result is stored in tile->tiling.
 */
static void amp_array_ref_group_compute_tiling(struct amp_array_ref_group *group, int i) {
    // #define DEBUG_AMP_ARRAY_REFGROUP_COMPUTE_TILING

    int k;
    struct amp_array_tile *tile;
    isl_space *space;
    isl_multi_aff *tiling, *lb, *insert_array;
    isl_printer *p;
    char *local_name;

    /* Nothing to compute for groups without a tile. */
    tile = amp_array_ref_group_tile(group);
    if (!tile) {
        // fprintf(stderr, "@WARN_INFO: \n       in the amp_array_ref_group_compute_tiling        function, the tile of group is null !!! please notice! the group is:         \n");
        fprintf(stderr, "@WARN_INFO: \n       in the         amp_array_ref_group_compute_tiling function, the tile of group is null         !!! please notice! the group is: \n");
        amp_array_ref_group_dump(group);
        // amp_array_tile_dump(tile);
        fprintf(stderr, "\n\n");
        return;
    }

    /* Build the space { D -> A } with tile->depth input dimensions. */
    space = isl_map_get_space(group->access);
    space = isl_space_from_range(isl_space_range(space));
    space = isl_space_add_dims(space, isl_dim_in, tile->depth);
    insert_array = isl_multi_aff_domain_map(isl_space_copy(space));

    /* Use the strided form only if some dimension has a shift. */
    for (k = 0; k < tile->n; ++k)
        if (tile->bound[k].shift)
            break;

    if (k < tile->n)
        tiling = strided_tile(tile, space, insert_array);
    else
        tiling = isl_multi_aff_range_map(isl_space_copy(space));


    /* Subtract the per-dimension lower bounds lb(i). */
    lb = isl_multi_aff_zero(space);
    for (k = 0; k < tile->n; ++k) {
        isl_aff *lb_i = isl_aff_copy(tile->bound[k].lb);
        lb = isl_multi_aff_set_aff(lb, k, lb_i);
    }
    lb = isl_multi_aff_pullback_multi_aff(lb, insert_array);

    if (lb)
        tiling = isl_multi_aff_sub(tiling, lb);

    /* Name the tile T after the local copy of the array. */
    p = isl_printer_to_str(isl_multi_aff_get_ctx(tiling));
    p = amp_array_ref_group_print_name(group, p, i);
    local_name = isl_printer_get_str(p);
    isl_printer_free(p);
    tiling = isl_multi_aff_set_tuple_name(tiling, isl_dim_out, local_name);
    free(local_name);

    tile->tiling = tiling;
}

/* Compute a tiling for all the array reference groups in "kernel".
 * The array index is forwarded so the tile name can depend on
 * the array's position within the kernel.
 */
static void compute_group_tilings(struct amp_ppcg_kernel *kernel) {
    int i, j;

    for (i = 0; i < kernel->n_array; ++i) {
        struct amp_local_array_info *array = &kernel->array[i];

        for (j = 0; j < array->n_group; ++j)
            amp_array_ref_group_compute_tiling(array->groups[j], i);
    }
}

/* Move down from the "kernel" mark (or at least a node with schedule
 * depth smaller than or equal to "depth") to a band node at schedule
 * depth "depth".  The "thread" mark is assumed to have a schedule
 * depth greater than or equal to "depth".  The branch containing the
 * "thread" mark is identified by the domain elements in "core".
 *
 * If the desired schedule depth is in the middle of band node,
 * then the band node is split into two pieces, the second piece
 * at the desired schedule depth.
 */
__isl_give isl_schedule_node *amp_tree_move_down_to_depth(
    __isl_take isl_schedule_node *node, int depth,
    __isl_keep isl_union_set *core) {

    while (node && isl_schedule_node_get_schedule_depth(node) < depth) {
        if (isl_schedule_node_get_type(node) == isl_schedule_node_band) {
            int band_start = isl_schedule_node_get_schedule_depth(node);
            int band_size = isl_schedule_node_band_n_member(node);

            /* Split the band when "depth" falls strictly inside it. */
            if (band_start + band_size > depth)
                node = isl_schedule_node_band_split(node,
                                                    depth - band_start);
        }
        node = core_child(node, core);
    }

    return node;
}

/* Return the union of all tagged access relations in the group.
 */
static __isl_give isl_union_map *amp_group_tagged_access_relation(
    struct amp_array_ref_group *group) {
    isl_union_map *result;
    int i;

    result = isl_union_map_empty(isl_map_get_space(group->access));
    for (i = 0; i < group->n_ref; ++i) {
        isl_union_map *tagged_i;

        tagged_i = isl_union_map_from_map(
            isl_map_copy(group->refs[i]->tagged_access));
        result = isl_union_map_union(result, tagged_i);
    }

    return result;
}

/* Given a set of wrapped references "ref", return the corresponding
 * access relations based on the tagged access relations "tagged".
 *
 * The elements of "ref" are of the form
 *
 *	[D -> R]
 *
 * with D an iteration domains and R a reference.
 * The elements of "tagged" are of the form
 *
 *	[D -> R] -> A
 *
 * with A an array.
 *
 * Extend "tagged" to include the iteration domain in the range, i.e.,
 *
 *	[D -> R] -> [D -> A]
 *
 * apply the result to "ref" and then unwrap the resulting set
 * to obtain relations of the form
 *
 *	D -> A
 */
static __isl_give isl_union_map *wrapped_reference_to_access(
    __isl_take isl_union_set *ref, __isl_take isl_union_map *tagged) {
    isl_union_map *extend;

    /* Build { [D -> R] -> [D -> A] } from "tagged". */
    extend = isl_union_map_universe(isl_union_map_copy(tagged));
    extend = isl_union_set_unwrap(isl_union_map_domain(extend));
    extend = isl_union_map_domain_map(extend);
    extend = isl_union_map_range_product(extend, tagged);

    ref = isl_union_set_coalesce(ref);
    ref = isl_union_set_apply(ref, extend);

    return isl_union_set_unwrap(ref);
}

/* Given an access relation "access" from one or more array reference groups,
 * remove those reads if ("read" is 1) or writes (if "read" is 0)
 * that are only needed to communicate data within
 * the same iteration of "sched".
 * The domain of "sched" corresponds to the original statement instances,
 * i.e., those that appear in the domains of the access relations.
 * "tagged" contains all tagged access relations to all
 * the array reference groups accessed by "access" from statement
 * instances scheduled by "sched".
 *
 * If the access is a read then it is either an element of
 *
 *	live_in union (range flow)
 *
 * where live_in and flow may be overapproximations, or
 * it reads an uninitialized value (that is not live-in because
 * there is an intermediate kill) or it reads a value that was
 * written within the same (compound) statement instance.
 * If the access is a write then it is either an element of
 *
 *	live_out union (domain flow)
 *
 * or it writes a value that is never read (and is not live-out
 * because of an intermediate kill) or only
 * within the same (compound) statement instance.
 * In both cases, the access relation is also a subset of
 * the group access relation.
 *
 * The cases where an uninitialized value is read or a value is written
 * that is never read or where the dataflow occurs within a statement
 * instance are also considered local and may also be removed.
 *
 * Essentially, we compute the intersection of "access" with either
 *
 *	live_in union (range non-local-flow)
 *
 * or
 *
 *	live_out union (domain non-local-flow)
 *
 * We first construct a relation "local"
 *
 *	[[D -> R] -> [D' -> R']]
 *
 * of pairs of domain iterations accessing the reference group
 * and references in the group that are coscheduled by "sched".
 *
 * If this relation does not intersect the dataflow dependences,
 * then there is nothing we can possibly remove, unless the dataflow
 * dependences themselves only relate a subset of the accesses.
 * In particular, the accesses may not be involved in any dataflow
 * dependences, either because they are uninitialized reads/dead writes
 * or because the dataflow occurs inside a statement instance.
 *
 * Since the computation below may break up the access relation
 * into smaller pieces, we only perform the intersection with
 * the non-local dependent accesses if the local pairs
 * intersect the dataflow dependences.  Otherwise, we intersect
 * with the universe of the non-local dependent accesses.
 * This should at least remove accesses from statements that
 * do not participate in any dependences.
 *
 * In particular, we remove the "local" dataflow dependences from
 * the set of all dataflow dependences, or at least those
 * that may contribute to a domain/range that intersects
 * the domain of "access".
 * Note that if the potential dataflow dependences are an overapproximation
 * of the actual dataflow dependences, then the result remains an
 * overapproximation of the non-local dataflow dependences.
 * Copying to/from global memory is only needed for the references
 * in the domain/range of the result or for accesses that are live out/in
 * for the entire scop.
 *
 * We therefore map the domain/range of the "external" relation
 * to the corresponding access relation and take the union with
 * the live out/in relation.
 */
static __isl_give isl_union_map *amp_remove_local_accesses(
    struct amp_prog *prog, __isl_take isl_union_map *tagged,
    __isl_take isl_union_map *access, __isl_take isl_union_map *sched,
    int read) {
    // #define DEBUG_AMP_REMOVE_LOCAL_ACCESSES

    int empty;
    isl_union_pw_multi_aff *tagger;
    isl_union_set *domain, *access_domain;
    isl_union_map *local, *external, *universe;
    isl_union_set *tag_set;
    struct ppcg_scop *ps = prog->scop;

    /* Nothing to remove; free the unused arguments and return. */
    if (isl_union_map_is_empty(access)) {
        isl_union_map_free(sched);
        isl_union_map_free(tagged);
        return access;
    }

#ifdef DEBUG_AMP_REMOVE_LOCAL_ACCESSES
    fprintf(stderr, "@DEBUG: \n       in start of amp_remove_local_accesses(no group), the access is:\n");
    isl_union_map_dump(access);
    fprintf(stderr, "\n\n");
#endif // DEBUG_AMP_REMOVE_LOCAL_ACCESSES

    // compute_live_out(ps);

    /* Move "sched" to the tagged instances appearing in "tagged". */
    tagger = isl_union_pw_multi_aff_copy(ps->tagger);
    domain = isl_union_map_domain(isl_union_map_copy(tagged));
    tagger = isl_union_pw_multi_aff_intersect_domain(tagger, isl_union_set_copy(domain));
    sched = isl_union_map_preimage_domain_union_pw_multi_aff(sched, tagger);

    /* "local": pairs of tagged accesses coscheduled by "sched"
     * that are related by dataflow. */
    local = isl_union_map_apply_range(sched, isl_union_map_reverse(isl_union_map_copy(sched)));
    local = isl_union_map_intersect(local, isl_union_map_copy(ps->tagged_dep_flow));

    empty = isl_union_map_is_empty(local);

    /* "external": dataflow dependences restricted to the accesses
     * under consideration, minus the local ones. */
    external = isl_union_map_copy(ps->tagged_dep_flow);
    universe = isl_union_map_universe(isl_union_map_copy(access));
    access_domain = isl_union_map_domain(universe);
    domain = isl_union_set_universe(domain);
    universe = isl_union_set_unwrap(domain);
    universe = isl_union_map_intersect_domain(universe, access_domain);
    domain = isl_union_map_wrap(universe);
    if (read)
        external = isl_union_map_intersect_range(external, domain);
    else
        external = isl_union_map_intersect_domain(external, domain);
    external = isl_union_map_intersect_params(external, isl_set_copy(ps->context));
    external = isl_union_map_subtract(external, local);

#ifdef DEBUG_AMP_REMOVE_LOCAL_ACCESSES
    fprintf(stderr, "@DEBUG: \n       in the prog->scop, the live_in and live_out is on the below :\n");
    isl_union_map_dump(ps->live_in);
    isl_union_map_dump(ps->live_out);
    fprintf(stderr, "\n\n");
#endif // DEBUG_AMP_REMOVE_LOCAL_ACCESSES

    /* Map the external dependences back to access relations and
     * add the accesses that are live-in/live-out for the scop. */
    if (read) {
        tag_set = isl_union_map_range(external);
        external = wrapped_reference_to_access(tag_set, tagged);
        external = isl_union_map_union(external, isl_union_map_copy(ps->live_in));
    } else {
        tag_set = isl_union_map_domain(external);
        external = wrapped_reference_to_access(tag_set, tagged);
        external = isl_union_map_union(external, isl_union_map_copy(ps->live_out));
    }

    /* If there were no local pairs at all, keep every access
     * (intersect with the universe); propagate isl errors. */
    if (empty < 0)
        external = isl_union_map_free(external);
    else if (empty)
        external = isl_union_map_universe(external);

    access = isl_union_map_intersect(access, external);

    return access;
}

/* Given an access relation "access" from "group", remove those reads
 * if ("read" is 1) or writes (if "read" is 0) that are only needed to
 * communicate data within the same iteration of the schedule "prefix"
 * at the position where the copying of the group is inserted.
 * That is, the output dimension of "prefix"
 * is equal to tile->depth.
 * The domain of "prefix" corresponds to the original statement instances,
 * i.e., those that appear in the domains of the access relations.
 *
 * Extract the tagged access relation of "group" and
 * then call remove_local_accesses.
 */
static __isl_give isl_union_map *amp_remove_local_accesses_group(
    struct amp_ppcg_kernel *kernel, struct amp_array_ref_group *group,
    __isl_take isl_union_map *access, __isl_keep isl_union_map *prefix,
    int read) {
    // #define DEBUG_AMP_REMOVE_LOCAL_ACCESSES_GROUP
    isl_union_map *sched, *tagged;

    /* An empty access relation needs no filtering. */
    if (isl_union_map_is_empty(access))
        return access;

#ifdef DEBUG_AMP_REMOVE_LOCAL_ACCESSES_GROUP
    fprintf(stderr, "@DEBUG: \n       in start of amp_remove_local_accesses_group, the access is:\n");
    isl_union_map_dump(access);
    fprintf(stderr, "\n\n");
#endif
    tagged = amp_group_tagged_access_relation(group);
    /* amp_remove_local_accesses consumes its "sched" argument,
     * so hand it a copy of the kept "prefix". */
    sched = isl_union_map_copy(prefix);

#ifdef DEBUG_AMP_REMOVE_LOCAL_ACCESSES_GROUP
    fprintf(stderr, "@DEBUG: \n       the tagged is:\n");
    isl_union_map_dump(tagged);
    fprintf(stderr, "\n       the sched is:\n");
    isl_union_map_dump(sched);
    fprintf(stderr, "\n\n");
#endif

    return amp_remove_local_accesses(kernel->prog, tagged, access, sched, read);
}

/* Return a read ("read" is 1) or write access relation for "group"
 * with those accesses removed that are only needed to communicate data
 * within the subtree of the schedule rooted at "node".
 * Furthermore, include the prefix schedule at "node".
 * That is, return a relation of the form
 *
 *	S -> [D -> A]
 *
 * with D the outer schedule dimensions at "node".
 */
static __isl_give isl_union_map *anchored_non_local_accesses(
    struct amp_ppcg_kernel *kernel, struct amp_array_ref_group *group,
    __isl_take isl_schedule_node *node, int read) {
    // #define DEBUG_ANCHORED_NON_LOCAL_ACCESSES

    isl_union_map *access;
    isl_union_map *prefix;

#ifdef DEBUG_ANCHORED_NON_LOCAL_ACCESSES
    fprintf(stderr, "@DEBUG: \n       anchored_non_local_accesses, the node is:\n");
    isl_schedule_node_dump(node);
    fprintf(stderr, "\n       the read's value is: %d \n\n", read);
#endif // DEBUG_ANCHORED_NON_LOCAL_ACCESSES

    /* Prefix schedule D at "node": maps statement instances to the
     * outer schedule dimensions. */
    prefix = isl_schedule_node_get_prefix_schedule_relation(node);
#ifdef DEBUG_ANCHORED_NON_LOCAL_ACCESSES
    fprintf(stderr, "@DEBUG: \n       the prefix from current node is:\n");
    isl_union_map_dump(prefix);
    fprintf(stderr, "\n\n");
#endif // DEBUG_ANCHORED_NON_LOCAL_ACCESSES

    /* Express the prefix in terms of the original (uncontracted)
     * statement instances, matching the domains of the access relations. */
    prefix = isl_union_map_preimage_domain_union_pw_multi_aff(prefix, isl_union_pw_multi_aff_copy(kernel->contraction));
#ifdef DEBUG_ANCHORED_NON_LOCAL_ACCESSES
    fprintf(stderr, "@DEBUG: \n       after insert prefix(from current node) into the kernel->contraction,the result is:\n");
    isl_union_map_dump(prefix);
    fprintf(stderr, "\n\n");
#endif // DEBUG_ANCHORED_NON_LOCAL_ACCESSES

    /* Reads if "read" is set, writes otherwise. */
    access = amp_array_ref_group_access_relation(group, read, !read);
#ifdef DEBUG_ANCHORED_NON_LOCAL_ACCESSES
    fprintf(stderr, "@DEBUG: \n       all read (read = 1) and/or write (write = 1) access relations in the group, is:\n");
    isl_union_map_dump(access);
    fprintf(stderr, "\n       the read's value is: %d \n\n", read);
#endif // DEBUG_ANCHORED_NON_LOCAL_ACCESSES

    /* Drop accesses that only communicate data within a single
     * iteration of the prefix schedule. */
    access = amp_remove_local_accesses_group(kernel, group, access, prefix, read);
#ifdef DEBUG_ANCHORED_NON_LOCAL_ACCESSES
    fprintf(stderr, "@DEBUG: \n       after amp_remove_local_accesses_group, the access is:\n");
    isl_union_map_dump(access);
    fprintf(stderr, "\n\n");
#endif // DEBUG_ANCHORED_NON_LOCAL_ACCESSES

    /* Combine into S -> [D -> A]; "prefix" is consumed here. */
    access = isl_union_map_range_product(prefix, access);
#ifdef DEBUG_ANCHORED_NON_LOCAL_ACCESSES
    fprintf(stderr, "@DEBUG: \n       after isl_union_map_range_product function, the returned access is:\n");
    isl_union_map_dump(access);
    fprintf(stderr, "\n\n");
#endif // DEBUG_ANCHORED_NON_LOCAL_ACCESSES

    return access;
}

/* Add copy statements to the schedule tree of "node"
 * for reading from global memory to shared memory (if "read" is set) or
 * for writing back from shared memory to global memory
 * (if "read" is not set) for the array reference group "group" that
 * is mapped to shared memory.
 * On input, "node" points to the kernel node, and it is moved
 * back there on output.
 *
 * The copies are performed in the order of the corresponding shared
 * memory tile.
 * The copy statement instances include a reference to the outer
 * tile->depth dimensions of the kernel schedule for ease of
 * combining them with the group tiling.
 *
 * If we are performing a read from global memory to shared memory and
 * if the array involved is not a scalar, then we copy
 * the entire tile to shared memory.  This may result in some extra
 * elements getting copied, but it should lead to simpler code
 * (which means that fewer registers may be needed) and less divergence.
 *
 * Otherwise, we only copy the elements that will be read or have been written
 * in the kernel.
 *
 * That is, the extra schedule is of the form
 *
 *	type[D -> A] -> T
 *
 * where D corresponds to the outer tile->depth dimensions of
 * the kernel schedule, A to the global array and T is the corresponding
 * shared memory tile.
 *
 * The copying is inserted in the schedule tree through an extension
 * of the form
 *
 *	D -> type[D -> A]
 *
 * where the extra domain elements type[D -> A] are those accessed
 * by the group.  In the case of read from a non-scalar, this set
 * is replaced by the entire shared memory tile.
 *
 * If the "unroll_copy_shared" option is set, then the AST generator
 * is instructed to unroll the copying code.
 *
 * A filter is inserted on type[D -> A] to map the copy instances
 * to the threads.  In particular, the thread identifiers are
 * equated to the position inside the shared memory tile (T)
 * modulo the block size.
 * We try to align the innermost tile dimension with the innermost
 * thread identifier (x) as a heuristic to improve coalescing.
 * In particular, if the dimension of the tile is greater than
 * the dimension of the block, then the schedule mapping to the tile
 * is broken up into two pieces and the filter is applied to the inner part.
 * If, on the other hand, the dimension of the tile is smaller than
 * the dimension of the block, then the initial thread identifiers
 * are equated to zero and the remaining thread identifiers are
 * matched to the memory tile.
 *
 * The extension is inserted before the core computation in case of a read
 * and after the core computation in case of a write.
 * In the case of a read, we first need to make sure there is some
 * synchronization before the core computation such that we can put the read
 * from global memory to shared memory before that synchronization.
 * This ensures that all threads have finished copying into shared memory
 * before the shared memory is used.
 * We also need to make sure that there is a synchronization node after
 * the core computation to ensure that the next load into shared memory
 * only happens after all data has been used.  There is no need for
 * this synchronization if we are at the outer level since then there
 * won't be a next load.
 * In the case of a write, we need to make sure there is some synchronization
 * after the core computation such that we can put the write from shared
 * memory to global memory after that synchronization.
 * Unless we are at the outer level, we also need a synchronization node
 * after the write to ensure the data is saved to global memory
 * before the next iteration writes to the same shared memory.
 * It also makes sure the data has arrived in global memory before
 * it is read in a subsequent iteration.
 */
//  参考CUDA的add_copies_group_shared函数实现,同时又引入了和add_copies_group_private的对比

static __isl_give isl_schedule_node *amp_add_copies_group_shared(
    struct amp_ppcg_kernel *kernel, struct amp_array_ref_group *group,
    __isl_take isl_schedule_node *node, int read, int i) {
    struct amp_array_tile *tile;
    isl_union_map *access;
    isl_union_set *domain;
    isl_multi_aff *ma;
    isl_multi_aff *from_access;
    isl_multi_pw_aff *mpa;
    isl_multi_union_pw_aff *mupa;
    isl_schedule_node *graft;
    int empty;

    /* Copies are currently only inserted for reads and only for the
     * first two arrays (i < 2); in all other cases simply move back
     * up to the kernel node without modifying the tree. */
    if (!read || i >= 2)
        return amp_tree_move_up_to_kernel(node);

    tile = amp_array_ref_group_tile(group);

    /* Move down to the depth at which the copy is to be inserted. */
    node = amp_tree_move_down_to_depth(node, tile->depth, kernel->core);

    /* S -> [D -> A]: the accesses that need to be copied. */
    access = anchored_non_local_accesses(kernel, group, node, read);
    empty = isl_union_map_is_empty(access);
    if (empty < 0 || empty) {
        isl_union_map_free(access);
        if (empty < 0)
            return isl_schedule_node_free(node);
        return amp_tree_move_up_to_kernel(node);
    }
    group->array->global = 1;
    group->local_array->global = 1;

    /* [D -> A] -> read/write[D -> A]: wrap the accesses in copy statements. */
    from_access = create_from_access(kernel->ctx, group, read);

    /* Schedule of the copy statements: through the tile mapping if
     * there is one, directly through the access otherwise. */
    if (tile->tiling) {
        ma = isl_multi_aff_copy(tile->tiling);
        ma = isl_multi_aff_pullback_multi_aff(ma,
                                              isl_multi_aff_copy(from_access));
    } else {
        ma = isl_multi_aff_copy(from_access);
    }

    mpa = isl_multi_pw_aff_from_multi_aff(ma);
    mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);

    domain = isl_union_map_range(access);
    /* Last use of "from_access": hand over ownership instead of
     * copying (the original copied here and leaked the original). */
    domain = isl_union_set_preimage_multi_aff(domain, from_access);

    /* D -> type[D -> A]: the extension that introduces the copies. */
    access = isl_union_set_wrapped_domain_map(domain);
    access = isl_union_map_reverse(access);
    access = isl_union_map_coalesce(access);

    graft = isl_schedule_node_from_extension(access);
    graft = isl_schedule_node_child(graft, 0);
    /* Last use of "mupa": no copy needed (fixes a leak). */
    graft = isl_schedule_node_insert_partial_schedule(graft, mupa);

    if (kernel->options->unroll_copy_shared)
        graft = ppcg_set_schedule_node_type(graft, isl_ast_loop_unroll);

    /* Move back to the root of the graft before inserting it. */
    while (graft && isl_schedule_node_has_parent(graft))
        graft = isl_schedule_node_parent(graft);

    /* Insert the copy-in before the core computation. */
    node = amp_tree_move_down_to_shared(node, kernel->core);
    node = isl_schedule_node_graft_before(node, graft);

    return node;
}

// static __isl_give isl_schedule_node *amp_add_copies_group_shared(
//     struct amp_ppcg_kernel *kernel, struct amp_array_ref_group *group,
//     __isl_take isl_schedule_node *node, int read, int i) {
//     // #define DEBUG_AMP_ADD_COPIES_GROUP

//     struct amp_array_tile *tile;
//     isl_union_map *access;
//     isl_union_set *domain;
//     isl_multi_aff *ma;
//     isl_multi_aff *from_access;
//     isl_multi_pw_aff *mpa;
//     isl_multi_union_pw_aff *mupa;
//     isl_schedule_node *graft;
//     isl_union_set *filter;
//     isl_space *space;
//     int kernel_depth;
//     int empty;

//     // if (amp_array_is_scalar(group->array))
//     // {
//     //     return node;
//     // }

//     tile = amp_array_ref_group_tile(group);

//     kernel_depth = isl_schedule_node_get_schedule_depth(node);
//     node = amp_tree_move_down_to_depth(node, tile->depth, kernel->core);

//     access = anchored_non_local_accesses(kernel, group, node, read);
//     empty = isl_union_map_is_empty(access);
//     if (empty < 0 || empty) {
//         isl_union_map_free(access);
//         if (empty < 0)
//             return isl_schedule_node_free(node);
//         return amp_tree_move_up_to_kernel(node);
//     }


//     group->array->global = 1;
//     group->local_array->global = 1;

//     from_access = create_from_access(kernel->ctx, group, read);

//     if (tile->tiling) {
//         ma = isl_multi_aff_copy(tile->tiling);

//         ma = isl_multi_aff_pullback_multi_aff(ma, isl_multi_aff_copy(from_access));
//     }

//     else {
//         ma = isl_multi_aff_copy(from_access);
//     }

//     mpa = isl_multi_pw_aff_from_multi_aff(ma);


//     mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
//     domain = isl_union_map_range(access);

//     // 只对读生效,注释掉就正确了,后面再深究
//     // if (read && !amp_array_is_scalar(group->array)) {
//     // 	isl_map *map;
//     // 	isl_union_set_free(domain);
//     // 	map = group_tile(group);
//     // 	domain = isl_union_set_from_set(isl_map_wrap(map));
//     // }

//     domain = isl_union_set_preimage_multi_aff(domain, isl_multi_aff_copy(from_access));

//     // 基本上从下面开始就没办法修改了
//     access = isl_union_set_wrapped_domain_map(domain);
//     access = isl_union_map_reverse(access);
//     access = isl_union_map_coalesce(access);


//     graft = isl_schedule_node_from_extension(access);

//     graft = isl_schedule_node_child(graft, 0);
//     graft = isl_schedule_node_insert_partial_schedule(graft, isl_multi_union_pw_aff_copy(mupa));

//     if (kernel->options->unroll_copy_shared)
//         graft = ppcg_set_schedule_node_type(graft, isl_ast_loop_unroll);

//     while (graft && isl_schedule_node_has_parent(graft))
//         graft = isl_schedule_node_parent(graft);


//     if (read) {
//         node = amp_tree_move_down_to_shared(node, kernel->core);
//         // node = amp_tree_move_down_to_depth(node, kernel_depth, kernel->core);
//         node = isl_schedule_node_graft_before(node, graft);
//     } else {
//         node = amp_tree_move_down_to_shared(node, kernel->core);
//         // node = amp_tree_move_down_to_depth(node, kernel_depth, kernel->core);
//         node = isl_schedule_node_graft_after(node, graft);
//     }

//     return node;
// }

/* Check whether the array reference group "group" is mapped to
 * private or shared memory and, if so,
 * add copy statements to the schedule tree of "node"
 * for reading from global memory to private or shared memory
 * (if "read" is set) or for writing back from private or shared memory
 * to global memory (if "read" is not set) for this group.
 * On input, "node" points to the kernel node, and it is moved
 * back there on output.
 */
static __isl_give isl_schedule_node *amp_add_copies_group(
    struct amp_ppcg_kernel *kernel, struct amp_array_ref_group *group,
    __isl_take isl_schedule_node *node, int read, int i) {
    // #define DEBUG_AMP_ADD_COPIES_GROUP

    /* Only groups mapped to shared memory get copy statements;
     * everything else leaves the tree untouched. */
    if (amp_array_ref_group_type(group) != ppcg_access_shared)
        return node;

#ifdef DEBUG_AMP_ADD_COPIES_GROUP
    fprintf(stderr, "@DEBUG: \n       the shared (amp_add_copies_group) : group->array is:\n");
    amp_array_info_dump(group->array);
#endif // DEBUG_AMP_ADD_COPIES_GROUP

    return amp_add_copies_group_shared(kernel, group, node, read, i);
}

/* For each array reference group that is mapped to private or shared memory,
 * add copy statements to the schedule tree of "node"
 * for reading from global memory to private or shared memory
 * and for writing back.
 * On input, "node" points to the kernel node, and it is moved
 * back there on output.
 */
static __isl_give isl_schedule_node *amp_add_copies(struct amp_ppcg_kernel *kernel,
                                                    __isl_take isl_schedule_node *node) {
    int i, j, read;

    /* For every reference group of every local array, insert the
     * copy-in (read == 1) followed by the copy-out (read == 0). */
    for (i = 0; i < kernel->n_array; ++i) {
        struct amp_local_array_info *local = &kernel->array[i];

        for (j = 0; j < local->n_group; ++j) {
            for (read = 1; read >= 0; --read) {
                node = amp_add_copies_group(kernel, local->groups[j],
                                            node, read, i);
                if (!node)
                    return NULL;
            }
        }
    }

    return node;
}

/* Fill in "var" with the array, access type, printed name and
 * per-dimension size of the tile associated with "group".
 */
static void create_amp_kernel_var(isl_ctx *ctx, struct amp_array_ref_group *group,
                                  struct amp_ppcg_kernel_var *var, int i) {
    struct amp_array_tile *tile;
    isl_printer *p;
    int dim;

    var->array = group->array;
    var->type = amp_array_ref_group_type(group);
    tile = amp_array_ref_group_tile(group);

    /* Render the group name into a freshly allocated string. */
    p = amp_array_ref_group_print_name(group, isl_printer_to_str(ctx), i);
    var->name = isl_printer_get_str(p);
    isl_printer_free(p);

    /* One size entry per array dimension, taken from the tile bounds. */
    var->size = isl_vec_alloc(ctx, group->array->n_index);
    for (dim = 0; dim < group->array->n_index; ++dim)
        var->size = isl_vec_set_element_val(var->size, dim,
                                            isl_val_copy(tile->bound[dim].size));
}

static isl_stat create_amp_kernel_vars(struct amp_ppcg_kernel *kernel) {
    int i, j, n;

    n = 0;
    for (i = 0; i < kernel->n_array; ++i) {
        struct amp_local_array_info *array = &kernel->array[i];

        for (j = 0; j < array->n_group; ++j) {
            // struct amp_array_ref_group *group = array->groups[j];
            // enum ppcg_group_access_type type;

            // type = amp_array_ref_group_type(group);
            // if (type != ppcg_access_global)
            // ++n;
            ++n;
        }
    }

	kernel->var = isl_calloc_array(kernel->ctx, struct amp_ppcg_kernel_var, n);
    if (!kernel->var)
        return isl_stat_error;
    kernel->n_var = n;

    n = 0;
    for (i = 0; i < kernel->n_array; ++i) {
        struct amp_local_array_info *array = &kernel->array[i];

        for (j = 0; j < array->n_group; ++j) {
            struct amp_array_ref_group *group = array->groups[j];
            enum ppcg_group_access_type type;

            type = amp_array_ref_group_type(group);
            // if (type == ppcg_access_global)
            //     continue;
            create_amp_kernel_var(kernel->ctx, group, &kernel->var[n], i);
            ++n;
        }
    }

    return isl_stat_ok;
}

/* Create a ppcg_kernel representing the domain instances that reach "node"
 * and insert a mark node pointing to the ppcg_kernel before "node".
 * The band that "node" points to is the band that needs to be mapped
 * to block identifiers.  The band that needs to be mapped to thread
 * identifiers should be marked by a "thread" mark by the caller.
 * The linear branch between the current node and the "thread" mark
 * may also have a "shared" mark.  If present, the mapping to shared
 * memory is computed at that point.
 * Both marks are removed by this function.
 * If "scale" is set, then the band that "node" points to is scaled
 * by "sizes".
 *
 * Mark all outer band nodes as atomic to ensure each kernel is only
 * scheduled once.
 * If the domain elements that reach "node" live in more than one space,
 * then group the domain elements into a single space, named kernelX,
 * with X the kernel sequence number.
 *
 * Insert a guard node governing the kernel node to ensure that
 * no kernels with zero blocks are launched.
 *
 * Insert a context node describing the block and thread
 * identifiers inside the kernel mark.
 * The context node needs to be inserted after the effective block size
 * has been determined such that the bounds on the thread identifiers
 * would reflect the effective block size.
 * Insert a filter node inside the context node mapping the statement
 * instances to block identifiers.  In particular, the block identifiers
 * are equated to the partial schedule of band that was marked for mapping
 * to blocks modulo the grid size.
 * Insert a filter node inside the "thread" mark mapping the statement
 * instances to thread identifiers.  In particular, the thread identifiers
 * are equated to the partial schedule of band that was marked for mapping
 * to threads modulo the block size.
 *
 * Compute array reference groups for all arrays, set the local
 * array bounds based on the set of domain instances that reach
 * the kernel node, check the total amount of shared memory used
 * and compute all group tilings.
 * The array reference groups are computed after the block filter
 * has been inserted because it affects the mapping to shared or
 * private memory.  This computation also requires the thread filter
 * (in the ppcg_kernel object), but this thread filter should not
 * have been added to the schedule tree yet since the computation
 * requires the schedule of the band that needs to be mapped to
 * threads before the privatization is applied.
 *
 * If any array reference group requires the band mapped to threads
 * to be unrolled, then we perform the required unrolling.
 *
 * We save a copy of the schedule that may influence the mappings
 * to shared or private memory in kernel->copy_schedule.
 *
 * Finally, we add synchronization and copy statements to the schedule tree,
 * remove the "thread" mark and create representations for the local
 * variables in the kernel.
 *
 * We keep a copy of the isl_id that points to the kernel to ensure
 * that the kernel does not get destroyed if the schedule node
 * is freed due to some error condition.
 */
__isl_give isl_schedule_node *amp_create_kernel(struct amp_prog *prog, __isl_take isl_schedule_node *node) // __isl_keep isl_multi_val *sizes)
{
    struct amp_ppcg_kernel *kernel;
    isl_id *id;
    isl_schedule_node *node_thread; // NOTE(review): appears unused in this function
    isl_union_map *host_schedule;
    isl_union_pw_multi_aff *contraction;
    isl_set *host_domain;
    isl_union_set *domain, *expanded;
    int single_statement;


    /* Make sure there is a "shared" mark above the "thread" mark. */
    node = amp_tree_insert_shared_before_thread(node);

    if (!node)
        return NULL;

    /* Create the kernel object and its per-array local information. */
    kernel = isl_calloc_type(prog->ctx, struct amp_ppcg_kernel);
    kernel = amp_ppcg_kernel_create_local_arrays(kernel, prog);
    if (!kernel)
        return isl_schedule_node_free(node);

    domain = isl_schedule_node_get_domain(node);
    single_statement = isl_union_set_n_set(domain) == 1;

    kernel->ctx = prog->ctx;
    kernel->prog = prog;
    kernel->options = prog->scop->options;
    kernel->context = extract_context(node, prog);
    kernel->core = isl_union_set_universe(isl_union_set_copy(domain));
    /* Map contracted domain instances back to the original ones and
     * record which arrays the kernel accesses. */
    contraction = isl_schedule_node_get_subtree_contraction(node);
    kernel->contraction = isl_union_pw_multi_aff_copy(contraction);
    expanded = isl_union_set_copy(domain);
    expanded = isl_union_set_preimage_union_pw_multi_aff(expanded, contraction);
    kernel->expanded_domain = isl_union_set_copy(expanded);
    kernel->arrays = accessed_by_domain(expanded, prog);

    kernel->id = prog->kernel_id++;

    /* Host-side schedule range: used to localize the array bounds. */
    host_schedule = isl_schedule_node_get_prefix_schedule_union_map(node);
    host_domain = isl_set_from_union_set(isl_union_map_range(host_schedule));

    // insert the "amp_kernel" mark node (was: 插入mark结点)
    node = atomic_ancestors(node);
    node = isl_schedule_node_child(node, 0);
    id = isl_id_alloc(prog->ctx, "amp_kernel", kernel);
    id = isl_id_set_free_user(id, &amp_ppcg_kernel_free_wrap);
    node = isl_schedule_node_insert_mark(node, isl_id_copy(id));



    node = amp_tree_move_up_to_kernel(node);

    /* Compute array reference groups and the bounds/tilings they need. */
    if (amp_group_references(kernel, node) < 0)
        node = isl_schedule_node_free(node);
    localize_bounds(kernel, host_domain);
    isl_set_free(host_domain);

    mark_data_copy_arrays(kernel);
    compute_group_tilings(kernel);

    /* Save the schedule that influences the mapping to local memory,
     * expressed on the original (uncontracted) statement instances. */
    node = amp_tree_move_down_to_thread(node, kernel->core);
    kernel->copy_schedule_dim = isl_schedule_node_get_schedule_depth(node);
    kernel->copy_schedule = isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(node);
    contraction = isl_union_pw_multi_aff_copy(kernel->contraction);
    kernel->copy_schedule = isl_union_pw_multi_aff_pullback_union_pw_multi_aff(kernel->copy_schedule, contraction);


    /* Insert the copy statements, then remove the helper marks. */
    node = amp_tree_move_up_to_kernel(node);
    node = amp_add_copies(kernel, node);

    node = amp_tree_move_down_to_shared(node, kernel->core);
    node = isl_schedule_node_delete(node);

    node = amp_tree_move_down_to_thread(node, kernel->core);
    node = isl_schedule_node_delete(node);

    node = amp_tree_move_up_to_kernel(node);

    /* Create representations for the kernel's local variables. */
    if (create_amp_kernel_vars(kernel) < 0)
        node = isl_schedule_node_free(node);

    if (!single_statement)
        node = isl_schedule_node_parent(node);

    return node;
}

/* Can "node" be tiled and then mapped to block and thread identifiers?
 * That is, is it permutable with at least one coincident dimension?
 */
static isl_bool is_permutable(__isl_keep isl_schedule_node *node)
{
	if (!node)
		return isl_bool_error;

	/* A single short-circuited test: band node, permutable,
	 * at least one member and the first member coincident. */
	if (isl_schedule_node_get_type(node) != isl_schedule_node_band ||
	    !isl_schedule_node_band_get_permutable(node) ||
	    isl_schedule_node_band_n_member(node) < 1 ||
	    !isl_schedule_node_band_member_get_coincident(node, 0))
		return isl_bool_false;

	return isl_bool_true;
}

/* Is "node" not a suitably permutable band?
 */
static isl_bool not_permutable(__isl_keep isl_schedule_node *node, void *user)
{
	/* "user" is unused; the signature is dictated by the
	 * isl_schedule_node_every_descendant callback interface. */
	return isl_bool_not(is_permutable(node));
}

/* Does the subtree rooted at "node" have any suitably permutable band nodes?
 * That is, does it have any nodes that are permutable and that
 * have a least one coincident dimension?
 */
static isl_bool subtree_has_permutable_bands(__isl_keep isl_schedule_node *node)
{
	isl_bool none;

	/* "Some descendant is permutable" is the negation of
	 * "every descendant is not permutable". */
	none = isl_schedule_node_every_descendant(node, &not_permutable, NULL);
	return isl_bool_not(none);
}

/* Given a set or sequence node, return the union of the filters of
 * the initial (if "initial" is set) or final (if "initial" is not set)
 * direct subtrees that do not contain any suitably permutable bands
 * (according to subtree_has_permutable_bands).
 * In the case of a set node, the subtrees can be arbitrarily reordered
 * so any subtree is considered potentially initial and final.
 *
 * If the child of a subtree containing suitably permutable bands
 * is itself a set or sequence node, then recursively collect
 * further initial/final subtrees.
 * In the case of an outer sequence node, only do this for the first/last child
 * containing suitably permutable bands.
 */
static __isl_give isl_union_set *get_non_parallel_subtree_filters(
	__isl_keep isl_schedule_node *node, int initial)
{
	isl_space *space;
	isl_union_set *filter;
	int i, n;
	int in_order;
	enum isl_schedule_node_type type;

	type = isl_schedule_node_get_type(node);
	if (type < 0)
		return NULL;
	/* Anything other than a set or sequence has no filters to collect. */
	if (type != isl_schedule_node_set && type != isl_schedule_node_sequence)
		return isl_union_set_empty_ctx(isl_schedule_node_get_ctx(node));
	/* Only a sequence fixes the order of its children; a set's
	 * children may be reordered, so all of them are considered. */
	in_order = type == isl_schedule_node_sequence;

	n = isl_schedule_node_n_children(node);
	if (n < 0)
		return NULL;

	/* Start from an empty filter in the space of the first child's
	 * filter; results are accumulated into it below. */
	node = isl_schedule_node_copy(node);
	node = isl_schedule_node_child(node, 0);
	filter = isl_schedule_node_filter_get_filter(node);
	node = isl_schedule_node_parent(node);
	space = isl_union_set_get_space(filter);
	isl_union_set_free(filter);
	filter = isl_union_set_empty(space);

	for (i = 0; i < n; ++i) {
		isl_bool parallelism;
		/* Walk children front-to-back or back-to-front. */
		int child = initial ? i : n - 1 - i;

		node = isl_schedule_node_child(node, child);
		parallelism = subtree_has_permutable_bands(node);
		if (parallelism < 0) {
			filter = isl_union_set_free(filter);
		} else if (!parallelism) {
			/* No permutable bands: collect this child's filter. */
			isl_union_set *filter_i;
			filter_i = isl_schedule_node_filter_get_filter(node);
			filter = isl_union_set_union(filter, filter_i);
		} else {
			/* Child has permutable bands: recurse into its
			 * subtree in case it is itself a set/sequence. */
			isl_union_set *filter_i;

			node = isl_schedule_node_child(node, 0);
			filter_i =
			    get_non_parallel_subtree_filters(node, initial);
			filter = isl_union_set_union(filter, filter_i);
			node = isl_schedule_node_parent(node);
			/* In a sequence, only the first/last child with
			 * permutable bands is considered. */
			if (in_order)
				break;
		}
		node = isl_schedule_node_parent(node);
	}

	isl_schedule_node_free(node);

	return filter;
}

/* Mark all variables that are accessed by the statement instances in "domain"
 * and that are local to "prog" as requiring a declaration in the host code.
 * The statement instances in "domain" correspond to (a subset of)
 * the active instances at "node".
 * "node" is not modified by this function, except that NULL is returned
 * in case of error.
 *
 * If there are no local variables or if "domain" is empty,
 * then clearly no such declarations are needed.
 */
static __isl_give isl_schedule_node *declare_accessed_local_variables(
	__isl_take isl_schedule_node *node, struct amp_prog *prog,
	__isl_keep isl_union_set *domain)
{
	isl_union_pw_multi_aff *contraction;
	isl_union_set *arrays;
	isl_bool empty;
	int i;

	/* Nothing to do without hidden declarations or statement instances. */
	if (!ppcg_scop_any_hidden_declarations(prog->scop))
		return node;
	empty = isl_union_set_is_empty(domain);
	if (empty < 0)
		return isl_schedule_node_free(node);
	if (empty)
		return node;
	/* Express "domain" on the original statement instances and
	 * compute the set of accessed array elements. */
	contraction = isl_schedule_node_get_subtree_contraction(node);
	domain = isl_union_set_copy(domain);
	domain = isl_union_set_preimage_union_pw_multi_aff(domain, contraction);
	arrays = accessed_by_domain(domain, prog);

	for (i = 0; i < prog->n_array; ++i) {
		isl_space *space;
		isl_set *set;
		isl_bool empty;

		/* Only local arrays may need a host declaration. */
		if (!prog->array[i].local)
			continue;
		space = isl_set_get_space(prog->array[i].extent);
		set = isl_union_set_extract_set(arrays, space);
		empty = isl_set_plain_is_empty(set);
		isl_set_free(set);
		if (empty < 0)
			goto error;
		/* Accessed by "domain", so declare it on the host. */
		if (!empty)
			prog->array[i].declare_local = 1;
	}

	isl_union_set_free(arrays);
	return node;
error:
	isl_union_set_free(arrays);
	return isl_schedule_node_free(node);
}

/* If "node" points to a set or sequence node, then separate the initial
 * (if "initial" is set) or final (if "initial" is not set)
 * children that do not have suitably permutable bands and
 * return a pointer to the subsequence of children that do have such bands,
 * assuming there are any such subtrees.
 *
 * Mark all local variables in "prog" that are accessed by
 * the group without permutable bands as requiring a declaration on the host.
 */
static __isl_give isl_schedule_node *partial_isolate_permutable_subtrees(
	__isl_take isl_schedule_node *node, struct amp_prog *prog,
	int initial)
{
	enum isl_schedule_node_type type;
	isl_union_set *filter;

	if (!node)
		return NULL;
	/* Only set and sequence nodes have children to separate. */
	type = isl_schedule_node_get_type(node);
	if (type != isl_schedule_node_set && type != isl_schedule_node_sequence)
		return node;

	/* Collect the filters of children without permutable bands,
	 * mark the local variables they access and order them out. */
	filter = get_non_parallel_subtree_filters(node, initial);
	node = declare_accessed_local_variables(node, prog, filter);
	node = initial ? isl_schedule_node_order_before(node, filter)
		       : isl_schedule_node_order_after(node, filter);

	return node;
}

/* If "node" points to a set or sequence node, then separate the initial and
 * final children that do not have suitably permutable bands and
 * return a pointer to the subsequence of children that do have such bands.
 *
 * In the case of a set node, the children can be arbitrarily reordered,
 * so they can all be considered initial and final.
 * Separate them out as final children (first) so that they are executed
 * after the other children.
 * Otherwise the arrays partially written by the non-permutable subtrees
 * could get overwritten by the copy-out corresponding to the other subtrees,
 * requiring those arrays to be copied in first.
 */
static __isl_give isl_schedule_node *isolate_permutable_subtrees(
	__isl_take isl_schedule_node *node, struct amp_prog *prog)
{
	/* Separate the final non-permutable children first (initial = 0)
	 * so that, for a set node, they end up executing after the
	 * permutable subtrees, then separate the initial ones.
	 */
	return partial_isolate_permutable_subtrees(
		partial_isolate_permutable_subtrees(node, prog, 0),
		prog, 1);
}

/* Rebuild "sched" with the AMP (automatic mixed precision) kernel inserted.
 *
 * Move to the child of the root node, separate the subtrees without
 * permutable bands, insert a mark("thread") node and a mark("amp_lower")
 * node, and let amp_create_kernel() expand the marked subtree into the
 * mixed-precision kernel.
 *
 * On success, the original "sched" is freed and the new schedule is
 * returned.  If amp_create_kernel() fails, the original "sched" is
 * returned unchanged so the caller can fall back to it.
 *
 * Note: an earlier version also computed the node's domain, subtree
 * contraction and prefix schedule here; the results were never used
 * (and the prefix union map was leaked), so those computations have
 * been removed.
 */
__isl_give isl_schedule *amp_reschedule(__isl_keep isl_ctx *ctx, amp_prog *prog,
	__isl_take isl_schedule *sched)
{
	isl_schedule *schedule;
	isl_schedule_node *node;
	isl_id *id;

	/* Start at the root and descend to its single child so the new
	 * nodes are inserted below the domain node.
	 */
	node = isl_schedule_get_root(sched);
	node = isl_schedule_node_child(node, 0);
	node = isolate_permutable_subtrees(node, prog);

	/* Insert the mark("thread") node. */
	id = isl_id_alloc(ctx, "thread", NULL);
	node = isl_schedule_node_insert_mark(node, id);

	/* Insert the mark("amp_lower") node. */
	node = insert_amp_lower(node);

	/* Based on the amp_lower mark, generate the amp_kernel
	 * (automatic mixed precision computation core).
	 */
	node = amp_create_kernel(prog, node);
	if (!node) {
		printf("\n\033[31m@ERROR:\n       There are some errors because the node (the amp_create_kernel function returned) is NULL, Now will return the original schedule !!! \033[0m\n\n");
		goto error;
	}

	/* Extract the new schedule carrying the amp_kernel. */
	schedule = isl_schedule_node_get_schedule(node);

	isl_schedule_free(sched);
	isl_schedule_node_free(node);

	return schedule;
error:
	isl_schedule_node_free(node);

	return sched;
}

// Compute a new schedule based on the sched
/* Compute a new schedule for "sched" by applying the AMP rescheduling.
 *
 * If automatic mixed precision is disabled, simply hand back the
 * original schedule (a redundant safety check on the options).
 * Returns NULL if amp_reschedule() produced no schedule.
 */
__isl_give isl_schedule *amp_schedule_again(__isl_keep isl_ctx *ctx, amp_prog *prog, __isl_take isl_schedule *sched)
{
	isl_schedule *rescheduled;

	if (!prog->scop->options->automatic_mixed_precision)
		return sched;

	rescheduled = amp_reschedule(ctx, prog, sched);
	if (!rescheduled) {
		printf("\n\033[31m@ERROR:\n       There are some errors because the schedule (the amp_reschedule function returned) is NULL, Now will return the original schedule !!! \033[0m\n\n");
		return NULL;
	}

	return rescheduled;
}

/* Generate CPU code for "scop" and print it to "p".
 *
 * First obtain a schedule for "scop" and then print code for "scop"
 * using that schedule.
 */
/* Generate CPU code for "scop" and print it to "p".
 *
 * First obtain a schedule for "scop" via PPCG's regular scheduler.
 * If automatic mixed precision is enabled, additionally try to
 * recompute the schedule with the AMP kernel inserted and print
 * mixed-precision code; if that fails (or yields the same schedule),
 * fall back to printing plain CPU code from the PPCG schedule.
 */
static __isl_give isl_printer *generate(__isl_take isl_printer *p,
	struct ppcg_scop *scop, struct ppcg_options *options)
{
	isl_schedule *schedule;

	if (!scop)
		return isl_printer_free(p);

	/* No automatic mixed precision: plain CPU code path. */
	if (!options->automatic_mixed_precision) {
		schedule = get_schedule(scop, options);

		return print_cpu_with_schedule(p, scop, schedule, options);
	}

	/* First compute the regular PPCG schedule. */
	schedule = get_schedule(scop, options);

	isl_ctx *ctx = isl_printer_get_ctx(p);
	amp_prog *prog = amp_prog_alloc(ctx, scop);
	if (!prog) {
		/* Free the schedule before bailing out (was leaked here). */
		isl_schedule_free(schedule);
		return isl_printer_free(p);
	}

	/* AMP rescheduling on a copy, so the PPCG schedule remains
	 * available as a fallback.
	 */
	isl_schedule *reschedule = amp_schedule_again(ctx, prog, isl_schedule_copy(schedule));
	if (!reschedule || (reschedule == schedule)) {
		if (!reschedule)
			fprintf(stderr, "\n\033[31m@ERROR:\n       There are some errors because the schedule calcuted again by amp is NULL, Now will print cpu code with the ppcg calcuted schedule !!! \033[0m\n\n");
		else
			fprintf(stderr, "\n\033[31m@WARNING:\n       The schedule calcuted again by amp is the same to original schedule, Now will print cpu code with the ppcg calcuted schedule !!! \033[0m\n\n");
		amp_prog_free(prog);
		return print_cpu_with_schedule(p, scop, schedule, options);
	}

	isl_schedule_free(schedule);

	return print_cpu_with_amp(p, reschedule, options, prog);
}

/* Wrapper around generate for use as a ppcg_transform callback.
 */
static __isl_give isl_printer *print_cpu_wrap(__isl_take isl_printer *p,
	struct ppcg_scop *scop, void *user)
{
	/* "user" carries the ppcg_options pointer handed to ppcg_transform. */
	return generate(p, scop, (struct ppcg_options *) user);
}

/* Transform the code in the file called "input" by replacing
 * all scops by corresponding CPU code and write the results to a file
 * called "output".
 */
/* Transform the code in the file called "input" by replacing
 * all scops by corresponding CPU code and write the results to a file
 * called "output".
 *
 * Returns 0 on success and a negative value on failure, including
 * a failure to flush the output file on close.
 */
int generate_cpu(isl_ctx *ctx, struct ppcg_options *options,
	const char *input, const char *output)
{
	FILE *output_file;
	int r;

	output_file = get_output_file_with_amp(input, output, options);
	if (!output_file)
		return -1;

	r = ppcg_transform(ctx, input, output_file, options,
					&print_cpu_wrap, options);

	/* fclose() flushes buffered output; a failure here means the
	 * generated file may be incomplete, so report it as an error.
	 */
	if (fclose(output_file) != 0 && r >= 0)
		r = -1;

	return r;
}
