#include "display_common.h"
#include "i965_post_processing.h"
#pragma region
//struct static_parameter static_parameter;
//struct inline_parameter inline_parameter;

/* Module-level GPU state for the display post-processing pipeline.
 * NOTE(review): all of this is global mutable state, so only a single
 * pipeline instance can be active at a time. */

/* CURBE (constant) buffer holding the kernel's static parameters. */
struct
{
	dri_bo *bo;
} curbe;

/* Per-binding-table-slot surface state: ss_bo holds the SURFACE_STATE
 * object, s_bo the pixel buffer it describes. */
struct
{
	dri_bo *ss_bo;
	dri_bo *s_bo;
} surfaces[MAX_SURFACES];

/* Binding table mapping surface indices to SURFACE_STATE offsets. */
struct
{
	dri_bo *bo;
} binding_table;

/* Interface descriptor remap table (one descriptor per kernel). */
struct
{
	dri_bo *bo;
	int num_interface_descriptors;
} idrt;

/* Media VFE fixed-function state. */
struct
{
	dri_bo *bo;
} vfe_state;

/* Sampler state; the 8x8 variants are allocated in
 * ironlake_pp_initialize but otherwise unused in this file —
 * presumably reserved for an AVS sampler path. */
struct
{
	dri_bo *bo;
	dri_bo *bo_8x8;
	dri_bo *bo_8x8_uv;
} sampler_state_table;

/* URB partitioning between the VFE and CS units.
 * Entry sizes are in 512-bit units (see display_init). */
struct
{
	unsigned int size;

	unsigned int vfe_start;
	unsigned int cs_start;

	unsigned int num_vfe_entries;
	unsigned int num_cs_entries;

	unsigned int size_vfe_entry;
	unsigned int size_cs_entry;
} urb;

/* Kernel parameter blocks: the static part is uploaded through the
 * CURBE, the inline part is emitted with each MEDIA_OBJECT command. */
struct static_parameter static_parameter;
struct inline_parameter inline_parameter;
//#define pp_static_parameter     pp_context->pp_static_parameter
//#define pp_inline_parameter     pp_context->pp_inline_parameter

#pragma endregion

/* Destination surface dimensions recorded by display_module_initialize. */
unsigned int dest_w, dest_h;

static void display_module_initialize(VADriverContextP ctx, VASurfaceID surface_id);

/* Gen6 kernel binary used for testing: a minimal two-instruction
 * program (each row is one 128-bit Gen ISA instruction) that waits
 * on notification register n0 and then ends the thread. */
static const uint32_t module_array_gen6[][4] = {
	// wait n0;
	 { 0x00000030, 0x32000084, 0x00001200, 0x00000000 },
	// EOT
     { 0x07800031, 0x24001cc8, 0x00000000, 0x82000010 },
//#include "../i965_drv_video/shaders/post_processing/null.g4b.gen5"
//#include "../i965_drv_video/shaders/post_processing/nv12_load_save_nv12.g4b.gen5" //die, drm hangcheck error;
//#include "../i965_drv_video/shaders/post_processing/nv12_avs_nv12.g4b.gen5"
//#include "../i965_drv_video/shaders/post_processing/nv12_scaling_nv12.g4b.gen5"
//#include "../i965_drv_video/shaders/post_processing/nv12_dndi_nv12.g4b.gen5"
//#include "../i965_drv_video/shaders/render/exa_sf.g4b.gen5"  //die
//#include "../i965_drv_video/shaders/render/exa_wm_xy.g4b.gen5"
//#include "../i965_drv_video/shaders/render/exa_sf.g4b.gen5"  //die
//
//#include "cl_compiler/kernel.g6b"
};

/* Module descriptor: the kernel binary above plus the per-module
 * initialization hook that builds its surface state.  kernel.bo is
 * allocated lazily in display_init. */
static struct module display_module = {
	{
	 "NULL module (for testing)",
	 0,
	 module_array_gen6,
	 sizeof(module_array_gen6),
	 NULL,
	 }
	,
	display_module_initialize,
};


/* Program the tiling fields of a SURFACE_STATE from a DRM tiling mode.
 * Unknown modes leave ss3 untouched (callers memset the state first). */
static void set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
{
	if (tiling == I915_TILING_NONE)
	{
		ss->ss3.tiled_surface = 0;
		ss->ss3.tile_walk = 0;
	}
	else if (tiling == I915_TILING_X)
	{
		ss->ss3.tiled_surface = 1;
		ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
	}
	else if (tiling == I915_TILING_Y)
	{
		ss->ss3.tiled_surface = 1;
		ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
	}
}

/* Release a surface object's GEM buffer and any attached private data,
 * then return the object to its heap. */
static void i965_destroy_surface(struct object_heap *heap, struct object_base *obj)
{
	struct object_surface *surface = (struct object_surface *) obj;

	dri_bo_unreference(surface->bo);
	surface->bo = NULL;

	if (surface->free_private_data)
	{
		/* The callback owns the private data and clears it through
		 * the pointer we pass; NULL it again defensively. */
		surface->free_private_data(&surface->private_data);
		surface->private_data = NULL;
	}

	object_heap_free(heap, obj);
}

/* Destroy num_surfaces surfaces, newest first.  Always succeeds. */
VAStatus i965_DestroySurfaces(VADriverContextP ctx, VASurfaceID * surface_list, int num_surfaces)
{
	struct i965_driver_data *i965 = i965_driver_data(ctx);
	int idx;

	for (idx = num_surfaces - 1; idx >= 0; idx--)
	{
		struct object_surface *obj_surface = SURFACE(surface_list[idx]);

		assert(obj_surface);
		i965_destroy_surface(&i965->surface_heap, (struct object_base *) obj_surface);
	}

	return VA_STATUS_SUCCESS;
}

/* Allocate num_surfaces surface objects of width x height.
 * Only VA_RT_FORMAT_YUV420 is accepted.  The backing GEM buffer is NOT
 * allocated here (bo stays NULL until first use elsewhere).
 * On any allocation failure every surface created so far is destroyed
 * and the corresponding error status is returned. */
VAStatus
i965_CreateSurfaces(VADriverContextP ctx,
					int width, int height, int format, int num_surfaces, VASurfaceID * surfaces)
{
	struct i965_driver_data *i965 = i965_driver_data(ctx);

	int i;

	VAStatus vaStatus = VA_STATUS_SUCCESS;

	/* We only support one format */
	if (VA_RT_FORMAT_YUV420 != format)
	{
		return VA_STATUS_ERROR_UNSUPPORTED_RT_FORMAT;
	}

	for (i = 0; i < num_surfaces; i++)
	{
		int surfaceID = NEW_SURFACE_ID();

		struct object_surface *obj_surface = SURFACE(surfaceID);

		if (NULL == obj_surface)
		{
			vaStatus = VA_STATUS_ERROR_ALLOCATION_FAILED;
			break;
		}
		surfaces[i] = surfaceID;
		obj_surface->status = VASurfaceReady;
		obj_surface->subpic = VA_INVALID_ID;
		obj_surface->orig_width = width;
		obj_surface->orig_height = height;

		/* Pad pitch to 128 and height to 32 — tiling/display friendly. */
		obj_surface->width = ALIGN(obj_surface->orig_width, 128);
		obj_surface->height = ALIGN(obj_surface->orig_height, 32);

		obj_surface->size = SIZE_YUV420(obj_surface->width, obj_surface->height);
		obj_surface->flags = SURFACE_REFERENCED;
		obj_surface->fourcc = 0;
		obj_surface->bo = NULL;
		obj_surface->locked_image_id = VA_INVALID_ID;
		obj_surface->private_data = NULL;
		obj_surface->free_private_data = NULL;
	}
	/* Error recovery */
	if (VA_STATUS_SUCCESS != vaStatus)
	{
		/* surfaces[i-1] was the last successful allocation */
		for (; i--;)
		{
			struct object_surface *obj_surface = SURFACE(surfaces[i]);

			surfaces[i] = VA_INVALID_SURFACE;
			assert(obj_surface);
			i965_destroy_surface(&i965->surface_heap, (struct object_base *) obj_surface);
		}
	}

	return vaStatus;
}

/* Build the three SURFACE_STATE objects describing the destination
 * NV12 surface for the kernel's binding table:
 *   index 0 — Y plane      (R8 2D surface at offset 0)
 *   index 1 — UV plane     (R8G8 2D surface at offset w*h)
 *   index 2 — raw buffer   (1D buffer view at offset w*h*2)
 * Also records the destination size and the per-row block counts used
 * as inline kernel parameters. */
static void display_module_initialize(VADriverContextP ctx, VASurfaceID surface_id)
{
	struct i965_driver_data *i965 = i965_driver_data(ctx);

	struct object_surface *obj_surface;

	struct i965_surface_state *ss;

	dri_bo *bo;

	int index, w, h;

	int orig_w, orig_h;

	unsigned int tiling, swizzle;

	// destination surface
	obj_surface = SURFACE(surface_id);
	orig_w = obj_surface->orig_width;
	orig_h = obj_surface->orig_height;
	w = obj_surface->width;
	h = obj_surface->height;
	dri_bo_get_tiling(obj_surface->bo, &tiling, &swizzle);

	// destination Y surface index 0
	index = 0;
	surfaces[index].s_bo = obj_surface->bo;
	dri_bo_reference(surfaces[index].s_bo);
	bo = dri_bo_alloc(i965->intel.bufmgr, "surface state", sizeof(struct i965_surface_state), 4096);
	assert(bo);
	surfaces[index].ss_bo = bo;
	dri_bo_map(bo, True);
	assert(bo->virtual);
	ss = bo->virtual;
	memset(ss, 0, sizeof(*ss));
	ss->ss0.surface_type = I965_SURFACE_2D;
	ss->ss0.surface_format = I965_SURFACEFORMAT_R8_UNORM;
	ss->ss1.base_addr = surfaces[index].s_bo->offset;
	/* NOTE(review): width is programmed as orig_w/4 — the kernel
	 * apparently reads 4 pixels per element; confirm against the
	 * kernel's access pattern before changing. */
	ss->ss2.width = orig_w / 4 - 1;
	ss->ss2.height = orig_h - 1;
	ss->ss3.pitch = w - 1;
	set_surface_tiling(ss, tiling);
	/* Relocate ss1.base_addr against the surface bo (delta 0). */
	dri_bo_emit_reloc(bo,
					  I915_GEM_DOMAIN_RENDER,
					  I915_GEM_DOMAIN_RENDER,
					  0, offsetof(struct i965_surface_state, ss1), surfaces[index].s_bo);
	dri_bo_unmap(bo);

	// destination UV surface index 1
	index = 1;
	surfaces[index].s_bo = obj_surface->bo;
	dri_bo_reference(surfaces[index].s_bo);
	bo = dri_bo_alloc(i965->intel.bufmgr, "surface state", sizeof(struct i965_surface_state), 4096);
	assert(bo);
	surfaces[index].ss_bo = bo;
	dri_bo_map(bo, True);
	assert(bo->virtual);
	ss = bo->virtual;
	memset(ss, 0, sizeof(*ss));
	ss->ss0.surface_type = I965_SURFACE_2D;
	ss->ss0.surface_format = I965_SURFACEFORMAT_R8G8_UNORM;
	/* UV plane starts right after the Y plane (NV12 layout). */
	ss->ss1.base_addr = surfaces[index].s_bo->offset + w * h;
	ss->ss2.width = orig_w / 4 - 1;
	ss->ss2.height = orig_h / 2 - 1;
	ss->ss3.pitch = w - 1;
	set_surface_tiling(ss, tiling);
	dri_bo_emit_reloc(bo,
					  I915_GEM_DOMAIN_RENDER,
					  I915_GEM_DOMAIN_RENDER,
					  w * h, offsetof(struct i965_surface_state, ss1), surfaces[index].s_bo);
	dri_bo_unmap(bo);

	// surface index 2
	index = 2;
	surfaces[index].s_bo = obj_surface->bo;
	dri_bo_reference(surfaces[index].s_bo);
	bo = dri_bo_alloc(i965->intel.bufmgr, "surface state", sizeof(struct i965_surface_state), 4096);
	assert(bo);
	surfaces[index].ss_bo = bo;
	dri_bo_map(bo, True);
	assert(bo->virtual);
	ss = bo->virtual;
	memset(ss, 0, sizeof(*ss));
	ss->ss0.render_cache_read_mode = 1;
	ss->ss0.surface_type = I965_SURFACE_BUFFER;
	ss->ss1.base_addr = surfaces[index].s_bo->offset + w * h * 2;
	/* 1D buffer view: width/height/depth together encode the element
	 * count; fixed 0x7f/0x1fff/0x7f here. */
	ss->ss2.width = 0x7f;
	ss->ss2.height = 0x1fff;
	ss->ss3.depth = 0x7f;
	set_surface_tiling(ss, tiling);
	dri_bo_emit_reloc(bo,
					  I915_GEM_DOMAIN_RENDER,
					  I915_GEM_DOMAIN_RENDER,
					  w * h * 2, offsetof(struct i965_surface_state, ss1), surfaces[index].s_bo);
	dri_bo_unmap(bo);

	/* private function & data */
	dest_h = h;
	dest_w = w;

	/* One row of 16-pixel-wide blocks per MEDIA_OBJECT. */
	inline_parameter.grf5.block_count_x = w / 16;	/* 1 x N */
	inline_parameter.grf5.number_blocks = w / 16;
}


//static void
//ironlake_pp_surface_state(struct i965_post_processing_context *pp_context)
//{
//}

/* Fill the single interface descriptor: kernel entry point, CURBE read
 * length (grf 1-4), sampler state and binding table pointers.  All three
 * pointers are relocated so the descriptor stays valid wherever the
 * referenced bos land. */
static void
ironlake_pp_interface_descriptor_table()
{
    struct i965_interface_descriptor *desc;
    dri_bo *bo;
    //int pp_index = pp_context->current_pp;

    bo = idrt.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);
    desc = bo->virtual;
    memset(desc, 0, sizeof(*desc));
    desc->desc0.grf_reg_blocks = 10;
    desc->desc0.kernel_start_pointer = display_module.kernel.bo->offset >> 6; /* reloc */
    desc->desc1.const_urb_entry_read_offset = 0;
    desc->desc1.const_urb_entry_read_len = 4; /* grf 1-4 */
    desc->desc2.sampler_state_pointer = sampler_state_table.bo->offset >> 5;
    desc->desc2.sampler_count = 0;
    desc->desc3.binding_table_entry_count = 0;
    desc->desc3.binding_table_pointer =  binding_table.bo->offset >> 5; /*reloc */

    /* The reloc deltas carry the low bits that share the dword with
     * each pointer (grf block count, sampler count, entry count). */
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      desc->desc0.grf_reg_blocks,
                      offsetof(struct i965_interface_descriptor, desc0),
                      display_module.kernel.bo);

    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      desc->desc2.sampler_count << 2,
                      offsetof(struct i965_interface_descriptor, desc2),
                      sampler_state_table.bo);

    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      desc->desc3.binding_table_entry_count,
                      offsetof(struct i965_interface_descriptor, desc3),
                      binding_table.bo);

    dri_bo_unmap(bo);
    idrt.num_interface_descriptors++;
}

/* Write one dword per populated surface slot into the binding table,
 * relocating each entry against its SURFACE_STATE bo.
 * NOTE(review): the loop bound is MAX_PP_SURFACES while the global
 * `surfaces[]` array is sized MAX_SURFACES — confirm the two constants
 * agree (MAX_PP_SURFACES > MAX_SURFACES would read out of bounds). */
static void
ironlake_pp_binding_table()
{
    unsigned int *binding_table2;
    dri_bo *bo = binding_table.bo;
    int i;

    dri_bo_map(bo, 1);
    assert(bo->virtual);
    binding_table2 = bo->virtual;
    memset(binding_table2, 0, bo->size);

    for (i = 0; i < MAX_PP_SURFACES; i++) {
        if (surfaces[i].ss_bo) {
            assert(surfaces[i].s_bo);

            binding_table2[i] = surfaces[i].ss_bo->offset;
            dri_bo_emit_reloc(bo,
                              I915_GEM_DOMAIN_INSTRUCTION, 0, 0,
                              i * sizeof(*binding_table2),
                              surfaces[i].ss_bo);
        }
    
    }

    dri_bo_unmap(bo);
}

/* Program the VFE fixed-function state: thread count and URB entry
 * layout from the `urb` globals, generic mode, and a relocated pointer
 * to the interface descriptor table. */
static void
ironlake_pp_vfe_state()
{
    struct i965_vfe_state *vfe_state2;
    dri_bo *bo;

    bo = vfe_state.bo;
    dri_bo_map(bo, 1);
    assert(bo->virtual);
    vfe_state2 = bo->virtual;
    memset(vfe_state2, 0, sizeof(*vfe_state2));
    vfe_state2->vfe1.max_threads = urb.num_vfe_entries - 1;
    vfe_state2->vfe1.urb_entry_alloc_size = urb.size_vfe_entry - 1;
    vfe_state2->vfe1.num_urb_entries = urb.num_vfe_entries;
    vfe_state2->vfe1.vfe_mode = VFE_GENERIC_MODE;
    vfe_state2->vfe1.children_present = 0;
    vfe_state2->vfe2.interface_descriptor_base = 
        idrt.bo->offset >> 4; /* reloc */
    dri_bo_emit_reloc(bo,
                      I915_GEM_DOMAIN_INSTRUCTION, 0,
                      0,
                      offsetof(struct i965_vfe_state, vfe2),
                      idrt.bo);
    dri_bo_unmap(bo);
}

static void
ironlake_pp_upload_constants()
{
    unsigned char *constant_buffer;

    assert(sizeof(static_parameter) == 128);
    dri_bo_map(curbe.bo, 1);
    assert(curbe.bo->virtual);
    constant_buffer = curbe.bo->virtual;
    memcpy(constant_buffer, &static_parameter, sizeof(static_parameter));
    dri_bo_unmap(curbe.bo);
}

/* Build every indirect-state object the media pipeline references:
 * binding table, interface descriptors, VFE state, CURBE constants.
 * The binding table and sampler state must exist before the interface
 * descriptor is written, since it embeds pointers to both. */
static void
ironlake_pp_states_setup()//VADriverContextP ctx)
{
    ironlake_pp_binding_table();
    ironlake_pp_interface_descriptor_table();
    ironlake_pp_vfe_state();
    ironlake_pp_upload_constants();
}

/* Switch the GPU command streamer to the media pipeline. */
static void
ironlake_pp_pipeline_select(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}

/* Emit URB_FENCE partitioning the URB: VFE entries first, then CS
 * entries up to the total URB size (values computed in display_init). */
static void
ironlake_pp_urb_layout(VADriverContextP ctx)//, struct i965_post_processing_context *pp_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    unsigned int vfe_fence, cs_fence;

    vfe_fence = urb.cs_start;
    cs_fence = urb.size;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, CMD_URB_FENCE | UF0_VFE_REALLOC | UF0_CS_REALLOC | 1);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 
              (vfe_fence << UF2_VFE_FENCE_SHIFT) |      /* VFE_SIZE */
              (cs_fence << UF2_CS_FENCE_SHIFT));        /* CS_SIZE */
    ADVANCE_BATCH(batch);
}

/* Emit STATE_BASE_ADDRESS with every base left at zero (addresses in
 * state objects are therefore absolute GTT offsets) and the MODIFY bit
 * set on each of the seven base/bound dwords. */
static void
ironlake_pp_state_base_address(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    int dw;

    BEGIN_BATCH(batch, 8);
    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
    for (dw = 0; dw < 7; dw++)
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
    ADVANCE_BATCH(batch);
}

/* Point the media pipeline at the VFE state object (relocated). */
static void
ironlake_pp_state_pointers(VADriverContextP ctx)//, struct i965_post_processing_context *pp_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);
    OUT_BATCH(batch, 0);
    OUT_RELOC(batch, vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH(batch);
}

/* Describe the constant-buffer (CS) portion of the URB: entry size in
 * 512-bit units and number of entries, from the `urb` globals. */
static void 
ironlake_pp_cs_urb_layout(VADriverContextP ctx)//, struct i965_post_processing_context *pp_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
    OUT_BATCH(batch,
              ((urb.size_cs_entry - 1) << 4) |     /* URB Entry Allocation Size */
              (urb.num_cs_entries << 0));          /* Number of URB Entries */
    ADVANCE_BATCH(batch);
}

/* Bind the CURBE bo as the constant buffer.  The reloc delta encodes
 * the buffer length (in 512-bit units, minus one) in the low bits of
 * the relocated address, per the CONSTANT_BUFFER command layout. */
static void
ironlake_pp_constant_buffer(VADriverContextP ctx)//, struct i965_post_processing_context *pp_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
    OUT_RELOC(batch, curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              urb.size_cs_entry - 1);
    ADVANCE_BATCH(batch);    
}

/* Would emit one MEDIA_OBJECT per block with the inline parameters.
 * NOTE(review): the entire body is compiled out via `if(0)` — this
 * function is currently a no-op, so no kernel thread is ever spawned.
 * The commented-out code above the `if(0)` is the original multi-block
 * walker it replaced.  Left untouched pending a decision on whether to
 * re-enable it. */
static void
ironlake_pp_object_walker(VADriverContextP ctx)//, struct i965_post_processing_context *pp_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;
    /*int x, x_steps, y, y_steps;
printf("before x_steps\n");
    x_steps = 1;//pp_context->pp_x_steps(&pp_context->private_context);
    y_steps = 1;//pp_context->pp_y_steps(&pp_context->private_context);
printf("before for loop\n");
    for (y = 0; y < y_steps; y++) {
        for (x = 0; x < x_steps; x++) {
            if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
printf("before BEGIN_BATCH\n");*/
if(0){
                BEGIN_BATCH(batch, 20);
                OUT_BATCH(batch, CMD_MEDIA_OBJECT | 18);
                OUT_BATCH(batch, 0);
                OUT_BATCH(batch, 0); /* no indirect data */
                OUT_BATCH(batch, 0);

                /* inline data grf 5-6 */
                assert(sizeof(inline_parameter) == 64);
                intel_batchbuffer_data(batch, &inline_parameter, sizeof(inline_parameter));
                ADVANCE_BATCH(batch);
}
//            }
//        }
//    }
}

/* Emit the complete media pipeline into the batchbuffer — pipeline
 * select, base addresses, state pointers, URB layout, constants and
 * the object walker — atomically, then flush so it executes now. */
static void
ironlake_pp_pipeline_setup(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    ironlake_pp_pipeline_select(ctx);
    ironlake_pp_state_base_address(ctx);
    ironlake_pp_state_pointers(ctx);
    ironlake_pp_urb_layout(ctx);
    ironlake_pp_cs_urb_layout(ctx);
    ironlake_pp_constant_buffer(ctx);
    ironlake_pp_object_walker(ctx);
    intel_batchbuffer_end_atomic(batch);
    intel_batchbuffer_flush(batch);
}


/* (Re)allocate every indirect-state buffer object used by the media
 * pipeline, clear the per-run surface slots and parameter blocks, and
 * finally run the display module's own initialization against the
 * target surface.  Previously-held bos are unreferenced first, so this
 * is safe to call repeatedly. */
static void
ironlake_pp_initialize(
    VADriverContextP   ctx,
    VASurfaceID        surface_id
)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    dri_bo *bo;
    int i;

    /* CURBE buffer for the kernel's static parameters. */
    dri_bo_unreference(curbe.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "constant buffer",
                      4096, 
                      4096);
    assert(bo);
    curbe.bo = bo;

    /* Binding table: one dword per surface slot.  BUGFIX: this used to
     * allocate sizeof(unsigned int) — room for a single entry — while
     * ironlake_pp_binding_table() writes up to MAX_PP_SURFACES entries.
     * It only worked because dri_bo_alloc rounds up to a page. */
    dri_bo_unreference(binding_table.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr, 
                      "binding table",
                      MAX_PP_SURFACES * sizeof(unsigned int), 
                      4096);
    assert(bo);
    binding_table.bo = bo;

    /* Interface descriptor remap table (a single descriptor is added
     * later by ironlake_pp_interface_descriptor_table). */
    dri_bo_unreference(idrt.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr, 
                      "interface discriptor", 
                      sizeof(struct i965_interface_descriptor), 
                      4096);
    assert(bo);
    idrt.bo = bo;
    idrt.num_interface_descriptors = 0;

    /* Sampler state table, zeroed — the kernel uses no samplers. */
    dri_bo_unreference(sampler_state_table.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr, 
                      "sampler state table", 
                      4096,
                      4096);
    assert(bo);
    dri_bo_map(bo, True);
    memset(bo->virtual, 0, bo->size);
    dri_bo_unmap(bo);
    sampler_state_table.bo = bo;

    /* 8x8 sampler states: allocated but never written in this file. */
    dri_bo_unreference(sampler_state_table.bo_8x8);
    bo = dri_bo_alloc(i965->intel.bufmgr, 
                      "sampler 8x8 state ",
                      4096,
                      4096);
    assert(bo);
    sampler_state_table.bo_8x8 = bo;

    dri_bo_unreference(sampler_state_table.bo_8x8_uv);
    bo = dri_bo_alloc(i965->intel.bufmgr, 
                      "sampler 8x8 state ",
                      4096,
                      4096);
    assert(bo);
    sampler_state_table.bo_8x8_uv = bo;

    /* VFE fixed-function state. */
    dri_bo_unreference(vfe_state.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr, 
                      "vfe state", 
                      sizeof(struct i965_vfe_state), 
                      4096);
    assert(bo);
    vfe_state.bo = bo;
    
    /* Drop any surface state left over from a previous run. */
    for (i = 0; i < MAX_PP_SURFACES; i++) {
        dri_bo_unreference(surfaces[i].ss_bo);
        surfaces[i].ss_bo = NULL;

        dri_bo_unreference(surfaces[i].s_bo);
        surfaces[i].s_bo = NULL;
    }

    memset(&static_parameter, 0, sizeof(static_parameter));
    memset(&inline_parameter, 0, sizeof(inline_parameter));

    if (display_module.initialize)
        display_module.initialize(ctx, surface_id);
}

/* Create one NV12 destination surface of width x height, allocate its
 * backing bo, then build and submit the full post-processing pipeline
 * for it.  Returns the new surface's ID; the caller owns it and should
 * eventually pass it to display_terminate. */
VASurfaceID setup_pipeline(VADriverContextP ctx, unsigned width, unsigned height)
{
	struct i965_driver_data *i965 = i965_driver_data(ctx);

	struct object_surface *obj_surface;

	VASurfaceID surface_id;

	VAStatus status;

	status = i965_CreateSurfaces(ctx, width, height, VA_RT_FORMAT_YUV420, 1, &surface_id);
	assert(status == VA_STATUS_SUCCESS);

	obj_surface = SURFACE(surface_id);
	/* Surfaces are created without a bo; allocate it now as NV12. */
	i965_check_alloc_surface_bo(ctx, obj_surface, 0, VA_FOURCC('N', 'V', '1', '2'));
	ironlake_pp_initialize(ctx, surface_id);
	ironlake_pp_states_setup();//ctx);
	ironlake_pp_pipeline_setup(ctx);
	return surface_id;
}

/* Debug helper: print which i915 execution ring the batchbuffer flag
 * selects.  Note the flag is compared for exact equality, not masked
 * with I915_EXEC_RING_MASK, matching the original behavior. */
void wzyPrintFlag(int flag)
{
	switch (flag)
	{
	case I915_EXEC_RING_MASK:	/* (7<<0) */
		printf("wzyPrintFlag: flag is I915_EXEC_RING_MASK. \n");
		break;
	case I915_EXEC_DEFAULT:		/* (0<<0) */
		printf("wzyPrintFlag: flag is I915_EXEC_DEFAULT. \n");
		break;
	case I915_EXEC_RENDER:		/* (1<<0) */
		printf("wzyPrintFlag: flag is I915_EXEC_RENDER. \n");
		break;
	case I915_EXEC_BSD:		/* (2<<0) */
		printf("wzyPrintFlag: flag is I915_EXEC_BSD. \n");
		break;
	case I915_EXEC_BLT:		/* (3<<0) */
		printf("wzyPrintFlag: flag is I915_EXEC_BLT. \n");
		break;
	default:
		printf("wzyPrintFlag: flag is invalid. \n");
		break;
	}
}

/* One-time setup: compute the URB partitioning (32 VFE entries of one
 * 512-bit unit, 1 CS entry of two units) and upload the display
 * module's kernel binary into a fresh bo.  Only runs on hardware with
 * post-processing support (HAS_PP). */
void display_init(VADriverContextP ctx)
{
	struct i965_driver_data *i965 = i965_driver_data(ctx);
//struct i965_post_processing_context *pp_context = i965->pp_context;
    //i965->batch->flag = I915_EXEC_DEFAULT;
	wzyPrintFlag(i965->batch->flag);
	  //printf("<func: display_init> flag in batch is : %d. \n", i965->batch->flag);
    //int i;
    if (HAS_PP(i965)) {
 //  if (pp_context == NULL) {
//	pp_context = calloc(1, sizeof(*pp_context));
//	i965->pp_context = pp_context;

	urb.size = URB_SIZE((&i965->intel));
	urb.num_vfe_entries = 32;
	urb.size_vfe_entry = 1;		/* in 512 bits unit */
	urb.num_cs_entries = 1;
	urb.size_cs_entry = 2;		/* in 512 bits unit */
	urb.vfe_start = 0;
	/* CS entries sit directly after the VFE entries. */
	urb.cs_start = urb.vfe_start + 
		urb.num_vfe_entries * urb.size_vfe_entry;
	assert(urb.cs_start + 
		urb.num_cs_entries * urb.size_cs_entry <= URB_SIZE((&i965->intel)));

	/* Upload the kernel binary; replaces any previous upload. */
	dri_bo_unreference(display_module.kernel.bo);
	display_module.kernel.bo = dri_bo_alloc(i965->intel.bufmgr, display_module.kernel.name, 
			display_module.kernel.size, 4096);
	assert(display_module.kernel.bo);
	dri_bo_subdata(display_module.kernel.bo, 0, display_module.kernel.size,
				   display_module.kernel.bin);
    // }
    }
}

/* Release every bo held by the module-level pipeline state, then
 * destroy the single surface created by setup_pipeline.
 * NOTE(review): this loop uses MAX_SURFACES while ironlake_pp_initialize
 * iterates MAX_PP_SURFACES over the same array — confirm the constants
 * agree, or slots past the smaller bound could leak. */
void display_terminate(VADriverContextP ctx, VASurfaceID * surface)
{
	int i;

	dri_bo_unreference(curbe.bo);
	curbe.bo = NULL;

	for (i = 0; i < MAX_SURFACES; i++)
	{
		dri_bo_unreference(surfaces[i].ss_bo);
		surfaces[i].ss_bo = NULL;

		dri_bo_unreference(surfaces[i].s_bo);
		surfaces[i].s_bo = NULL;
	}

	dri_bo_unreference(sampler_state_table.bo);
	sampler_state_table.bo = NULL;

	dri_bo_unreference(sampler_state_table.bo_8x8);
	sampler_state_table.bo_8x8 = NULL;

	dri_bo_unreference(sampler_state_table.bo_8x8_uv);
	sampler_state_table.bo_8x8_uv = NULL;

	dri_bo_unreference(binding_table.bo);
	binding_table.bo = NULL;

	dri_bo_unreference(idrt.bo);
	idrt.bo = NULL;
	idrt.num_interface_descriptors = 0;

	dri_bo_unreference(vfe_state.bo);
	vfe_state.bo = NULL;

	dri_bo_unreference(display_module.kernel.bo);
	display_module.kernel.bo = NULL;

	i965_DestroySurfaces(ctx, surface, 1);

}
