//**************************************************************************************
// MediaFrame is an Open Source streaming media platform in Java 
// which provides a fast, easy to implement and extremely small applet 
// that enables to view your audio/video content without having 
// to rely on external player applications or bulky plug-ins.
//
//--------------------------------------------------------------------------------------
//
// We changed a lot of code and added a lot of functionality.
// This includes, but is not limited to, the following changes:
// 1. The project was renamed to MediaFrame;
// 2. The connection speed detection procedure was added;
// 3. The JavaScript API functions were added;
// 4. The pre and post image support functionality was added;
// 5. The ability to save movie into the local disk was added;
// 6. The inner buffer for a movie file was added;
// 7. The click-through functionality was added;    
// 8. The .zip files support was added;    
// 9. The realtime feedback agent functionality was added.    
// For the full list of the current functionality please visit the following web page:
// http://mediaframe.org/
//    
// 06 Jul 2002 - 19 Dec 2004 Konstantin Belous, Oleg Lebedev
//
//--------------------------------------------------------------------------------------
//
//							"MPEG_video.java"
//
// This file contains the class "MPEG_video". The object of this class scans the MPEG-
// video stream, extracts the information (especially the DCT values), activates the
// IDCT, applies the motion vectors and passes the pixel values to the applet.
// Furthermore it resizes the applet and (possibly) the frame once it has recognized
// the dimensions of the frame.
// 
// To understand how the video scanner works knowledge of the MPEG file format is
// needed (See ISO 11172-2).
//
//--------------------------------------------------------------------------------------
//
//		Joerg Anders, TU Chemnitz, Fakultaet fuer Informatik, GERMANY
//		ja@informatik.tu-chemnitz.de
//
//
//--------------------------------------------------------------------------------------
//
// This program is free software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the Free Software
// Foundation; either version 2 of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
// PARTICULAR PURPOSE. See the GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along with this
// program; (See "LICENSE.GPL"). If not, write to the Free Software Foundation, Inc.,
// 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
//
//--------------------------------------------------------------------------------------
//
// If the program runs as Java applet it isn't "interactive" in the sense of the GNU
// General Public License. So paragraph 2c doesn't apply.
//
//****************************************************************************************

package mediaframe.mpeg1;

import java.io.*;
import java.applet.Applet;

/**
 * The <code>MPEG_video</code> class implements the video decoder of the MPEG bit stream.
 * 
 * @author Joerg Anders
 */
public class MPEG_video implements Runnable {
	/** The input movie's data bit stream. */
	private io_tool mpeg_stream;	
	/** The VLC (Huffman) decoder. */
	private Huffmann Huf = null;	 
	/** A matrix of zeros, used to reset the DCT coefficient buffer. */
	private int nullmatrix[] = new int[64]; 

	/** The default intra quantizer matrix (default values from ISO 11172-2). */
	private int intramatrix[] = {		 
		 8, 16, 19, 22, 26, 27, 29, 34,
		 16, 16, 22, 24, 27, 29, 34, 37,
		 19, 22, 26, 27, 29, 34, 34, 38,
		 22, 22, 26, 27, 29, 34, 37, 40,
		 22, 26, 27, 29, 32, 35, 40, 48,
		 26, 27, 29, 32, 35, 40, 48, 58,
		 26, 27, 29, 34, 38, 46, 56, 69,
		 27, 29, 35, 38, 46, 56, 69, 83 };
	
	/** The reverse zigzag scan order. */
	private final int zigzag[] = {		 
		 0,  1,  8, 16,  9,  2,  3, 10,
		17, 24, 32, 25, 18, 11,  4,  5,
		12, 19, 26, 33, 40, 48, 41, 34,
		27, 20, 13,  6,  7, 14, 21, 28,
		35, 42, 49, 56, 57, 50, 43, 36,
		29, 22, 15, 23, 30, 37, 44, 51,
		58, 59, 52, 45, 38, 31, 39, 46,
		53, 60, 61, 54, 47, 55, 62, 63 };
	
	/** An IDCT object to transform the DCT coefficients. */
	private IDCT idct = new IDCT();			
	/** The values before IDCT transformation. */
	private int dct_recon[] = new int[64];
	/** A quantization matrix for non intra frames. */
	private int non_intramatrix[] = new int[64];
	/** <tt>False</tt> while the current macroblock hasn't contained any luminance block yet. */ 
	private boolean lum_block; 
	/** The past DC coefficient value (DC predictor) for the Cr component. */
	private int dct_dc_cr_past; 
	/** The past DC coefficient value (DC predictor) for the Cb component. */
	private int dct_dc_cb_past; 
	/** The past DC coefficient value (DC predictor) for the Y component. */
	private int dct_dc_y_past;

	/**
	 * The "Pel_buffer" is a main feature of the video scanner.
	 * 3 frames are stored at:
	 * <pre>
	 *				Pel_buffer[0]
	 *				Pel_buffer[1]
	 *				Pel_buffer[2]</pre>
	 * The frame at index "ak_idx" is the frame coming into being. The frame at
	 * index "pred_idx" is the frame for forward prediction. The frame at index
	 * "back_idx" is the frame for backward prediction. The method "Parse_Picture"
	 * administers the values of these 3 variables.
	 * The index in second dimension determines whether the information is:
	 * <pre>
	 *		luminance   information 	-	Pel_buffer[?][0]
	 *		chrominance information (cr)	-	Pel_buffer[?][1]
	 * 		chrominance information	(cb)	-	Pel_buffer[?][2]
	 * </pre>
	 */
	private int Pel_buffer[][][];		
						
	/** The index of the frame coming into being. */
	private int ak_idx = 0; 
	/** The index of the frame for forward prediction (-1 while none exists). */
	private int pred_idx = -1;
	/** The index of the frame for backward prediction (-1 while none exists). */
	private int back_idx = -1;	
	/** The reference to the applet. */
	private MPEG1 Player;

	/** MPEG video layer start-code constant. */
	private static final int SEQ_END_CODE = 0x000001b7;
	/** MPEG video layer start-code constant. */
	private static final int SEQ_START_CODE = 0x000001b3;
	/** MPEG video layer start-code constant. */
	private static final int GOP_START_CODE = 0x000001b8;
	/** MPEG video layer start-code constant. */
	private static final int PICTURE_START_CODE = 0x00000100;
	/** MPEG video layer start-code constant (smallest slice start code). */
	private static final int SLICE_MIN_START_CODE = 0x00000101;
	/** MPEG video layer start-code constant (largest slice start code). */
	private static final int SLICE_MAX_START_CODE = 0x000001af;
	/** MPEG video layer start-code constant (extension data). */
	private static final int EXT_START_CODE = 0x000001b5;
	/** MPEG video layer start-code constant (user data). */
	private static final int USER_START_CODE = 0x000001b2;

	/** I FRAME picture type constant. */
	public static final int I_TYPE = 0x1;
	/** P FRAME picture type constant. */
	public static final int P_TYPE = 0x2;
	/** B FRAME picture type constant. */
	public static final int B_TYPE = 0x3;

	/** The width in pixels of the movie data. */
	private int Width; 
	/** The height in pixels of the movie data. */
	private int Height;
	/** The aspect ratio used by the MPEG standard given in the sequence header. */
	private int Asp_ratio;
	/** The frame rate code used by the MPEG standard given in the sequence header. */
	private int Pic_rate;
	/** The macroblock width. */
	private int mb_width;
	/** The macroblock height. */
	private int mb_height;
	/** The bit rate specified in the sequence header. */
	private int Bit_rate;
	/** The vbv buffer size specified in the sequence header. */
	private int VBV_buffer; 
	/** <tt>True</tt> if the constrained parameter flag was set in the sequence header and <tt>false</tt> otherwise. */
	private boolean const_param;
	/** <tt>True</tt> if the quantizer matrix (intra or nonintra) should be loaded from MPEG stream. */
	private boolean quant_matrix;
	/** The Hour time value specified in the group of pictures header. */
	private int Hour;
	/** The Minute time value specified in the group of pictures header. */
	private int Minute;
	/** The Second time value specified in the group of pictures header. */
	private int Second;
	/** The Pict Count value specified in the group of pictures header. */
	private int Pict_Count;
	/** The Drop Flag value specified in the group of pictures header. */
	private boolean Drop_Flag;
	/** The Closed Group value specified in the group of pictures header. */
	private boolean Closed_Group;
	/** The Broken Link value specified in the group of pictures header. */
	private boolean Broken_Link;
	/** The display order of the picture in the group of pictures. */
	private int Temp_ref;
	/** The type of the frame (I, B, or P). */
	private int Pic_Type;
	/** The first frame number of last group of pictures. */
	private int Frame_nr_offset = -1;
	/** The frame number in the movie. */
	private int Frame_nr = 0;
	/** The VBV delay value of the frame. */
	private int VBV_Delay;
	/** 
	 * Equals <tt>true</tt> if full pixel accuracy is used for forward prediction, 
	 * equals <tt>false</tt> if half pixel accuracy is used. 
	 */
	private boolean Full_pel_forw_vector;
	/** The motion code value for forward prediction. */
	private int forw_f_code; 
	/** The size of the motion data for forward prediction. */
	private int forward_f; 
	/** The size of residual value for forward prediction. */
	private int forward_r_size; 
	/** The motion code value of horizontal part for forward prediction. */
	private int motion_horiz_forw_code;
	/** The motion residual value of horizontal part for forward prediction. */
	private int motion_horiz_forw_r;
	/** The motion code value of vertical part for forward prediction. */
	private int motion_verti_forw_code;
	/** The motion residual value of vertical part for forward prediction. */
	private int motion_verti_forw_r;

	/** Equals <tt>true</tt> if full pixel accuracy is used for backward prediction, equals <tt>false</tt> if half pixel accuracy is used. */
	private boolean Full_pel_back_vector;
	/** The motion code value for backward prediction. */
	private int backward_f_code; 
	/** The size of the motion data for backward prediction. */
	private int backward_f;
	/** The size of residual value for backward prediction. */
	private int backward_r_size;
	/** The motion code value of horizontal part for backward prediction. */
	private int motion_horiz_back_code;
	/** The motion residual value of horizontal part for backward prediction. */
	private int motion_horiz_back_r;
	/** The motion code value of vertical part for backward prediction. */
	private int motion_verti_back_code;
	/** The motion residual value of vertical part for backward prediction. */
	private int motion_verti_back_r;

	/** The quantization factor. */
	private int Quant_scale; 
	/** The actual MB address. */
	private int macro_block_address;
	/** The past MB address. */
	private int past_intra_address;
	/** The actual row position of the macro block. */  
	private int mb_row;
	/** The actual column position of the macro block. */  
	private int mb_column; 

	/** <tt>True</tt> if motion vector for forward prediction exists. */
	private boolean macro_block_motion_forward;
	/** <tt>True</tt> if motion vector for backward prediction exists. */
	private boolean macro_block_motion_backward;
	/** <tt>True</tt> if a coded block pattern supplied. */
	private boolean macro_block_pattern;
	/** <tt>True</tt> if a new quantization factor is supplied. */
	private boolean macro_block_quant = false;
	/** <tt>True</tt> if a macro block is for intra coded frames. */
	private boolean macro_block_intra = false;

	/** The array to grab the referred area from predicted frame (16x16 luma + two 8x8 chroma blocks). */
	private int pel1[] = new int[16 * 16 + 2 * 8 * 8];
	/** The array to grab the referred area from predicted frame (16x16 luma + two 8x8 chroma blocks). */
	private int pel2[] = new int[16 * 16 + 2 * 8 * 8];

	/** The object of class "motion_data" to notice and compute the forward motion values. */
	private motion_data Forward = new motion_data();
	/** The object of class "motion_data" to notice and compute the backward motion values. */
	private motion_data Backward = new motion_data();

	/** The number of pixels per luminance line. */
	private int pixel_per_lum_line;
	/** The number of pixels per colour line. */
	private int pixel_per_col_line;
	/** The increment for the luminance line, equals to pixel_per_lum_line - 8. */
	private int lum_y_incr;
	/** The increment for the colour line, equals to pixel_per_col_line - 8. */
	private int col_y_incr;

	/** The "Video Decoder" thread object. */
	private volatile Thread video_thread; 

	/** The current amount of printed video errors. */
	private int error_count = 0;

	/**
	 * Constructs a <code>MPEG_video</code> object, notices the parameters, inits "non_intra" and "null" matrixes
	 * and creates an VLC (Huffman) decoder object.
	 * @param play the reference to the applet.
	 * @param tool the input movie's data bit stream.
	 */
	MPEG_video(MPEG1 play, io_tool tool) {
		Player = play;
		mpeg_stream = tool;

		// Fill the default non-intra quantizer matrix (all 16) and zero out
		// the helper matrix used to reset the DCT coefficient buffer.
		for (int idx = 0; idx < 64; ++idx) {
			nullmatrix[idx] = 0;
			non_intramatrix[idx] = 16;
		}

		// Pre-normalize both quantizer matrices for the IDCT implementation.
		// The necessity of these two calls is only understandable after
		// reading "http://rnvs.informatik.tu-chemnitz.de/~ja/MPEG/HTML/IDCT.html"
		idct.norm(intramatrix);
		idct.norm(non_intramatrix);

		// Create the VLC (Huffman) decoder on top of the bit stream.
		Huf = new Huffmann(mpeg_stream);
	}

	/** Creates and launches the "Video Decoder" thread. */
	public void start() {
		Thread decoder = new Thread(this, "Video Decoder");
		video_thread = decoder;
		decoder.start();
	}

	/** The "Video Decoder" thread that is being stopped. */
	private volatile Thread moribund = null;

	/**
	 * Stops the "Video Decoder" thread.
	 * Clearing "video_thread" makes the loops in "run" fall through
	 * ("run" checks "thisThread == video_thread"); if the decoder has not
	 * exited after a short grace period it is interrupted.
	 */
	public void stop() {
		if (video_thread != null)
		{
			moribund = video_thread;
			video_thread = null;
			try {
				// Give the decoder a moment to notice the stop request and exit cleanly.
				Thread.sleep(100);
			} catch (InterruptedException ex) {
				// Preserve the caller's interrupt status instead of swallowing it.
				Thread.currentThread().interrupt();
			}
			// Snapshot the field: "run" may set "moribund" to null concurrently.
			Thread dying = moribund;
			if (dying != null) {
				try {
					// Still alive: interrupt any blocking stream read inside the decoder.
					dying.interrupt();
				} catch (Throwable tr) {
					// Keep Throwable here: in an applet a SecurityManager may veto
					// interrupt(); the thread may also already be dead. Best effort only.
				}
			}
		}
	}

	/**
	 * Waits (up to one second) until the "Video Decoder" thread is dead.
	 * Does nothing if the decoder is not currently being stopped.
	 */
	public void join(){
			// Snapshot the field: "run" may set "moribund" to null concurrently,
			// which previously could only be survived via the broad catch below.
			Thread dying = moribund;
			if (dying != null) {
				try {
					dying.join(1000);
				} catch (InterruptedException ex) {
					// Re-assert the interrupt instead of swallowing it.
					Thread.currentThread().interrupt();
				}
			}
	}

	/**
	 * The method "run" parses the MPEG video stream
	 * according to ISO 11172-2 and performs some initial steps.
	 * This method is called by the JRE after the "Video Decoder" thread is started.
	 * The loops poll "video_thread" so that "stop" can request a clean shutdown.
	 */
	public void run() {
                Thread thisThread = Thread.currentThread();
//		thisThread.setPriority(Thread.MAX_PRIORITY);
		try {
//			thisThread.sleep(1000); // don't start right away... (dirty)

			// Skip byte-wise until the first sequence start code is found.
			while (!mpeg_stream.next_bits(SEQ_START_CODE, 32))
			{ 
				mpeg_stream.skip_bits(8);
				mpeg_stream.next_start_code();
			}

			mpeg_stream.next_start_code();
			do {
				Parse_sequence_header();

				/* After reading the header the dimensions are known.
				 * Therefore let's resize the applet and (probably)
				 * the frame and initialize the 2 prediction objects.
				 * Then create the pel buffer:
				 */

				if (Pel_buffer == null) { // The sequence header can appear many times
					Player.set_dim(mb_width * 16, mb_height * 16, Width, Height);
					// 3 frames x (Y, Cr, Cb) planes, each mb-aligned to 16x16.
					Pel_buffer = new int[3][3][mb_width * 16 * mb_height * 16];

					// some derived values used in "set_lum_pixel", "set_col_pixel",
					// "correct_lum_pixel" and "correct_col_pixel":

					pixel_per_lum_line = mb_width << 4;
					pixel_per_col_line = mb_width << 3;
					lum_y_incr = pixel_per_lum_line - 8;
					col_y_incr = pixel_per_col_line - 8;
					Forward.init(pixel_per_lum_line, pixel_per_col_line, pixel_per_lum_line - 16, col_y_incr);
					Backward.init(pixel_per_lum_line, pixel_per_col_line, pixel_per_lum_line - 16, col_y_incr);
				}

				// Decode every group of pictures belonging to this sequence.
				do {
					Parse_group_of_pictures();
				}
				while (thisThread == video_thread &&
					   !mpeg_stream.is_eof() && mpeg_stream.next_bits(GOP_START_CODE, 32));

				// go to next SEQ_CODE... <-- I know... it should be there already... but... :)
				while (!mpeg_stream.next_bits(SEQ_START_CODE, 32) && !mpeg_stream.is_eof()
						&& thisThread == video_thread)
				{
					mpeg_stream.get_bits(8);
				}
			}
			while(thisThread == video_thread && 
				  !mpeg_stream.is_eof() && mpeg_stream.next_bits(SEQ_START_CODE, 32));

		} catch (InterruptedException ex) {
			// Expected shutdown path: "stop" interrupts a blocking read.
//			System.out.println("MPEG_video::run() caught InterruptedException.");
//			mpeg_stream.buhBye();
		} catch (EOFException eof) {// normal end of stream
//			Player.movieScreen.movieState = MovieScreen.ENDED;
		}

		System.out.println("MPEG_video::run(): saying goodnight.  Tip your waitresses.");
//		if(Player != null)
//		Player.movieScreen.myPaint();
		// Clear both thread references so "stop"/"join" see the decoder as dead.
		video_thread = null;
		moribund = null;

		Player.playerEnd(this);
	}

	/**
	 * The method "Parse_sequence_header" parses the sequence header according
	 * to ISO 11172-2 (field widths in the trailing comments below), optionally
	 * loading custom intra/non-intra quantizer matrices from the stream.
	 * @throws InterruptedException if another process interrupts the current process.
	 * @throws EOFException if the end of the file has been reached.
	 */
	private void Parse_sequence_header() throws InterruptedException, EOFException {
		if (mpeg_stream.get_bits(32) != SEQ_START_CODE) {
			MovieScreen.ErrNum = 3;
			MovieScreen.Msg = "SEQ_START_CODE expected";			
			Player.repaint();
			return;
		}
		Width = mpeg_stream.get_bits(12);	// horizontal_size (12 bits)
		Height  = mpeg_stream.get_bits(12);	// vertical_size (12 bits)
		// Round the pixel dimensions up to whole 16x16 macroblocks.
		mb_width = (Width + 15) / 16;
		mb_height = (Height + 15) / 16;
		Asp_ratio = mpeg_stream.get_bits(4);	// pel_aspect_ratio code
		Pic_rate = mpeg_stream.get_bits(4);	// picture_rate code
		Bit_rate = mpeg_stream.get_bits(18);	// bit_rate (in units of 400 bit/s)
		mpeg_stream.skip_bits(1);		// marker bit
		VBV_buffer = mpeg_stream.get_bits(10);	// vbv_buffer_size
		const_param = mpeg_stream.get_bits(1) == 1;	// constrained_parameters_flag
		quant_matrix = mpeg_stream.get_bits(1) == 1;	// load_intra_quantizer_matrix
		if (quant_matrix) {
			// Replace the default intra matrix with 64 bytes from the stream.
			for (int i = 0; i < 64; intramatrix[i++] = (0xff & mpeg_stream.get_bits(8)));
			idct.norm(intramatrix);
		}
		quant_matrix = mpeg_stream.get_bits(1) == 1;	// load_non_intra_quantizer_matrix
		if (quant_matrix) {
			// Replace the default non-intra matrix with 64 bytes from the stream.
			for (int i = 0; i < 64; non_intramatrix[i++] = (0xff & mpeg_stream.get_bits(8)));
			idct.norm(non_intramatrix);
		}
		mpeg_stream.next_start_code();

		//
		// Note: this is not compliant.  The spec allows for only one user_data block,
		// yet it appears that at least one encoder creates more than one.
		//
		while (mpeg_stream.next_bits(USER_START_CODE, 32)) {
			mpeg_stream.skip_bits(32);
			// Skip user data byte-wise until the next start-code prefix (0x000001).
			while (!mpeg_stream.next_bits(0x1, 24)) {
				mpeg_stream.skip_bits(8);
			}
		}
	}

	/**
	 * The method "Parse_group_of_pictures" parses the group of pictures
	 * according to ISO 11172-2: the time code and flags are read (but ignored),
	 * extension/user data are skipped, and then all pictures of the group
	 * are decoded.
	 * @throws InterruptedException if another process interrupts the current process.
	 * @throws EOFException if the end of the file has been reached.
	 */
	private void Parse_group_of_pictures() throws InterruptedException, EOFException {
		int bitz = mpeg_stream.get_bits(32);
		if (bitz != GOP_START_CODE) {
			MovieScreen.ErrNum = 4;
			MovieScreen.Msg = "GOP_START_CODE expected";
			Player.repaint();
			return;
		}
		// The time code information is simply ignored.
		Drop_Flag = mpeg_stream.get_bits(1) == 1;	// drop_frame_flag
		Hour = mpeg_stream.get_bits(5);
		Minute = mpeg_stream.get_bits(6);
		mpeg_stream.skip_bits(1);			// marker bit
		Second = mpeg_stream.get_bits(6);
		Pict_Count = mpeg_stream.get_bits(6);		// picture count within the second
		Closed_Group = mpeg_stream.get_bits(1) == 1;
		Broken_Link = mpeg_stream.get_bits(1) == 1;
		mpeg_stream.next_start_code();

		if (mpeg_stream.next_bits(EXT_START_CODE, 32)) {
			// Extension data at GOP level means an MPEG-2 stream: skip it and bail out.
			while (!mpeg_stream.next_bits(0x1,24)) {
				mpeg_stream.get_bits(8);
			}
			MovieScreen.ErrNum = 5;
			MovieScreen.Msg = "Cannot deal with MPEG-2 data stream!";
			return;
		}

		if (mpeg_stream.next_bits(USER_START_CODE, 32)) {
			// Skip user data byte-wise until the next start-code prefix.
			while (!mpeg_stream.next_bits(0x1,24)) {
				mpeg_stream.get_bits(8);
			}
		}

		// notice the frame number because it is reset to zero
		// at group of pictures

		Frame_nr_offset = Frame_nr + 2; // don't know why ???

		// Decode every picture of this group.
		while (!mpeg_stream.is_eof() && mpeg_stream.next_bits(PICTURE_START_CODE, 32))
		{
			Parse_picture();
		}
	}

	/**
	 * The method "Parse_picture" parses a picture according
	 * to ISO 11172-2. 
	 * @throws InterruptedException if another process interrupts the current process.
	 * @throws EOFException if the end of the file has been reached.
	 */
	/*
	 * This method determines the frame number in display
	 * order and the picture type. Depending on the picture type
	 * some special actions are performed. Especially the index of
	 * the referred frame for forward and backward prediction is to
	 * be defined.
	 */
	private void Parse_picture() throws InterruptedException, EOFException {
		int start_c;

		// Get presentation timestamp (if any)
		// FIXME: this fails when PICTURE_START_CODE crosses packet boundary
		// (only if the mpeg_stream buffer is empty right now
		//
                long pts = -1;
		
		// PTS is delivered in 90 kHz ticks; divide by 90 to get milliseconds.
		if(mpeg_stream.getBuffer().getCurrentReadFrame() != null)
			pts = mpeg_stream.getBuffer().getCurrentReadFrame().getTimeStamp() / 90;
			
//		System.out.println(pts);
//                if (mpeg_stream.getStream() instanceof CutBuffer)
//                        pts = ((CutBuffer)mpeg_stream.getStream()).getTimeStamp();

                // -- long ago, when people still used MPEG_scan... :)
                //long pts = mpeg_stream.getPts();

		if (mpeg_stream.get_bits(32) != PICTURE_START_CODE) {
			MovieScreen.Msg = "PICTURE_START_CODE expected";
			MovieScreen.ErrNum = 6;
			Player.repaint();
			return;
		}

		Temp_ref = mpeg_stream.get_bits(10); // picture number in display order
		Frame_nr = Frame_nr_offset + Temp_ref;
		Pic_Type = mpeg_stream.get_bits(3);	// I/P/B/D picture type
		VBV_Delay =  mpeg_stream.get_bits(16); // ignored
		// I and P pictures become reference frames: the old backward reference
		// turns into the forward-prediction frame.
		if (Pic_Type == P_TYPE || Pic_Type == I_TYPE) pred_idx = back_idx;
		if (Pic_Type == P_TYPE || Pic_Type == B_TYPE) {
			if (pred_idx == -1) {
				//System.out.println("Warning: No predictive Frame in P_FRAME");
				pred_idx = (ak_idx + 2) % 3; // fall back to a plausible buffer slot
			}

			Full_pel_forw_vector = mpeg_stream.get_bits(1) == 1;  // meaning of motion data
			forw_f_code = mpeg_stream.get_bits(3);			// THE
			forward_r_size = forw_f_code - 1;				// MOTION
			forward_f = 1 << forward_r_size;				// DATA

			Forward.set_pic_data(forward_f, Full_pel_forw_vector);// (forward)
		}
		if (Pic_Type == B_TYPE) {
			if (back_idx == -1) {
				System.out.println("Warning: No Backward Predictive Frame in B_TYPE");
				back_idx = (ak_idx + 1) % 3; // fall back to a plausible buffer slot
			}
			Full_pel_back_vector = mpeg_stream.get_bits(1) == 1;  // meaning of motion data
			backward_f_code = mpeg_stream.get_bits(3);	       // THE
			backward_r_size = backward_f_code - 1;		       // MOTION
			backward_f = 1 << backward_r_size;	               // DATA

			Backward.set_pic_data(backward_f, Full_pel_back_vector);// (backward)
		}
		// Skip "extra_bit_picture"/"extra_information_picture" bytes.
		while(mpeg_stream.next_bits(0x1,1)) {
			mpeg_stream.skip_bits(8);
		}
		mpeg_stream.skip_bits(1);
		mpeg_stream.next_start_code();

		if (mpeg_stream.next_bits(EXT_START_CODE, 32)) {
			// Skip picture-level extension data up to the next start-code prefix.
			mpeg_stream.skip_bits(32);
			while (!mpeg_stream.next_bits(0x1, 24)) {
				mpeg_stream.skip_bits(8);
			}
		}

		if (mpeg_stream.next_bits(USER_START_CODE,32)) {
			// Skip picture-level user data up to the next start-code prefix.
			mpeg_stream.skip_bits(32);
			while (!mpeg_stream.next_bits(0x1, 24)) {
				mpeg_stream.skip_bits(8);
			}
		}
		if (Pic_Type == 4) { // D-type picture: not supported -- implement ???
			MovieScreen.ErrNum = 7;
			MovieScreen.Msg = "can't decode D-Type Frames";
			return;
		}
		
		// Decode every slice belonging to this picture.
		start_c = mpeg_stream.peek_bits(32);
		while(start_c >= SLICE_MIN_START_CODE && start_c <= SLICE_MAX_START_CODE)
		{
			Parse_slice();
			start_c = mpeg_stream.peek_bits(32);
		}

		// A frame (picture) is ready. Pass the YUV values to the applet:
		if (Pic_Type != 4)
		{
			if(video_thread != null)
				Player.set_Pixels(Pel_buffer[ak_idx], Frame_nr, Pic_Type, pts, Pic_rate);
		}
		if (Pic_Type == P_TYPE || Pic_Type == I_TYPE) { // reorder the indexes
			back_idx = ak_idx; ak_idx = (ak_idx + 1) % 3;
		}
	}

	/**
	 * The method "Parse_slice" parses a slice according
	 * to ISO 11172-2. It determines quantization scale and
	 * the macroblock address of the first macro block of
	 * the slice, then decodes macroblocks until the next start code.
	 * @throws InterruptedException if another process interrupts the current process.
	 * @throws EOFException if the end of the file has been reached.
	 */
	private final void Parse_slice() throws InterruptedException, EOFException {
		int k = mpeg_stream.get_bits(32); // this field contains the 
						  // macro block address
		int b_nr = 0;			  // macroblock nr in slice

		past_intra_address = -2;	// initialization (see ISO 11172-2)
		dct_dc_y_past = dct_dc_cb_past = dct_dc_cr_past = 1024; // dito
		Forward.reset_prev(); Backward.reset_prev(); 		// reset motion data
		// The low byte of the slice start code is the slice's vertical
		// position (row, 1-based); from it the starting MB address is derived.
		// NOTE(review): this is computed before the start-code range check
		// below -- harmless on the error path (method returns), but worth noting.
		macro_block_address = ((k & 0xff) - 1) * mb_width - 1; //extract MB address
		if (k < SLICE_MIN_START_CODE || k > SLICE_MAX_START_CODE) {
			MovieScreen.ErrNum = 8; 
			MovieScreen.Msg = "SLICE START CODE expected";
			Player.repaint();
			return;
		}
		Quant_scale = mpeg_stream.get_bits(5);
		
		// Skip "extra_bit_slice"/"extra_information_slice" bytes.
		while (mpeg_stream.next_bits(0x1, 1)) {
			mpeg_stream.skip_bits(9);
			// mpeg_stream.get_bits(1);
			// mpeg_stream.get_bits(8);
		}
		mpeg_stream.skip_bits(1);

		// Decode macroblocks until the next start-code prefix (23 zero bits).
		//while (!mpeg_stream.next_bits(0x0, 23))
		do 
		{
			Parse_macroblock(b_nr++);
		}
		while (!mpeg_stream.next_bits(0x0, 23));
		mpeg_stream.next_start_code();
	}

	/**
	 * The method "Parse_macroblock" parses a macroblock according
	 * to ISO 11172-2. It is one of the most complex methods because of
	 * the great variety of the constitution of a macroblock. The
	 * constitution and existence of the most information fields
	 * depends on the constitution and existence of information fields
	 * before.
	 * <p/>Furthermore the decoding process is controlled by this method.
	 * In some situations some variables must be reset to some default
	 * values or in case of skipped macroblocks implicit values must be
	 * applied.
	 * <p/>Bear in mind that some variables used in this method are member
	 * (class) variables for later reference!
	 * @param b_nr the macroblock number in slice.
	 * @throws InterruptedException if another process interrupts the current process.
	 * @throws EOFException if the end of the file has been reached.
	 */
	private final void Parse_macroblock(int b_nr) throws InterruptedException, EOFException {
		try
		{

		int inc = 0;	// if the macro block increment is 
						// greater than 1 some blocks are
						// skipped; this requires special treatment.
		int cbp = 0x3f; // coded block pattern: it determins which of
						// the 6 blocks are really coded
		int inc_tmp, mb_a_tmp, mb_r_tmp, mb_c_tmp; // working variables
		int values[];	// return values of the Hufmann decoder

		while (mpeg_stream.next_bits(0xf, 11)) {
			mpeg_stream.skip_bits(11); // skip macro block escape
		}
		while (mpeg_stream.next_bits(0x8, 11)) {
			mpeg_stream.skip_bits(11); // macro block skipping
			inc += 33; 		  // every skip means +33
		}
		values = Huf.decode(11, Huf.tables.macro_block_inc); 
		inc += values[2];		  // decode macro block increment
		if (inc > 1) {	// special treatment for skipped macroblocks
			dct_dc_y_past = dct_dc_cr_past = dct_dc_cb_past = 1024; // default values
			if (Pic_Type == B_TYPE && b_nr > 0) { // in this case the motion vectors rest valid
				// Re-apply the previous macroblock's prediction to every skipped block.
				for (inc_tmp = inc - 1, mb_a_tmp = macro_block_address + 1; inc_tmp-- > 0; mb_a_tmp++) {
					mb_r_tmp = mb_a_tmp / mb_width;	// compute the macroblock row and
					mb_c_tmp = mb_a_tmp % mb_width; // column for the next skipped block
					if (macro_block_motion_forward) { // apply forward prediction
						if (!macro_block_motion_backward) { 
							Forward.copy_area(mb_r_tmp, mb_c_tmp, Pel_buffer[pred_idx], Pel_buffer[ak_idx]);
						} else {
							Forward.get_area(mb_r_tmp, mb_c_tmp, Pel_buffer[pred_idx], pel1);
						}
					}
					if (macro_block_motion_backward) { // apply backward prediction
						if (!macro_block_motion_forward) { 
							Backward.copy_area(mb_r_tmp, mb_c_tmp, Pel_buffer[back_idx],Pel_buffer[ak_idx]);
						}
						else {
							Backward.get_area(mb_r_tmp, mb_c_tmp, Pel_buffer[back_idx], pel2);
							Backward.put_area(mb_r_tmp, mb_c_tmp, pel1, pel2, Pel_buffer[ak_idx]);
						}
					}
				}
			}
			else if (Pic_Type != I_TYPE) { // in P_TYPE the motion vector is to be reset
				Forward.reset_prev();
				// NOTE(review): "!macro_block_motion_backward" is tested twice below;
				// it looks like a copy-paste typo for "!macro_block_motion_forward".
				// Since macro_block_motion_backward is always false in P-pictures
				// (see the P_TYPE case further down), the net condition is just
				// "b_nr > 0", which matches the ISO 11172-2 rule that skipped
				// macroblocks in P-pictures copy from the previous picture --
				// so "fixing" it could change behavior. Confirm against the spec
				// before touching it.
				if (b_nr > 0 && !macro_block_motion_backward && !macro_block_motion_backward) {
					for (inc_tmp = inc - 1, mb_a_tmp = macro_block_address + 1; inc_tmp-- > 0; mb_a_tmp++) {
						mb_r_tmp = mb_a_tmp / mb_width; // compute the macroblock row and
						mb_c_tmp = mb_a_tmp % mb_width; // column for the next skipped block
						Forward.copy_unchanged(mb_r_tmp, mb_c_tmp, Pel_buffer[pred_idx], Pel_buffer[ak_idx]);
					}
				}
			}
		}
		macro_block_address += inc; // (but) now: compute the new macro block address
		mb_row = macro_block_address / mb_width;    
		mb_column = macro_block_address % mb_width;
		switch (Pic_Type) { // depending on the frame type the existence of some information
			// fields must be determined
		case I_TYPE:
			macro_block_motion_forward =		// these information is certainly not
				macro_block_motion_backward =	// suppplied in intra coded frames
				macro_block_pattern = false; 
			macro_block_intra = true;	    // Of course!
			if (mpeg_stream.get_bits(1) == 1) { // possibly a
				macro_block_quant = false;  // new quantization
			}					 			// factor is
			else {				 			// supplied
				macro_block_quant = true;
				mpeg_stream.skip_bits(1);
			}
			break;
		case P_TYPE:
			values = Huf.decode(6, Huf.tables.p_type_mb_type);   // decode
			macro_block_quant = values[2] != 0;	   // extract
			macro_block_motion_forward = (values[3] == 1);
			macro_block_motion_backward = false;	// never set in P-pictures
			macro_block_pattern = (values[4] == 1);
			if (!(macro_block_intra = values[5] != 0)) { // default values
				dct_dc_y_past = dct_dc_cr_past = dct_dc_cb_past = 1024;
				cbp = 0;
			}
			break;
		case B_TYPE:
			values = Huf.decode(6, Huf.tables.b_type_mb_type); // decode
			macro_block_quant = values[2] != 0;	 // extract
			macro_block_motion_forward = (values[3] == 1);
			macro_block_motion_backward = (values[4] == 1);
			macro_block_pattern = (values[5] == 1);
			if (!(macro_block_intra = values[6] != 0)) { // default values
				dct_dc_y_past = dct_dc_cr_past = dct_dc_cb_past = 1024;
				cbp = 0;
			}
			break;
		default: MovieScreen.Msg = "unknown Frame-Type : " + Pic_Type;
			MovieScreen.ErrNum = 9; 
			Player.repaint();
			return;
		}
		if (macro_block_quant) { // extract new quantization factor
			Quant_scale = mpeg_stream.get_bits(5);
		}
		if (macro_block_motion_forward) { // motion vector for forward prediction exists
			values = Huf.decode(11, Huf.tables.motion_code); // decode horizontal motion information
			motion_horiz_forw_code = values[2];	  // extract horizontal motion information
			if (forward_f != 1 && motion_horiz_forw_code != 0) {
				motion_horiz_forw_r = mpeg_stream.get_bits(forward_r_size);
			}
			values = Huf.decode(11, Huf.tables.motion_code); // decode vertical motion information
			motion_verti_forw_code = values[2];	  // extract vertical motion information
			if (forward_f != 1 && motion_verti_forw_code != 0) {
				motion_verti_forw_r = mpeg_stream.get_bits(forward_r_size);
			}
			// according to this information the motion vector must be decoded
			Forward.compute_motion_vector(motion_horiz_forw_code, motion_verti_forw_code,
										  motion_horiz_forw_r, motion_verti_forw_r);
				
			// grab the referred area into "pel1"
			if (Pic_Type != B_TYPE || !macro_block_motion_backward) { // no backward prediction
				// put the grabbed area into the actual frame:
				Forward.copy_area(mb_row, mb_column, Pel_buffer[pred_idx], Pel_buffer[ak_idx]);
			} else {
				Forward.get_area(mb_row, mb_column, Pel_buffer[pred_idx], pel1);
			}
		}
		else if (Pic_Type != B_TYPE){ // (only) in P_TYPE the motion vector is to be reset. 
			Forward.reset_prev(); // (in B_TYPE it rests valid)
		}
		if (macro_block_motion_backward) { // motion vector for backward prediction exists
			values = Huf.decode(11, Huf.tables.motion_code); // decode horizontal motion information
			motion_horiz_back_code = values[2];	  // extract horizontal motion information
			if (backward_f != 1 && motion_horiz_back_code != 0) { 
				motion_horiz_back_r = mpeg_stream.get_bits(backward_r_size);
			}
			values = Huf.decode(11, Huf.tables.motion_code);  // decode vertical motion information
			motion_verti_back_code = values[2];	   // extract vertical motion information
			if (backward_f != 1 && motion_verti_back_code != 0) {
				motion_verti_back_r = mpeg_stream.get_bits(backward_r_size);
			}
			// according to this information the motion vector must be decoded
			Backward.compute_motion_vector(motion_horiz_back_code, motion_verti_back_code,
										   motion_horiz_back_r, motion_verti_back_r);
				
			if (!macro_block_motion_forward) { // no forward prediction
				// put the grabbed area into the actual frame:
				Backward.copy_area(mb_row, mb_column, Pel_buffer[back_idx], Pel_buffer[ak_idx]);
			} else { // forward and backward prediction:
				// grab the refered area into "pel2"
				Backward.get_area(mb_row, mb_column, Pel_buffer[back_idx], pel2);
				// put the average of the 2 areas into the actual frame:
				Backward.put_area(mb_row, mb_column, pel1, pel2, Pel_buffer[ak_idx]);
			}
		}
		
		if (macro_block_pattern) { // coded block pattern supplied
			values = Huf.decode(9, Huf.tables.block_pattern); // decode coded block pattern
			cbp = values[2];			   // extract
		}

		// A P-type macroblock without any motion vector is copied verbatim
		// from the prediction frame (zero motion vector).
		if (Pic_Type == P_TYPE && !macro_block_motion_backward && !macro_block_motion_forward) {
			Forward.copy_unchanged(mb_row, mb_column, Pel_buffer[pred_idx], Pel_buffer[ak_idx]);
		}
		lum_block = false; // there wasn't any luminace block in this macroblock
		for (int i = 0; i < 6; i++) { // all 6 blocks (4 luma, Cb, Cr)
			if ((cbp & (1 << (5 - i))) != 0) { // block information supplied ?
				Parse_Block(i);		   // yes -->  get block information

				try { // corrupt video could generate an error here...
					if (macro_block_intra) {   // in intra macro blocks the values are absolute
						if (i < 4) set_lum_pixel(i);
						else	   set_col_pixel(i);
					}
					else { // in inter coded macroblocks the values are correctings
						if (i < 4) correct_lum_pixel(i);
						else       correct_col_pixel(i);
					}
				} 
				catch (Exception e) {
					// Deliberate best-effort handling: report the first few
					// failures, then keep decoding silently.
					error_count ++;
					if (error_count < 5)
						System.out.println("Could not draw macroblock... corrupt video?");
					else if (error_count == 5)
						System.out.println("Ignoring minor errors in MPEG_video...");
				}
			}
		}
		if (Pic_Type == B_TYPE && macro_block_intra) {		// otherwise the motion
			Forward.reset_prev(); Backward.reset_prev();    // vectors rest valid
		}

		}catch(Exception e) // VERY corrupt video -> skip to next GOP-START
		{
			// Resynchronize: discard bytes until the next group-of-pictures start code.
			mpeg_stream.next_start_code();
			while (!mpeg_stream.next_bits(GOP_START_CODE, 32))
			{
				mpeg_stream.get_bits(8);
			}
			System.out.println("hmmm... What is this you are feeding me... I'm not eating THAT!!"+e);
//			e.printStackTrace(System.out);
		}
	}

	/**
	 * The method "Parse_Block" parses a block according to ISO 11172-2.
	 * Thereby the DC and AC coefficients are reconstructed and placed
	 * into the "dct_recon" field in de-"zigzag"-ed order. After that
	 * the IDCT routine is called. The method counts the coefficients
	 * and calls a sparse IDCT method if the coefficient count is 1.
	 * @param nr the block number (0..6) in macroblock.
	 * @throws InterruptedException if another process interrupts the current process.
	 * @throws EOFException if the end of the file has been reached.
	 */
	private final void Parse_Block(int nr) throws InterruptedException, EOFException {
		int idx = 0, size, sign, idx_run  = 0, level; // working variables
		int coeffCount = 0;	// coefficient count
		int pos = 0;		// the actual (de-"zigzag"-ed) position of the coefficient
		int values[];		// return values of the Hufmann decoder
		int pValues;		// Integer-packed dual huffman value

		// Clear the 8x8 reconstruction buffer before decoding this block.
		System.arraycopy(nullmatrix, 0, dct_recon, 0, 64); // initialization
		if (macro_block_intra) {
			// Intra block: the DC coefficient is DPCM-coded relative to the
			// previous block of the same component (Y, Cb or Cr).
			if (nr < 4) { // luminance block
				values = Huf.decode(7, Huf.tables.dct_size_luminance);
				size = values[2]; // size of the DC coefficient
				if (size != 0) {
					// read "size" bits and reconstruct the signed DC difference
					set_dct_diff(mpeg_stream.get_bits(size), size);
				}
				if (lum_block) { // not first luminance block
					// predict from the previous Y block of this macroblock
					dct_dc_y_past = dct_recon[0] = dct_dc_y_past + (dct_recon[0] << 3);
				}
				else {		// first luminance block
					lum_block = true;
					dct_recon[0] <<= 3;
					if (macro_block_address - past_intra_address > 1) {
						// macroblocks were skipped: reset the DC predictor to 1024
						dct_dc_y_past = dct_recon[0] += 1024;
					}
					else { // relative if no skipping
						dct_dc_y_past = dct_recon[0] += dct_dc_y_past;
					}
				}
				// NOTE(review): this assignment also happens unconditionally below
				// (after the chrominance branch); once it runs here, the chroma
				// "skipped macroblock" test can no longer see a gap — confirm
				// this matches the intended predictor-reset behaviour.
				past_intra_address = macro_block_address; // notice
			}
			else { // chrominance block
				values = Huf.decode(8, Huf.tables.dct_size_crominance);
				size = values[2];
				if (size != 0) {
					set_dct_diff(mpeg_stream.get_bits(size), size);
				}
				// nr == 4 and nr == 5 use separate DC predictors (Cb / Cr).
				switch (nr) {
					case 4: dct_recon[0] <<= 3;
						if (macro_block_address - past_intra_address > 1) {
							dct_dc_cb_past = dct_recon[0] += 1024;
						}
						else { // relative if no skipping
							dct_dc_cb_past = dct_recon[0] += dct_dc_cb_past;
						}
						break;
					case 5: dct_recon[0] <<= 3;
						if (macro_block_address - past_intra_address > 1) {
							dct_dc_cr_past = dct_recon[0] += 1024;
						}
						else { // relative if no skipping
							dct_dc_cr_past = dct_recon[0] += dct_dc_cr_past;
						}
						break;
				}
			}
			past_intra_address = macro_block_address; // notice
			if (dct_recon[0] != 0) coeffCount = 1; // count coefficients
			dct_recon[0] <<= idct.VAL_BITS - 3;    // because of the IDCT technique:
							       // the DC values are not quantized;
							       // therefore the fix point translation
							       // must be performed
		}
		else { //  no intra coded block --> first AC value
			if (mpeg_stream.next_bits(0x1, 1)) { // special treatment of the VLC "1"
				idx = 0; 
				mpeg_stream.skip_bits(1);
				sign = level = mpeg_stream.get_bits(1) == 0 ? 1 : -1; // the sign follows
			}
			else {
				pValues = Huf.decodeCoeff(); // decode AC value
				idx = pValues & 0xff;			// extract AC value
				if (idx == Huf.DCT_ESCAPE) {		// special treatment
					idx  = mpeg_stream.get_bits(6); // once again
					if ((((level = mpeg_stream.get_bits(8)) & 0x7f) == 0)) { // 16 bit
						level = (level << 8) | mpeg_stream.get_bits(8); 
						if ((level & 0x8000) != 0) level |= 0xffffff00; // sign ??
					}
					else if ((0x80 & level) != 0) { // sign ??
						level |= 0xffffff00;
					}
				}
				else { // "normal" treatment (no escape); extract AC coefficient
					level = mpeg_stream.get_bits(1) == 0 ? pValues >> 8 : -(pValues >> 8);
				}
				sign = (level == 0) ? 0 : ((level < 0) ? -1 : 1);  // determine sign
			}
			pos = zigzag[idx]; // de-"zigzag" 

			// Quantization:

			dct_recon[pos] = ((level + sign) * Quant_scale *  non_intramatrix[pos]) >> 3;

			/* oddification (mismatch control, ISO 11172-2) */
			// NOTE(review): this forces even values toward the nearest odd value
			// even when the reconstructed coefficient is 0 (making it -1) —
			// confirm that the zero case is meant to be oddified as well.

			if ((dct_recon[pos] & 1) == 0) {
				if (dct_recon[pos] >= 0)
					dct_recon[pos]--;
				else
					dct_recon[pos]++;
			}

 	/*++++++ NOTE: The "oddification" for this first non-intra AC value is ++++*/
	/*++++++       implemented directly above (the old comment claiming it ++++*/
	/*++++++       was absent was stale).                                  ++++*/
	
			if (level != 0) coeffCount++; // count the coefficients
		}

//		int bits = 0;

		// Remaining AC coefficients: run/level pairs until end-of-block.
		pValues = Huf.decodeCoeff(); // decode AC value
		while((idx_run = (pValues & 0xff)) != Huf.EOB) { // no end of block; read the other AC values 
			if (idx_run == Huf.DCT_ESCAPE) {           // special treatment
				idx_run = mpeg_stream.get_bits(6); // once again
				if ((((level = mpeg_stream.get_bits(8)) & 0x7f) == 0)) { // 16 bit
					level = (level << 8) | mpeg_stream.get_bits(8);
					if ((level & 0x8000) != 0) level |= 0xffffff00;  // sign ??
				}
				else if ((0x80 & level) != 0) { // sign ??
					level |= 0xffffff00;
				}
				idx += idx_run + 1; // the position is now given as a difference
			}
			else { // "normal" treatment (no escape); extract AC coefficient
				idx += idx_run + 1; // the position is now given as a difference
				level = mpeg_stream.get_bits(1) == 0 ? (pValues >> 8) : -(pValues >> 8); 
			}

			// clamp against corrupt streams that run past the 8x8 block
			if (idx > 63) idx = 63;
			pos = zigzag[idx]; // de-"zigzag"

			if (macro_block_intra) { // different treatment of quantization depending on type
				dct_recon[pos] = (level * Quant_scale *  intramatrix[pos]) >> 3;
			}
			else {
				sign = (level == 0) ? 0 : ((level < 0) ? -1 : 1); 
				dct_recon[pos] = ((level + sign) * Quant_scale *  non_intramatrix[pos]) >> 3;
			}

			/* oddification (mismatch control, ISO 11172-2) */

			if ((dct_recon[pos] & 1) == 0) {
				if (dct_recon[pos] >= 0)
					dct_recon[pos]--;
				else
					dct_recon[pos]++;
			}

 	/*++++++ NOTE: The "oddification" for the AC values is implemented     ++++*/
	/*++++++       directly above (the old comment claiming it was absent  ++++*/
	/*++++++       was stale).                                             ++++*/

			if (level != 0) coeffCount++; // count the coefficients
			pValues = Huf.decodeCoeff(); // decode next value
/*
			bits = mpeg_stream.get_bits(16); // decode next value

			if(bits >= 1024) {

				pValues = Huffmann.dct_coeff_val_tab2[bits >> 8];
				mpeg_stream.bit_pos += Huffmann.dct_coeff_len_tab2[bits >> 8];

			} else {

				pValues = Huffmann.dct_coeff_val_tab1[bits];
				mpeg_stream.bit_pos += Huffmann.dct_coeff_len_tab1[bits];
			}
*/

		}

		// Sparse shortcut: with a single coefficient a full 2-D IDCT is wasted.
		if (coeffCount == 1) { // only one coefficient ??
			idct.invers_dct_special (dct_recon, pos); // call a sparse method
		}
		else {
			idct.invers_dct(dct_recon); // full decoding
		}

		// If you want to kill something, kill this code!! :)
//		for (int i=0; i<64; i++)
//		{
			//dct_recon[i] <<= 2;
			//dct_recon[i] += (count%3 == 0) ? 0 : 1; // tries to counter greenish-bug
			//dct_recon[i] += (count%4 == 0) ? 0 : 1; // tries to counter greenish-bug
//		}

//		count++;
	}

//	private int count = 0;
//	private static double D = (256.0 / Math.pow(256.0, 1.2));

	/** 
	 * The method "set_dct_diff" computes the DCT difference according to
	 * ISO 11172-2. It sets "dct_recon[0]".
	 * @param dct_diff the difference of the DC coefficient
	 * @param dct_size the size of the DC coefficient.
	 */
	private void set_dct_diff(int dct_diff, int dct_size) {
		// A set top bit of the "dct_size"-bit field means the difference is
		// positive and is the value itself; otherwise the difference is
		// negative and must be sign-extended (ISO 11172-2 DC reconstruction).
		boolean positive = (dct_diff & (1 << (dct_size - 1))) != 0;
		dct_recon[0] = positive ? dct_diff : (((-1) << dct_size) | (dct_diff + 1));
	}

	/**
	 * The method "set_lum_pixel" takes the re-transformed luminance values and
	 * places them at the appropriate position. Note that the variables:
	 * <pre>
	 * 			pixel_per_lum_line
	 *			mb_row
	 *			mb_column
	 * </pre>
	 * are computed in "run()" as soon as it is possible.
	 * @param nr the block number (0..6) in macroblock.
	 */
	private void set_lum_pixel(int nr) {
		// Offset of this 8x8 block inside the 16x16 luminance macroblock:
		// bit 1 of "nr" selects the lower half, bit 0 the right half.
		int dest = pixel_per_lum_line * ((mb_row << 4) + ((nr & 0x2) << 2))
				+ (mb_column << 4) + ((nr & 0x1) << 3);
		int[] luminance = Pel_buffer[ak_idx][0];

		// Store the block row by row into the luminance plane.
		for (int row = 0; row < 8; row++) {
			System.arraycopy(dct_recon, row << 3, luminance, dest, 8);
			dest += pixel_per_lum_line;
		}
	}

	/**
	 * The method "set_col_pixel" takes the re-transformed chrominance values and
	 * places them at the appropriate position.<br/><br/> 
	 * 
	 * Note that the variables:
	 * <pre>
	 * 			pixel_per_col_line
	 *			mb_row
	 *			mb_column
	 * </pre>
	 * are computed in "run()" as soon it was possible.
	 * @param nr the block number (0..6) in macroblock.
	 */
	private void set_col_pixel(int nr) {
		int dest = pixel_per_col_line * (mb_row << 3) + (mb_column << 3);
		int[] plane;

		// Block 4 is stored into chrominance plane 2, block 5 into plane 1;
		// any other block number leaves the buffers untouched.
		if (nr == 4) {
			plane = Pel_buffer[ak_idx][2];
		} else if (nr == 5) {
			plane = Pel_buffer[ak_idx][1];
		} else {
			return;
		}

		// Store the 8x8 block row by row into the selected plane.
		for (int row = 0; row < 8; row++) {
			System.arraycopy(dct_recon, row << 3, plane, dest, 8);
			dest += pixel_per_col_line;
		}
	}
		
	/**
	 * The method "correct_lum_pixel" is called in predicted macro blocks. Because
	 * the values in "dct_recon" are motion compensation information the task of
	 * this method is to correct the already supplied (copied) luminance values.<br/><br/>
	 *
	 * Note that the variables:
	 * <pre>
	 * 			pixel_per_lum_line
	 *			lum_y_incr
	 *			mb_row
	 *			mb_column
	 * </pre>
	 * are computed in "run()" as soon it was possible.
	 * @param nr the block number (0..6) in macroblock.
	 */
	private void correct_lum_pixel(int nr) {
		// Same block addressing as "set_lum_pixel": locate this 8x8 block
		// inside the 16x16 luminance macroblock.
		int dest = pixel_per_lum_line * ((mb_row << 4) + ((nr & 0x2) << 2))
				+ (mb_column << 4) + ((nr & 0x1) << 3);
		int[] luminance = Pel_buffer[ak_idx][0];
		int src = 0;

		// Add the decoded correction terms onto the predicted pixels.
		for (int row = 0; row < 8; row++) {
			for (int col = 0; col < 8; col++) {
				luminance[dest++] += dct_recon[src++];
			}
			dest += lum_y_incr; // skip to the next row (pixel_per_lum_line - 8)
		}
	}

	/**
	 * The method "correct_col_pixel" is called in predicted macro blocks. Because
	 * the values in "dct_recon" are motion compensation information the task of
	 * this method is to correct the already supplied (copied) chrominance values.<br/><br/>
	 *
	 * Note that the variables:
	 * <pre>
	 * 			pixel_per_col_line
	 *			col_y_incr
	 *			mb_row
	 *			mb_column
	 * </pre>
	 * are computed in "run()" as soon it was possible.
	 * @param nr the block number (0..6) in macroblock.
	 */
	private void correct_col_pixel(int nr) {
		int dest = pixel_per_col_line * (mb_row << 3) + (mb_column << 3);
		int src = 0;
		int[] plane;

		// Block 4 corrects chrominance plane 2, block 5 plane 1; any other
		// block number leaves the buffers untouched.
		if (nr == 4) {
			plane = Pel_buffer[ak_idx][2];
		} else if (nr == 5) {
			plane = Pel_buffer[ak_idx][1];
		} else {
			return;
		}

		// Add the decoded correction terms onto the predicted pixels.
		for (int row = 0; row < 8; row++) {
			for (int col = 0; col < 8; col++) {
				plane[dest++] += dct_recon[src++];
			}
			dest += col_y_incr; // skip to the next row (pixel_per_col_line - 8)
		}
	}
}