package com.example.camerafacedetect;

import android.Manifest;
import android.app.Activity;
import android.app.AlertDialog;
import android.app.Dialog;
import android.content.Context;
import android.content.DialogInterface;
import android.content.pm.PackageManager;
import android.content.res.Configuration;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.ImageFormat;
import android.graphics.Paint;
import android.graphics.Point;
import android.graphics.PointF;
import android.graphics.Rect;
import android.graphics.SurfaceTexture;
import android.hardware.Camera;
import android.hardware.camera2.CameraAccessException;
import android.hardware.camera2.CameraCaptureSession;
import android.hardware.camera2.CameraCharacteristics;
import android.hardware.camera2.CameraDevice;
import android.hardware.camera2.CameraManager;
import android.hardware.camera2.CaptureRequest;
import android.hardware.camera2.params.StreamConfigurationMap;
import android.media.FaceDetector;
import android.media.Image;
import android.media.ImageReader;
import android.os.Bundle;
import android.os.Handler;
import android.os.HandlerThread;
import android.util.Log;
import android.util.Range;
import android.util.Size;
import android.util.SparseIntArray;
import android.view.Surface;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import android.view.TextureView;

import androidx.annotation.Nullable;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.app.ActivityCompat;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.concurrent.TimeUnit;


public class LLCamreaActivity extends AppCompatActivity implements TextureView.SurfaceTextureListener {
    private TextureView mPreviewView;
    private Handler mHandler;
    private HandlerThread mThreadHandler;
    private Size mPreviewSize;
    private CaptureRequest.Builder mCaptureRequest;
    private static final String TAG = "++++++ LLCamreaActivity";
    private SurfaceHolder mSurfaceHolder = null;


    // Maps display rotation (Surface.ROTATION_*) to the JPEG orientation to request.
    private static final SparseIntArray ORIENTATIONS = new SparseIntArray();
    // Hardware (MediaCodec-based) encoder; created and started in startPreview().
    private  LLAHardEncode mHardEncode;
    static
    {
        ORIENTATIONS.append(Surface.ROTATION_0, 90);// device rotation 0 -> sensor orientation 90
        ORIENTATIONS.append(Surface.ROTATION_90, 0);// ROTATION_90 == 1
        ORIENTATIONS.append(Surface.ROTATION_180, 270);
        ORIENTATIONS.append(Surface.ROTATION_270, 180);
    }
    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_llcamera);

        // Keep a handle on the SurfaceView's holder (the live preview itself is
        // rendered through the TextureView wired up in initView()).
        final SurfaceView surfaceView = findViewById(R.id.surfaceview);
        mSurfaceHolder = surfaceView.getHolder();

        initView();
        initLooper();
    }

    @Override
    protected void onResume() {
        // No extra work on resume; the camera is opened from onSurfaceTextureAvailable.
        super.onResume();
    }

    // Many camera2 operations are asynchronous, so a background Looper thread is
    // needed to receive their callbacks off the UI thread.
    private void initLooper() {
        mThreadHandler = new HandlerThread("CAMERA2");
        mThreadHandler.start();
        // getLooper() only returns once the thread has started and its Looper is
        // initialized — it blocks (synchronizes) during thread initialization, so it
        // is safe to call immediately after start().
        mHandler = new Handler(mThreadHandler.getLooper());
    }

    // The preview can be rendered through either a TextureView or a SurfaceView;
    // this activity drives everything from the TextureView's SurfaceTexture callbacks.
    private void initView() {
        // findViewById is generic (androidx / API 26+), so no explicit cast is needed.
        mPreviewView = findViewById(R.id.textureview);
        mPreviewView.setSurfaceTextureListener(this);
    }

    // Id of the camera in use: "0" = back camera, "1" = front camera.
    private String mCameraId;

    /**
     * Called when the TextureView's SurfaceTexture is ready: picks a preview size,
     * logs the available cameras, and opens the back camera on the background thread.
     */
    @Override
    public void onSurfaceTextureAvailable(SurfaceTexture surface, int width, int height) {
        try {
            mCameraId = "0"; // default to the back camera
            // CameraManager gives access to every camera device on the system.
            CameraManager cameraManager = (CameraManager) this.getSystemService(Context.CAMERA_SERVICE);
            // Static capabilities of the chosen camera.
            CameraCharacteristics characteristics = cameraManager.getCameraCharacteristics(mCameraId);
            StreamConfigurationMap map = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);

            // Prefer a fixed 640x480 preview; fall back to the first supported
            // SurfaceTexture output size when 640x480 is not offered for YUV_420_888.
            List<Size> yuvSizes = Arrays.asList(map.getOutputSizes(ImageFormat.YUV_420_888));
            Size preferred = new Size(640, 480);
            if (yuvSizes.contains(preferred)) {
                mPreviewSize = preferred;
            } else {
                mPreviewSize = map.getOutputSizes(SurfaceTexture.class)[0];
            }

            // Log every camera id and note which one is the front-facing camera.
            for (String cameraId : cameraManager.getCameraIdList()) {
                CameraCharacteristics characteristic
                        = cameraManager.getCameraCharacteristics(cameraId);
                Integer facing = characteristic.get(CameraCharacteristics.LENS_FACING);
                Log.d(TAG, "onSurfaceTextureAvailable: " + cameraId);
                if (facing != null && facing == CameraCharacteristics.LENS_FACING_FRONT) {
                    Log.d(TAG, "onSurfaceTextureAvailable: front camera is cameraid=" + cameraId);
                    break;
                }
            }

            // The camera cannot be opened without the runtime CAMERA permission.
            if (ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA) !=
                    PackageManager.PERMISSION_GRANTED) {
                // TODO: request the permission via ActivityCompat#requestPermissions and
                // retry from onRequestPermissionsResult.
                return;
            }
            // Open the camera; the state callback runs on mHandler's background thread
            // (a null handler would mean the current thread's Looper instead).
            cameraManager.openCamera(mCameraId, mCameraDeviceStateCallback, mHandler);
        } catch (CameraAccessException e) {
            // Log with the cause instead of silently swallowing via printStackTrace().
            Log.e(TAG, "onSurfaceTextureAvailable: failed to open camera " + mCameraId, e);
        }
    }

    @Override
    public void onSurfaceTextureSizeChanged(SurfaceTexture surface, int width, int height) {
        // The preview buffer size is fixed in startPreview(); nothing to reconfigure here.
    }

    @Override
    public boolean onSurfaceTextureDestroyed(SurfaceTexture surface) {
        // Returning false means the SurfaceTexture is NOT released automatically;
        // the owner remains responsible for releasing it.
        return false;
    }

    @Override
    public void onSurfaceTextureUpdated(SurfaceTexture surface) {
        // Fires on every new preview frame; frames are consumed via mImageReader instead.
    }

    // The opened camera device; assigned from the background camera thread in onOpened().
    public  CameraDevice mCameraDevice;
    // Lifecycle callback for CameraManager.openCamera(): starts the preview once the
    // device is open, and closes/clears the device on disconnect or error.
    private CameraDevice.StateCallback mCameraDeviceStateCallback = new CameraDevice.StateCallback() {

        @Override
        public void onOpened(CameraDevice camera) {
            try {
                mCameraDevice = camera;// keep a reference to the opened camera
                startPreview(camera);
            } catch (CameraAccessException e) {
                e.printStackTrace();
            }
        }

        @Override
        public void onDisconnected(CameraDevice camera) {
            camera.close();
            mCameraDevice = null;
        }

        @Override
        public void onError(CameraDevice camera, int error) {
            camera.close();
            mCameraDevice = null;
        }
    };


    // Delivers raw YUV frames from the capture session to mOnImageAvailableListener.
    public ImageReader mImageReader;

    /**
     * Configures the capture pipeline for the opened camera: the TextureView preview
     * surface, an ImageReader for raw YUV_420_888 frames, the hardware encoder, and
     * finally the capture session. Runs on the background camera thread.
     *
     * @param camera the device delivered by {@link CameraDevice.StateCallback#onOpened}
     * @throws CameraAccessException if the characteristics cannot be read, the capture
     *                               request cannot be created, or the session fails
     */
    private void startPreview(CameraDevice camera) throws CameraAccessException {
        SurfaceTexture texture = mPreviewView.getSurfaceTexture();

        // This buffer size is what determines the preview resolution.
        texture.setDefaultBufferSize(mPreviewSize.getWidth(), mPreviewSize.getHeight());
        Surface surface = new Surface(texture);

        // TEMPLATE_PREVIEW: a repeating request tuned for preview (TEMPLATE_RECORD,
        // still-capture, etc. also exist). Let CameraAccessException propagate —
        // swallowing it here would leave mCaptureRequest null and NPE further down.
        mCaptureRequest = camera.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
        // Per-capture parameters (exposure, autofocus, ...) go through set(key, value), e.g.:
        // mCaptureRequest.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON_AUTO_FLASH);

        // Pick the ImageReader resolution: prefer 640x480, else the largest YUV size.
        // Some devices (e.g. Xiaomi) do not support NV21 output, so YUV_420_888 is
        // captured and converted manually later.
        CameraManager manager = (CameraManager) getSystemService(Context.CAMERA_SERVICE);
        CameraCharacteristics characteristics
                = manager.getCameraCharacteristics(mCameraId);
        StreamConfigurationMap map = characteristics.get(
                CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
        List<Size> yuvSizes = Arrays.asList(map.getOutputSizes(ImageFormat.YUV_420_888));
        Size readerSize = new Size(640, 480);
        if (!yuvSizes.contains(readerSize)) {
            readerSize = Collections.max(yuvSizes, new CompareSizesByArea());
        }

        // Frames are rotated a quarter turn before encoding, so the encoder is
        // initialized with width and height swapped.
        mLastDate = new Date(System.currentTimeMillis());
        mHardEncode = new LLAHardEncode(readerSize.getHeight(), readerSize.getWidth(), 20, this);
        mHardEncode.startEncoderThread();
        mHardEncode.startSendPacketThread();

        // maxImages = 2: how many images may be acquired at once (in practice the
        // pipeline holds one extra image beyond this number).
        mImageReader = ImageReader.newInstance(readerSize.getWidth(), readerSize.getHeight(),
                ImageFormat.YUV_420_888, 2);
        mImageReader.setOnImageAvailableListener(mOnImageAvailableListener, mHandler);

        // Orientation: combine the sensor orientation (typically 90) with the display
        // rotation. NOTE(review): the +180 offset looks empirically tuned — with the
        // current ORIENTATIONS table it falls through to the SparseIntArray default; confirm.
        mSensorOrientation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
        int rotation = getWindowManager().getDefaultDisplay().getRotation();
        mCaptureRequest.set(CaptureRequest.JPEG_ORIENTATION
                , getOrientation(rotation + 180));

        // Both surfaces must be added as targets: without the TextureView surface there
        // is no visible preview; without the ImageReader surface its callback never fires.
        mCaptureRequest.addTarget(surface);
        mCaptureRequest.addTarget(mImageReader.getSurface());
        mCameraDevice.createCaptureSession(Arrays.asList(surface, mImageReader.getSurface()),
                mSessionStateCallback, mHandler);
    }


    // Timestamp of the last frame accepted by the throttle below.
    private Date mLastDate;

    /**
     * Receives raw YUV_420_888 frames from the ImageReader.
     * <p>
     * Every image acquired from the reader MUST be closed, otherwise the reader's
     * buffer queue fills up and the preview freezes — including frames that are
     * dropped by the frame-rate throttle.
     */
    private ImageReader.OnImageAvailableListener mOnImageAvailableListener
            = new ImageReader.OnImageAvailableListener() {

        @Override
        public void onImageAvailable(ImageReader reader) {
            // Runs on the background camera thread (mHandler).

            // Throttle to ~20 fps: drop frames arriving less than 50 ms apart.
            Date currentDate = new Date(System.currentTimeMillis());
            long intervalTime = currentDate.getTime() - mLastDate.getTime();
            if (intervalTime < (1.0 / 20.0) * 1000.0) {
                Log.d(TAG, "Camera2- interval is " + intervalTime);
                // Bug fix: the dropped frame must still be acquired and closed,
                // otherwise the reader stalls (see the class comment above).
                Image dropped = reader.acquireNextImage();
                if (dropped != null) {
                    dropped.close();
                }
                return;
            }
            mLastDate = currentDate;

            Image image = reader.acquireNextImage();
            if (image == null) {
                return; // nothing queued (can happen after a race with the throttle)
            }
            int width = image.getWidth();
            int height = image.getHeight();

            // Flatten the three YUV_420_888 planes into packed I420 (YYYY... UU... VV...).
            byte[] i420 = getDataFromImage(image, COLOR_FormatI420);
            image.close(); // release the reader buffer as early as possible

            // Rotate the I420 frame into portrait orientation.
            byte[] rotated = new byte[width * height * 3 / 2];
            if (mCameraId.equals("0")) {
                yuv_rotate_90(rotated, i420, width, height);   // back camera: 90 degrees
            } else {
                yuv_rotate_270(rotated, i420, width, height);  // front camera: 270 degrees
            }

            // Convert rotated I420 to NV12 (YYYY... UVUV...), the only layout the
            // hardware encoder currently accepts: copy Y, then interleave U and V.
            byte[] nv12 = new byte[width * height * 3 / 2];
            for (int i = 0; i < width * height; i++) {
                nv12[i] = rotated[i];
            }
            for (int i = 0; i < width * height / 2; i++) {
                nv12[width * height + i] = rotated[width * height + i / 2];                              // U
                i++;
                nv12[width * height + i] = rotated[width * height + (width * height / 4) + (i - 1) / 2]; // V
            }

            // Bound the encoder's input queue; drop the oldest frame when it backs up.
            if (mHardEncode.myuvQueue.size() >= 10) {
                mHardEncode.myuvQueue.poll();
            }
            // NOTE(review): the actual enqueue is currently disabled; re-enable to feed
            // the hardware encoder:
//            try {
//                mHardEncode.myuvQueue.put(nv12);
//            } catch (InterruptedException e) {
//                e.printStackTrace();
//            }
        }
    };



    // Session lifecycle callback: starts the repeating preview request once the
    // session is configured; logs (rather than silently ignores) failures.
    private CameraCaptureSession.StateCallback mSessionStateCallback = new CameraCaptureSession.StateCallback() {

        @Override
        public void onConfigured(CameraCaptureSession session) {
            try {
                updatePreview(session);
            } catch (CameraAccessException e) {
                // Keep the cause in the log instead of printStackTrace().
                Log.e(TAG, "onConfigured: failed to start repeating request", e);
            }
        }

        @Override
        public void onConfigureFailed(CameraCaptureSession session) {
            // Surface configuration was rejected; do not fail silently.
            Log.e(TAG, "onConfigureFailed: could not configure capture session");
        }
    };

    // Starts the continuously-repeating capture request (the live preview / frame
    // stream); no per-capture callback is registered, results go to mHandler's thread.
    private void updatePreview(CameraCaptureSession session) throws CameraAccessException {
        session.setRepeatingRequest(mCaptureRequest.build(), null, mHandler);
    }

    /** Orders {@link Size}s by pixel area (width x height), smallest first. */
    static class CompareSizesByArea implements Comparator<Size> {

        @Override
        public int compare(Size lhs, Size rhs) {
            // Widen to long before multiplying so the areas cannot overflow int.
            long leftArea = (long) lhs.getWidth() * lhs.getHeight();
            long rightArea = (long) rhs.getWidth() * rhs.getHeight();
            return Long.compare(leftArea, rightArea);
        }

    }

    /**
     * Orientation of the camera sensor
     */
    private int mSensorOrientation;
    /**
     * Retrieves the JPEG orientation from the specified screen rotation.
     *
     * @param rotation The screen rotation.
     * @return The JPEG orientation (one of 0, 90, 180, and 270)
     */
    private int getOrientation(int rotation) {
        // Sensor orientation is 90 for most devices, or 270 for some devices (eg. Nexus 5X)
        // We have to take that into account and rotate JPEG properly.
        // For devices with orientation of 90, we simply return our mapping from ORIENTATIONS.
        // For devices with orientation of 270, we need to rotate the JPEG 180 degrees.
        // NOTE(review): a rotation key missing from ORIENTATIONS falls back to
        // SparseIntArray's default of 0 (relevant for the rotation+180 caller).
        return (ORIENTATIONS.get(rotation) + mSensorOrientation + 270) % 360;
    }

    // Lazily-opened output stream, deliberately kept open across calls so successive
    // writes append to the same file; also shared with dumpFile().
    FileOutputStream mFos;

    /**
     * Appends {@code mbyte} to a file in the app's private files directory.
     * The stream is opened on first use and intentionally left open so repeated
     * calls keep writing to the same file.
     *
     * @param fileName name of the file under {@link #getFilesDir()}
     * @param mbyte    bytes to append
     * @throws IOException if the stream cannot be opened or written
     */
    public  void saveTofile(String fileName, byte[] mbyte) throws IOException {
        if (mFos == null) {
            // FileOutputStream creates the file if it does not exist, so the
            // previous exists()/createNewFile() dance (whose result was ignored)
            // is unnecessary.
            mFos = new FileOutputStream(new File(getFilesDir(), fileName));
        }
        mFos.write(mbyte);
    }


    // Target layouts for getDataFromImage(): planar I420 (YYYY...UU...VV...) or
    // semi-planar NV21 (YYYY...VUVU...).
    private static final int COLOR_FormatI420 = 1;
    private static final int COLOR_FormatNV21 = 2;
    // Debug-logging flag; a primitive boolean — boxing it as Boolean served no purpose.
    private static boolean VERBOSE = false;

    /** Returns true when the Image's pixel format is one this class can convert. */
    private  boolean isImageFormatSupported(Image image) {
        int format = image.getFormat();
        switch (format) {
            case ImageFormat.YUV_420_888: // generic 4:2:0 family used by camera2
            case ImageFormat.NV21:
            case ImageFormat.YV12:        // planar YVU (note: YV12 is NOT NV12)
                return true;
        }
        return false;
    }

    /**
     * Copies the pixel data of a YUV {@link Image} into one packed byte array,
     * laid out as either I420 (Y plane, then U, then V) or NV21 (Y plane, then
     * interleaved VU), honoring each plane's row stride and pixel stride.
     *
     * @param image       the image to read; its crop rect and planes drive the copy
     * @param colorFormat COLOR_FormatI420 or COLOR_FormatNV21
     * @return a byte array of width * height * 3 / 2 bytes in the requested layout
     * @throws IllegalArgumentException for any other colorFormat
     * @throws RuntimeException         if the image's pixel format is unsupported
     */
    private  byte[] getDataFromImage(Image image, int colorFormat) {
        if (colorFormat != COLOR_FormatI420 && colorFormat != COLOR_FormatNV21) {
            throw new IllegalArgumentException("only support COLOR_FormatI420 " + "and COLOR_FormatNV21");
        }
        if (!isImageFormatSupported(image)) {
            throw new RuntimeException("can't convert Image to byte array, format " + image.getFormat());
        }
        // Valid pixel region of the frame.
        Rect crop = image.getCropRect();
        int format = image.getFormat();
        int width = crop.width();
        int height = crop.height();
        // One plane per channel: Y, U, V.
        Image.Plane[] planes = image.getPlanes();
        // Output frame: getBitsPerPixel() is 12 for 4:2:0, i.e. width*height*3/2 bytes.
        // (The old unused `totaolLength` local has been removed.)
        byte[] data = new byte[width * height * ImageFormat.getBitsPerPixel(format) / 8];
        // Scratch buffer large enough for one source row (rowStride bytes, e.g. 640).
        byte[] rowData = new byte[planes[0].getRowStride()];
        if (VERBOSE) Log.v(TAG, "get data from " + planes.length + " planes");
        int channelOffset = 0;
        int outputStride = 1; // 1 = write output bytes contiguously
        for (int i = 0; i < planes.length; i++) {
            // Where this channel starts in the output, and the gap between its bytes.
            switch (i) {
                case 0:
                    channelOffset = 0;
                    outputStride = 1;
                    break;
                case 1:
                    if (colorFormat == COLOR_FormatI420) {
                        channelOffset = width * height; // U follows the whole Y plane
                        outputStride = 1;
                    } else if (colorFormat == COLOR_FormatNV21) {
                        channelOffset = width * height + 1; // U interleaved at odd positions
                        outputStride = 2;
                    }
                    break;
                case 2:
                    if (colorFormat == COLOR_FormatI420) {
                        channelOffset = (int) (width * height * 1.25); // V follows Y + U
                        outputStride = 1;
                    } else if (colorFormat == COLOR_FormatNV21) {
                        channelOffset = width * height; // V interleaved at even positions
                        outputStride = 2;
                    }
                    break;
            }
            // Raw bytes of this channel.
            ByteBuffer buffer = planes[i].getBuffer();
            // Bytes per source row (may exceed the pixel count due to padding).
            int rowStride = planes[i].getRowStride();
            // Gap between samples in a row: 1 for Y (full sampling), typically 2 for U/V.
            int pixelStride = planes[i].getPixelStride();
            if (VERBOSE) {
                Log.v(TAG, "pixelStride " + pixelStride);
                Log.v(TAG, "rowStride " + rowStride);
                Log.v(TAG, "width " + width);
                Log.v(TAG, "height " + height);
                Log.v(TAG, "buffer size " + buffer.remaining());
            }
            // Chroma planes are subsampled by 2 in both dimensions; shift halves w/h.
            int shift = (i == 0) ? 0 : 1;
            int w = width >> shift;
            int h = height >> shift;
            // Start of the crop region inside this plane's buffer (0 for a full-frame crop).
            int position = rowStride * (crop.top >> shift) + pixelStride * (crop.left >> shift);
            buffer.position(position);
            // Copy the plane one row at a time.
            for (int row = 0; row < h; row++) {
                int length;
                if (pixelStride == 1 && outputStride == 1) {
                    // Tightly packed source and destination: bulk-copy the whole row.
                    length = w;
                    buffer.get(data, channelOffset, length);
                    channelOffset += length;
                }
                else {
                    // Strided source: read just enough bytes to cover w samples
                    // ((w-1)*pixelStride + 1), then pick every pixelStride-th byte.
                    length = (w - 1) * pixelStride + 1;
                    buffer.get(rowData, 0, length);
                    for (int col = 0; col < w; col++) {
                        data[channelOffset] = rowData[col * pixelStride];
                        channelOffset += outputStride;
                    }
                }
                // Skip the row padding so the next read starts at the next row
                // (not needed after the final row).
                if (row < h - 1) {
                    buffer.position(buffer.position() + rowStride - length);
                }
            }
            if (VERBOSE) Log.v(TAG, "Finished reading data from plane " + i);
        }
        return data;
    }

    /**
     * Appends {@code data} to a file in the app's private files directory via the
     * shared {@link #mFos} stream (opened once on first use, then reused).
     *
     * @throws RuntimeException if the file cannot be opened or written
     */
    private  void dumpFile(String fileName, byte[] data) {
        File file = new File(getFilesDir(), fileName);
        try {
            if (mFos == null) {
                // FileOutputStream creates the file on open; the previous
                // createNewFile() call (with its swallowed IOException) was redundant.
                mFos = new FileOutputStream(file);
            }
        } catch (IOException ioe) {
            throw new RuntimeException("Unable to create output file " + fileName, ioe);
        }
        try {
            mFos.write(data);
        } catch (IOException ioe) {
            throw new RuntimeException("failed writing data to file " + fileName, ioe);
        }
    }




    // Back camera: rotate an I420 (yuv420p) frame 90 degrees clockwise.
    // des must hold width*height*3/2 bytes; src is the unrotated frame.
    void yuv_rotate_90(byte[] des,byte[] src,int width,int height)
    {
        int n = 0;
        int hw = width / 2;// chroma (U/V) plane width — each chroma plane is a quarter-size grid
        int hh = height / 2;// chroma plane height
        //copy y
        for(int j = 0; j < width;j++)
        {
            for(int i = height - 1; i >= 0; i--)
            {
                // read each column bottom-to-top so the frame turns 90 degrees clockwise
                des[n++] = src[width * i + j];
            }
        }

        //copy u
        for(int j = 0;j < hw;j++)
        {
            for(int i = hh - 1;i >= 0;i--)
            {
                // U plane starts right after the Y plane (width*height bytes in)
                des[n++] = src[width * height + hw*i + j ];
            }
        }

        //copy v
        for(int j = 0; j < hw; j++)
        {
            for(int i = hh - 1;i >= 0;i--)
            {
                // V plane starts after Y (width*height) plus U (width*height/4)
                des[n++] = src[width * height + width * height / 4 + hw*i + j];
            }
        }
    }

    /**
     * Front camera: rotate an I420 (yuv420p) frame 270 degrees clockwise
     * (equivalently 90 degrees counter-clockwise).
     * <p>
     * Bug fix: the original Y loop ran {@code j} from {@code width} down to 1, so it
     * read {@code src[width*i + width]} — one byte past the end of each row (an
     * ArrayIndexOutOfBoundsException on the final row of the Y plane) — and skipped
     * column 0 entirely. The column index must run from {@code width - 1} down to 0,
     * mirroring the (already correct) chroma loops below.
     *
     * @param des    destination buffer, width * height * 3 / 2 bytes
     * @param src    source I420 frame
     * @param width  source luma width
     * @param height source luma height
     */
    void yuv_rotate_270(byte[] des,byte[] src,int width,int height)
    {
        int n = 0;
        int hw = width / 2;  // chroma plane width
        int hh = height / 2; // chroma plane height
        //copy y: read columns right-to-left, each column top-to-bottom
        for(int j = width - 1; j >= 0; j--)
        {
            for(int i = 0; i < height;i++)
            {
                des[n++] = src[width*i + j];
            }
        }

        //copy u (starts right after the Y plane)
        for(int j = hw-1; j >=0;j--)
        {
            for(int i = 0; i < hh;i++)
            {
                des[n++] = src[width * height + hw * i + j];
            }
        }

        //copy v (starts after Y plus U, i.e. width*height + width*height/4)
        for(int j = hw-1; j >=0;j--)
        {
            for(int i = 0; i < hh;i++)
            {
                des[n++] = src[width * height + width * height / 4 +hw * i + j];
            }
        }
    }

    // Mirror an I420 (yuv420p) frame left-to-right (horizontal flip), plane by plane.
    void yuv_flip_horizontal(byte[] des,byte[] src,int width,int height)
    {
        int out = 0;
        int chromaW = width / 2;  // chroma plane width
        int chromaH = height / 2; // chroma plane height

        // Y plane: emit every row with its columns reversed.
        for (int row = 0; row < height; row++) {
            int rowStart = width * row;
            for (int col = width - 1; col >= 0; col--) {
                des[out++] = src[rowStart + col];
            }
        }

        // U plane, located right after the Y plane.
        int uBase = width * height;
        for (int row = 0; row < chromaH; row++) {
            int rowStart = uBase + chromaW * row;
            for (int col = chromaW - 1; col >= 0; col--) {
                des[out++] = src[rowStart + col];
            }
        }

        // V plane, located after the U plane (Y + quarter-size U).
        int vBase = width * height + width * height / 4;
        for (int row = 0; row < chromaH; row++) {
            int rowStart = vBase + chromaW * row;
            for (int col = chromaW - 1; col >= 0; col--) {
                des[out++] = src[rowStart + col];
            }
        }
    }

}