package util;

import ai.djl.modality.cv.Image;
import ai.djl.modality.cv.ImageFactory;
import ai.djl.modality.cv.output.DetectedObjects;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.DataType;

import com.arcsoft.face.FaceInfo;
import net.coobird.thumbnailator.Thumbnails;
import org.bytedeco.javacv.*;
import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.global.opencv_imgcodecs;
import org.bytedeco.opencv.global.opencv_imgproc;
import org.bytedeco.opencv.opencv_core.IplImage;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Size;
import org.springframework.util.CollectionUtils;
import vfile.FaceEngineUtils;
import vfile.face.FeatureDetection;
import vfile.face.RetinaFaceOpencvDetection;

import java.awt.image.BufferedImage;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.Buffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

import static org.bytedeco.ffmpeg.global.avcodec.AV_CODEC_ID_AAC;
import static org.bytedeco.ffmpeg.global.avcodec.AV_CODEC_ID_H264;
import static org.bytedeco.ffmpeg.global.avutil.AV_PIX_FMT_YUV420P;

/**
 * Video utilities built on JavaCV (FFmpeg/OpenCV) and DJL: extracting frames
 * from a clip, and re-encoding a clip with each video frame's face replaced
 * by an externally supplied image while the audio track is copied through.
 */
public class VideoTool {
    static boolean exit = false;
    public static String savePath = "F:/face/";

    /**
     * Demo entry point: decodes a still image 401 times into a replacement
     * frame list, then muxes those frames over the source clip via
     * {@link #push(String, String, List, String)}.
     *
     * @param args unused
     * @throws Exception on any decode/encode failure
     */
    public static void main(String[] args) throws Exception {
        System.out.println("start...");
        Path videoPath = Paths.get("src/main/resources/video/ds.mp4");
        Path pushPath = Paths.get("build/output/file2.mp4");

        List<BufferedImage> resultVideo = new ArrayList<>(401);
        for (int i = 0; i < 401; i++) {
            Path imageFile = Paths.get("src/main/resources/img/2m.jpg");
            Image image = ImageFactory.getInstance().fromFile(imageFile);
            // NDManager owns native memory; close each one instead of leaking
            // 401 managers across the loop.
            try (NDManager manager = NDManager.newBaseManager()) {
                NDArray img = image.toNDArray(manager).toType(DataType.UINT8, true);
                // getWrappedImage copies pixels onto the Java heap, so the
                // BufferedImage stays valid after the manager is closed.
                BufferedImage tmpImg =
                        (BufferedImage) ImageFactory.getInstance().fromNDArray(img).getWrappedImage();
                resultVideo.add(tmpImg);
            }
        }

        push(videoPath.toString(), pushPath.toString(), resultVideo, "mp4");
    }

    /**
     * Saves {@code img} as a PNG (named by {@code WorkId.sortUID()}) under
     * {@code build/output/1}, creating the directory if needed.
     *
     * @param img image to persist
     * @throws IOException if the directory or file cannot be written
     */
    private static void saveBoundingBoxImage(Image img) throws IOException {
        Path outputDir = Paths.get("build/output/1");
        Files.createDirectories(outputDir);

        // Duplicate with an alpha channel: the source may be a JPEG and
        // OpenJDK cannot save a JPEG that carries alpha, so always emit PNG.
        Image newImage = img.duplicate(Image.Type.TYPE_INT_ARGB);
        Path imagePath = outputDir.resolve(WorkId.sortUID() + ".png");
        // try-with-resources: the stream was previously never closed (leak).
        try (OutputStream out = Files.newOutputStream(imagePath)) {
            newImage.save(out, "png");
        }
    }

    /**
     * Decodes every video frame of {@code filePath} into a DJL {@link Image}.
     *
     * @param filePath path (or URL) of the media to decode; rtsp sources
     *                 require {@code FFmpegFrameGrabber}, not the generic
     *                 {@code FrameGrabber}
     * @return decoded frames in presentation order (audio frames skipped)
     * @throws Exception if the grabber cannot open or read the media
     */
    public static List<Image> getKeyFrame(String filePath) throws Exception {
        File vf = new File(filePath);
        FFmpegFrameGrabber grabber = FFmpegFrameGrabber.createDefault(vf);
        grabber.start();
        try {
            Java2DFrameConverter converter = new Java2DFrameConverter();
            System.out.println("all start!");
            // NOTE(review): getLengthInTime() is in microseconds, so this
            // prints micro-minutes*1000 rather than minutes — log only.
            System.out.println("总时长:" + grabber.getLengthInTime() / 1000 / 60);
            System.out.println("总音频长:" + grabber.getLengthInAudioFrames());
            System.out.println("总视频长:" + grabber.getLengthInVideoFrames());
            System.out.println("总贞长:" + grabber.getLengthInFrames());
            // A reported length of Integer.MAX_VALUE means "unknown"; treat
            // such streams as empty rather than looping ~2^31 times.
            int videoFrames = grabber.getLengthInVideoFrames() >= Integer.MAX_VALUE
                    ? 0 : grabber.getLengthInVideoFrames();
            grabber.flush();

            List<Image> frames = new ArrayList<>(Math.max(videoFrames, 0));
            for (int i = 0; i < videoFrames; i++) {
                Frame frame = grabber.grab();
                if (frame == null) {
                    continue;
                }
                BufferedImage bImg = converter.convert(frame);
                if (bImg == null) {
                    continue; // audio-only frame
                }
                // Copy the pixels: the converter reuses its backing buffer
                // between grab() calls.
                frames.add(ImageFactory.getInstance().fromImage(copyImg(bImg)));
            }
            return frames;
        } finally {
            // Always release native decoder resources, even on failure.
            grabber.release();
        }
    }

    /**
     * Deep-copies a BufferedImage.
     *
     * @param img source image
     * @return a new image with the same pixels; images of TYPE_CUSTOM are
     *         materialised as 3-byte BGR, since TYPE_CUSTOM cannot be used to
     *         construct a BufferedImage
     */
    public static BufferedImage copyImg(BufferedImage img) {
        int type = img.getType() == BufferedImage.TYPE_CUSTOM
                ? BufferedImage.TYPE_3BYTE_BGR   // was the magic numbers 0 -> 5
                : img.getType();
        BufferedImage copy = new BufferedImage(img.getWidth(), img.getHeight(), type);
        copy.setData(img.getData());
        return copy;
    }

    /**
     * Re-encodes {@code rtmpPath} into {@code rtspPath}, replacing each video
     * frame's face (via {@code OpenCVFaceVideoSwap.faceMerge}) with the
     * corresponding entry of {@code cimgs}, while audio samples are copied
     * through unchanged.
     *
     * @param rtmpPath source media file/stream to read
     * @param rtspPath destination file/stream to write
     * @param cimgs    replacement frames, consumed one per decoded video frame;
     *                 if the video is longer than the list, the last entry is
     *                 reused (the old code threw IndexOutOfBoundsException)
     * @param fileType container format for the recorder, e.g. "mp4" or "flv"
     * @throws IllegalArgumentException if {@code cimgs} is null or empty
     * @throws Exception                on decode/encode failure
     */
    public static void push(String rtmpPath, String rtspPath, List<BufferedImage> cimgs, String fileType)
            throws Exception {
        if (cimgs == null || cimgs.isEmpty()) {
            throw new IllegalArgumentException("cimgs must contain at least one replacement frame");
        }
        File vf = new File(rtmpPath);
        FFmpegFrameGrabber grabber = FFmpegFrameGrabber.createDefault(vf);
        grabber.start();
        try {
            // 2 audio channels; H.264 video + AAC audio in the requested container.
            FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(
                    rtspPath, grabber.getImageWidth(), grabber.getImageHeight(), 2);
            recorder.setVideoCodec(AV_CODEC_ID_H264);
            recorder.setAudioCodec(AV_CODEC_ID_AAC);
            recorder.setFormat(fileType);
            recorder.setPixelFormat(AV_PIX_FMT_YUV420P);
            recorder.start();
            try {
                OpenCVFrameConverter.ToIplImage matConverter = new OpenCVFrameConverter.ToIplImage();
                Java2DFrameConverter imageConverter = new Java2DFrameConverter();
                System.out.println("all start!");
                System.out.println("总时长:" + grabber.getLengthInTime() / 1000 / 60);
                System.out.println("总音频长:" + grabber.getLengthInAudioFrames());
                System.out.println("总视频长:" + grabber.getLengthInVideoFrames());
                System.out.println("总贞长:" + grabber.getLengthInFrames());
                // Integer.MAX_VALUE means "length unknown"; count such streams as empty.
                int audios = grabber.getLengthInAudioFrames() >= Integer.MAX_VALUE
                        ? 0 : grabber.getLengthInAudioFrames();
                int videos = grabber.getLengthInVideoFrames() >= Integer.MAX_VALUE
                        ? 0 : grabber.getLengthInVideoFrames();
                int frameNumber = audios + videos;
                int width = grabber.getImageWidth();
                int height = grabber.getImageHeight();
                int index = 0;
                for (int i = 0; i < frameNumber; i++) {
                    System.out.println("总共：" + frameNumber + " 完成：" + i);
                    Frame frame = grabber.grab();
                    if (frame == null) {
                        continue;
                    }
                    if (frame.samples != null) {
                        recorder.recordSamples(frame.samples); // audio passes through untouched
                    }
                    BufferedImage bImg;
                    if (frame.image != null && (bImg = imageConverter.convert(frame)) != null) {
                        System.out.println("放入图片");
                        // Replacement face, scaled to the stream's resolution.
                        // Clamp to the last entry so a short list degrades
                        // gracefully instead of throwing mid-encode.
                        BufferedImage replacement = cimgs.get(Math.min(index, cimgs.size() - 1));
                        Mat newImg = ConverterImg.b2M2(replacement, opencv_core.CV_8UC3);
                        opencv_imgproc.resize(newImg, newImg, new Size(width, height));
                        Mat oldImg = ConverterImg.b2M2(bImg, opencv_core.CV_8UC3);
                        // TODO: add face comparison here; for now the first
                        // detected face is merged.
                        Mat combined = OpenCVFaceVideoSwap.faceMerge(oldImg, newImg, true);
                        Frame merged = matConverter.convert(combined);
                        index++;
                        // Stamp the source frame's pts BEFORE recording — the
                        // old code set it after recordImage, so every written
                        // packet carried the previous frame's timestamp.
                        recorder.setTimestamp(frame.timestamp);
                        recorder.recordImage(width, height, merged.imageDepth, merged.imageChannels,
                                merged.imageStride, -1, merged.image);
                    }
                }
                recorder.stop();
            } finally {
                recorder.release();
            }
        } finally {
            grabber.release();
        }
    }
}
