/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This is a JNI example where we use native methods to play video
 * using the native AMedia* APIs.
 * See the corresponding Java source file located at:
 *
 *   src/com/example/nativecodec/NativeMedia.java
 *
 * In this example we use assert() for "impossible" error conditions,
 * and explicit handling and recovery for more likely error conditions.
 */

#include <assert.h>
#include <jni.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <limits.h>
#include <iostream>
#include <fstream>
#include "arm_neon.h"

#include "looper.h"
#include "ir_build.h"
#include "media/NdkMediaCodec.h"
#include "media/NdkMediaExtractor.h"

// for __android_log_print(ANDROID_LOG_INFO, "YourApp", "formatted message");
#include <android/log.h>
#define TAG "NativeCodec"
#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)

// for native window JNI
#include <android/native_window_jni.h>
#include <android/asset_manager.h>
#include <android/asset_manager_jni.h>

// Round x up to the next multiple of base (integer math; base > 0).
// BUGFIX: arguments are now fully parenthesized so expression arguments
// (e.g. ALIGN(a + b, n)) expand with the intended precedence.
#define  ALIGN(x,base) ((((x)+(base)-1)/(base))*(base))

// Shared state for the decode/render pipeline. A single global instance
// (gdata) is used; it is accessed from the looper thread and from JNI entry
// points without locking — NOTE(review): this assumes the Java side
// serializes calls; confirm before adding new callers.
typedef struct {
    int fd;                             // asset file descriptor (closed right after setDataSourceFd)
    ANativeWindow* window;              // render target obtained from the Java Surface
    AMediaExtractor* ex;                // demuxer for the selected video track
    AMediaCodec *codec;                 // video decoder
    int64_t renderstart;                // wallclock anchor for frame pacing; -1 = re-anchor on next frame
    bool sawInputEOS;                   // extractor has delivered its last sample
    bool sawOutputEOS;                  // decoder has emitted its last output buffer
    bool isPlaying;                     // playback running (vs paused)
    bool renderonce;                    // render exactly one frame after a seek while paused
    int width;                          // video width from the track format
    int height;                         // video height from the track format
    bool isHiAIInit;                    // HiAI super-resolution model initialized
    shared_ptr<hiai::AiTensor> itensor; // SR model input tensor (filled with FP16 NCHW RGB)
    shared_ptr<hiai::AiTensor> otensor; // SR model output tensor (2x upscaled)
    bool SRflag;                        // true = run super-resolution on each decoded frame
} workerdata;

// Global worker state; fd starts at -1 (no file open), everything else zero/false.
workerdata gdata = {-1, NULL, NULL, NULL, 0,
                   false, false, false, false,
                   0,0, false, nullptr,nullptr,false};

// Messages dispatched by mylooper::handle(); posted from the JNI entry
// points and from doCodecWork() itself to keep the decode loop running.
enum {
    kMsgCodecBuffer,   // process one input/output codec buffer, then re-post
    kMsgPause,         // stop posting codec-buffer work
    kMsgResume,        // re-anchor the clock and resume codec-buffer work
    kMsgPauseAck,      // no-op marker posted with flush=true to drain queued work
    kMsgDecodeDone,    // tear down codec and extractor
    kMsgSeek,          // rewind to the start of the stream
    kMsgSR,            // enable super-resolution (window geometry becomes 2x)
    kMsgNotSR,         // disable super-resolution (restore 1x geometry)
};


// Looper subclass that dispatches the kMsg* messages above on its own thread.
class mylooper: public looper {
    virtual void handle(int what, void* obj);
};

// Single looper instance; created in createStreamingMediaPlayer, destroyed in shutdown.
static mylooper *mlooper = NULL;

// Current CLOCK_MONOTONIC time in nanoseconds; used for frame pacing.
int64_t systemnanotime() {
    timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    int64_t nanos = ts.tv_sec * 1000000000LL;
    nanos += ts.tv_nsec;
    return nanos;
}

// Dump `size` bytes starting at `data` into the file at `path`,
// truncating any existing content. I/O errors are silently ignored.
void WriteFile(std::string path, void* data, size_t size)
{
    std::ofstream out(path.c_str(), std::ios::out | std::ios::binary);
    out.write(static_cast<char*>(data), size);
    out.close();
}

// Fill `data` with up to `size` bytes read from the file at `path`.
// I/O errors (missing or short file) are silently ignored.
void ReadFile(std::string path, void* data, size_t size)
{
    std::ifstream in(path, std::ios::binary);
    in.read(static_cast<char*>(data), size);
    in.close();
}

// Scratch RGBA frame buffer (~8 MB) shared by the decode loop.
// NOTE(review): sized for at most 1000x2000 pixels; a super-resolved frame is
// (width*2) x (height*2) x 4 bytes, so larger sources would overflow — confirm
// the supported input resolutions.
char imgBuffer[1000*2000*4];

// For demo purposes the original video is slightly darkened (luDeta) so the
// non-SR path is visually distinguishable from the super-resolved output.
//
// Converts a YUV420 semi-planar frame (Y plane followed by an interleaved
// chroma plane at offset w*h) into packed RGBA8888, two rows per iteration.
// Fixed-point scheme: Y is promoted with <<6 and the chroma coefficients
// (90, -46, -22, 113) are Q6 — NOTE(review): these look like scaled
// BT.601-style coefficients; confirm against the source colorspace.
// Assumes w and h are even. NOTE(review): the alpha lane (_rgba*.val[3] /
// rgba*[3], rgba*[7]) is never written — the output alpha is whatever was in
// the destination buffer; confirm the window format ignores alpha.
static void yuv420sp_to_rgba_fast_asm(const unsigned char* yuv420sp, int w, int h, unsigned char* rgba)
{
    const unsigned char* yptr = yuv420sp;
    const unsigned char* vuptr = yuv420sp + w * h;  // interleaved chroma plane

    const unsigned char luDeta = 10;  // brightness reduction applied to Y

    int8x8_t _v128 = vdup_n_s8(128);
    int8x8_t _v90 = vdup_n_s8(90);
    int8x8_t _v46 = vdup_n_s8(46);
    int8x8_t _v22 = vdup_n_s8(22);
    int8x8_t _v113 = vdup_n_s8(113);
    int8x8_t _vlu = vdup_n_s8(luDeta); // brightness delta

    for (int y = 0; y < h; y += 2)
    {
        const unsigned char* yptr0 = yptr;      // even row
        const unsigned char* yptr1 = yptr + w;  // odd row (shares chroma with even row)
        unsigned char* rgba0 = rgba;
        unsigned char* rgba1 = rgba + w * 4;

#if __ARM_NEON
        int nn = w >> 3;               // 8-pixel NEON iterations per row pair
        int remain = w - (nn << 3);    // leftover pixels for the scalar tail
#else
        int remain = w;
#endif // __ARM_NEON

#if __ARM_NEON
        #if __aarch64__
        // NEON path: 8 pixels of two rows per iteration; Y is widened to Q6,
        // chroma contributions accumulated with widening multiply-(add/sub).
        for (; nn > 0; nn--)
        {
            int16x8_t _yy0 = vreinterpretq_s16_u16(vshll_n_u8(vsub_u8(vld1_u8(yptr0),_vlu), 6));
            int16x8_t _yy1 = vreinterpretq_s16_u16(vshll_n_u8(vsub_u8(vld1_u8(yptr1),_vlu), 6));

            int8x8_t _vvuu = vsub_s8(vreinterpret_s8_u8(vld1_u8(vuptr)), _v128);
            int8x8x2_t _vvvvuuuu = vtrn_s8(_vvuu, _vvuu);  // duplicate each chroma pair across two pixels
            int8x8_t _uu = _vvvvuuuu.val[0];
            int8x8_t _vv = _vvvvuuuu.val[1];

            int16x8_t _r0 = vmlal_s8(_yy0, _vv, _v90);
            int16x8_t _g0 = vmlsl_s8(_yy0, _vv, _v46);
            _g0 = vmlsl_s8(_g0, _uu, _v22);
            int16x8_t _b0 = vmlal_s8(_yy0, _uu, _v113);

            int16x8_t _r1 = vmlal_s8(_yy1, _vv, _v90);
            int16x8_t _g1 = vmlsl_s8(_yy1, _vv, _v46);
            _g1 = vmlsl_s8(_g1, _uu, _v22);
            int16x8_t _b1 = vmlal_s8(_yy1, _uu, _v113);

            // Saturating narrow with >>6 undoes the Q6 scaling and clamps to [0,255].
            uint8x8x4_t _rgba0;
            _rgba0.val[0] = vqshrun_n_s16(_r0, 6);
            _rgba0.val[1] = vqshrun_n_s16(_g0, 6);
            _rgba0.val[2] = vqshrun_n_s16(_b0, 6);

            uint8x8x4_t _rgba1;
            _rgba1.val[0] = vqshrun_n_s16(_r1, 6);
            _rgba1.val[1] = vqshrun_n_s16(_g1, 6);
            _rgba1.val[2] = vqshrun_n_s16(_b1, 6);

            vst4_u8(rgba0, _rgba0);
            vst4_u8(rgba1, _rgba1);

            yptr0 += 8;
            yptr1 += 8;
            vuptr += 8;
            rgba0 += 8*4;
            rgba1 += 8*4;
        }
#endif // __aarch64__
#endif // __ARM_NEON

        // Scalar tail: two pixels of two rows per iteration (one chroma pair).
        for (; remain > 0; remain-=2)
        {
            int v = vuptr[1] - 128;
            int u = vuptr[0] - 128;

            int ruv = 90 * v;
            int guv = -46 * v + -22 * u;
            int buv = 113 * u;

#define SATURATE_CAST_UCHAR(X) (unsigned char)std::min(std::max(X, 0), 255);

            int y00 = (yptr0[0]-luDeta) << 6;
            rgba0[0] = SATURATE_CAST_UCHAR((y00 + ruv) >> 6);
            rgba0[1] = SATURATE_CAST_UCHAR((y00 + guv) >> 6);
            rgba0[2] = SATURATE_CAST_UCHAR((y00 + buv) >> 6);

            int y01 = (yptr0[1]-luDeta) << 6;
            rgba0[4] = SATURATE_CAST_UCHAR((y01 + ruv) >> 6);
            rgba0[5] = SATURATE_CAST_UCHAR((y01 + guv) >> 6);
            rgba0[6] = SATURATE_CAST_UCHAR((y01 + buv) >> 6);

            int y10 = (yptr1[0]-luDeta) << 6;
            rgba1[0] = SATURATE_CAST_UCHAR((y10 + ruv) >> 6);
            rgba1[1] = SATURATE_CAST_UCHAR((y10 + guv) >> 6);
            rgba1[2] = SATURATE_CAST_UCHAR((y10 + buv) >> 6);

            int y11 = (yptr1[1]-luDeta) << 6;
            rgba1[4] = SATURATE_CAST_UCHAR((y11 + ruv) >> 6);
            rgba1[5] = SATURATE_CAST_UCHAR((y11 + guv) >> 6);
            rgba1[6] = SATURATE_CAST_UCHAR((y11 + buv) >> 6);

#undef SATURATE_CAST_UCHAR

            yptr0 += 2;
            yptr1 += 2;
            vuptr += 2;
            rgba0 += 2*4;
            rgba1 += 2*4;
        }

        yptr += 2 * w;       // advance two luma rows
        rgba += 2 * 4 * w;   // advance two RGBA rows
    }
}

// Convert a planar FP16 NCHW RGB image (w x h, values expected in [0,255],
// as produced by yuv420sp_to_rgb_Fp16_NCHW_fast_asm / the SR model) into
// packed RGBA8888 with opaque alpha.
//
// The aarch64 NEON path handles 8 pixels per iteration; the scalar loop
// covers the tail (and all pixels when NEON is unavailable).
static void rgb_Fp16_NCHW_to_RGBAU8_fast_asm(float16_t* rgb,
                                             int w, int h, unsigned char* rgba)
{
    int hw = h*w;
    float16_t* r = rgb;     // plane 0: red
    float16_t* g = r+hw;    // plane 1: green
    float16_t* b = g+hw;    // plane 2: blue

    unsigned char* dst = rgba;

#if __ARM_NEON
    int nn = hw >> 3;              // 8-pixel NEON iterations
    int remain = hw - (nn << 3);   // leftover pixels for the scalar tail
#else
    int remain = hw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
    // BUGFIX: the alpha lane used to be stored uninitialized; make it opaque.
    uint8x8_t _alpha = vdup_n_u8(255);
    for (; nn > 0; nn--)
    {
        float16x8x4_t rgbPackf16;
        rgbPackf16.val[0] = vld1q_f16(r);
        rgbPackf16.val[1] = vld1q_f16(g);
        rgbPackf16.val[2] = vld1q_f16(b);
        uint16x8x4_t rgbPacku16;
        rgbPacku16.val[0] = vcvtq_u16_f16(rgbPackf16.val[0]);
        rgbPacku16.val[1] = vcvtq_u16_f16(rgbPackf16.val[1]);
        rgbPacku16.val[2] = vcvtq_u16_f16(rgbPackf16.val[2]);
        uint8x8x4_t rgbPacku8;
        rgbPacku8.val[0] = vmovn_u16(rgbPacku16.val[0]);
        rgbPacku8.val[1] = vmovn_u16(rgbPacku16.val[1]);
        rgbPacku8.val[2] = vmovn_u16(rgbPacku16.val[2]);
        rgbPacku8.val[3] = _alpha;
        vst4_u8(dst, rgbPacku8);

        r += 8;
        g += 8;
        b += 8;
        dst += 8*4;
    }
#endif
#endif
    // Scalar tail.
    // BUGFIX: the original read r[remain]/g[remain]/b[remain] while also
    // advancing the pointers, indexing past the remaining elements instead of
    // reading the current pixel (out-of-bounds read, wrong output).
    for (; remain > 0; remain--)
    {
        dst[0] = (unsigned char)(*r++);
        dst[1] = (unsigned char)(*g++);
        dst[2] = (unsigned char)(*b++);
        dst[3] = 255;  // opaque alpha (was left unwritten)
        dst += 4;
    }
}

// Convert a YUV420 semi-planar frame into planar FP16 NCHW RGB, clamped to
// [0,255], ready to feed the HiAI super-resolution input tensor.
//
// Fixed-point scheme matches yuv420sp_to_rgba_fast_asm: Y promoted with <<6,
// chroma coefficients (90, -46, -22, 113) in Q6 — NOTE(review): presumed
// scaled BT.601-style constants; confirm against the source colorspace.
//
// The chroma plane is taken at offset w * ALIGN(h,16) — assumes the decoder
// pads the luma plane height to a multiple of 16 rows (TODO confirm this
// holds on all devices/codecs).
//
// Processes two rows per outer iteration; assumes w and h are even.
static void yuv420sp_to_rgb_Fp16_NCHW_fast_asm(const unsigned char* yuv420sp,
                                          int w, int h, float16_t* rgb)
{
    const unsigned char* yptr = yuv420sp;
    const unsigned char* vuptr = yuv420sp + w * ALIGN(h,16);  // interleaved VU plane

    int8x8_t _v128 = vdup_n_s8(128);
    int8x8_t _v90 = vdup_n_s8(90);
    int8x8_t _v46 = vdup_n_s8(46);
    int8x8_t _v22 = vdup_n_s8(22);
    int8x8_t _v113 = vdup_n_s8(113);
    int16x8_t v0 = vdupq_n_s16(0);      // clamp lower bound
    int16x8_t v255 = vdupq_n_s16(255);  // clamp upper bound


    for (int y=0; y<h; y+=2)
    {
        const unsigned char* yptr0 = yptr;      // even row
        const unsigned char* yptr1 = yptr + w;  // odd row (shares chroma)

        // Per-plane output cursors; planes are h*w apart (NCHW layout).
        float16_t* r0 = rgb;
        float16_t* r1 = rgb + w;
        float16_t* g0 = r0 + h*w;
        float16_t* g1 = g0 + w;
        float16_t* b0 = g0 + h*w;
        float16_t* b1 = b0 + w;


#if __ARM_NEON
        int nn = w >> 3;               // 8-pixel NEON iterations per row pair
        int remain = w - (nn << 3);    // leftover pixels for the scalar tail
#else
        int remain = w;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
        // NEON path: widen Y to Q6, accumulate chroma, clamp to [0,255],
        // then convert each 16-bit lane to FP16 and store per plane.
        for (; nn>0; nn--)
        {
            int16x8_t _yy0 = vreinterpretq_s16_u16(vshll_n_u8(vld1_u8(yptr0), 6));
            int16x8_t _yy1 = vreinterpretq_s16_u16(vshll_n_u8(vld1_u8(yptr1), 6));

            int8x8_t _vvuu = vsub_s8(vreinterpret_s8_u8(vld1_u8(vuptr)), _v128);
            int8x8x2_t _vvvvuuuu = vtrn_s8(_vvuu, _vvuu);  // duplicate chroma across pixel pairs
            int8x8_t _uu = _vvvvuuuu.val[0];
            int8x8_t _vv = _vvvvuuuu.val[1];

            int16x8_t _r0 = vmlal_s8(_yy0, _vv, _v90);
            int16x8_t _g0 = vmlsl_s8(_yy0, _vv, _v46);
            _g0 = vmlsl_s8(_g0, _uu, _v22);
            int16x8_t _b0 = vmlal_s8(_yy0, _uu, _v113);

            int16x8_t _r1 = vmlal_s8(_yy1, _vv, _v90);
            int16x8_t _g1 = vmlsl_s8(_yy1, _vv, _v46);
            _g1 = vmlsl_s8(_g1, _uu, _v22);
            int16x8_t _b1 = vmlal_s8(_yy1, _uu, _v113);

            float16x8x3_t _rgb0;
            _rgb0.val[0] = vcvtq_f16_s16(vminq_s16(vmaxq_s16(vshrq_n_s16(_r0, 6),v0),v255));
            _rgb0.val[1] = vcvtq_f16_s16(vminq_s16(vmaxq_s16(vshrq_n_s16(_g0, 6),v0),v255));
            _rgb0.val[2] = vcvtq_f16_s16(vminq_s16(vmaxq_s16(vshrq_n_s16(_b0, 6),v0),v255));

            float16x8x3_t _rgb1;
            _rgb1.val[0] = vcvtq_f16_s16(vminq_s16(vmaxq_s16(vshrq_n_s16(_r1, 6),v0),v255));
            _rgb1.val[1] = vcvtq_f16_s16(vminq_s16(vmaxq_s16(vshrq_n_s16(_g1, 6),v0),v255));
            _rgb1.val[2] = vcvtq_f16_s16(vminq_s16(vmaxq_s16(vshrq_n_s16(_b1, 6),v0),v255));

            vst1q_f16(r0, _rgb0.val[0]);
            vst1q_f16(g0, _rgb0.val[1]);
            vst1q_f16(b0, _rgb0.val[2]);

            vst1q_f16(r1, _rgb1.val[0]);
            vst1q_f16(g1, _rgb1.val[1]);
            vst1q_f16(b1, _rgb1.val[2]);

            yptr0 += 8;
            yptr1 += 8;
            vuptr += 8;

            r0 += 8;
            g0 += 8;
            b0 += 8;
            r1 += 8;
            g1 += 8;
            b1 += 8;
        }
#endif // __aarch64__
#endif // __ARM_NEON

        // Scalar tail: two pixels of two rows per iteration.
        // NOTE(review): the LOGV below fires for every tail pixel pair —
        // very noisy if w is not a multiple of 8; consider removing.
        for (; remain>0; remain-=2)
        {
            LOGV("cpu trans! %d",remain);
            int v = vuptr[1] - 128;
            int u = vuptr[0] - 128;

            int ruv = 90 * v;
            int guv = -46 * v + -22 * u;
            int buv = 113 * u;

#define SATURATE_CAST_UCHAR(X) (unsigned char)std::min(std::max(X, 0), 255);

            int y00 = yptr0[0] << 6;

            r0[0] = SATURATE_CAST_UCHAR((y00 + ruv) >> 6);
            g0[0] = SATURATE_CAST_UCHAR((y00 + guv) >> 6);
            b0[0] = SATURATE_CAST_UCHAR((y00 + buv) >> 6);

            int y01 = yptr0[1] << 6;
            r0[1] = SATURATE_CAST_UCHAR((y01 + ruv) >> 6);
            g0[1] = SATURATE_CAST_UCHAR((y01 + guv) >> 6);
            b0[1] = SATURATE_CAST_UCHAR((y01 + buv) >> 6);

            int y10 = yptr1[0] << 6;
            r1[0] = SATURATE_CAST_UCHAR((y10 + ruv) >> 6);
            g1[0] = SATURATE_CAST_UCHAR((y10 + guv) >> 6);
            b1[0] = SATURATE_CAST_UCHAR((y10 + buv) >> 6);

            int y11 = yptr1[1] << 6;
            r1[1] = SATURATE_CAST_UCHAR((y11 + ruv) >> 6);
            g1[1] = SATURATE_CAST_UCHAR((y11 + guv) >> 6);
            b1[1] = SATURATE_CAST_UCHAR((y11 + buv) >> 6);

#undef SATURATE_CAST_UCHAR

            yptr0 += 2;
            yptr1 += 2;
            vuptr += 2;

            r0 += 2;
            g0 += 2;
            b0 += 2;
            r1 += 2;
            g1 += 2;
            b1 += 2;
        }
        yptr += 2*w;  // two luma rows consumed
        rgb += 2*w;   // advance base cursor; per-plane offsets recomputed above
    }
}


void manualTransform(void* input, void* output)
{
    if(gdata.SRflag == false) {
        yuv420sp_to_rgba_fast_asm((const unsigned char*)input,
                                  gdata.width,ALIGN(gdata.height,16),
                                  (unsigned char*)output);
    } else {
        int64_t start = systemnanotime();
        yuv420sp_to_rgb_Fp16_NCHW_fast_asm((const unsigned char*)input,
                                           gdata.width,gdata.height,
                                           (float16_t *)gdata.itensor->GetBuffer());
        HiaiRunIRModel();
        rgb_Fp16_NCHW_to_RGBAU8_fast_asm((float16_t*) gdata.otensor->GetBuffer(),
                                         gdata.width*2, gdata.height*2,
                                         (unsigned char*)output);
        int64_t end = systemnanotime();
        float time = (end - start)/1000000.0;
        LOGV("AI end2end:%.2fms",time);
    }

}

// Placeholder: frames are currently rendered inline in doCodecWork() via
// ANativeWindow_lock/unlockAndPost; this hook is intentionally empty.
void sendToSurfaceView(workerdata *d)
{

}

// One iteration of the decode/render loop: feed the codec at most one input
// sample, drain at most one output buffer (converting and rendering it to
// the native window), then re-post kMsgCodecBuffer until both EOS flags are
// set. Runs on the looper thread.
void doCodecWork(workerdata *d) {

    // --- Input side: push one demuxed sample into the codec. ---
    ssize_t bufidx = -1;
    if (!d->sawInputEOS) {
        bufidx = AMediaCodec_dequeueInputBuffer(d->codec, 2000);
        LOGV("input buffer %zd", bufidx);
        if (bufidx >= 0) {
            size_t bufsize;
            auto buf = AMediaCodec_getInputBuffer(d->codec, bufidx, &bufsize);
            auto sampleSize = AMediaExtractor_readSampleData(d->ex, buf, bufsize);
            if (sampleSize < 0) {
                // No more samples: queue an empty EOS buffer.
                sampleSize = 0;
                d->sawInputEOS = true;
                LOGV("EOS");
            }
            auto presentationTimeUs = AMediaExtractor_getSampleTime(d->ex);

            AMediaCodec_queueInputBuffer(d->codec, bufidx, 0, sampleSize, presentationTimeUs,
                    d->sawInputEOS ? AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM : 0);
            AMediaExtractor_advance(d->ex);
        }
    }

    // --- Output side: drain one decoded buffer and render it. ---
    if (!d->sawOutputEOS) {
        AMediaCodecBufferInfo info;
        auto status = AMediaCodec_dequeueOutputBuffer(d->codec, &info, 0);
        if (status >= 0) {
            if (info.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) {
                LOGV("output EOS");
                d->sawOutputEOS = true;
            }
            size_t buffsize;
            auto output = AMediaCodec_getOutputBuffer(d->codec, status,&(buffsize));

            ANativeWindow_Buffer buffer;
            if (ANativeWindow_lock(d->window, &buffer, NULL) < 0) {
                // NOTE(review): returning here drops the dequeued output buffer
                // without releasing it and stops re-posting work — confirm this
                // is only reachable during teardown.
                LOGV("Unable to lock window buffer");
                return;
            }
            //LOGV("ANativeWindow height=%d, width=%d, format=%d, stride=%d",buffer.height,buffer.width,buffer.format,buffer.stride);
            // AI processing (super-resolution) can be plugged in here.
            manualTransform(output,imgBuffer);
            // Copy the converted pixels into the locked window buffer.
            uint8_t *dst_data = static_cast<uint8_t *>(buffer.bits);
            int dst_lineSize = buffer.stride * 4;//ARGB
            // Copy row by row (the window stride may exceed the image width).
            auto src_lineSize = buffer.width*4;
            for (int i = 0; i < buffer.height; ++i) {
                // NOTE(review): dst_lineSize bytes are copied from a source row
                // that is only src_lineSize bytes long — when stride > width this
                // reads past the row in imgBuffer; confirm or copy src_lineSize.
                memcpy(dst_data + i * dst_lineSize, imgBuffer + i * src_lineSize, dst_lineSize);
            }
            ANativeWindow_unlockAndPost(d->window);

            // Pace output against the wallclock anchored at the first frame.
            int64_t presentationNano = info.presentationTimeUs * 1000;
            if (d->renderstart < 0) {
                d->renderstart = systemnanotime() - presentationNano;
            }
            int64_t delay = (d->renderstart + presentationNano) - systemnanotime();
            if (delay > 0) {
                //LOGV("sleep to sync %ld",delay/1000);
                usleep(delay / 1000);
            }

            // Frame already rendered manually, so do not render on release.
            AMediaCodec_releaseOutputBuffer(d->codec, status, false);
            if (d->renderonce) {
                // Single-frame mode (after seek while paused): stop re-posting.
                d->renderonce = false;
                return;
            }
        } else if (status == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED) {
            LOGV("output buffers changed");
        } else if (status == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
            auto format = AMediaCodec_getOutputFormat(d->codec);
            LOGV("format changed to: %s", AMediaFormat_toString(format));
            AMediaFormat_delete(format);
        } else if (status == AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
            LOGV("no output buffer right now");
        } else {
            LOGV("unexpected info code: %zd", status);
        }
    }

    // Keep the loop alive until both sides have hit EOS.
    if (!d->sawInputEOS || !d->sawOutputEOS) {
        mlooper->post(kMsgCodecBuffer, d);
    }
}

// Message dispatcher for the player; runs on the looper thread. `obj` is
// always a workerdata* (in practice &gdata).
void mylooper::handle(int what, void* obj) {
    switch (what) {
        case kMsgCodecBuffer:
            // One step of the decode/render loop; re-posts itself while active.
            doCodecWork((workerdata*)obj);
            break;

        case kMsgDecodeDone:
        {
            // Tear down the codec pipeline; posted with flush=true so any
            // queued kMsgCodecBuffer work is discarded first.
            workerdata *d = (workerdata*)obj;
            AMediaCodec_stop(d->codec);
            AMediaCodec_delete(d->codec);
            AMediaExtractor_delete(d->ex);
            d->sawInputEOS = true;
            d->sawOutputEOS = true;
        }
        break;

        case kMsgSeek:
        {
            // Rewind to the start; if paused, render exactly one frame so the
            // display reflects the new position.
            workerdata *d = (workerdata*)obj;
            AMediaExtractor_seekTo(d->ex, 0, AMEDIAEXTRACTOR_SEEK_NEXT_SYNC);
            AMediaCodec_flush(d->codec);
            d->renderstart = -1;
            d->sawInputEOS = false;
            d->sawOutputEOS = false;
            if (!d->isPlaying) {
                d->renderonce = true;
                post(kMsgCodecBuffer, d);
            }
            LOGV("seeked");
        }
        break;

        case kMsgPause:
        {
            workerdata *d = (workerdata*)obj;
            if (d->isPlaying) {
                // flush all outstanding codecbuffer messages with a no-op message
                d->isPlaying = false;
                post(kMsgPauseAck, NULL, true);
            }
        }
        break;

        case kMsgResume:
        {
            workerdata *d = (workerdata*)obj;
            if (!d->isPlaying) {
                d->renderstart = -1;  // re-anchor A/V pacing after the pause gap
                d->isPlaying = true;
                post(kMsgCodecBuffer, d);
            }
        }
        break;

        case kMsgSR:
        {
            // Enable super-resolution: window buffers become 2x the video size.
            workerdata *d = (workerdata*)obj;
            d->SRflag = true;
            auto ret = ANativeWindow_setBuffersGeometry(gdata.window,
                                                        gdata.width*2,
                                                        gdata.height*2,
                                                        WINDOW_FORMAT_RGBA_8888); //yuv420_sp
            LOGV("kMsgSR ANativeWindow_setBuffersGeometry=%d",ret);
            post(kMsgCodecBuffer, d,true);
        }
        break;

        case kMsgNotSR:
        {
            // Disable super-resolution: restore 1x window geometry.
            workerdata *d = (workerdata*)obj;
            d->SRflag = false;
            auto ret = ANativeWindow_setBuffersGeometry(gdata.window,
                                                        gdata.width,
                                                        gdata.height,
                                                        WINDOW_FORMAT_RGBA_8888); //yuv420_sp
            LOGV("kMsgNotSR ANativeWindow_setBuffersGeometry=%d",ret);
            post(kMsgCodecBuffer, d,true);
        }
        break;
    }
}


// Initialize the HiAI super-resolution model for a height x width input.
// Requires a ROM with HiAI DDK version >= 100.320.000.000 and a model with
// exactly one input and one output tensor. gdata.isHiAIInit reflects the
// outcome: it is set to true only when every step succeeds.
void HIAI_Init(AAssetManager* mgr, int height, int width)
{
    gdata.isHiAIInit = false;

    // Version gate: nullptr means no HiAI support at all.
    char* version = HiaiGetVersion();
    if(version == nullptr ){
        return;
    } else if(std::string(version).compare("100.320.000.000") < 0) {
        return;
    }

    HiaiBuildIRModel(mgr, height, width);

    // Fetch and validate the model's I/O tensors.
    auto itensors = HiaiGetInputTensors();
    auto otensors = HiaiGetOutputTensors();
    if(itensors.size()!=1 || otensors.size()!=1) {
        LOGE("Tensor not inited!");
        return;
    }
    gdata.itensor=itensors[0];
    gdata.otensor=otensors[0];
    // BUGFIX: the original never set this flag to true, so a successful
    // initialization was indistinguishable from a failed one.
    gdata.isHiAIInit = true;
}


extern "C" {

// Open the named asset, select its first video track, configure a decoder
// for flexible YUV420 output, initialize the HiAI SR model for the video's
// dimensions, and start the decode loop. Returns JNI_TRUE on success.
jboolean Java_com_example_nativecodec_NativeCodec_createStreamingMediaPlayer(JNIEnv* env,
        jclass clazz, jobject assetMgr, jstring filename)
{
    LOGV("@@@ create");

    // convert Java string to UTF-8
    const char *utf8 = env->GetStringUTFChars(filename, NULL);
    LOGV("opening %s", utf8);

    AAssetManager *mgr = AAssetManager_fromJava(env, assetMgr);
    AAsset *asset = AAssetManager_open(mgr, utf8, 0);
    if (asset == NULL) {
        // BUGFIX: the original passed an unchecked (possibly NULL) asset into
        // AAsset_openFileDescriptor, which crashes when the file is missing.
        LOGE("failed to open asset: %s", utf8);
        env->ReleaseStringUTFChars(filename, utf8);
        return JNI_FALSE;
    }

    off_t outStart, outLen;
    int fd = AAsset_openFileDescriptor(asset, &outStart, &outLen);
    AAsset_close(asset);  // BUGFIX: the asset handle was leaked; the dup'ed fd suffices

    if (fd < 0) {
        // BUGFIX: the original released utf8 BEFORE logging it here (use-after-release).
        LOGE("failed to open file: %s %d (%s)", utf8, fd, strerror(errno));
        env->ReleaseStringUTFChars(filename, utf8);
        return JNI_FALSE;
    }
    env->ReleaseStringUTFChars(filename, utf8);

    gdata.fd = fd;
    workerdata *d = &gdata;

    AMediaExtractor *ex = AMediaExtractor_new();
    media_status_t err = AMediaExtractor_setDataSourceFd(ex, d->fd,
                                                         static_cast<off64_t>(outStart),
                                                         static_cast<off64_t>(outLen));
    close(d->fd);  // the extractor keeps its own reference to the data
    if (err != AMEDIA_OK) {
        LOGV("setDataSource error: %d", err);
        AMediaExtractor_delete(ex);  // BUGFIX: extractor was leaked on this path
        return JNI_FALSE;
    }

    int numtracks = AMediaExtractor_getTrackCount(ex);

    AMediaCodec *codec = NULL;

    LOGV("input has %d tracks", numtracks);
    for (int i = 0; i < numtracks; i++) {
        AMediaFormat *format = AMediaExtractor_getTrackFormat(ex, i);
        const char *s = AMediaFormat_toString(format);
        LOGV("track %d format: %s", i, s);
        const char *mime;
        if (!AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mime)) {
            LOGV("no mime type");
            AMediaFormat_delete(format);   // BUGFIX: format and extractor were leaked here
            AMediaExtractor_delete(ex);
            return JNI_FALSE;
        } else if (!strncmp(mime, "video/", 6)) {
            // Omitting most error handling for clarity.
            // Production code should check for errors.
            AMediaExtractor_selectTrack(ex, i);
            codec = AMediaCodec_createDecoderByType(mime);
            // 0x7F420888 = COLOR_FormatYUV420Flexible.
            AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_COLOR_FORMAT, 0x7F420888);

            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_WIDTH, &gdata.width);
            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_HEIGHT, &gdata.height);
            LOGV("video height:%d, width:%d",gdata.height,gdata.width);
            auto ret = ANativeWindow_setBuffersGeometry(gdata.window,
                                                        gdata.width,
                                                        gdata.height,
                                                        WINDOW_FORMAT_RGBA_8888); //yuv420_sp
            LOGV("ANativeWindow_setBuffersGeometry=%d",ret);

            AMediaCodec_configure(codec, format, NULL, NULL, 0);
            d->ex = ex;
            d->codec = codec;
            d->renderstart = -1;
            d->sawInputEOS = false;
            d->sawOutputEOS = false;
            d->isPlaying = false;
            d->renderonce = true;
            AMediaCodec_start(codec);
        }
        AMediaFormat_delete(format);
    }

    // BUGFIX: HIAI_Init used to run once per track inside the loop (rebuilding
    // the model, possibly before width/height were known); build it once with
    // the final video dimensions instead.
    HIAI_Init(mgr, gdata.height, gdata.width);

    mlooper = new mylooper();
    mlooper->post(kMsgCodecBuffer, d);

    return JNI_TRUE;
}

// set the playing state for the streaming media player
// JNI entry point: resume or pause the decode loop.
void Java_com_example_nativecodec_NativeCodec_setPlayingStreamingMediaPlayer(JNIEnv* env,
        jclass clazz, jboolean isPlaying)
{
    LOGV("@@@ playpause: %d", isPlaying);
    if (mlooper == NULL) {
        return;  // player not created yet
    }
    int msg = isPlaying ? kMsgResume : kMsgPause;
    mlooper->post(msg, &gdata);
}

// JNI entry point: toggle super-resolution rendering on or off.
void Java_com_example_nativecodec_NativeCodec_setMediaPlayerSR(JNIEnv* env,
     jclass clazz, jboolean isSR)
{
    LOGV("@@@ isSR: %d", isSR);
    if (mlooper == NULL) {
        return;  // player not created yet
    }
    mlooper->post(isSR ? kMsgSR : kMsgNotSR, &gdata);
}

// shut down the native media system
// JNI entry point: tear down the native media pipeline. Flushes pending
// looper work, stops and deletes the codec/extractor via kMsgDecodeDone,
// destroys the looper, and releases the native window.
void Java_com_example_nativecodec_NativeCodec_shutdown(JNIEnv* env, jclass clazz)
{
    LOGV("@@@ shutdown");
    if (mlooper != NULL) {
        // flush=true discards any queued codec-buffer messages first.
        mlooper->post(kMsgDecodeDone, &gdata, true /* flush */);
        mlooper->quit();
        delete mlooper;
        mlooper = NULL;
    }
    if (gdata.window != NULL) {
        ANativeWindow_release(gdata.window);
        gdata.window = NULL;
    }
}


// set the surface
// JNI entry point: adopt a new render surface, releasing any previous one.
void Java_com_example_nativecodec_NativeCodec_setSurface(JNIEnv *env, jclass clazz, jobject surface)
{
    // Drop the previously-held native window before grabbing the new one.
    ANativeWindow *previous = gdata.window;
    if (previous != NULL) {
        ANativeWindow_release(previous);
        gdata.window = NULL;
    }
    gdata.window = ANativeWindow_fromSurface(env, surface);
    LOGV("@@@ setsurface %p", gdata.window);
}


// rewind the streaming media player
// JNI entry point: seek back to the beginning of the stream.
void Java_com_example_nativecodec_NativeCodec_rewindStreamingMediaPlayer(JNIEnv *env, jclass clazz)
{
    LOGV("@@@ rewind");
    if (mlooper == NULL) {
        return;  // player not created yet
    }
    mlooper->post(kMsgSeek, &gdata);
}

}
