#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <microhttpd.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavdevice/avdevice.h>

#define PORT 8888
#define VIDEO_NODE "/dev/video0"
//#define TEST
/*
//encoder
  struct SwsContext *swsContext = sws_getContext(
      con_info->codec_context->width,
      con_info->codec_context->height, con_info->codec_context->pix_fmt,
      con_info->codec_context->width, con_info->codec_context->height,
      AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
  const AVCodec *encoder = NULL;
  if (!(encoder = avcodec_find_encoder(AV_CODEC_ID_H264))) {
    fprintf(stderr, "Codec not found\n");
    ret = -1;
    exit(1);
  }
  AVCodecContext *c;
  if (!(c = avcodec_alloc_context3(encoder))) {
    fprintf(stderr, "Could not allocate video codec context\n");
    ret = -1;
    exit(1);
  }
  c->width = con_info->codec_context->width;
  c->height = con_info->codec_context->height;
  c->pix_fmt = AV_PIX_FMT_YUV420P;
  c->time_base = (AVRational){1, 30}; // 设置为30fps
  //c->time_base.num = 1;
  //c->time_base.den = 1;
  if ((ret = avcodec_open2(c, encoder, NULL)) < 0) {
    fprintf(stderr, "Could not open encoder\n");
    exit(1);
  }
          printf("%d pts:%ld pkt_dts:%ld\n", __LINE__,
              frame->pts, frame->pkt_dts);
          AVFrame *dst_frame = av_frame_alloc();
          printf("width:%d, height:%d\n", 
              con_info->codec_context->width,
              con_info->codec_context->height);
          dst_frame->format = AV_PIX_FMT_YUV420P;
          dst_frame->width = con_info->codec_context->width;
          dst_frame->height = con_info->codec_context->height;
          if (av_frame_get_buffer(dst_frame, 0) < 0) {
            fprintf(stderr, "错误：无法为 AVFrame 分配内存\n");
            return -1;
          }
          ret = sws_scale(swsContext,
              (const uint8_t * const*)frame->data,
              frame->linesize, 0, con_info->codec_context->height,
              dst_frame->data, dst_frame->linesize);
          if (ret < 0) {
            printf("%d ret:%s\n", __LINE__, strerror(-ret));
            fprintf(stderr, "Error sws\n");
            exit(1);
          }
          printf("%d\n", __LINE__);
          if (!dst_frame->data[0]) {
              fprintf(stderr, "错误：AVFrame 数据为空\n");
              return -1;
          }

          dst_frame->pts = dst_frame->pkt_dts = frame->best_effort_timestamp;//time(NULL);
          printf("%d pts:%ld pkt_dts:%ld\n", __LINE__,
              dst_frame->pts, dst_frame->pkt_dts);
          if ((ret = avcodec_send_frame(c, dst_frame)) < 0) {
            printf("%d ret:%s\n", __LINE__, strerror(-ret));
            fprintf(stderr, "Error sending a frame for encoding\n");
            exit(1);
          }
          printf("%d\n", __LINE__);
          ret = avcodec_receive_packet(c, pkt);
          if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return ret;
          else if (ret < 0) {
            printf("%d ret:%s\n", __LINE__, strerror(-ret));
            fprintf(stderr, "Error during encoding\n");
            exit(1);
          }
          printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
          //fwrite(pkt->data, 1, pkt->size, outfile);
          av_packet_unref(pkt);
          printf("%d\n", __LINE__);
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
                       AVStream *st, AVFrame *frame, AVPacket *pkt)
{
    int ret;

    // send the frame to the encoder
    ret = avcodec_send_frame(c, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame to the encoder: %s\n",
                av_err2str(ret));
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(c, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        else if (ret < 0) {
            fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
            exit(1);
        }

        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;

        log_packet(fmt_ctx, pkt);
        ret = av_interleaved_write_frame(fmt_ctx, pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
            exit(1);
        }
    }

    return ret == AVERROR_EOF ? 1 : 0;
}
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
        return NULL;

    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
                  ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
                  ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}
*/

#define MJPEG_BOUNDARY "myboundary" // multipart boundary token for MJPEG

/* Lazily-initialized capture/decode state shared by every HTTP request.
 * NOTE(review): shared without any locking while the daemon runs with
 * MHD_USE_THREAD_PER_CONNECTION — concurrent requests would race on these;
 * confirm the single-client assumption. */
static AVFormatContext *fmt_ctx = NULL;   /* demuxer for the camera device */
static AVCodecContext *codec_ctx = NULL;  /* opened video decoder */
static AVCodec *codec = NULL;             /* decoder found for the stream */
static AVFrame *frame = NULL;             /* reusable decoded frame */
static AVPacket packet;                   /* reusable demuxed packet */
static int video_stream_idx = -1;         /* index of the video stream, -1 = unset */
//static struct MHD_Daemon *daemon = NULL;
static struct SwsContext *sws_ctx = NULL; /* camera pix_fmt -> YUV420P converter */

// 该回调函数将逐帧读取视频流并将其以 MJPEG 格式传输
static int send_video_data(void *cls, struct MHD_Connection *connection,
                            const char *url, const char *method,
                            const char *version, const char *upload_data,
                            size_t *upload_data_size, void **con_cls) {
    if (strcmp(method, "GET") != 0) {
        return MHD_NO; // 只处理 GET 请求
    }

    if (!fmt_ctx) {
  avdevice_register_all();
        avformat_network_init();

        // 打开摄像头流，假设使用 V4L2 设备（例如 /dev/video0）
        if (avformat_open_input(&fmt_ctx, "/dev/video0", NULL, NULL) < 0) {
            fprintf(stderr, "无法打开摄像头设备。\n");
            return MHD_NO;
        }

        // 查找流信息
        if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
            fprintf(stderr, "无法获取流信息。\n");
            return MHD_NO;
        }

        // 查找视频流索引
        for (int i = 0; i < fmt_ctx->nb_streams; i++) {
            if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
                video_stream_idx = i;
                break;
            }
        }

        if (video_stream_idx == -1) {
            fprintf(stderr, "无法找到视频流。\n");
            return MHD_NO;
        }

        // 获取视频编解码器
        codec = avcodec_find_decoder(fmt_ctx->streams[video_stream_idx]->codecpar->codec_id);
        codec_ctx = avcodec_alloc_context3(codec);
        avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[video_stream_idx]->codecpar);

        if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
            fprintf(stderr, "无法打开视频编解码器。\n");
            return MHD_NO;
        }

        // 初始化帧和数据包
        frame = av_frame_alloc();
        av_init_packet(&packet);

        // 设置像素格式转换上下文
        sws_ctx = sws_getContext(codec_ctx->width, codec_ctx->height,
            codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height,
            AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    }

    // 读取视频帧
    if (av_read_frame(fmt_ctx, &packet) < 0) {
        fprintf(stderr, "读取帧失败。\n");
        return MHD_NO;
    }

    // 如果是视频流，则处理
    if (packet.stream_index == video_stream_idx) {
        if (avcodec_send_packet(codec_ctx, &packet) < 0) {
            fprintf(stderr, "发送数据包到编解码器失败。\n");
            return MHD_NO;
        }

        if (avcodec_receive_frame(codec_ctx, frame) == 0) {
            // 创建一个临时帧，用于存储转换后的 RGB 数据
            AVFrame *rgb_frame = av_frame_alloc();
            uint8_t *buffer = NULL;

            // 分配缓冲区，存储转换后的 RGB 数据
            int num_bytes = av_image_get_buffer_size(
                AV_PIX_FMT_YUV420P, codec_ctx->width, codec_ctx->height, 1);
            buffer = (uint8_t *)av_malloc(num_bytes * sizeof(uint8_t));

            av_image_fill_arrays(
                rgb_frame->data, rgb_frame->linesize, buffer,
                AV_PIX_FMT_YUV420P,
                codec_ctx->width, codec_ctx->height, 1);

            // 将 YUV 帧转换为 RGB
            sws_scale(sws_ctx, (const uint8_t * const *)frame->data, frame->linesize, 0, codec_ctx->height, rgb_frame->data, rgb_frame->linesize);

            // 创建 JPEG 编码器
            AVCodec *jpeg_codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
            AVCodecContext *jpeg_codec_ctx = avcodec_alloc_context3(jpeg_codec);
            jpeg_codec_ctx->strict_std_compliance = FF_COMPLIANCE_UNOFFICIAL;

            jpeg_codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
            jpeg_codec_ctx->width = codec_ctx->width;
            jpeg_codec_ctx->height = codec_ctx->height;
            jpeg_codec_ctx->time_base = (AVRational){1, 25};
            avcodec_open2(jpeg_codec_ctx, jpeg_codec, NULL);

            // 编码 RGB 帧为 JPEG
            AVPacket jpeg_packet;
            av_init_packet(&jpeg_packet);
            jpeg_packet.data = NULL;
            jpeg_packet.size = 0;

            if (avcodec_send_frame(jpeg_codec_ctx, rgb_frame) == 0) {
                if (avcodec_receive_packet(jpeg_codec_ctx, &jpeg_packet) == 0) {
                    // 发送 MJPEG 数据
                    size_t packet_size = jpeg_packet.size;
                    const char *header = "Content-Type: image/jpeg\r\n\r\n";
                    size_t header_len = strlen(header);

                    // 构建响应头
                    char boundary[1024];
                    snprintf(boundary, sizeof(boundary), "--" MJPEG_BOUNDARY "\r\nContent-Type: image/jpeg\r\n\r\n");

                    // 发送边界和 JPEG 数据
                    struct MHD_Response *response = MHD_create_response_from_buffer(header_len + packet_size, (void *)header, MHD_RESPMEM_PERSISTENT);
                    MHD_add_response_header(response, "Content-Type", "multipart/x-mixed-replace; boundary=" MJPEG_BOUNDARY);

                    // 发送 MJPEG 帧（边界 + 图片数据）
                    MHD_add_response_header(response, "Content-Length", "0");
                    MHD_queue_response(connection, MHD_HTTP_OK, response);
                    MHD_add_response_header(response, "Content-Type", "image/jpeg\r\n");
                    MHD_add_response_header(response, "Content-Length", "0");

                    // 发送 JPEG 图像数据
                    struct MHD_Response *jpeg_response = MHD_create_response_from_buffer(jpeg_packet.size, jpeg_packet.data, MHD_RESPMEM_MUST_FREE);
                    MHD_queue_response(connection, MHD_HTTP_OK, jpeg_response);

                    // 清理
                    av_packet_unref(&jpeg_packet);
                    av_frame_free(&rgb_frame);
                    avcodec_free_context(&jpeg_codec_ctx);
                }
            }
        }
    }

    av_packet_unref(&packet);
    return MHD_NO;
}


// Per-connection capture state: demuxer, decoder, and scratch frame/packet.
struct connection_info_struct {
    AVFormatContext *format_context;  /* demuxer for the camera device */
    AVCodecContext *codec_context;    /* opened video decoder */
    AVFrame *frame;                   /* NOTE(review): appears unused by current code */
    AVPacket packet;                  /* scratch packet filled by av_read_frame() */
};
#ifdef TEST
int test()
{
  int ret;
  struct connection_info_struct *con_info;
  con_info = calloc(1, sizeof(struct connection_info_struct));
  if (avformat_open_input(&con_info->format_context, "/dev/video0", NULL, NULL) != 0) {
//    fprintf(stderr, "Unable to open video device\n");
    printf("%d Unable to open video device\n", __LINE__);
 //   return MHD_NO;
  }
          printf("%d\n", __LINE__);

  if (avformat_find_stream_info(con_info->format_context, NULL) < 0) {
    fprintf(stderr, "Unable to find stream information\n");
    return MHD_NO;
  }

  const AVCodec *codec = NULL;
  int i = 0;
  for (i = 0; i < con_info->format_context->nb_streams; i++) {
    if (con_info->format_context->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
      codec = avcodec_find_decoder(con_info->format_context->streams[i]->codecpar->codec_id);
      if (codec) {
        con_info->codec_context = avcodec_alloc_context3(codec);
        avcodec_parameters_to_context(con_info->codec_context, con_info->format_context->streams[i]->codecpar);
        if (avcodec_open2(con_info->codec_context, codec, NULL) < 0) {
          fprintf(stderr, "Unable to open codec\n");
          return MHD_NO;
        }
        break;
      }
    }
  }

//encoder
  struct SwsContext *swsContext = sws_getContext(
      con_info->codec_context->width,
      con_info->codec_context->height, con_info->codec_context->pix_fmt,
      con_info->codec_context->width, con_info->codec_context->height,
      AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
  AVFrame *frame = av_frame_alloc();
  if (!frame) {
    fprintf(stderr, "Could not allocate video frame\n");
    exit(1);
  }
#if 1
  while (av_read_frame(con_info->format_context, &con_info->packet) >= 0) {
    if (con_info->packet.stream_index == 0) {
      if (avcodec_send_packet(con_info->codec_context, &con_info->packet) == 0) {
        if (avcodec_receive_frame(con_info->codec_context, frame) == 0) {
          printf("%d pts:%ld pkt_dts:%ld\n", __LINE__,
              frame->pts, frame->pkt_dts);
          AVFrame *dst_frame = av_frame_alloc();
          printf("width:%d, height:%d\n", 
              con_info->codec_context->width,
              con_info->codec_context->height);
          dst_frame->format = AV_PIX_FMT_YUV420P;
          dst_frame->width = con_info->codec_context->width;
          dst_frame->height = con_info->codec_context->height;
          if (av_frame_get_buffer(dst_frame, 0) < 0) {
            fprintf(stderr, "错误：无法为 AVFrame 分配内存\n");
            return -1;
          }
          ret = sws_scale(swsContext,
              (const uint8_t * const*)frame->data,
              frame->linesize, 0, con_info->codec_context->height,
              dst_frame->data, dst_frame->linesize);
          if (ret < 0) {
            printf("%d ret:%s\n", __LINE__, strerror(-ret));
            fprintf(stderr, "Error sws\n");
            exit(1);
          }
          printf("%d\n", __LINE__);
          if (!dst_frame->data[0]) {
              fprintf(stderr, "错误：AVFrame 数据为空\n");
              return -1;
          }

          dst_frame->pts = dst_frame->pkt_dts = frame->best_effort_timestamp;//time(NULL);
          printf("%d pts:%ld pkt_dts:%ld\n", __LINE__,
              dst_frame->pts, dst_frame->pkt_dts);
          av_frame_free(&frame);
  const AVCodec *encoder = NULL;
  if (!(encoder = avcodec_find_encoder(AV_CODEC_ID_H264))) {
    fprintf(stderr, "Codec not found\n");
    ret = -1;
    exit(1);
  }
  AVCodecContext *c;
  if (!(c = avcodec_alloc_context3(encoder))) {
    fprintf(stderr, "Could not allocate video codec context\n");
    ret = -1;
    exit(1);
  }
  c->width = con_info->codec_context->width;
  c->height = con_info->codec_context->height;
  c->pix_fmt = AV_PIX_FMT_YUV420P;
  c->time_base = (AVRational){1, 30}; // 设置为30fps
  if ((ret = avcodec_open2(c, encoder, NULL)) < 0) {
    fprintf(stderr, "Could not open encoder\n");
    exit(1);
  }
          if ((ret = avcodec_send_frame(c, dst_frame)) < 0) {
            printf("%d ret:%s\n", __LINE__, strerror(-ret));
            fprintf(stderr, "Error sending a frame for encoding\n");
            exit(1);
          }
        av_frame_free(&dst_frame);
          printf("%d\n", __LINE__);
  AVPacket *pkt = av_packet_alloc();
  if (!pkt)
    exit(1);
  av_packet_unref(&con_info->packet);
  avformat_close_input(&con_info->format_context);
  avcodec_free_context(&con_info->codec_context);
  sws_freeContext(swsContext);
          printf("%d\n", __LINE__);
          ret = avcodec_receive_packet(c, pkt);
          if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return ret;
          else if (ret < 0) {
            printf("%d ret:%s\n", __LINE__, strerror(-ret));
            fprintf(stderr, "Error during encoding\n");
            exit(1);
          }
          printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
          /*
          //fwrite(pkt->data, 1, pkt->size, outfile);
          av_packet_unref(pkt);
          printf("%d\n", __LINE__);
          struct MHD_Response *response;
          response = MHD_create_response_from_buffer(
              pkt->size, pkt->data,
              MHD_RESPMEM_MUST_COPY);
          if (!response) {
            printf("[ERROR]F%s L%d RESPONSE IS NULL\n", __FILE__, __LINE__);
            exit(1);
          }
          av_frame_free(&frame);
          */
        }
      }
    }
    av_packet_unref(&con_info->packet);
  }
  avcodec_free_context(&con_info->codec_context);
  avformat_close_input(&con_info->format_context);
#endif
}
#endif
#ifndef TEST
// 摄像头捕获视频并推送
static int stream_video(struct MHD_Connection *connection, const char *url)
{
  int ret;
  struct connection_info_struct *con_info;
  con_info = calloc(1, sizeof(struct connection_info_struct));

  if (avformat_open_input(&con_info->format_context, VIDEO_NODE,
        NULL, NULL) != 0) {
//    fprintf(stderr, "Unable to open video device\n");
    printf("%d Unable to open video device\n", __LINE__);
 //   return MHD_NO;
  }

  if (avformat_find_stream_info(con_info->format_context, NULL) < 0) {
    fprintf(stderr, "Unable to find stream information\n");
    return MHD_NO;
  }

  const AVCodec *codec = NULL;
  int i = 0;
  for (i = 0; i < con_info->format_context->nb_streams; i++) {
    if (con_info->format_context->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
      codec = avcodec_find_decoder(con_info->format_context->streams[i]->codecpar->codec_id);
      if (codec) {
        con_info->codec_context = avcodec_alloc_context3(codec);
        avcodec_parameters_to_context(con_info->codec_context, con_info->format_context->streams[i]->codecpar);
        if (avcodec_open2(con_info->codec_context, codec, NULL) < 0) {
          fprintf(stderr, "Unable to open codec\n");
          return MHD_NO;
        }
        break;
      }
    }
  }

  AVFrame *frame = av_frame_alloc();
  if (!frame) {
    fprintf(stderr, "Could not allocate video frame\n");
    exit(1);
  }
  AVPacket *pkt = av_packet_alloc();
  if (!pkt)
    exit(1);
  while (av_read_frame(con_info->format_context,
        &con_info->packet) >= 0) {
    if (con_info->packet.stream_index == 0) {
      if (avcodec_send_packet(con_info->codec_context,
            &con_info->packet) == 0) {
        if (avcodec_receive_frame(con_info->codec_context,
              frame) == 0) {
#if 0
            const char *response_header = "HTTP/1.1 200 OK\r\nContent-Type: video/mp4\r\n\r\n";
            struct MHD_Response *response = MHD_create_response_from_buffer(strlen(response_header), (void *)response_header, MHD_RESPMEM_PERSISTENT);
            MHD_add_response_header(response, "Content-Type", "video/mp4");

            // Send the response
            int ret = MHD_queue_response(connection, MHD_HTTP_OK, response);
            MHD_destroy_response(response);
#else
          struct MHD_Response *response;
          response = MHD_create_response_from_buffer(
              //pkt->size, pkt->data,
              4, "test",
              MHD_RESPMEM_MUST_COPY);
          if (!response) {
            printf("[ERROR]F%s L%d RESPONSE IS NULL\n", __FILE__, __LINE__);
            exit(1);
          }
          ret = MHD_queue_response (connection, MHD_HTTP_OK, response);
          MHD_destroy_response (response);
#endif
          av_frame_free(&frame);
        }
      }
    }
    av_packet_unref(&con_info->packet);
  }

  avcodec_free_context(&con_info->codec_context);
  avformat_close_input(&con_info->format_context);

  return MHD_YES;
}
#endif

// Request router: dispatch "/video_stream" to the capture handler and
// refuse every other URL.
enum MHD_Result answer_to_connection(
    void *cls, struct MHD_Connection *connection,
    const char *url, const char *method,
    const char *version, const char *upload_data,
    size_t *upload_data_size, void **con_cls)
{
  if (strcmp(url, "/video_stream") != 0)
    return MHD_NO;
  return stream_video(connection, url);
}

// Entry point: initialize FFmpeg device support, start the HTTP server on
// PORT, and serve camera frames until the user presses Enter.
int main()
{
  avdevice_register_all();
  avformat_network_init();
#ifdef TEST
  test();
  exit(1);
#endif
  struct MHD_Daemon *server = MHD_start_daemon(
      MHD_USE_THREAD_PER_CONNECTION, PORT, NULL, NULL,
      &send_video_data, NULL, MHD_OPTION_END);
  if (!server) {
    fprintf(stderr, "Failed to start HTTP server\n");
    return 1;
  }

  printf("Server running on port %d...\n", PORT);
  getchar(); /* block until the user presses Enter */
  MHD_stop_daemon(server);

  return 0;
}

