#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <algorithm>
#include <atomic>
#include <cctype>
#include <iostream>
#include <memory>
#include <vector>

#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
#include <sensor_msgs/CompressedImage.h>
#include <std_msgs/String.h>
#include "ros/ros.h"

#include "ImageSave.h"
#include "decoder/params.h"
#include "frontend/wav.h"
#include "msg/ImageTextMsg.h"
//#include <image_transport/image_transport.h>
//#include <sensor_msgs/msg/image.hpp>
//#include <std_msgs/msg/string.hpp>

// Shutdown flag: set from the SIGINT handler (Handler) and polled by both
// the PortAudio callback thread (RecordCallback) and the main decode loop.
// A plain bool is neither async-signal-safe nor a valid cross-thread flag;
// a lock-free std::atomic<bool> is both. All existing uses (`stop = true`,
// `!stop`, `stop ? a : b`) work unchanged through implicit conversion.
std::atomic<bool> stop{false};

// Global WeNet ASR configuration; populated in main() from gflags.
std::shared_ptr<wenet::DecodeOptions> g_decode_config;
std::shared_ptr<wenet::FeaturePipelineConfig> g_feature_config;
std::shared_ptr<wenet::DecodeResource> g_decode_resource;


// PortAudio record callback: rescales incoming float samples from the
// [-1, 1) range into 16-bit PCM range and feeds them to the WeNet feature
// pipeline.
//
// input_buffer      float32 samples from the capture device (mono stream,
//                   so frames == samples).
// frames_per_buffer number of frames delivered in this callback.
// user_data         opaque pointer to the wenet::FeaturePipeline.
//
// Returns paComplete once SIGINT has been caught, paContinue otherwise.
static int32_t RecordCallback(const void* input_buffer, void* /*output_buffer*/,
                              unsigned long frames_per_buffer,  // NOLINT
                              const PaStreamCallbackTimeInfo* /*time_info*/,
                              PaStreamCallbackFlags /*status_flags*/,
                              void* user_data) {
  auto* feature_pipeline = reinterpret_cast<wenet::FeaturePipeline*>(user_data);
  const auto* samples = static_cast<const float*>(input_buffer);

  // Scale into a local copy instead of const_cast-ing and mutating the
  // buffer PortAudio handed us: writing through the input buffer is not
  // sanctioned by the PortAudio API. The factor 32768 maps [-1, 1) floats
  // to the int16 sample range the feature pipeline expects. Using an
  // unsigned index also fixes the old signed/unsigned loop comparison.
  std::vector<float> scaled(samples, samples + frames_per_buffer);
  for (float& s : scaled) {
    s *= 32768.0f;
  }

  feature_pipeline->AcceptWaveform(scaled.data(), frames_per_buffer);

  return stop ? paComplete : paContinue;
}

// SIGINT handler: requests shutdown of the record/decode loops.
// Only async-signal-safe operations are used here: storing to a lock-free
// atomic flag and write(2). The previous fprintf() call is NOT
// async-signal-safe (see POSIX signal-safety) and could deadlock if the
// signal arrived inside another stdio call.
static void Handler(int32_t sig) {
  (void)sig;
  stop = true;
  static const char kMsg[] = "\nCaught Ctrl + C. Exiting...\n";
  // Cast to void: the message is best-effort; nothing to do on failure.
  (void)!write(STDERR_FILENO, kMsg, sizeof(kMsg) - 1);
}

// Entry point: captures microphone audio, runs WeNet streaming ASR on it,
// and whenever an utterance endpoint is detected with a long-enough
// transcript, publishes a fresh camera snapshot plus the recognized text
// on "image_and_text_topic".
int32_t main(int32_t argc, char* argv[]) {
  signal(SIGINT, Handler);

  ros::init(argc, argv, "publish_image_and_text");
  ros::NodeHandle nh;
  //image_transport::ImageTransport it(nh);
  ros::Publisher pub =
      nh.advertise<sensor::ImageTextMsg>("image_and_text_topic", 10);
  // NOTE(review): 0.02 Hz is one tick every 50 s — the old comment claimed
  // 1 Hz. The rate is currently unused because rate.sleep() is commented
  // out in the publish branch below; kept to preserve existing behavior.
  ros::Rate rate(0.02);

  cv_bridge::CvImage cv_image;
  cv_image.encoding = sensor_msgs::image_encodings::BGR8;
  // Fallback image; replaced by take_pics() before each publish.
  cv::Mat image = cv::imread(
      "/home/fengchao/Robot/Sensor/src/Sensor/src/robot-cs01.jpg",
      cv::IMREAD_COLOR);
  if (image.empty()) {
    // Previously unchecked: a bad path silently produced an empty Mat.
    fprintf(stderr, "Warning: failed to load fallback image\n");
  }

  const char* kUsageMessage = "help message";
  gflags::SetUsageMessage(kUsageMessage);  // was declared but never used
  gflags::ParseCommandLineFlags(&argc, &argv, false);
  google::InitGoogleLogging(argv[0]);

  g_decode_config = wenet::InitDecodeOptionsFromFlags();
  g_feature_config = wenet::InitFeaturePipelineConfigFromFlags();
  g_decode_resource = wenet::InitDecodeResourceFromFlags();

  // RAII object — presumably initializes/terminates PortAudio (TODO confirm
  // in wenet::Microphone); must outlive the stream below.
  wenet::Microphone mic;

  PaDeviceIndex num_devices = Pa_GetDeviceCount();
  fprintf(stderr, "Num devices: %d\n", num_devices);

  PaStreamParameters param;

  param.device = Pa_GetDefaultInputDevice();
  if (param.device == paNoDevice) {
    fprintf(stderr, "No default input device found\n");
    exit(EXIT_FAILURE);
  }
  fprintf(stderr, "Use default device: %d\n", param.device);

  const PaDeviceInfo* info = Pa_GetDeviceInfo(param.device);
  fprintf(stderr, "  Name: %s\n", info->name);
  fprintf(stderr, "  Max input channels: %d\n", info->maxInputChannels);

  param.channelCount = 1;             // mono capture
  param.sampleFormat = paFloat32;     // matches RecordCallback's float math
  param.suggestedLatency = info->defaultLowInputLatency;
  param.hostApiSpecificStreamInfo = nullptr;

  auto feature_pipeline =
      std::make_shared<wenet::FeaturePipeline>(*g_feature_config);

  float sample_rate = 16000;
  PaStream* stream;
  // framesPerBuffer = 0 lets PortAudio choose the buffer size.
  PaError err =
      Pa_OpenStream(&stream, &param, nullptr, sample_rate, 0, paClipOff,
                    RecordCallback, feature_pipeline.get());

  if (err != paNoError) {
    fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
    exit(EXIT_FAILURE);
  }

  err = Pa_StartStream(stream);
  if (err != paNoError) {
    // Check BEFORE announcing success (the old code printed "Started" first).
    fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
    exit(EXIT_FAILURE);
  }
  fprintf(stderr, "Started\n");

  wenet::AsrDecoder decoder(feature_pipeline, g_decode_resource,
                            *g_decode_config);

  std::string last_result;
  // Also honor ROS shutdown (e.g. rosnode kill), not only SIGINT.
  while (!stop && ros::ok()) {
    wenet::DecodeState state = decoder.Decode();
    std::string result = decoder.result()[0].sentence;
    if (state == wenet::DecodeState::kEndFeats) {
      decoder.Rescoring();
      break;
    } else if (state == wenet::DecodeState::kEndpoint) {
      decoder.Rescoring();
      size_t result_size = result.size();
      std::cout << "final result: " << result << " size: " << result_size
                << std::endl;

      // Threshold is in BYTES; with UTF-8 CJK output (~3 bytes/char), >18
      // presumably means "more than ~6 characters" — TODO confirm intent.
      if (result_size > 18) {
        sensor::ImageTextMsg msg;
        msg.text = result;
        image = take_pics();  // grab a fresh snapshot for this utterance
        cv_image.image = image;
        cv_image.toImageMsg(msg.image);
        pub.publish(msg);
        ros::spinOnce();
        //rate.sleep();
      } else {
        std::cout << " no voice input  result: " << result << std::endl;
      }

      decoder.ResetContinuousDecoding();
    } else {
      // Print streaming partial results, but only when they change.
      if (decoder.DecodedSomething()) {
        if (!result.empty() && last_result != result) {
          last_result = result;
          std::cout << "\rPartial result: " << result.c_str() << std::flush;
        }
      }
    }

    Pa_Sleep(20);  // throttle the polling loop (~50 Hz)
  }
  err = Pa_CloseStream(stream);
  if (err != paNoError) {
    fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
    exit(EXIT_FAILURE);
  }

  return 0;
}

