/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 * */

// AmzServer - Tototoy's server model.
// Author: tonyjobmails@gmail.com (tonyhack).
//
// Define class TestThread.
//

#include <memory>
#include <string>
#include <vector>

#include "ring_queue_multiple_model_boundless.h"
#include "test_ring_queue_multiple_model_boundless.h"
#include "logging.h"
#include "data_element.pb.h"

namespace amz {

namespace test_ring_queue_multiple_model_boundless {

static Mutex g_test_thread_mutex;

// Producer thread: generates DataElement messages for every value in
// [from, to] and pushes them into the shared blocking ring queue.
//
// `communication` must be non-NULL; a NULL value is logged and the member is
// reset so a later dereference fails fast instead of reading an
// uninitialized pointer (the original left communication_ untouched here).
Producer::Producer(RingQueueBlockingInterface *communication,
       std::string name, int seed, int from, int to)
    : Thread(name, true), total_(0), seed_(seed), from_(from), to_(to) {
  if(communication) {
    this->communication_ = communication;
  } else {
    this->communication_ = NULL;
    // Serialize logging with the other test threads, matching the
    // Consumer constructor's locking convention.
    g_test_thread_mutex.Lock();
    LOG(INFO) << "Error, communication is a null value!";
    g_test_thread_mutex.Unlock();
  }
}

// Trivial destructor: the producer does not own communication_ (it is
// supplied by the caller), so there is nothing to release here.
Producer::~Producer() {
}

// Thread body for a producer.
//
// For every value pos in [from_, to_] it builds a DataElement carrying the
// seed, the variable and four derived arithmetic values, serializes it into
// a local buffer and adds it to the shared blocking ring queue.  After the
// whole range has been produced it flushes the queue and signals
// ProduceCompletely() so the consumers can eventually terminate.
void Producer::Run() {
  g_test_thread_mutex.Lock();
  LOG(INFO) << "Thread [" << this->GetName() << "] begin to run ...";
  g_test_thread_mutex.Unlock();

  char buffer[1024];

  int pos = this->from_;
  while(pos <= this->to_) {
    DataElement message;
    message.set_seed(this->seed_);
    message.set_variable(pos);
    message.set_add_value(pos + this->seed_);
    message.set_sub_value(pos - this->seed_);
    message.set_mul_value(pos * this->seed_);
    // Guard the division: the original "pos / seed_" is undefined behavior
    // (integer division by zero) when the producer is built with seed == 0.
    message.set_div_value(this->seed_ != 0 ? pos / this->seed_ : 0);

    if(message.SerializeToArray(buffer, sizeof(buffer)) == false) {
      g_test_thread_mutex.Lock();
      LOG(INFO) << "SerializeToArray error.";
      g_test_thread_mutex.Unlock();
    } else {
      int buffer_size = message.ByteSize();
      // A successful SerializeToArray already guarantees that the message
      // fits; the original ">= 1024" test wrongly rejected a valid message
      // of exactly sizeof(buffer) bytes.
      if(buffer_size > (int)sizeof(buffer) ||
         this->communication_->Add(buffer, buffer_size) != buffer_size) {
        g_test_thread_mutex.Lock();
        LOG(INFO) << "Add RingQueue error.";
        g_test_thread_mutex.Unlock();
      } else {
        ++this->total_;  // total_ counts only successfully queued messages
      }
    }
    ++pos;
    this->Sleep(1);  // pace the producer (units defined by Thread::Sleep)
  }

  g_test_thread_mutex.Lock();
  LOG(INFO) << "Thread [" << this->GetName() << "] finishes producing, begins to flush.";
  g_test_thread_mutex.Unlock();

  // Finish producing, begin to flush data.
  while(true) {
    if(this->communication_->Flush() == true) {
      // Flush complete, set complete.
      this->communication_->ProduceCompletely();
      break;
    }
    // Back off between retries; the original looped without sleeping and
    // busy-spun a full core while the queue could not yet be flushed.
    this->Sleep(1);
  }

  g_test_thread_mutex.Lock();
  LOG(INFO) << "Thread [" << this->GetName() << "] end running ...";
  g_test_thread_mutex.Unlock();
}

// Consumer thread: drains serialized DataElement messages from the shared
// blocking ring queue and logs their fields.
//
// `communication` must be non-NULL; a NULL value is logged and the member is
// reset so a later dereference fails fast instead of reading an
// uninitialized pointer (the original left communication_ untouched here).
Consumer::Consumer(RingQueueBlockingInterface *communication,
       std::string name)
    : Thread(name, true), total_(0) {
  if(communication) {
    this->communication_ = communication;
  } else {
    this->communication_ = NULL;
    g_test_thread_mutex.Lock();
    LOG(INFO) << "Error, communication is a null value!";
    g_test_thread_mutex.Unlock();
  }
}

// Trivial destructor: the consumer does not own communication_ (it is
// supplied by the caller), so there is nothing to release here.
Consumer::~Consumer() {
}
  
// Thread body for a consumer.
//
// Repeatedly removes serialized DataElement messages from the shared
// blocking ring queue and logs their fields.  The loop ends once an empty
// read coincides with every producer having signalled completion.
void Consumer::Run() {
  g_test_thread_mutex.Lock();
  LOG(INFO) << "Thread [" << this->GetName() << "] begin to run ...";
  g_test_thread_mutex.Unlock();

  char buffer[1024];

  int pos = 0;  // counts loop iterations (including empty reads), for the log
  while(true) {
    ++pos;
    int size = this->communication_->Remove(buffer, sizeof(buffer));
    if(size > 0) {
      DataElement message;
      // The original ignored the parse result and would have logged garbage
      // field values from a corrupted or truncated buffer.
      if(message.ParseFromArray(buffer, size) == false) {
        g_test_thread_mutex.Lock();
        LOG(INFO) << "ParseFromArray error.";
        g_test_thread_mutex.Unlock();
      } else {
        g_test_thread_mutex.Lock();
        LOG(INFO) << "Read data: "
                         << " seed=" << message.seed() << ", "
                         << " variable=" << message.variable() << ", "
                         << " add_value=" << message.add_value() << ", "
                         << " sub_value=" << message.sub_value() << ", "
                         << " mul_value=" << message.mul_value() << ", "
                         << " div_value=" << message.div_value();
        g_test_thread_mutex.Unlock();
      }
      ++this->total_;  // total_ counts every message removed from the queue
      this->Sleep(2);
    } else if(size == 0) {
      // An empty read plus "all producers complete" means no more data can
      // ever arrive; exit the loop.
      if(this->communication_->CheckAllProducersComplete() == true) {
        g_test_thread_mutex.Lock();
        LOG(INFO) << "Thread [" << this->GetName() << "] check all producers complete, pos=" << pos;
        g_test_thread_mutex.Unlock();
        break;
      }
    }
  }

  g_test_thread_mutex.Lock();
  LOG(INFO) << "Thread [" << this->GetName() << "] end running ...";
  g_test_thread_mutex.Unlock();
}

}  // namespace test_ring_queue_multiple_model_boundless

}  // namespace amz


using namespace amz;
using namespace test_ring_queue_multiple_model_boundless;

int main() {
  InitializeLogger("/tmp/info.log", "/tmp/warn.log", "/tmp/erro.log");

  RingQueueMultipleModelBoundless *ring_queue = new RingQueueMultipleModelBoundless(10, 128, 64, true);

  Producer p1((RingQueueBlockingInterface *)ring_queue, "p1", 1, 0, 1000);
  Producer p2((RingQueueBlockingInterface *)ring_queue, "p2", 5, 1001, 2000);
  Producer p3((RingQueueBlockingInterface *)ring_queue, "p3", 10, 2001, 3000);
  Producer p4((RingQueueBlockingInterface *)ring_queue, "p4", 15, 3001, 4000);
  Producer p5((RingQueueBlockingInterface *)ring_queue, "p5", 20, 4001, 5000);
  Producer p6((RingQueueBlockingInterface *)ring_queue, "p6", 25, 5001, 6000);
  Producer p7((RingQueueBlockingInterface *)ring_queue, "p7", 30, 6001, 7000);
  Producer p8((RingQueueBlockingInterface *)ring_queue, "p8", 35, 7001, 8000);
  Producer p9((RingQueueBlockingInterface *)ring_queue, "p9", 40, 8001, 9000);
  Producer p10((RingQueueBlockingInterface *)ring_queue, "p10", 45, 9001, 10000);

  Consumer c1((RingQueueBlockingInterface *)ring_queue, "c1");
  Consumer c2((RingQueueBlockingInterface *)ring_queue, "c2");
  Consumer c3((RingQueueBlockingInterface *)ring_queue, "c3");
  Consumer c4((RingQueueBlockingInterface *)ring_queue, "c4");

  p1.Start();
  p2.Start();
  p3.Start();
  p4.Start();
  p5.Start();
  p6.Start();
  p7.Start();
  p8.Start();
  p9.Start();
  p10.Start();

  c1.Start();
  c2.Start();
  c3.Start();
  c4.Start();

  p1.Join();
  p2.Join();
  p3.Join();
  p4.Join();
  p5.Join();
  p6.Join();
  p7.Join();
  p8.Join();
  p9.Join();
  p10.Join();

/*
  sleep(5);

  g_test_thread_mutex.Lock();
  LOG(INFO) << "[main thread] begin to flush data";
  g_test_thread_mutex.Unlock();

  int pos = 0;
  while(true) {
    if(ring_queue->Flush() == true) {
      break;
    } else if(pos++ == 5000) {
      // Signal all the consumer thread.
      ring_queue->ProduceCompletely();
      break;
    }
    usleep(30000);
  }
*/

  c1.Join();
  c2.Join();
  c3.Join();
  c4.Join();
 
  LOG(INFO) << "Summary: "
            << "produce = \"" << p1.GetTotal() + p2.GetTotal() + p3.GetTotal() + p4.GetTotal() +
                                      p5.GetTotal() + p6.GetTotal() + p7.GetTotal() + p8.GetTotal() +
                                      p9.GetTotal() + p10.GetTotal() << "\", "
            << "consume = \"" << c1.GetTotal() + c2.GetTotal() + c3.GetTotal() + c4.GetTotal() << "\"";

  delete ring_queue;
  
}
