namespace classifier_test
{

// Driver for the stream-classifier experiment suites. Each public test_*
// method sweeps one parameter (build length, chunk size, dimensionality,
// concept count, cutter/clustering tuning, ...), repeats every configuration
// 10 times, and prints averaged statistics (mean and deviation) to OutFile.
// Non-copyable: owns the output file and the classifier instances.
class experimenter : private noncopyable
{
public:
	// Results of one build/test configuration. Each member holds one inner
	// vector per classifier; each inner vector holds one entry per repetition.
	struct build_case
	{
		string key;                              // configuration label (swept value)
		vector<vector<double> > build_times;
		vector<vector<double> > test_times;
		vector<vector<double> > error_rates;
		vector<vector<double> > concept_counts;
	};
	// Results of one concept-cutter configuration (per classifier, per repetition).
	struct cutter_case
	{
		string key;                              // configuration label (swept value)
		vector<vector<double> > times;
		vector<vector<double> > est_verrors;     // estimated validation errors
		vector<vector<double> > verrors;         // actual validation errors
		vector<vector<double> > chunk_counts;
	};
	// Results of one concept-clustering configuration (per classifier, per repetition).
	struct clustering_case
	{
		string key;                              // configuration label (swept value)
		vector<vector<double> > times;
		vector<vector<double> > verrors;
		vector<vector<double> > concept_counts;
	};
	// Per-iteration trace of the VEM clustering: one inner vector per
	// iteration index, one entry per repetition.
	struct iteration_case
	{
		vector<vector<double> > concept_counts;
		vector<vector<double> > verrors;
	};

	// Opens the output file and prepares the tester and the base classifier.
	// Seeds rand() with a fixed value so runs are reproducible (the
	// commented-out time(0) seed is kept for reference).
	explicit experimenter(const string& OutFileName)
	{
		//srand((int)time(0));
		srand(0);  // fixed seed: deterministic, repeatable experiments
		clogger()->set_level(logger::finer_level);
		Tester.reset(new classifier_tester);
		OutFile.reset(OutFileName, "w");
		BaseClassifier.reset(new c45r8_classifier);
	}
	// Sweeps the build-set length.
	// Which == 0: synthetic hyperplane stream, fixed 100000-record test set.
	// Which == 1: KDD-Cup data file; skip/build counts partition the first
	//             1000000 records, TestCount == -1 tests on the remainder.
	// Any other value leaves all counts zero and Stream unset — callers must
	// pass 0 or 1.
	void test_vs_build_len(int Which)
	{
		add_full_suite();
		smart_ptr<data_stream> Stream;
		vector<build_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			int BuildCount = 0, SkipCount = 0, TestCount = 0;
			if (Which == 0)
			{
				BuildCount = (I + 1) * 10000;
				SkipCount = 0;
				TestCount = 100000;
			}
			else if (Which == 1)
			{
				BuildCount = (I + 1) * 100000;
				SkipCount = 1000000 - BuildCount;
				TestCount = -1;  // -1: consume the rest of the stream for testing
			}
			Cases[I].key = to_string(BuildCount);
			for (int J = 0; J < 10; J++)
			{
				if (Which == 0) Stream = make_conceptual_stream(&*make_hyperplane_generator(4, 3), 0.001);
				else if (Which == 1) Stream = make_data_file_stream("KDDCUPx/kddcup.data");
				test_build(&*Stream, SkipCount, BuildCount, TestCount, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Sweeps the concept-chunk size of the synthetic stream (the changing
	// frequency is 1/ChunkSize) with fixed build and test lengths.
	void test_vs_chunk_size()
	{
		add_full_suite();
		smart_ptr<data_stream> Stream;
		vector<build_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			int ChunkSize = (I + 1) * 100;
			Cases[I].key = to_string(ChunkSize);
			for (int J = 0; J < 10; J++)
			{
				Stream = make_conceptual_stream(&*make_hyperplane_generator(4, 3), 1.0 / ChunkSize);
				test_build(&*Stream, 0, 100000, 100000, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Sweeps the dimensionality of the hyperplane generator (1..10) with a
	// fixed concept count of 4.
	void test_vs_dimensionality()
	{
		add_full_suite();
		smart_ptr<data_stream> Stream;
		vector<build_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			int Dim = I + 1;
			Cases[I].key = to_string(Dim);
			for (int J = 0; J < 10; J++)
			{
				Stream = make_conceptual_stream(&*make_hyperplane_generator(4, Dim), 0.001);
				test_build(&*Stream, 0, 100000, 100000, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Sweeps the number of underlying concepts (2, 4, ..., 20) with a fixed
	// dimensionality of 3.
	void test_vs_concept_count()
	{
		add_full_suite();
		smart_ptr<data_stream> Stream;
		vector<build_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			int ConceptCount = (I + 1) * 2;
			Cases[I].key = to_string(ConceptCount);
			for (int J = 0; J < 10; J++)
			{
				Stream = make_conceptual_stream(&*make_hyperplane_generator(ConceptCount, 3), 0.001);
				test_build(&*Stream, 0, 100000, 100000, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Sweeps the amount of data fed to the concept cutters (10000..100000).
	void test_cutter_vs_build_len()
	{
		add_cutter_suite();
		smart_ptr<data_stream> Stream;
		vector<cutter_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			int DataCount = (I + 1) * 10000;
			Cases[I].key = to_string(DataCount);
			for (int J = 0; J < 10; J++)
			{
				Stream = make_conceptual_stream(&*make_hyperplane_generator(4, 3), 0.001);
				test_cutter(&*Stream, DataCount, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Sweeps the concept count seen by the cutters with fixed data size.
	void test_cutter_vs_concept_count()
	{
		add_cutter_suite();
		smart_ptr<data_stream> Stream;
		vector<cutter_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			int ConceptCount = (I + 1) * 2;
			Cases[I].key = to_string(ConceptCount);
			for (int J = 0; J < 10; J++)
			{
				Stream = make_conceptual_stream(&*make_hyperplane_generator(ConceptCount, 3), 0.001);
				test_cutter(&*Stream, 100000, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Sweeps the VEM cutter's amplification factor (1.1..2.0); the classifier
	// list is rebuilt per configuration because the parameter is baked into
	// the classifier at construction.
	void test_cutter_vs_amplification()
	{
		smart_ptr<data_stream> Stream;
		vector<cutter_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			double Amplification = 1 + (I + 1) * 0.1;
			Cases[I].key = format_string("%.1lf", Amplification);
			Classifiers.clear();
			Classifiers.push_back(make_vem_classifier(1, Amplification));
			for (int J = 0; J < 10; J++)
			{
				Stream = make_conceptual_stream(&*make_hyperplane_generator(4, 3), 0.001);
				test_cutter(&*Stream, 100000, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Sweeps the VEM cutter's move offset (0.1..1.0), rebuilding the
	// classifier per configuration as above.
	void test_cutter_vs_move_offset()
	{
		smart_ptr<data_stream> Stream;
		vector<cutter_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			double MoveOffset = (I + 1) * 0.1;
			Cases[I].key = format_string("%.1lf", MoveOffset);
			Classifiers.clear();
			Classifiers.push_back(make_vem_classifier(MoveOffset));
			for (int J = 0; J < 10; J++)
			{
				Stream = make_conceptual_stream(&*make_hyperplane_generator(4, 3), 0.001);
				test_cutter(&*Stream, 100000, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Sweeps an approximation parameter (0.05..0.50); amplification and move
	// offset are derived from it (amplification = 1/sqrt(approx), offset =
	// amplification - 1) so one knob controls both.
	void test_cutter_vs_approximation()
	{
		smart_ptr<data_stream> Stream;
		vector<cutter_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			double Approx = (I + 1) * 0.05;
			double Amplification = 1 / sqrt(Approx);
			double MoveOffset = Amplification - 1;
			Cases[I].key = format_string("%.2lf", Approx);
			Classifiers.clear();
			Classifiers.push_back(make_vem_classifier(MoveOffset, Amplification));
			for (int J = 0; J < 10; J++)
			{
				Stream = make_conceptual_stream(&*make_hyperplane_generator(4, 3), 0.001);
				test_cutter(&*Stream, 100000, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Sweeps the number of pre-built chunks handed to the clustering step.
	void test_clustering_vs_chunk_count()
	{
		add_cutter_suite();
		smart_ptr<conceptual_data_generator> Gen;
		vector<clustering_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			int ChunkCount = (I + 1) * 10;
			Cases[I].key = to_string(ChunkCount);
			for (int J = 0; J < 10; J++)
			{
				Gen = make_hyperplane_generator(4, 3);
				test_clustering(&*Gen, 1000, ChunkCount, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Sweeps the concept count used by the clustering step at a fixed chunk
	// size (1000) and chunk count (100).
	void test_clustering_vs_concept_count()
	{
		add_cutter_suite();
		smart_ptr<conceptual_data_generator> Gen;
		vector<clustering_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			int ConceptCount = (I + 1) * 2;
			Cases[I].key = to_string(ConceptCount);
			for (int J = 0; J < 10; J++)
			{
				Gen = make_hyperplane_generator(ConceptCount, 3);
				test_clustering(&*Gen, 1000, 100, Cases[I]);
			}
			print_case(OutFile, Cases[I]);
		}
		print_cases(OutFile, Cases);
	}
	// Records how concept count and validation error evolve per VEM iteration
	// for different true concept counts. Uses only the VEM classifier and a
	// custom per-iteration print format (no print_cases summary).
	void test_iteration_vs_concept_count()
	{
		Classifiers.push_back(make_vem_classifier());
		smart_ptr<conceptual_data_generator> Gen;
		vector<iteration_case> Cases(10);
		for (int I = 0; I < (int)Cases.size(); I++)
		{
			int ConceptCount = (I + 1) * 2;
			for (int J = 0; J < 10; J++)
			{
				Gen = make_hyperplane_generator(ConceptCount, 3);
				test_iteration(&*Gen, 1000, 100, Cases[I]);
			}
			checked_fprintf(OutFile, "concept count: %d\n", ConceptCount);
			for (int J = 0; J < (int)Cases[I].concept_counts.size(); J++)
			{
				print_stats(OutFile, Cases[I].concept_counts[J], 3);
				print_stats(OutFile, Cases[I].verrors[J], 7);
				checked_fprintf(OutFile, "\n");
			}
			fflush(OutFile);
		}
	}

private:
	smart_ptr<classifier_tester> Tester;
	smart_file OutFile;
	smart_ptr<classifier> BaseClassifier;
	vector<smart_ptr<stream_classifier> > Classifiers;

	// Registers the four classifiers compared in the build/test experiments.
	void add_full_suite()
	{
		Classifiers.push_back(make_weight_by_accuracy_ensemble_classifier());
		Classifiers.push_back(make_repro_classifier());
		Classifiers.push_back(make_high_order_classifier());
		Classifiers.push_back(make_vem_classifier());
	}
	// Registers the two cutter/clustering-capable classifiers.
	void add_cutter_suite()
	{
		Classifiers.push_back(make_high_order_classifier());
		Classifiers.push_back(make_vem_classifier());
	}

	// Builds a hyperplane data generator with the given concept count and
	// dimensionality.
	smart_ptr<hyperplane_generator> make_hyperplane_generator(int ConCount, int Dim)
	{
		smart_ptr<hyperplane_generator> Gen(new hyperplane_generator);
		Gen->set_concept_count(ConCount);
		Gen->set_dimensionality(Dim);
		return Gen;
	}
	// Wraps a generator in an opened conceptual stream that changes concept
	// at the given rate (records per change = 1/ChangRate on average, or
	// exactly when ExactPeriod is set).
	smart_ptr<conceptual_stream> make_conceptual_stream(conceptual_data_generator* Gen, double ChangRate, bool ExactPeriod = false)
	{
		smart_ptr<conceptual_stream> Stream(new conceptual_stream);
		Stream->set_generator(Gen);
		Stream->set_changing_freq(ChangRate);
		Stream->set_exact_period(ExactPeriod);
		Stream->open();
		return Stream;
	}
	// Opens a data-file stream, loading the scheme from the companion
	// ".scheme" file next to the data file.
	smart_ptr<data_file_stream> make_data_file_stream(const string& Filename)
	{
		return make_data_file_stream(Filename, make_scheme(change_filename_ext(Filename, "scheme")));
	}
	// Opens a data-file stream with an explicit record scheme.
	smart_ptr<data_file_stream> make_data_file_stream(const string& Filename, const record_scheme& Scheme)
	{
		smart_ptr<data_file_stream> Stream(new data_file_stream);
		Stream->set_scheme(Scheme);
		Stream->open(Filename, "r");
		return Stream;
	}
	// Ensemble baseline: 20 members weighted by accuracy over 100-record chunks.
	smart_ptr<weight_by_accuracy_ensemble_classifier> make_weight_by_accuracy_ensemble_classifier()
	{
		smart_ptr<weight_by_accuracy_ensemble_classifier> Val(new weight_by_accuracy_ensemble_classifier);
		Val->set_chunk_size(100);
		Val->set_classifier_count(20);
		return Val;
	}
	// RePro baseline with the paper's published parameter settings.
	smart_ptr<repro_classifier> make_repro_classifier()
	{
		smart_ptr<repro_classifier> Val(new repro_classifier);
		Val->set_window_size(20);
		Val->set_stable_learning_size(500);
		Val->set_trigger_error_threshold(1 - 0.7);  // i.e. trigger below 70% accuracy
		Val->set_probability_threshold(0.8);
		Val->set_accuracy_threshold(0.8);
		Val->set_equivalence_threshold(0.8);
		return Val;
	}
	// High-order classifier with default settings.
	smart_ptr<high_order_classifier> make_high_order_classifier()
	{
		return make_smart_ptr(new high_order_classifier);
	}
	// VEM classifier whose cutter is tuned with the given move offset and
	// amplification.
	smart_ptr<vem_classifier> make_vem_classifier(double MoveOffset = 1, double Amplification = 2)
	{
		using namespace vem;
		smart_ptr<vem_classifier> Val(new vem_classifier);
		// NOTE(review): assumes vem_classifier's default cutter is a
		// vem_concept_cutter; a null result here would crash — confirm.
		vem_concept_cutter* Cutter = dynamic_cast<vem_concept_cutter*>(Val->get_cutter());
		Cutter->set_move_offset(MoveOffset);
		Cutter->set_amplification(Amplification);
		return Val;
	}

	// Arithmetic mean of P; returns 0 for an empty vector instead of 0/0 NaN.
	static double avg_of(const vector<double>& P)
	{
		if (P.empty()) return 0;
		double Sum = 0;
		for (int I = 0; I < (int)P.size(); I++) Sum += P[I];
		return Sum / P.size();
	}
	// Population standard deviation of P; returns 0 for an empty vector.
	static double dev_of(const vector<double>& P)
	{
		if (P.empty()) return 0;
		double Sum = 0;
		double Avg = avg_of(P);
		for (int I = 0; I < (int)P.size(); I++) Sum += (P[I] - Avg) * (P[I] - Avg);
		return sqrt(Sum / P.size());
	}
	// Prints "mean(deviation)" with the given number of fractional digits.
	void print_stats(FILE* File, const vector<double>& P, int  Digits)
	{
		checked_fprintf(File, "%10.*lf(%10.*lf)", Digits, avg_of(P), Digits, dev_of(P));
	}
	// One output line for classifier Cl of a build_case.
	void print_case(FILE* File, const build_case& Case, int Cl)
	{
		checked_fprintf(File, "%10s", Case.key.c_str());
		print_stats(File, Case.build_times[Cl], 3);
		print_stats(File, Case.test_times[Cl], 3);
		print_stats(File, Case.error_rates[Cl], 7);
		print_stats(File, Case.concept_counts[Cl], 3);
		checked_fprintf(File, "\n");
		fflush(File);
	}
	// One output line for classifier Cl of a cutter_case.
	void print_case(FILE* File, const cutter_case& Case, int Cl)
	{
		checked_fprintf(File, "%10s", Case.key.c_str());
		print_stats(File, Case.times[Cl], 3);
		print_stats(File, Case.est_verrors[Cl], 7);
		print_stats(File, Case.verrors[Cl], 7);
		print_stats(File, Case.chunk_counts[Cl], 3);
		checked_fprintf(File, "\n");
		fflush(File);
	}
	// One output line for classifier Cl of a clustering_case.
	void print_case(FILE* File, const clustering_case& Case, int Cl)
	{
		checked_fprintf(File, "%10s", Case.key.c_str());
		print_stats(File, Case.times[Cl], 3);
		print_stats(File, Case.verrors[Cl], 7);
		print_stats(File, Case.concept_counts[Cl], 3);
		checked_fprintf(File, "\n");
		fflush(File);
	}
	// Prints one case for every registered classifier (interleaved view).
	template<class T>
	void print_case(FILE* File, const T& Case)
	{
		for (int Cl = 0; Cl < (int)Classifiers.size(); Cl++)
			print_case(File, Case, Cl);
	}
	// Prints all cases grouped per classifier (summary view), with the
	// classifier's RTTI name as a section header.
	template<class T>
	void print_cases(FILE* File, const vector<T>& Cases)
	{
		for (int Cl = 0; Cl < (int)Classifiers.size(); Cl++)
		{
			checked_fprintf(File, "Classifier: %s\n", typeid(*Classifiers[Cl]).name());
			for (int I = 0; I < (int)Cases.size(); I++) print_case(File, Cases[I], Cl);
		}
	}
	// Runs one skip/build/test pass on Stream for all registered classifiers
	// and appends the measured times, error rates, and concept counts to Case.
	void test_build(data_stream* Stream, int SkipCount, int BuildCount, int TestCount, build_case& Case)
	{
		Tester->set_scheme(Stream->get_scheme());
		Tester->set_base_classifier(&*BaseClassifier);
		Tester->set_classifiers(make_subarray(Classifiers).convert<stream_classifier* const>());
		Tester->skip(Stream, SkipCount);
		Tester->build(Stream, BuildCount);
		Tester->test(Stream, TestCount);
		Case.build_times.resize(Classifiers.size());
		Case.test_times.resize(Classifiers.size());
		Case.error_rates.resize(Classifiers.size());
		Case.concept_counts.resize(Classifiers.size());
		for (int I = 0; I < (int)Classifiers.size(); I++)
		{
			Case.build_times[I].push_back(Tester->get_build_results()[I].build_time);
			Case.test_times[I].push_back(Tester->get_test_results()[I].test_time);
			Case.error_rates[I].push_back(Tester->get_test_results()[I].get_error_rate());
			Case.concept_counts[I].push_back(Tester->get_classifiers()[I]->get_concept_count());
		}
	}
	// Reads DataCount records from Stream, then runs each classifier's cloned
	// concept cutter over them, recording time, estimated/actual validation
	// errors, and resulting chunk count.
	void test_cutter(data_stream* Stream, int DataCount, cutter_case& Case)
	{
		using namespace high_order;
		BaseClassifier->set_domain(Stream->get_scheme().to_domain());
		vector<smart_ptr<stream_record> > Data(DataCount);
		for (int I = 0; I < DataCount; I++)
		{
			Data[I].reset(new stream_record);
			Stream->read(*Data[I]);
		}
		Case.times.resize(Classifiers.size());
		Case.est_verrors.resize(Classifiers.size());
		Case.verrors.resize(Classifiers.size());
		Case.chunk_counts.resize(Classifiers.size());
		for (int I = 0; I < (int)Classifiers.size(); I++)
		{
			// NOTE(review): assumes every registered classifier is a
			// high_order_classifier (true for the suites that call this) —
			// a non-matching type would make the cast return null. Confirm.
			smart_ptr<concept_cutter> TheCutter =
				dynamic_cast<high_order_classifier*>(&*Classifiers[I])->get_cutter()->clone();
			TheCutter->set_base_classifier(&*BaseClassifier);
			TheCutter->set_base_validation_dataset(&*make_smart_ptr(new cross_validation_dataset(2)));
			TheCutter->set_data(make_subarray(Data).convert<const record* const>());
			timer Timer;
			Timer.start();
			TheCutter->compute();
			Case.times[I].push_back(Timer.time());
			Case.est_verrors[I].push_back(TheCutter->compute_estimated_validation_error());
			Case.verrors[I].push_back(TheCutter->compute_validation_error());
			Case.chunk_counts[I].push_back(TheCutter->get_chunk_count());
		}
	}
	// Generates ChunkCount pre-labelled chunks, then runs each classifier's
	// cloned clustering over them, recording time, validation error, and the
	// discovered concept count.
	void test_clustering(conceptual_data_generator* Gen, int ChunkSize, int ChunkCount, clustering_case& Case)
	{
		using namespace high_order;
		Gen->open();
		BaseClassifier->set_domain(Gen->get_scheme().to_domain());
		vector<vector<smart_ptr<record> > > Data;
		vector<smart_ptr<validation_dataset> > VDatasets;
		vector<smart_ptr<validation_classifier> > VClassifiers;
		make_clustering_data(Gen, ChunkSize, ChunkCount, Data, VDatasets, VClassifiers);
		Case.times.resize(Classifiers.size());
		Case.verrors.resize(Classifiers.size());
		Case.concept_counts.resize(Classifiers.size());
		for (int I = 0; I < (int)Classifiers.size(); I++)
		{
			// NOTE(review): same high_order_classifier cast assumption as in
			// test_cutter — confirm all callers register compatible types.
			smart_ptr<concept_clustering> TheClustering =
				dynamic_cast<high_order_classifier*>(&*Classifiers[I])->get_clustering()->clone();
			TheClustering->set_base_classifier(&*BaseClassifier);
			TheClustering->set_chunk_count(ChunkCount);
			for (int J = 0; J < ChunkCount; J++)
				TheClustering->set_chunk(J, VDatasets[J]->clone(), VClassifiers[J]->clone(), VClassifiers[J]->test_error(&*VDatasets[J]));
			timer Timer;
			Timer.start();
			TheClustering->compute();
			Case.times[I].push_back(Timer.time());
			Case.verrors[I].push_back(TheClustering->compute_validation_error());
			Case.concept_counts[I].push_back(TheClustering->get_concept_count());
		}
	}
	// Runs the VEM clustering (taken from Classifiers[0]) once and appends
	// its per-iteration concept counts and errors to Case. Runs that converge
	// in fewer iterations than previous ones are padded with their final
	// values so every iteration row has one entry per repetition.
	void test_iteration(conceptual_data_generator* Gen, int ChunkSize, int ChunkCount, iteration_case& Case)
	{
		using namespace vem;
		Gen->open();
		BaseClassifier->set_domain(Gen->get_scheme().to_domain());
		vector<vector<smart_ptr<record> > > Data;
		vector<smart_ptr<validation_dataset> > VDatasets;
		vector<smart_ptr<validation_classifier> > VClassifiers;
		make_clustering_data(Gen, ChunkSize, ChunkCount, Data, VDatasets, VClassifiers);

		// NOTE(review): assumes Classifiers[0] is a high_order_classifier
		// whose clustering converts to vem_concept_clustering — confirm.
		smart_ptr<vem_concept_clustering> TheClustering =
			dynamic_convert<vem_concept_clustering>(
				dynamic_cast<high_order_classifier*>(&*Classifiers[0])->get_clustering()->clone());
		TheClustering->set_base_classifier(&*BaseClassifier);
		TheClustering->set_chunk_count(ChunkCount);
		for (int J = 0; J < ChunkCount; J++)
			TheClustering->set_chunk(J, VDatasets[J]->clone(), VClassifiers[J]->clone(), VClassifiers[J]->test_error(&*VDatasets[J]));
		TheClustering->compute();
		int IterCount = TheClustering->get_iteration_concept_counts().size();
		// Grow Case to IterCount rows; new rows start as copies of the last
		// existing row so earlier (shorter) runs stay padded consistently.
		while ((int)Case.concept_counts.size() < IterCount)
		{
			if (Case.concept_counts.empty())
			{
				Case.concept_counts.resize(1);
				Case.verrors.resize(1);
			}
			else
			{
				Case.concept_counts.push_back(Case.concept_counts.back());
				Case.verrors.push_back(Case.verrors.back());
			}
		}
		// Append this run's trace; rows beyond its convergence point repeat
		// its final values.
		for (int I = 0; I < (int)Case.concept_counts.size(); I++)
		{
			if (I < IterCount)
			{
				Case.concept_counts[I].push_back(TheClustering->get_iteration_concept_counts()[I]);
				Case.verrors[I].push_back(TheClustering->get_iteration_errors()[I]);
			}
			else
			{
				Case.concept_counts[I].push_back(TheClustering->get_iteration_concept_counts().back());
				Case.verrors[I].push_back(TheClustering->get_iteration_errors().back());
			}
		}
	}
	// Generates ChunkCount chunks of ChunkSize records each (concepts assigned
	// round-robin via I % concept count) and builds, per chunk, a 2-fold
	// cross-validation dataset plus a classifier trained on it.
	void make_clustering_data(conceptual_data_generator* Gen, int ChunkSize, int ChunkCount, vector<vector<smart_ptr<record> > >& Data, vector<smart_ptr<high_order::validation_dataset> >& VDatasets, vector<smart_ptr<high_order::validation_classifier> >& VClassifiers)
	{
		int ConCount = Gen->get_concept_count();
		Data.resize(ChunkCount);
		VDatasets.resize(ChunkCount);
		VClassifiers.resize(ChunkCount);
		for (int I = 0; I < ChunkCount; I++)
		{
			Data[I].resize(ChunkSize);
			for (int J = 0; J < ChunkSize; J++)
			{
				Data[I][J].reset(new record);
				Gen->generate(I % ConCount, *Data[I][J]);
			}
			VDatasets[I].reset(new high_order::cross_validation_dataset(2));
			VDatasets[I]->build(make_subarray(Data[I]).convert<const record* const>());
			VClassifiers[I] = VDatasets[I]->train(&*BaseClassifier);
		}
	}
};

}
