package exp;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;

import crawl.topsy.ReadResult;

import tools.FileReaderEnhance;
import exp.ranking.Ex2;
import exp.ranking.Ex4;
import exp.ranking.Ex5;

/**
 * All input/output files are UTF-8 encoding
 * 
 * @author Lanjun ZHOU
 *
 */
public class RunAll {
	/**
	 * Runs the full data-processing pipeline end to end: converts crawled
	 * Topsy data to plain text, extracts/downloads URLs in batches, pulls
	 * titles and highlights from the downloaded pages, aligns tweets to
	 * CNN/USA Today news, and ranks news sentences against the highlights
	 * with a ROUGE score. Stdout/stderr are redirected to ./logs/.
	 *
	 * Most steps are incremental: if a step's target file already exists,
	 * that step does nothing (see per-step comments below).
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {

		try {
			// The log directory may not exist on a fresh checkout; without it
			// the PrintStream constructors below throw FileNotFoundException.
			new File("./logs").mkdirs();
			System.setOut(new PrintStream(new File("./logs/RunAll.out"), "UTF-8"));
			System.setErr(new PrintStream(new File("./logs/RunAll.err"), "UTF-8"));
		} catch (FileNotFoundException | UnsupportedEncodingException e) {
			// Log redirection is best-effort: fall back to the default
			// stdout/stderr rather than aborting the whole pipeline.
			e.printStackTrace();
		}

		String path = "data/_newData/";

		//Convert crawled data to plain text
		ReadResult rr = new ReadResult();
		rr.run("data/objs/topsyCrawl/", path + "plainText/");

		//extract all urls
		//if the target file exists, will do nothing
		String input1 = path + "plainText/";
		String output1 = path + "obj/urlContent/";
		EXP3_0 exp0 = new EXP3_0();
		exp0.run(input1, output1);

		//divide the urls to batches for downloading
		int batch = 2000;
		String output2 = path + "obj/urlContent1/";
		EXP3_3 exp3 = new EXP3_3();
		exp3.run(output1, output2, batch);

		//download the urls for the first time
		//if the target file exists, will do nothing
		String output3 = path + "obj/urlContent1_1/";
		EXP3_1 exp1 = new EXP3_1();
		exp1.run(output2, output3);

		//download all URLs and find the real URL
		//if the target file exists, will do nothing
		int thread = 10;
		String output4 = path + "obj/urlContent2/";
		EXP3_4 exp4 = new EXP3_4();
		exp4.run(output3, output4, thread);

		//extract titles for all webpages
		//for CNN and usatoday, extract highlights and contents
		//if the target file exists, will do nothing
		String output5 = path + "obj/highlights/";
		String[] topicList = FileReaderEnhance.readToString("data/_newData/topiclist", "UTF-8").split("\n");
		EXP5 exp5 = new EXP5();
		exp5.run(topicList, output4, output5);

		//Output pure texts for the CNN/USATODAY highlights and all titles for all html pages.
		String output6 = path + "highlights/";
		String outputPathSelect = path + "highlights_CNN_USATODAY/";
		EXP6 exp6 = new EXP6();
		exp6.run(output5, output6, outputPathSelect);

		//Align tweets to cnn, USA Today news
		String tPath = path + "plainText/";
		String output8 = path + "obj/highlights_align/";
		String output8_1 = path + "news_2_tweets/";
		EXP8 exp8 = new EXP8();
		exp8.run(output5, tPath, output8, output8_1);

		//Evaluate the sentences of the news according to the highlights using ROUGE score
		String hPath = path + "obj/highlights/";
		String hcPath = path + "obj/highlights_contents_ROUGE/";
		int N = 1;
		Ex2 ranking1 = new Ex2();
		ranking1.run(hPath, hcPath, N);

//		//Evaluate the tweets of the topic according to highlights - content Mapping
//		//Evaluation method: ROUGE-1
//		String tfPath = path + "plainText_filtered/";
//		String output9 = path + "plainText_filtered_ranked/";
//		int TOPNCONTENTS = 1;
//		int TOPNTWEETS = 3;
//		Ex4 ranking2 = new Ex4();
//		ranking2.run(hcPath, tfPath, output9, TOPNCONTENTS, TOPNTWEETS);
//
//		//Evaluate the tweets of the topic according to only highlights
//		//Evaluation method: Cosine
//		String output10 = path + "plainText_filtered_ranked_1/";
//		Ex5 ranking3 = new Ex5();
//		ranking3.run(tfPath, hcPath, output10, 3);
	}
}
