package lucene;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import utils.Stemmer;
import utils.WordUtil;

import com.mysql.jdbc.Connection;
import com.mysql.jdbc.ResultSet;
import com.mysql.jdbc.Statement;

import data.TestUserSelection;
import data.TrainTestPaperRetrieval;

public class BuildUserTagIndex {
	private String user;
	private Connection conn;
	private HashSet<String> tagPapers = new HashSet<String>();
	private List<String> trainPapers;
	private List<String> testPapers;

	//private WordUtil stopper;
	//private Stemmer stemmer;

	private PaperSearcher searcher;

	public BuildUserTagIndex(String u, Connection c, PaperSearcher searcher)
			throws Exception {
		this.user = u;
		this.conn = c;

		this.searcher = searcher;
		extractUserTagPapers();
		//splitTrainTest();
		retrieveTrainTest();
		extractTagProb();

	}
	
	private void retrieveTrainTest() throws Exception{
		HashSet<String> tests = TrainTestPaperRetrieval.retrieveTest(user);
		testPapers = new ArrayList<String>();
		for(String str : tests){
			testPapers.add(str);
		}
		HashSet<String> trains = TrainTestPaperRetrieval.retrieveTrain(user);
		trainPapers = new ArrayList<String>();
		for(String str : trains){
			trainPapers.add(str);
		}
	}

	private void extractUserTagPapers() throws Exception {
		Statement stmt = null;
		ResultSet rs = null;
		try {
			stmt = (Statement) conn.createStatement();
			rs = (ResultSet) stmt
					.executeQuery("SELECT content_id from tas_new where content_type = '2' and user = '"
							+ user + "'");
			while (rs.next()) {
				String content = rs.getString("content_id");
				tagPapers.add(content);
			}

		} finally {
			if (rs != null) {
				try {
					rs.close();
				} catch (SQLException sqlEx) {
				} // ignore
				rs = null;
			}

			if (stmt != null) {
				try {
					stmt.close();
				} catch (SQLException sqlEx) {
				} // ignore
				stmt = null;
			}
		}

	}

	public void index() throws Exception {
		indexTrainPapers();
		indexTestPapers();
	}

	private void indexTrainPapers() throws Exception {
		File indexDir = new File("D:\\CityU\\project\\bibsonomy\\experiment\\"
				+ user + "\\index\\");
		if (!indexDir.exists())
			indexDir.mkdir();

		IndexWriter writer = new IndexWriter(FSDirectory.open(indexDir),
				new StandardAnalyzer(Version.LUCENE_CURRENT), true,
				IndexWriter.MaxFieldLength.LIMITED);

		try {
			writer.setUseCompoundFile(false);

			for (String paper : trainPapers) {
				//System.out.println(paper);
				Document p = searcher.retrieveByPid(paper);
				if(p==null)
					System.out.println("user\t" + user + "\tnull paper\t" + paper);

				String text = p.get("content");

				String auStr = p.get("author");
				
				Document doc = new Document();

				doc.add(new Field("content_id", paper, Field.Store.YES,
						Field.Index.ANALYZED, Field.TermVector.NO));

				doc.add(new Field("content", text, Field.Store.YES,
						Field.Index.ANALYZED, Field.TermVector.YES));
				doc.add(new Field("author", auStr, Field.Store.YES,
						Field.Index.ANALYZED, Field.TermVector.YES));
				writer.addDocument(doc);
			}
		} finally {

			writer.close();

		}

	}

	private void indexTestPapers() throws Exception {

		File indexDir = new File("D:\\CityU\\project\\bibsonomy\\experiment\\"
				+ user + "\\testIndex\\");
		if (!indexDir.exists())
			indexDir.mkdir();

		IndexWriter writer = new IndexWriter(FSDirectory.open(indexDir),
				new StandardAnalyzer(Version.LUCENE_CURRENT), true,
				IndexWriter.MaxFieldLength.LIMITED);

		try {
			writer.setUseCompoundFile(false);

			for (String paper : testPapers) {
				Document p = searcher.retrieveByPid(paper);

				String text = p.get("content");

				Document doc = new Document();
				String auStr = p.get("author");
				doc.add(new Field("content_id", paper, Field.Store.YES,
						Field.Index.ANALYZED, Field.TermVector.NO));

				doc.add(new Field("content", text, Field.Store.YES,
						Field.Index.ANALYZED, Field.TermVector.YES));
				doc.add(new Field("author", auStr, Field.Store.YES,
						Field.Index.ANALYZED, Field.TermVector.YES));
				
				writer.addDocument(doc);
			}
		} finally {

			writer.close();

		}

	}

	private void splitTrainTest() throws Exception {
		String[] arr = tagPapers.toArray(new String[0]);
		List<String> neighborList = Arrays.asList(arr);
		Collections.shuffle(neighborList);
		int size = tagPapers.size();
		int testSize = 0;
		/*
		 * 10 cross-validation
		 */
		if (tagPapers.size() <= 10) {
			testSize = 1;
		} else {
			testSize = (size / 10) + 1;
		}
		List<String> testCases = new ArrayList<String>();
		List<String> trainCases = new ArrayList<String>();

		for (int i = 0; i < neighborList.size(); i++) {
			String node = neighborList.get(i);
			if (i < testSize) {
				testCases.add(node);
				continue;
			}
			trainCases.add(node);
		}

		this.trainPapers = trainCases;
		this.testPapers = testCases;

		File dir = new File("D:\\CityU\\project\\bibsonomy\\experiment\\"
				+ user);
		if (!dir.exists()) {
			dir.mkdir();
		}

		String trainPath = "D:\\CityU\\project\\bibsonomy\\experiment\\" + user
				+ "\\train.txt";
		BufferedWriter writer1 = new BufferedWriter(new FileWriter(trainPath));

		try {

			for (String str : trainCases) {
				writer1.write(str);
				writer1.newLine();
			}

		} finally {
			writer1.flush();
			writer1.close();
		}

		String testPath = "D:\\CityU\\project\\bibsonomy\\experiment\\" + user
				+ "\\test.txt";
		BufferedWriter writer2 = new BufferedWriter(new FileWriter(testPath));

		try {
			for (String str : testCases) {
				writer2.write(str);
				writer2.newLine();
			}
		} finally {
			writer2.flush();
			writer2.close();
		}

	}

	private void extractTagProb() throws Exception {
		Statement stmt = null;
		ResultSet rs = null;
		HashMap<String, Double> map = new HashMap<String, Double>();
		String path = "D:\\CityU\\project\\bibsonomy\\experiment\\" + user
				+ "\\tagProb.txt";
		BufferedWriter writer = new BufferedWriter(new FileWriter(path));
		try {
			stmt = (Statement) conn.createStatement();
			rs = (ResultSet) stmt
					.executeQuery("SELECT tag, count(distinct(content_id)) as cnt from tas_new where user = '"
							+ user + "' group by tag;");
			double total = 0;
			while (rs.next()) {
				String tag = rs.getString("tag");
				double cnt = rs.getDouble("cnt");
				map.put(tag, cnt);
				total += cnt;
			}

			//System.out.println("total\t" + total);

			for (Map.Entry<String, Double> entry : map.entrySet()) {
				String tag = entry.getKey();
				double v = entry.getValue();
				v = v / total;
				writer.write(tag + "\t" + v);
				writer.newLine();
			}

		} finally {
			writer.flush();
			writer.close();
			if (rs != null) {
				try {
					rs.close();
				} catch (SQLException sqlEx) {
				} // ignore
				rs = null;
			}

			if (stmt != null) {
				try {
					stmt.close();
				} catch (SQLException sqlEx) {
				} // ignore
				stmt = null;
			}
		}

	}

	public static void main(String[] args) throws Exception {
		//String user = "1";
		List<String> users = TestUserSelection.retrieveTestUsers();
		Class.forName("com.mysql.jdbc.Driver").newInstance();
		String url = "jdbc:mysql://localhost:3306/" + "bibsonomy";
		String dbuser = "root";
		String dbpass = "123456";
		Connection con = (Connection) DriverManager.getConnection(url, dbuser,
				dbpass);

		File indexDir = new File("D:\\CityU\\project\\bibsonomy\\"
				+ "\\BibIndex\\");
		PaperSearcher searcher = new PaperSearcher(indexDir);

		for (String user : users) {
			/*
			if (user.equalsIgnoreCase("1") || user.equalsIgnoreCase("129"))
				continue;
			 */
			System.out.println("index testuser\t" + user);
			BuildUserTagIndex indexer = new BuildUserTagIndex(user, con,
					searcher);
			indexer.index();
		}
		//indexer.splitTrainTest();
		//indexer.extractTagProb();

	}

}
