import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Random;
import java.util.Vector;

import org.htmlparser.util.ParserException;

import websphinx.Link;

/**
 * Scripts database used by the detector: learns the scripts of an application
 * during the learning phase and classifies page scripts during detection.
 *
 * @author ofer
 */
public class ScriptsDB2   {

	public enum AppMode {LEARN, DETECT, DETECT_FP, DETECT_FN}
	public static String newline = System.getProperty("line.separator");

	// Scripts DB: each script node is stored under BOTH its regular hash and its
	// alternate (normalized) hash, so lookups by either hash are O(1).
	private HashMap<Integer,ScriptNode> appScripts;
	Vector<Integer> attackScriptsList =null; //alternate hash values of attack/FP scripts already reported (prevents double counting)
	Vector<Integer> normalizedScriptsList =null; //regular hash values of scripts that matched the DB only through their normalized form
	HashMap<Integer,String> learnedURLs = null; //hash of the query-less URL ("protocol://host/path") -> full URL, for every URL visited while learning
	private int newScriptCounter = 0; //per-CheckScripts() counter of newly reported scripts (numbers the report entries)
	private BufferedWriter out; //report file: "LrnDB.txt" in LEARN mode, "DetDB.txt" otherwise
	private Log detectorLog = null;
	private DetectorResults detectorResults = null; //NOTE(review): assigned in the constructor only, never read afterwards

	/**
	 * Main constructor. Initializes the internal script/URL tables and opens the
	 * report file: "LrnDB.txt" when {@code mode} equals {@code AppMode.LEARN},
	 * "DetDB.txt" otherwise.
	 *
	 * @param mode        application mode name, compared against {@link AppMode#LEARN}
	 * @param detectorLog log sink used throughout the learn/detect phases
	 * @throws DetectorException declared for interface compatibility
	 * @throws IOException if the report file cannot be opened
	 */
	public ScriptsDB2(String mode,Log detectorLog) throws DetectorException, IOException {
		//initialize lists (freshly allocated - no clear() needed)
		attackScriptsList = new Vector<Integer>();
		normalizedScriptsList = new Vector<Integer>();
		this.appScripts = new HashMap<Integer,ScriptNode>();
		this.learnedURLs = new HashMap<Integer,String>();
		this.detectorResults = new DetectorResults();

		if (mode.equals(AppMode.LEARN.toString()))
			out = new BufferedWriter(new FileWriter("LrnDB.txt"));
		else
			out = new BufferedWriter(new FileWriter("DetDB.txt"));
		this.detectorLog = detectorLog;
	}

	/**
	 * Learning phase. Visits a random subset of {@code links} (a {@code learnPart}
	 * fraction, rounded up), extracts every page's script nodes and adds each
	 * previously-unseen script to the internal scripts DB. Each visited URL is
	 * recorded (keyed by its query-less form) in {@link #learnedURLs}.
	 *
	 * @param links     candidate pages to learn from; must not be null or empty
	 * @param learnPart fraction of the links to learn, in [0..1]
	 * @return the {@code DetectorResults} of the last visited page, carrying the
	 *         per-page new-script counts via {@code setNewScripts}
	 * @throws DetectorException if no links were supplied or a page cannot be parsed
	 * @throws IOException       on report-file or URL errors
	 * @throws ParserException   declared for interface compatibility
	 */
	public DetectorResults learn(ArrayList<ExtendedLink> links, double learnPart) 
	throws IOException, DetectorException, ParserException{

		// Guard: an empty link list previously surfaced after the loop as an
		// obscure MalformedURLException (hostPath null) / NullPointerException (dr null).
		if (links == null || links.isEmpty())
			throw new DetectorException("learn() called with no links to learn from");

		String hostPath = null;
		detectorLog.WriteToLog("Learning. The detector will learn " + 
				(Math.ceil((double)learnPart*links.size())) + "(" + (int)(learnPart*100) + "%) of total " + 
				links.size() +" urls",true);

		int learnCounter = 0;
		ExtendedLink exLink = null;
		String currentUrl = null;
		Vector<ScriptNode> pageScripts = null;
		int appScriptsDBSize=0;
		Vector<Integer> incrementalScripts = new Vector<Integer>(); //per-page count of newly learned scripts
		DetectorResults dr = null;

		try {
			//choose random urls from the list until the learn quota is reached
			for (Iterator<ExtendedLink> iter = new RandomIterator<ExtendedLink>(links,new Random()); iter.hasNext();){

				learnCounter+=1;
				exLink = iter.next();
				currentUrl = exLink.getLink().toURL();

				//add current URL hash (of its query-less form) to the learned URL list
				Link link = exLink.getLink();
				hostPath = OmitQuery(link);
				learnedURLs.put(hostPath.hashCode(),currentUrl);

				//logging
				detectorLog.WriteToLog("Checking for new scripts in " + currentUrl,false);
				detectorLog.WriteToLog("------------------------------------------------", false);
				out.write(newline + "url: " + currentUrl +newline + "--------------------------------------------" + newline + newline);

				ScriptExtractor2 scrext = null;
				try{
					scrext = new ScriptExtractor2(currentUrl,AppMode.LEARN);
				}catch (ParserException e){
					throw (new DetectorException(e.getMessage()));
				}

				//extract page script nodes
				pageScripts = scrext.getScriptNodes();

				dr = CheckScripts(pageScripts,AppMode.LEARN);
				if ((dr.getNumMatchNormalized()==0) && (dr.getNumNoMatch()==0) ){
					detectorLog.WriteToLog("No new scripts were found",false);
				}
				else{
					detectorLog.WriteToLog(dr.getNumNoMatch() + " new scripts were found and added to scripts DB",false);
					detectorLog.WriteToLog(dr.getNumMatchNormalized() + " new mutant scripts were found and added to scripts DB",false);
					detectorLog.WriteToLog(dr.getNumMatchRegular() + " similar scripts were found and not added to scripts DB",false);
				}
				incrementalScripts.add(dr.getNumNoMatch());
				appScriptsDBSize += dr.getNumNoMatch();
				//stop once the requested fraction of links has been learned
				if (learnCounter>=Math.ceil((double)learnPart*links.size()))
					break;
			}
			detectorLog.WriteToLog("Finished to learn " + new URL(hostPath).getHost() ,true);
			detectorLog.WriteToLog(currentUrl +"'s DB holds " + appScriptsDBSize + " scripts",false);
			dr.setNewScripts(incrementalScripts);
			return (dr);
		} finally {
			//close the report file even when an exception aborts the learning loop
			out.close();
		}
	}

	public void setLearnedURLs(HashMap<Integer, String> learnedURLs) {
		this.learnedURLs = learnedURLs;
	}

	public HashMap<Integer, String> getLearnedURLs() {
		return learnedURLs;
	}

	/**
	 * Returns the URL without its query part ("protocol://host/path"); used as the
	 * canonical key for the learned-URL table.
	 */
	private String OmitQuery(Link link){
		if (link.getQuery().length()>0){
			int queryStart = link.getFile().indexOf('?');
			//defensive: a non-empty query should imply a '?' in the file part, but
			//guard against -1 so substring() cannot throw StringIndexOutOfBoundsException
			String filePart = (queryStart >= 0)
					? link.getFile().substring(0, queryStart)
					: link.getFile();
			return (link.getProtocol()+ "://" +link.getHost()+ filePart);
		}
		else
			return link.getURL().toString();
	}
	
	/**
	 * Detection phase. Rebuilds the scripts DB from {@code scriptsArray} (indexed by
	 * both regular and alternate hash), then visits a random {@code p}-percent subset
	 * of {@code links}, fetches each page and classifies its scripts via
	 * {@link #CheckScripts}. URLs whose HTTP response is not 2xx are skipped.
	 *
	 * @param scriptsArray scripts learned in a previous learning phase
	 * @param links        candidate pages to examine
	 * @param learnedUrls  URL table produced by the learning phase (replaces {@link #learnedURLs})
	 * @param p            percentage (0..100) of links to examine
	 * @param mode         "fn" for false-negative detection, "fp" for false-positive detection
	 * @return accumulated detection statistics
	 * @throws DetectorException if {@code mode} is unrecognized or a page cannot be parsed
	 * @throws IOException       on report-file, URL or connection errors
	 * @throws ParserException   declared for interface compatibility
	 */
	public DetectorResults detect(ArrayList<ScriptNode> scriptsArray, 
			ArrayList<ExtendedLink> links, HashMap <Integer,String> learnedUrls,
			double p, String mode)
	throws IOException, ParserException, DetectorException{

		int detectCounter=0;
		int responsecode=0;
		String hostPath=null;
		AppMode detectMode;
		DetectorResults dr_accum = new DetectorResults();
		DetectorResults dr_page = null;
		this.learnedURLs = learnedUrls ;

		//determine detection mode
		if (mode.equalsIgnoreCase("fn"))
			detectMode = AppMode.DETECT_FN;
		else if (mode.equalsIgnoreCase("fp"))
			detectMode = AppMode.DETECT_FP;
		else
			throw (new DetectorException("Detector Mode not recognized"));

		//calculate number of urls to check
		int numUrlToCheck = (int)(Math.ceil(((p/100)*links.size())));
		detectorLog.WriteToLog("Starting Detection Phase. The detector will detect " + 
				numUrlToCheck + "(" + p + "%) of total " + 
				links.size() +" urls",true);

		//build application scripts DB 
		Vector<ScriptNode> pageScripts = null;
		appScripts.clear();
		//convert scriptsArray to hashmap representation so we can detect script existence in O(1)
		for (ScriptNode s : scriptsArray){
			appScripts.put(s.getHash(), s);
			appScripts.put(s.getAlternateHash(),s);
		}

		try {
			//randomly traverse given links
			for (Iterator<ExtendedLink> iter = 
				new RandomIterator<ExtendedLink>(links,new Random()); iter.hasNext();){

				//increase number of examined URL's 
				detectCounter+=1;

				//check if we reached max number of URL's 
				if (detectCounter>numUrlToCheck)
					break;

				//extract url string
				ExtendedLink exLink = iter.next();
				String currentUrl = exLink.getLink().toURL();

				//htmlparser doesn't like whitespace in http request - replace with %20
				currentUrl = currentUrl.replaceAll("\\s", "%20");

				//ignore query part (to correctly compare to learned urls)
				Link link = exLink.getLink();
				hostPath = OmitQuery(link);

				//if this URL has been learned, increase overlap factor
				if (learnedURLs.containsKey(hostPath.hashCode())) 
					dr_accum.IncrementOverlap();

				//start examination phase
				detectorLog.WriteToLog(detectCounter + ") Detecting for new scripts in " + currentUrl,false);
				out.write(newline + "url: " + currentUrl +newline + "--------------------------------------------" + newline + newline);

				//check if this URL is valid (i.e. , contains valid characters)
				//TODO: handle html encoding problems

				//check if this is an SSL connection, as this might pose a problem
				if (currentUrl.startsWith("https")){
					System.out.println("This is a secured connection (HTTPS). Trying to reach the same URL over HTTP");
					currentUrl = currentUrl.replaceFirst("https", "http");
				}

				//probe the URL and skip it unless the server answers with a 2xx status
				HttpURLConnection con = ((HttpURLConnection)(new URL(currentUrl)).openConnection());
				try {
					responsecode = con.getResponseCode();
					if (responsecode<200 || responsecode>=300){
						System.out.println("Server responded with " + responsecode + " " + con.getResponseMessage() + 
						". Skipping this URL");
						detectorLog.WriteToLog("Server responded with " + responsecode + " " + con.getResponseMessage() + 
								". Skipping this URL", false);
						continue;
					}
				} finally {
					//release the probe connection (was previously leaked)
					con.disconnect();
				}

				/*  Extract page script nodes.
				 *   Because this is a detection phase, the normalization process will be conducted only when 
				 *	no regular hash match was found (improve performance).
				 */
				ScriptExtractor2 scrext = null;
				try{
					scrext = new ScriptExtractor2(currentUrl,AppMode.DETECT);
				}catch (ParserException e){
					throw (new DetectorException(e.getMessage()));
				}

				//extract page script nodes
				pageScripts = scrext.getScriptNodes();

				//check if these are new scripts or not
				dr_page = CheckScripts(pageScripts,detectMode);

				if (dr_page.getNumNoMatch()==0){
					detectorLog.WriteToLog("No FP/FN scripts were found",false);
				}

				//count the attacks that would have been reported without canonicalization
				if (dr_page.getNumMatchNormalized()!=0){
					System.out.println("Detector found " + dr_page.getNumMatchNormalized() + " scripts with different script code, " +
					"but their canoninized version was the same");
					//increment number of URL's that might cause an alarm if we wouldn't be using normalization
					dr_accum.IncrementUrlWithFalseAttack();
					//increment number of JS that might cause an alarm if we wouldn't be using normalization
					dr_accum.IncrementScriptsWithFalseAttack(dr_page.getNumMatchNormalized());
				}

				if (dr_page.getNumNoMatch()>0){//found new script nodes
					//check if this URL has been learned
					if (!learnedURLs.containsKey(hostPath.hashCode())){
						//this is an un-learned Url
						dr_accum.IncrementNumUnLearnedUrlWithAttack();
						detectorLog.WriteToLog(" " + dr_page.getNumNoMatch() +" new scripts. This URL hasn't been learned.",false);
					}
					else{
						//this URL has been learned (message previously said the opposite)
						detectorLog.WriteToLog(dr_page.getNumNoMatch() + " new scripts. This URL has been learned",false);
						dr_accum.IncrementUrlWithRealAttack();
					}
					dr_accum.IncrementScriptsWithRealAttack(dr_page.getNumNoMatch());
				}
			}
			return dr_accum;
		} finally {
			//close the report file even when an exception aborts the detection loop
			out.close();
		}
	}

	/**
	 * Classifies every script of a page against the scripts DB.
	 * For each script: a regular-hash match counts as a known script; otherwise the
	 * script is normalized (in detection modes) and its alternate hash is checked;
	 * scripts with no match at all are learned (LEARN), reported as false positives
	 * (DETECT_FP) or reported as attacks (DETECT_FN). Each unmatched script is
	 * reported only once, tracked via {@link #attackScriptsList}.
	 *
	 * @param pageScripts script nodes extracted from one page (may be null)
	 * @param mode        LEARN, DETECT_FP or DETECT_FN
	 * @return per-page counters (no-match / normalized-match / regular-match)
	 * @throws IOException       on report-file errors
	 * @throws DetectorException declared for interface compatibility
	 */
	public DetectorResults CheckScripts(Vector<ScriptNode> pageScripts, AppMode mode) throws IOException, DetectorException{

		ScriptNode currentNode;
		int currentAlternativeHashVal;
		boolean xssAttackIndicator=false;
		DetectorResults dr = new DetectorResults();
		newScriptCounter = 0;
		int normalizedScriptCounter = 0;

		if (pageScripts!=null){
			//need to go over all scripts and check for each script if it is in internal scripts DB
			for (Iterator<ScriptNode> iter = pageScripts.iterator();iter.hasNext();){ 

				xssAttackIndicator=false;

				currentNode = iter.next(); // current script

				//heuristic: flag scripts containing the markers of our injected XSS attacks
				if ( (currentNode.getOrigCode()!=null)&&
						( (currentNode.getOrigCode().toLowerCase().indexOf("xss")>-1) || 
								(currentNode.getOrigCode().toLowerCase().indexOf("fromcharcode")>-1) ) )
					xssAttackIndicator = true;

				//check if learnDB already contains script:
				//first check native hash and then check alternative hash
				if (!appScripts.containsKey(currentNode.getHash())){ 
					// the script is not in DB according to regular hash.
					// In detection modes the normalized code is built lazily, only now
					// that the cheap regular-hash lookup has failed (performance).
					if ((mode==AppMode.DETECT_FP)||(mode==AppMode.DETECT_FN)) {
						currentNode.setNormCode(new Normalizer(currentNode).Normalize().getNormCode());
					}
					currentAlternativeHashVal = currentNode.getAlternateHash();
					// check alternative hash 
					if (!appScripts.containsKey(currentAlternativeHashVal)){
						// no match (regular/alternate) was found in scriptDB
						switch (mode){
						case LEARN:
							if (currentNode.isSkip()){
								//problematic script - not learned and not counted
								break;
							}
							//add new script to DB under both hashes
							appScripts.put(currentNode.getHash(),currentNode);
							appScripts.put(currentAlternativeHashVal,currentNode);
							newScriptCounter+=1;
							out.write("-----------------------------------------------------------------" + newline +
									"New Script Learned #"  + newScriptCounter + newline +
									"-----------------------------------------------------------------" + newline + 
									currentNode.getScriptType() + newline + 
									currentNode.getOrigCode() + newline);
							dr.IncrementNoMatch();
							break;
						case DETECT_FP:
							if (currentNode.isSkip()){
								System.out.println("Although this script is new I am skipping it");
								break;
							}
							//this case is a False Positive (script should have been learned before)
							//check if this FP script hasn't been detected before
							if (!attackScriptsList.contains(currentAlternativeHashVal)){
								newScriptCounter+=1;
								out.write("-----------------------------------------------------------------" + newline +
										"False Positive Script #"  + newScriptCounter + newline +
										"-----------------------------------------------------------------" + newline + 
										currentNode.getScriptType() + newline + 
										currentNode.getOrigCode() + newline);
								//add this script to detection results, so it won't be detected twice.
								attackScriptsList.add(currentAlternativeHashVal);
								dr.IncrementNoMatch();
							}
							break;
						case DETECT_FN:
							//Found an attack
							//check if this attack script hasn't been detected before
							if (!attackScriptsList.contains(currentAlternativeHashVal)){
								newScriptCounter+=1;
								out.write("-----------------------------------------------------------------" + newline +
										"Attack #"  + newScriptCounter + newline +
										"-----------------------------------------------------------------" + newline + 
										currentNode.getScriptType() + newline + 
										currentNode.getOrigCode() + newline);
								if (xssAttackIndicator)
									detectorLog.WriteToLog("Found the injected XSS attack.",false);
								else
									detectorLog.WriteToLog("Found a new attack.",false);
								detectorLog.WriteToLog("Script details:",false);
								detectorLog.WriteToLog(currentNode.getOrigCode(),false);
								//add this script to detection results, so it won't be detected twice.
								attackScriptsList.add(currentAlternativeHashVal);
								dr.IncrementScriptsWithRealAttack(1);
							}
							break;
						}
					}else{
						//normalized form of the script exists in DB - count it once
						if (!normalizedScriptsList.contains(currentNode.getHash())){
							normalizedScriptsList.add(currentNode.getHash());
							normalizedScriptCounter+=1;
							dr.IncrementMatchNorm();
						}
					}
				}
				else{
					//regular hash exists - no need to normalize
					dr.IncrementMatchReg();
				}
			}
		}
		return (dr);
	}

	/**
	 * Returns the scripts DB contents as a list. Note: because every script node is
	 * stored under two hash keys, each node appears twice in the returned list
	 * (preserved behavior of the original implementation).
	 */
	public ArrayList<ScriptNode> getAppScripts() {
		return new ArrayList<ScriptNode>(this.appScripts.values());
	}

}
