package URLsearcher;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class HTMLlist {
	// Node fields: each HTMLlist instance is one node of a singly linked list.
	// The list is terminated by a sentinel node whose str is null.
	String str;      // the word stored in this node (null for the sentinel)
	HTMLlist next;   // next node, or null past the sentinel
	URLlist urls;    // URLs of the pages containing str (sentinel-terminated as well)

	// Constructors

	/**
	 * Creates a single list node.
	 *
	 * @param s the word stored in the node (null for a sentinel)
	 * @param n the next node in the list
	 * @param u the list of URLs whose pages contain the word
	 */
	HTMLlist(String s, HTMLlist n, URLlist u) {
		str = s;
		next = n;
		urls = u;
	}

	/**
	 * Builds an HTMLlist (word list with per-word URL lists) from a file.
	 * Lines starting with "*PAGE:" are page markers; every other line is a
	 * word. Each distinct word (case-insensitive) becomes one node whose
	 * urls field holds the pages it appears on.
	 *
	 * NOTE(review): containingURLs re-reads the whole file for every new
	 * word, so this is O(words * fileSize) — acceptable for small inputs.
	 *
	 * @param file path to the .txt index file
	 * @throws IOException if the file cannot be opened or read
	 */
	public HTMLlist(String file) throws IOException {
		// Sentinel tail; new nodes are prepended in front of it.
		HTMLlist head = new HTMLlist(null, null, null);

		// try-with-resources guarantees the reader is closed even if an
		// IOException is thrown mid-read (the original leaked it).
		try (BufferedReader infile = new BufferedReader(new FileReader(file))) {
			String lineRead = infile.readLine();
			while (lineRead != null) {
				// Skip page-marker lines; only word lines are indexed.
				if (!(lineRead.length() >= 6
						&& lineRead.substring(0, 6).equals("*PAGE:"))) {
					// Scan the list built so far for a case-insensitive duplicate.
					boolean addWord = true;
					for (HTMLlist t = head; t.next != null; t = t.next) {
						if (lineRead.equalsIgnoreCase(t.str)) {
							addWord = false;
							break;
						}
					}
					if (addWord) {
						// Collect the URLs of all pages containing this word,
						// then prepend a new node for it.
						URLlist matchingURLs = containingURLs(lineRead, file);
						head = new HTMLlist(lineRead, head, matchingURLs);
					}
				}
				lineRead = infile.readLine();
			}
		}

		// Copy the head node's data into this node and link to the REST of
		// the list. Bug fix: the original did `next = head`, which left the
		// first word's node in the list twice (once as `this`, once as head).
		str = head.str;
		next = head.next;
		urls = head.urls;
	}

	/**
	 * Prints every word in the list followed by the URLs it occurs on.
	 * Unlike the original, traversal uses local cursors and does NOT
	 * destroy the URL lists while printing.
	 *
	 * @param DataList the head of the list to print
	 * @throws IOException retained for interface compatibility (never thrown here)
	 */
	public void HTMLlistToString(HTMLlist DataList) throws IOException {
		for (HTMLlist node = DataList; node != null && node.str != null; node = node.next) {
			System.out.println(node.str);
			// Stop at the URL sentinel (the node whose next is null).
			for (URLlist u = node.urls; u != null && u.next != null; u = u.next) {
				System.out.println(u.url);
			}
		}
	}

	// Methods

	/**
	 * Finds a list of matching URLs.
	 *
	 * @param word the word to search for (case-insensitive match)
	 * @param file the file to search in
	 * @return a sentinel-terminated URLlist of the pages containing word
	 * @throws IOException if the file cannot be opened or read
	 */
	private URLlist containingURLs(String word, String file) throws IOException {
		String lastURL = "";
		boolean hasBeenAdded = false;
		// Sentinel tail; matches are prepended in front of it.
		URLlist matchingURLs = new URLlist(null, null);

		// try-with-resources: close the reader on every exit path.
		try (BufferedReader infile = new BufferedReader(new FileReader(file))) {
			String lineRead = infile.readLine();
			while (lineRead != null) {
				// The length check guards substring(0, 6) for short lines.
				if (lineRead.length() >= 6
						&& lineRead.substring(0, 6).equals("*PAGE:")) {
					// A page marker: remember its URL and reset the
					// added-flag so the new page can be recorded once.
					lastURL = lineRead.substring(6);
					hasBeenAdded = false;
				} else if (lineRead.equalsIgnoreCase(word)) {
					// The word occurs on the current page; add the page URL
					// only once even if the word repeats on it.
					if (!hasBeenAdded) {
						matchingURLs = new URLlist(lastURL, matchingURLs);
						hasBeenAdded = true;
					}
				}
				lineRead = infile.readLine();
			}
		}

		return matchingURLs;
	}

	/**
	 * Prints the URLs stored for a specific word, or a not-found message.
	 * Bug fixes versus the original: traversal no longer destroys the URL
	 * list, and a matched word with no URLs no longer prints BOTH the
	 * "was found in:" header and the "was NOT found" message (the found
	 * flag is now set on the match itself, not inside the URL loop).
	 *
	 * @param list the HTMLlist to search
	 * @param word the word to look for (case-insensitive)
	 */
	public void print(HTMLlist list, String word) {
		boolean found = false;
		for (HTMLlist node = list; node != null && node.str != null; node = node.next) {
			if (word.equalsIgnoreCase(node.str)) {
				found = true;
				System.out.println(node.str + " was found in:");
				// Stop at the URL sentinel (the node whose next is null).
				for (URLlist u = node.urls; u != null && u.next != null; u = u.next) {
					System.out.println(u.url);
				}
				break;
			}
		}
		if (!found) {
			System.out.println(word + " was NOT found");
		}
	}

}
