package simplecrawler;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.*;
import java.util.Vector;
import java.util.regex.Pattern;

import threads.*;

/**
 * Worker thread that fetches a page, scans it for a keyword, and enqueues
 * every link found on the page for further crawling.
 */
public class SearcherThread extends ControllableThread {

    /** Keyword searched for in every fetched page. */
    private static final String KEYWORD = "Andrei";

    /**
     * URLs of binary/media resources (and mailto links) that are not worth
     * fetching. The dot before the extension list is escaped so that only a
     * literal ".zip" etc. matches — the original pattern used an unescaped
     * '.' and wrongly skipped names like "topzip.html". Compiled once
     * instead of being re-parsed by String.matches on every call.
     */
    private static final Pattern IGNORED_URLS = Pattern.compile(
            ".*((\\.(zip|rar|pdf|wmv|avi|jpg|jpeg|gif|wav|mp3|flv|swf|torrent|css|js|chm|tar|gz|tgz))|mailto).*");

    /**
     * Processes one work item from the crawl queue.
     *
     * @param o the work item; expected to be a {@link URL}
     */
    public void process(Object o) {
        try {
            URL pageURL = (URL) o;

            // Lower-cased path+query of the URL, used for the ignore filter.
            String filename = pageURL.getFile().toLowerCase();

            if (IGNORED_URLS.matcher(filename).matches()) {
                System.out.println("Ignoring \"" + filename + "\"");
                return;
            }

            URLConnection conn = pageURL.openConnection();
            // Read the content type from the same connection used for the
            // keyword scan — the original opened a second connection (and
            // hence fetched the page again) just for this header.
            String mimetype = conn.getContentType();

            // try-with-resources guarantees the reader (and the underlying
            // stream) is closed even when readLine() throws. NOTE(review):
            // no charset is specified, so the platform default is used —
            // same as the original; confirm whether UTF-8 should be forced.
            try (BufferedReader br = new BufferedReader(
                    new InputStreamReader(conn.getInputStream()))) {
                String inputLine;
                while ((inputLine = br.readLine()) != null) {
                    if (inputLine.contains(KEYWORD)) {
                        System.out.println("Keyword found in line: " + inputLine);
                    }
                }
            } catch (IOException e) {
                // Best effort: an unreadable page is reported but does not
                // abort this crawl step (the original swallowed this silently).
                System.err.println("Failed to read " + pageURL + ": " + e);
            }

            // Only text pages are parsed for links. Guard against a missing
            // Content-Type header, which previously caused an NPE that was
            // silently absorbed by the outer catch-all.
            if (mimetype == null || !mimetype.startsWith("text")) {
                return;
            }

            String rawPage = SaveURL.getURL(pageURL);
            String smallPage = rawPage.toLowerCase().replaceAll("\\s", " ");

            // Treat the page as HTML and extract its links.
            Vector links = SaveURL.extractLinks(rawPage, smallPage, initURL);

            // Convert each link text to a URL and enqueue it.
            for (int n = 0; n < links.size(); n++) {
                try {
                    // URLs might be relative to the current page.
                    URL link = new URL(pageURL, (String) links.elementAt(n));
                    // If layers are not used, write everything into the same layer.
                    if (tc.getMaxLevel() == -1) {
                        queue.push(link, level);
                    } else {
                        queue.push(link, level + 1);
                    }
                } catch (MalformedURLException ignored) {
                    // The link extractor may produce malformed URLs; skip them.
                }
            }
        } catch (Exception e) {
            // Processing of this item failed; report and move on so a single
            // bad page cannot kill the crawler thread.
            System.err.println("Failed to process " + o + ": " + e);
        }
    }
}
