package Data;

import edu.uci.ics.crawler4j.crawler.Page;

import java.io.*;
import java.net.URL;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Properties;

public class DownloadPage {


    /*
     * Opens a buffered stream on the url and copies the contents to writer,
     * one byte at a time. Each byte is written as a char (the raw 0-255 byte
     * value) — no charset decoding is performed, matching the original
     * behaviour. The input stream is always closed; the writer is left open
     * so callers (e.g. getURL) can keep using it.
     */
    private static void saveURL(URL url, Writer writer)
            throws IOException {
        // try-with-resources: the previous version leaked the stream on every call
        try (BufferedInputStream in = new BufferedInputStream(url.openStream())) {
            for (int c = in.read(); c != -1; c = in.read()) {
                writer.write(c);
            }
        }
    }

    /*
     * Opens a stream on the url and copies the contents to OutputStream in
     * 2 KiB chunks. The input stream is always closed; the output stream is
     * left open for the caller to close.
     */
    private static void saveURL(URL url, OutputStream os) throws IOException {
        try (InputStream is = url.openStream()) {
            byte[] buf = new byte[2048];
            for (int n = is.read(buf); n != -1; n = is.read(buf)) {
                os.write(buf, 0, n);
            }
        }
    }

    /*
     * Reads the contents of the url into a String by calling saveURL with a
     * StringWriter as argument.
     */
    public static String getURL(URL url)
            throws IOException {
        StringWriter sw = new StringWriter();
        saveURL(url, sw);
        return sw.toString();
    }

    /*
     * Downloads the crawled page to "<storage><HTMLPages><docid>.html"
     * (directory pieces read from the configuration file) and computes an
     * MD5 digest of the stored file.
     *
     * Fixes over the original: the properties stream, the page output stream
     * and the digest stream are now closed via try-with-resources; the digest
     * is actually computed (the original opened a DigestInputStream and
     * closed it without reading, so no hash was ever produced); a missing
     * MD5 implementation is reported instead of being silently swallowed.
     */
    public static void writeURLtoFile(Page page) throws IOException {

        // Resolve the download directory from the configuration file.
        Properties downloadPath = new Properties();
        try (FileInputStream propInputStream = new FileInputStream(Settings.CONFIGURATION)) {
            downloadPath.load(propInputStream);
        }
        String downloadDir = downloadPath.getProperty("storage") + downloadPath.getProperty("HTMLPages");

        String url = page.getWebURL().getURL();
        int file = page.getWebURL().getDocid();

        String filename = Integer.toString(file); //we will fire this out to the db

        URL aurl = new URL(url);

        File inputFile = new File(downloadDir + filename + ".html");
        try (FileOutputStream os = new FileOutputStream(inputFile)) {
            System.out.println(filename + ".html" + " downloaded");
            saveURL(aurl, os);
        }

        try {
            // NOTE(review): the digest is computed but not yet persisted —
            // presumably it should be stored alongside the doc id; confirm.
            md5(inputFile);
        } catch (NoSuchAlgorithmException e) {
            // MD5 is mandated for every conforming JDK, so this is effectively
            // unreachable; wrap instead of swallowing so a broken JRE surfaces.
            throw new IOException("MD5 MessageDigest unavailable", e);
        }
    }

    /*
     * Computes the MD5 digest of the given file's contents. Reading through
     * the DigestInputStream is what feeds the digest; the bytes themselves
     * are discarded.
     */
    private static byte[] md5(File inputFile) throws IOException, NoSuchAlgorithmException {
        MessageDigest mD = MessageDigest.getInstance("MD5");
        try (InputStream is = new DigestInputStream(
                new BufferedInputStream(new FileInputStream(inputFile)), mD)) {
            byte[] buf = new byte[2048];
            while (is.read(buf) != -1) {
                // keep reading until EOF so every byte passes through the digest
            }
        }
        return mD.digest();
    }

}
