package crawl;

import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import crawl.gov.GovHTMLFilter;
import crawl.http.HtmlCrawlProcessThread;
import crawl.http.HtmlParser;
import crawl.http.PageGraph;

import utils.Logger;
import utils.Util;

/**
 * A multi-threaded crawler: starting from an initial URI it downloads
 * pages, caches them on disk under {@code cache/}, records an id/URI map,
 * and follows the links it finds until the configured limit is reached.
 *
 * @author huangcd
 */
public class Crawler
{
    // matches <a href="..."> anchors (compiled once, shared by all threads)
    private static Pattern       href;
    // matches a <meta ... charset ...> declaration inside a page
    private static Pattern       charset;
    // charset assumed when a page does not declare one
    private static String        defaultCharset;
    private URIFilter            filter;        // decides which URIs may be crawled
    private ProcessThread        finishThread;  // started once when the crawl finishes
    private URI                  initURI;       // normalized start URI
    private List<URI>            uriList;       // pending URIs (work queue)
    private Set<String>          crawledURI;    // URI strings already downloaded
    private long                 interval;      // sleep between two fetches, in ms
    private long                 maxCount;      // maximum number of pages to save
    private long                 count;         // pages saved so far
    private int                  threadCount;   // maximum worker threads
    private int                  threadAlive;   // worker threads currently running
    private int                  fileName;      // next cache-file id to assign
    private Set<String>          uriSet;        // every URI ever enqueued or seen in the map file
    private Map<String, Integer> uriMap;        // uri -> cache-file id (this session only)
    private BufferedWriter       outResult;     // human-readable crawl log
    private BufferedWriter       outMap;        // persistent id/time/uri records

    static
    {
        // [\d\D] matches any character including newlines (unlike '.')
        href = Pattern.compile("<a *href *= *\"[\\d\\D]*?\"",
                Pattern.CASE_INSENSITIVE);
        charset = Pattern.compile("\\<meta[\\d\\D]*?charset[\\d\\D]*?\\>",
                Pattern.CASE_INSENSITIVE);
        defaultCharset = "GBK";
    }

    /**
     * Construct a {@code Crawler} with the given start URI, filter, finish
     * hook, crawl delay, page limit and thread limit.
     * 
     * @param initURI
     *            the URI the crawler starts from
     * @param filter
     *            decides which URIs should be ignored
     * @param finishThread
     *            a thread to be started when the crawl finishes
     * @param timeInterval
     *            time interval between two fetches, in milliseconds
     * @param crawlerSize
     *            maximum number of URIs to crawl; crawl everything reachable
     *            if this is less than 0
     * @param threadCount
     *            maximum number of worker threads
     * @param isFreshCrawl
     *            whether the files stored in the cache should be ignored. If
     *            true, the {@code cache} directory is deleted first;
     *            otherwise URIs that were crawled before are skipped (so if
     *            {@code initURI} was crawled before, nothing is crawled)
     * @throws CrawlingException
     *             if {@code initURI} is rejected by {@code filter}
     * @throws IOException
     *             if the cache or log files cannot be initialised
     */
    public Crawler(URI initURI, URIFilter filter, ProcessThread finishThread,
            long timeInterval, long crawlerSize, int threadCount,
            boolean isFreshCrawl) throws CrawlingException, IOException
    {
        if (!filter.accept(initURI))
            throw new CrawlingException("initURI " + initURI.toString()
                    + " doesn't accept by the filter. No uri to be crawled");
        if (isFreshCrawl)
        {
            // NOTE(review): only the cache directory is cleared here; the
            // map file (Util.getMapFile()) is not deleted, so previously
            // recorded URIs may still be skipped -- confirm that is intended.
            File cache = new File("cache");
            if (cache.exists() && cache.isDirectory())
            {
                // listFiles() returns null on an I/O error -- guard it
                File[] cached = cache.listFiles();
                if (cached != null)
                {
                    for (File file : cached)
                    {
                        file.delete();
                    }
                }
                cache.delete();
            }
        }
        initCache(initURI, filter);
        this.finishThread = finishThread;
        this.interval = timeInterval;
        this.maxCount = crawlerSize < 0 ? Long.MAX_VALUE : crawlerSize;
        this.count = 0;
        this.threadCount = threadCount;
        addURI(this.initURI);
    }

    /**
     * Construct a {@code Crawler} with default settings: a
     * {@code DefaultProcessThread} finish hook, no delay between fetches,
     * an unlimited page count, and a single worker thread.
     * 
     * @param initURI
     *            the URI the crawler starts from
     * @param filter
     *            decides which URIs should be ignored
     * @throws Exception
     *             if the cache cannot be initialised
     */
    public Crawler(URI initURI, URIFilter filter) throws Exception
    {
        initCache(initURI, filter);
        this.count = 0;
        this.interval = 0;
        this.threadCount = 1;
        this.maxCount = Long.MAX_VALUE;
        this.finishThread = new DefaultProcessThread();
        addURI(this.initURI);
    }

    /**
     * Initialise the shared collections, log files and cache directory, and
     * reload the record of previously crawled URIs from the map file.
     * <p>
     * Fixes over the previous version: the temporary writer that was opened
     * when the map file did not exist is no longer created (it was never
     * closed); the map records are read back in the order they are written
     * by {@code save} (id, timestamp, uri -- the old code stored the
     * timestamp into {@code uriSet} instead of the URI); and the next file
     * id is one past the largest existing id, so resuming a crawl no longer
     * overwrites the newest cached file.
     * 
     * @param uri
     *            the raw start URI, normalized via {@code filter}
     * @param filter
     *            decides which URIs should be ignored
     * @throws IOException
     *             if the result or map file cannot be opened
     */
    private void initCache(URI uri, URIFilter filter) throws IOException
    {
        Util.setQueryMode(false);
        this.initURI = filter.normalize(uri, uri);
        this.filter = filter;
        this.crawledURI = Collections.synchronizedSet(new HashSet<String>());
        this.uriList = Collections.synchronizedList(new LinkedList<URI>());
        this.uriSet = Collections.synchronizedSet(new HashSet<String>());
        this.uriMap = Collections
                .synchronizedMap(new HashMap<String, Integer>());
        this.outResult = new BufferedWriter(
                new FileWriter(Util.getResultFile()));
        switch (Scheme.fromURI(initURI))
        {
            case HTTP:
            case FTP:
                this.outResult.write("Crawling the Web..."
                        + Util.getLineSeparator());
                break;
            case FILE:
                this.outResult.write("Crawling the File System..."
                        + Util.getLineSeparator());
                break;
            default:
                break;
        }
        File dir = new File("cache");
        if (!dir.exists())
        {
            dir.mkdirs();
        }
        File map = Util.getMapFile();
        this.fileName = 0;
        if (map.exists())
        {
            Scanner scan = new Scanner(map);
            try
            {
                // each record was written by save() as: id, timestamp, uri
                while (scan.hasNext())
                {
                    int id = scan.nextInt();
                    // next id must be one PAST the largest existing id,
                    // otherwise the newest cached file would be overwritten
                    if (id >= this.fileName)
                    {
                        this.fileName = id + 1;
                    }
                    // skip the timestamp token
                    scan.next();
                    // the URI is the third token on the record
                    this.uriSet.add(scan.next());
                }
            }
            finally
            {
                scan.close();
            }
        }
        // append mode keeps existing records and creates the file if absent
        this.outMap = new BufferedWriter(new FileWriter(map, true));
    }

    /**
     * Look up the cache-file id assigned to an already-saved URL.
     * 
     * @param url
     *            the exact URI string that was passed to {@code save}
     * @return the document id, or -1 if the URL has not been saved during
     *         this session (the previous version threw a
     *         {@code NullPointerException} while unboxing in that case)
     */
    public int getDocumentID(String url)
    {
        Integer id = uriMap.get(url);
        return id == null ? -1 : id;
    }

    /**
     * Flush and close the map and result writers (opened in
     * {@code initCache}), then wake any thread waiting on this crawler's
     * monitor. Called by the last worker thread to finish.
     */
    private synchronized void closeLogFiles()
    {
        Logger.logInfo("close log files");
        BufferedWriter[] writers = { this.outMap, this.outResult };
        for (BufferedWriter writer : writers)
        {
            if (writer == null)
            {
                continue;
            }
            try
            {
                writer.close();
            }
            catch (IOException e)
            {
                Logger.logException(e);
            }
        }
        this.notifyAll();
    }

    /**
     * Persist one downloaded document to the cache and record it in the
     * map and result logs.
     * 
     * @param file
     *            the decoded page content
     * @param uri
     *            the URI the content was downloaded from
     * @return the id of the cached document, or -1 when the crawl limit has
     *         been reached and nothing was saved
     * @throws IOException
     *             if the cache file or the logs cannot be written
     */
    private synchronized int save(String file, URI uri) throws IOException
    {
        if (this.count >= this.maxCount)
            return -1;
        this.count++;
        // use a portable path (the old code hard-coded the Windows
        // separator "cache\\") and close the writer even on failure
        BufferedWriter writer = new BufferedWriter(new FileWriter(new File(
                "cache", String.valueOf(this.fileName))));
        try
        {
            writer.write(file);
            writer.flush();
        }
        finally
        {
            writer.close();
        }
        this.uriMap.put(uri.toString(), this.fileName);
        // record layout (id, timestamp, uri) is what initCache reads back
        this.outMap.append(String.format("%-8d", this.fileName)).append(
                String.format("%-16d", System.currentTimeMillis())).append(
                String.format("%-230s", uri.toString())).append(
                Util.getLineSeparator());
        this.outMap.flush();
        this.outResult.write(this.fileName + " " + uri.toString()
                + Util.getLineSeparator());
        this.outResult.flush();
        Logger.logInfo(uri.toString() + "  currentCount = " + this.count
                + "  uriListSize = " + this.uriList.size());
        crawledURI.add(uri.toString());
        return this.fileName++;
    }

    /**
     * Remove and return the next URI that has not been crawled yet.
     * 
     * @return the head of the pending list, or {@code null} when the list
     *         is empty or the crawl limit has already been reached
     */
    public synchronized URI getNextURI()
    {
        boolean exhausted = this.count >= this.maxCount
                || this.uriList.isEmpty();
        return exhausted ? null : this.uriList.remove(0);
    }

    /**
     * Try to enqueue a URI for crawling.
     * <p>
     * The URI is accepted only if it passes the filter and has not been
     * enqueued (or recorded in the map file) before.
     * 
     * @param uri
     *            a normalized URI to be added
     * @return true if the URI was queued, false if it was rejected
     */
    public synchronized boolean addURI(URI uri)
    {
        String key = uri.toString();
        if (!this.filter.accept(uri) || this.uriSet.contains(key))
        {
            return false;
        }
        // if a worker has stopped but a lot of URIs are still pending,
        // spin up a replacement thread
        if (this.threadAlive < this.threadCount
                && this.uriList.size() > this.maxCount / 10 + 1)
        {
            startNewCrawlerThread();
        }
        this.uriList.add(uri);
        this.uriSet.add(key);
        return true;
    }

    /**
     * Return the set of URI strings downloaded so far.
     * <p>
     * NOTE(review): this exposes the live internal synchronized set, not a
     * copy; worker threads keep mutating it while the crawl is running, so
     * callers must iterate under external synchronization.
     */
    public Set<String> getCrawledURI()
    {
        return crawledURI;
    }

    /**
     * Start one more {@code CrawlerThread} unless the configured thread
     * limit has already been reached.
     * <p>
     * NOTE(review): {@code threadAlive} is only incremented later, inside
     * the new thread's {@code run()}, so several calls in quick succession
     * can each pass the check below and overshoot {@code threadCount} --
     * confirm whether that race matters in practice.
     */
    private synchronized void startNewCrawlerThread()
    {
        if (this.threadAlive >= this.threadCount)
            return;
        new CrawlerThread(this).start();
        Logger.logInfo("A new crawlerThread has started");
    }

    /**
     * Start crawling by launching the first worker thread.
     */
    public void start()
    {
        startNewCrawlerThread();
    }

    /**
     * Determine the character set declared by an HTML page.
     * 
     * @param htmlData
     *            the raw page bytes
     * @return the lower-cased charset name found in a
     *         {@code <meta ... charset ...>} tag, or the default ("GBK")
     *         when none is declared or the declared name is not supported.
     *         (The previous substring parsing threw
     *         {@code StringIndexOutOfBoundsException} whenever the value
     *         was not followed by a double quote, e.g.
     *         {@code <meta charset=utf-8>}.)
     */
    public String getCharset(byte[] htmlData)
    {
        // platform-default decoding is good enough to locate the ASCII tag
        Matcher mat = charset.matcher(new String(htmlData));
        if (mat.find())
        {
            // tolerate single, double or missing quotes around the value;
            // a legal charset name starts with a letter or digit
            Matcher token = Pattern.compile(
                    "charset\\s*=?\\s*['\"]?\\s*([A-Za-z0-9][\\w-]*)")
                    .matcher(mat.group().toLowerCase());
            if (token.find() && Charset.isSupported(token.group(1)))
            {
                return token.group(1);
            }
        }
        return defaultCharset;
    }

    /**
     * Decode the given page bytes using the charset the page itself
     * declares (falling back to the crawler default when none is found).
     * 
     * @param htmlData
     *            raw bytes of an HTML page
     * @return the decoded page text
     */
    public String toRegexString(byte[] htmlData)
    {
        return new String(htmlData, Charset.forName(getCharset(htmlData)));
    }

    /**
     * A worker thread that repeatedly takes the next pending URI from the
     * owning {@code Crawler}, downloads and caches it, and enqueues every
     * out-link found in the page.
     * 
     * @author huangcd
     */
    private class CrawlerThread extends Thread
    {
        Crawler   su;    // owning crawler (shared queue, counters, logs)
        PageGraph graph; // global link graph, one edge per discovered link

        private CrawlerThread(Crawler crawler)
        {
            this.su = crawler;
            this.graph = PageGraph.getInstance();
        }

        /**
         * Download one HTTP page, save it to the cache, parse it, and
         * enqueue every {@code <a href="...">} target it contains.
         */
        private void crawlHttp(URI uri)
        {
            InputStream in = null;
            try
            {
                in = uri.toURL().openStream();
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                // read in chunks instead of one byte at a time
                byte[] chunk = new byte[4096];
                int read;
                while ((read = in.read(chunk)) != -1)
                {
                    out.write(chunk, 0, read);
                }
                byte[] buf = out.toByteArray();
                // decode using the charset the page itself declares
                String charset = this.su.getCharset(buf);
                String data = new String(buf, Charset.forName(charset));
                int id = this.su.save(data, uri);
                if (id == -1) // crawl limit reached, nothing was saved
                    return;
                new HtmlParser(id, charset).parse();
                // extract and enqueue every anchor target on the page
                Matcher mat = href.matcher(data);
                while (mat.find())
                {
                    try
                    {
                        // strip the surrounding markup; allow any amount of
                        // whitespace between "a" and "href" (the matching
                        // pattern does, the old replacement only allowed one
                        // space), then drop query strings and fragments
                        String group = mat.group().toLowerCase().trim()
                                .replaceAll("<a *href *= *\"", "")
                                .replaceAll("\"", "")
                                .replaceAll("\\?[\\D\\d]*+", "")
                                .replaceAll("[\r|\n|#]", "").trim();
                        if (group.isEmpty())
                            continue;
                        URI newURI = this.su.filter.normalize(uri, new URI(
                                group));
                        this.su.addURI(newURI);
                        graph.addEdge(uri.toString(), newURI.toString());
                    }
                    catch (URISyntaxException e)
                    {
                        Logger.logException(e);
                    }
                }
            }
            catch (Exception e)
            {
                Logger.logException(e);
            }
            finally
            {
                // openStream() may have failed before 'in' was assigned;
                // the old code dereferenced it unconditionally (NPE)
                if (in != null)
                {
                    try
                    {
                        in.close();
                    }
                    catch (IOException e)
                    {
                        Logger.logException(e);
                        Logger.logException(uri.toString());
                    }
                }
            }
        }

        // TODO not implemented: crawling of file:// URIs
        private void crawlLocal(URI uri)
        {

        }

        // TODO not implemented: crawling of ftp:// URIs
        private void crawlFTP(URI uri)
        {

        }

        @Override
        public void run()
        {
            // threadAlive is shared between worker threads; guard the
            // ++/-- with the crawler's monitor (the old unsynchronized
            // updates could race and lose counts)
            synchronized (this.su)
            {
                this.su.threadAlive++;
            }
            URI uri;
            while (true)
            {
                try
                {
                    uri = this.su.getNextURI();
                    // no more work: stop this worker thread
                    if (uri == null)
                    {
                        boolean lastAlive;
                        synchronized (this.su)
                        {
                            this.su.threadAlive--;
                            lastAlive = this.su.threadAlive == 0;
                        }
                        // the last thread alive closes the log files and
                        // hands over to the finish-processing thread
                        if (lastAlive)
                        {
                            closeLogFiles();
                            this.su.finishThread.start(this.su);
                        }
                        Logger.logInfo(toString() + " stop. count = "
                                + Crawler.this.count);
                        return;
                    }
                    switch (Scheme.fromURI(uri))
                    {
                        case HTTP:
                            crawlHttp(uri);
                            break;
                        case FILE:
                            crawlLocal(uri);
                            break;
                        case FTP:
                            crawlFTP(uri);
                            break;
                        default:
                            break;
                    }
                }
                catch (Exception e)
                {
                    Logger.logException(e);
                }
                try
                {
                    // be polite: pause between two fetches
                    Thread.sleep(Crawler.this.interval);
                }
                catch (InterruptedException e)
                {
                    Logger.logException(e);
                }
            }
        }
    }

    public static void main(String[] args) throws Exception
    {
        // Crawl pages from http://www.gov.cn/ using the GovHTMLFilter URI
        // filter; when the crawl finishes, run HtmlCrawlProcessThread for
        // post-processing. Each worker sleeps 10000 ms after fetching a
        // page, at most 1000 pages are saved, at most 10 worker threads
        // run, and the system cache is cleared first (fresh crawl).
        new Crawler(new URI("http://www.gov.cn/"), new GovHTMLFilter(),
                new HtmlCrawlProcessThread(), 10000, 1000, 10, true).start();
    }
}
