/**
 * @(#)HtmlCrawler.java, 2010-5-12. 
 * 
 */
package com.rsstuan.crawl.impl;

import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;

import java.net.URL;

import com.rsstuan.utils.CharSetUtils;

/**
 *
 * @author guosq
 *
 */
public class HtmlCrawler {

    /** Read-buffer size for binary downloads (8 KiB is the conventional I/O chunk). */
    private static final int BUFFER_SIZE = 8 * 1024;

    /**
     * Fetches the document at {@code urlStr} via HTTP GET and returns its body as a string.
     * <p>
     * The charset is taken from the {@code charset=} parameter of the response's
     * {@code Content-Type} header; when the header carries no charset, it falls back to
     * {@link CharSetUtils#getCharSetByUrl(String)} (site-specific handling, e.g. groupon.cn).
     * <p>
     * NOTE: line separators are dropped — the returned string is the concatenation of all
     * lines, matching the historical behavior of this crawler.
     *
     * @param urlStr the URL to fetch
     * @return the response body with line separators removed
     * @throws IOException if the connection fails or the body cannot be read
     */
    public String crawl(String urlStr) throws IOException {
        URL url = new URL(urlStr);
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        try {
            // Plain GET: no request body, so doOutput must stay false.
            connection.setRequestMethod("GET");

            // Bug fix: getContentEncoding() returns the Content-Encoding header (e.g. "gzip"),
            // not the page charset. The charset is a parameter of Content-Type.
            String charSet = extractCharset(connection.getContentType());
            // Special-case fallback (e.g. groupon.cn) when the header gives no charset.
            if (charSet == null) {
                charSet = CharSetUtils.getCharSetByUrl(urlStr);
            }

            StringBuilder body = new StringBuilder();
            // try-with-resources closes the reader (and underlying stream) even on error.
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(connection.getInputStream(), charSet))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    body.append(line);
                }
            }
            return body.toString();
        } finally {
            connection.disconnect();
        }
    }

    /**
     * Extracts the charset name from a {@code Content-Type} header value such as
     * {@code "text/html; charset=UTF-8"}.
     *
     * @param contentType the raw header value, may be {@code null}
     * @return the charset name, or {@code null} if the header is absent or carries none
     */
    private static String extractCharset(String contentType) {
        if (contentType == null) {
            return null;
        }
        for (String part : contentType.split(";")) {
            String trimmed = part.trim();
            // Case-insensitive match on "charset=" without allocating a lowercased copy.
            if (trimmed.regionMatches(true, 0, "charset=", 0, 8)) {
                String cs = trimmed.substring(8).trim();
                if (!cs.isEmpty()) {
                    return cs;
                }
            }
        }
        return null;
    }

    /**
     * Downloads the resource at {@code urlStr} via HTTP GET and returns its raw bytes
     * (intended for images, but works for any binary content).
     *
     * @param urlStr the URL to download
     * @return the complete response body as a byte array
     * @throws IOException if the connection fails or the body cannot be read
     */
    public byte[] crawlImage(String urlStr) throws IOException {
        URL url = new URL(urlStr);
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        try {
            // Plain GET: no request body, so doOutput must stay false.
            connection.setRequestMethod("GET");

            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            // Close the stream deterministically; the original leaked it.
            try (InputStream in = connection.getInputStream()) {
                byte[] tmp = new byte[BUFFER_SIZE];
                int numBytesRead;
                while ((numBytesRead = in.read(tmp)) >= 0) {
                    buffer.write(tmp, 0, numBytesRead);
                }
            }
            return buffer.toByteArray();
        } finally {
            connection.disconnect();
        }
    }
}
