package cn.springboot.linked;
/**
 * Simple breadth-first web crawler: downloads pages, writes extracted
 * title/image entries into HDFS under {@code /spider/}, and follows
 * discovered hyperlinks.
 *
 * @author liufl E-mail:Huberier@allpyra.com
 * @version created 2017-09-27 16:32:19
 */
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.htmlparser.util.ParserException;


public class Spider {

    /**
     * Matches absolute {@code http://} URLs. Compiled once instead of once
     * per discovered link (the original recompiled it inside the crawl loop).
     */
    private static final Pattern HTTP_URL_PATTERN = Pattern.compile(
            "http://([\\w-]+\\.)+[\\w-]+(/[\\w- ./?%&=]*)?", Pattern.CASE_INSENSITIVE);

    /** URI of the HDFS namenode that crawled entries are written to. */
    private static final String HDFS_URI = "hdfs://192.168.1.123:9000";

    private LinkCollection lc = new LinkCollection();
    private DownLoadTool dlt = new DownLoadTool();
    private HtmlNodeParser hnp = new HtmlNodeParser();

    /**
     * Derives an HDFS-safe file name from a URL: strips the leading
     * {@code "http://"} (first 7 characters), then replaces '/' with '-'
     * and '.' with ',' since those characters are unsuitable in file names.
     *
     * @param url absolute URL; assumed to start with "http://" — TODO confirm
     *            callers never pass https or scheme-less URLs
     * @return the sanitized file name
     */
    public String getFileName(String url) {
        // substring(7) drops the "http://" scheme prefix.
        return url.substring(7).replace("/", "-").replace(".", ",");
    }

    /**
     * Crawls breadth-first starting from {@code url}: downloads each pending
     * page, writes every extracted "{name}\t{content}" entry to
     * {@code /spider/} in HDFS, then enqueues hyperlinks found on the page.
     *
     * @param url       seed URL added to the unvisited queue
     * @param directory unused; retained for interface compatibility —
     *                  NOTE(review): confirm no caller relies on it
     * @throws FileNotFoundException declared for interface compatibility
     */
    public void crawling(String url, String directory) throws FileNotFoundException {
        // 1. Seed the queue of URLs still to visit.
        lc.addUnVisitedUrl(url);
        try {
            Configuration conf = new Configuration();
            URI uri = new URI(HDFS_URI);
            // try-with-resources releases the FileSystem handle (the original
            // leaked it).
            try (FileSystem hdfs = FileSystem.get(uri, conf)) {
                // 2. Loop until the unvisited queue drains.
                while (!lc.isUnVisitedUrisEmpty()) {
                    // 3. Dequeue the next URL to visit.
                    String visitUrl = lc.deQueueUnVisitedUrl();
                    try {
                        // 4. Download the page and extract tab-separated
                        //    "{name}\t{content}" entries.
                        String html = dlt.downLoadUrl(visitUrl);
                        Set<String> entries = TitleDown.getImageLink(html);
                        for (String entry : entries) {
                            int tab = entry.indexOf('\t');
                            String content = entry.substring(tab + 1);
                            String filename = getFileName(entry.substring(0, tab));
                            System.out.println(filename);
                            Path p = new Path("/spider/" + filename);
                            // try-with-resources guarantees the stream is
                            // closed — required for the bytes to be flushed
                            // to HDFS at all.
                            try (FSDataOutputStream dos = hdfs.create(p)) {
                                System.out.print(content);
                                // Pin UTF-8 instead of the platform default
                                // charset so output is machine-independent.
                                dos.write(content.getBytes(StandardCharsets.UTF_8));
                            } catch (IOException e) {
                                // Best-effort per entry: log and continue with
                                // the remaining entries.
                                e.printStackTrace();
                            }
                        }
                        // 5. Parse hyperlinks out of the page and enqueue the
                        //    ones that look like absolute http:// URLs.
                        Set<String> newUrls = hnp.parseNode(visitUrl, null);
                        for (String candidate : newUrls) {
                            // "if", not "while": the original's while-loop
                            // enqueued the same string once per regex match
                            // inside it; one membership test is enough.
                            if (HTTP_URL_PATTERN.matcher(candidate).find()) {
                                lc.addUnVisitedUrl(candidate);
                            }
                        }
                    } catch (ParserException e) {
                        // A page that fails to parse should not stop the crawl.
                        e.printStackTrace();
                    }
                }
            }
        } catch (IllegalArgumentException | URISyntaxException | IOException e) {
            // Fatal setup/transport failure: log and return.
            e.printStackTrace();
        }
    }
}

