package net.freestu.sola.web.spider.ballerina.core;

import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.SocketTimeoutException;
import java.net.URL;
import java.net.URLConnection;

import net.freestu.sola.web.global.AllLinksHashtable;
import net.freestu.sola.web.global.AllPagesCounter;
import net.freestu.sola.web.global.GlobleVar;
import net.freestu.sola.web.global.LinkHash;
import net.freestu.sola.web.global.PageCounter;
import net.freestu.sola.web.util.CharTools;
import net.freestu.sola.web.util.MyLogger;
import net.freestu.sola.web.util.TimeOperator;




/**
 * Downloads a single URL to a file on disk. One {@code PageFetcher} instance
 * is created per URL; {@link #download()} fetches the page (text/html only),
 * prepends a small metadata header (URL, redirect target, date, initial
 * score) and writes it under {@code filePath} as {@code P<pageNum>}.
 */
public class PageFetcher {

	private String downUrl;     // URL this fetcher downloads
	private String redirect;    // URL after any HTTP redirect was followed
	private String filePath;    // directory the page file is written into
	private String name;        // crawl-task name; keys into the global counter/link tables
	private String pagecharset; // detected page encoding ("utf8", "gbk", ...)
	private int level;          // crawl depth; used only to compute the initial score
	private double iscore;      // initial score = INITSCORE / level, 2 decimal places

	/**
	 * @param downUrl  URL to download
	 * @param filePath target directory for the page file
	 * @param name     crawl-task name used to look up the shared tables
	 * @param level    crawl depth (must be non-zero; divides the base score)
	 */
	public PageFetcher(String downUrl, String filePath, String name, int level) {
		this.downUrl = downUrl;
		this.filePath = filePath;
		this.name = name;
		this.level = level;
		this.iscore = div(GlobleVar.INITSCORE, level, 2);
	}

	/**
	 * Fetches {@code downUrl} and writes the page, preceded by a metadata
	 * header, to a new file under {@code filePath}. Silently returns on any
	 * failure (invalid URL, non-HTML content, bad response code, empty body).
	 */
	public void download() {

		AllPagesCounter apc = AllPagesCounter.getInstance();
		PageCounter pc = apc.get(name);

		URL myurl;
		try {
			myurl = new URL(downUrl);
			MyLogger.log("正在下载:" + downUrl);
		} catch (MalformedURLException e) {
			// BUG FIX: the original fell through with a null URL and NPE'd
			// inside getValidConnection().
			MyLogger.log(e);
			return;
		}

		// Only connections whose Content-Type is text/html and whose
		// response code is OK or a redirect come back non-null.
		HttpURLConnection connect = getValidConnection(myurl);
		if (connect == null) {
			return;
		}

		try {
			connect.getResponseCode();
		} catch (IOException e) {
			MyLogger.log(e);
			return;
		}

		// getURL() reflects the final URL after any redirect was followed.
		// NOTE(review): a redirect target may itself redirect elsewhere.
		redirect = connect.getURL().toString();

		// Record the (possibly redirected) URL in the shared hash so the
		// crawler does not fetch it again.
		AllLinksHashtable alltable = AllLinksHashtable.getInstance();
		LinkHash table = alltable.get(name);
		table.put(redirect);

		pagecharset = judgePageCharSet(connect);

		String pagecontent = getPageContent(downUrl, pagecharset);
		if (pagecontent == null || pagecontent.trim().equals("")) {
			MyLogger.log("Fail to download :" + downUrl);
			return;
		}

		// File named P<n> inside filePath, where n is the per-task counter.
		File file = new File(filePath + File.separator + "P" + pc.getPageNum());

		BufferedWriter b = connectToFile(file, true);
		if (b == null) {
			// BUG FIX: the original dereferenced a null writer and relied on
			// a catch(NullPointerException) to recover.
			MyLogger.log("Fail to download :" + downUrl);
			return;
		}
		try {
			b.write("Url=" + downUrl + ";");
			b.newLine();
			b.write("Redirect=" + cut(redirect) + ";");
			b.newLine();
			b.write("Date=" + TimeOperator.getCurrTime() + ";");
			b.newLine();
			b.write("InitScore=" + iscore);
			b.newLine();
			b.write("---------------------");
			b.newLine();
			b.write(pagecontent);
			MyLogger.log("download \"" + downUrl + "\"");
			pc.addOne(); // bump the per-task page counter
		} catch (IOException e) {
			MyLogger.log(e);
		} finally {
			// BUG FIX: the original leaked the writer on every error path.
			try {
				b.close();
			} catch (IOException ignored) {
				// nothing more we can do while closing
			}
		}
	}

	/**
	 * Reads the body of {@code url} line by line using {@code charset}.
	 *
	 * @param url     page to read
	 * @param charset charset name used to decode the stream
	 * @return the page text with '\n' line separators, or {@code null} on
	 *         any connection, encoding or read error
	 */
	public String getPageContent(String url, String charset) {

		HttpURLConnection conn = null;
		BufferedReader in = null;
		StringBuffer sb = new StringBuffer();
		try {
			conn = (HttpURLConnection) new URL(url).openConnection();
			// BUG FIX: the original caught java.net.BindException here,
			// left `in` null and then NPE'd on in.readLine(). BindException
			// is an IOException and is now handled by the catches below.
			in = new BufferedReader(new InputStreamReader(conn.getInputStream(), charset));
			String inputLine;
			while ((inputLine = in.readLine()) != null) {
				sb.append(inputLine);
				sb.append("\n");
			}
			return sb.toString();
		} catch (SocketTimeoutException e) {
			System.out.println("发生了SocketTimeoutException：" + url);
			return null;
		} catch (IOException e) {
			// Covers MalformedURLException and UnsupportedEncodingException
			// as well; the original handled all three identically.
			MyLogger.log(e);
			return null;
		} finally {
			// BUG FIX: the original called conn.disconnect() inside catch
			// blocks where conn could still be null, and leaked the reader.
			if (in != null) {
				try {
					in.close();
				} catch (IOException ignored) {
					// best effort
				}
			}
			if (conn != null) {
				conn.disconnect();
			}
		}
	}

	/**
	 * Opens a buffered writer on {@code file}.
	 *
	 * @param file   target file
	 * @param append {@code true} to append, {@code false} to truncate
	 * @return the writer, or {@code null} if the file cannot be opened
	 */
	private BufferedWriter connectToFile(File file, boolean append) {
		try {
			return new BufferedWriter(new FileWriter(file, append));
		} catch (IOException e) {
			// BUG FIX: the original swallowed this silently.
			MyLogger.log(e);
			return null;
		}
	}

	/**
	 * Opens a connection to {@code url} and validates it: must be HTTP, must
	 * serve text/html, and must answer 200/301/302. Anything else yields
	 * {@code null} and the connection is released.
	 *
	 * @param url URL to probe
	 * @return a connected {@link HttpURLConnection}, or {@code null}
	 */
	private HttpURLConnection getValidConnection(URL url) {

		HttpURLConnection httpurlconnection = null;

		try {
			URLConnection urlconnection = url.openConnection();
			urlconnection.setConnectTimeout(10000); // connect timeout: 10s
			urlconnection.setReadTimeout(10000);    // read timeout: 10s
			urlconnection.connect();

			if (!(urlconnection instanceof HttpURLConnection)) {
				return null; // e.g. ftp:, file: — not crawlable here
			}
			httpurlconnection = (HttpURLConnection) urlconnection;

			// Only HTML pages are downloaded; any other content type is
			// rejected up front.
			String contenttype = httpurlconnection.getContentType();
			if (contenttype == null
					|| contenttype.toLowerCase().indexOf("text/html") == -1) {
				return null;
			}

			int responsecode = httpurlconnection.getResponseCode();
			switch (responsecode) {
			case HttpURLConnection.HTTP_OK:
			case HttpURLConnection.HTTP_MOVED_PERM: // permanent redirect
			case HttpURLConnection.HTTP_MOVED_TEMP: // temporary redirect
				break;
			default:
				httpurlconnection.disconnect();
				return null;
			}
		} catch (SocketTimeoutException e) {
			MyLogger.log("链接超时了:" + url);
			if (httpurlconnection != null) {
				httpurlconnection.disconnect();
			}
			return null;
		} catch (java.net.ConnectException e) {
			// Unreachable host: the link is not downloadable.
			if (httpurlconnection != null) {
				httpurlconnection.disconnect();
			}
			return null;
		} catch (MalformedURLException e) {
			if (httpurlconnection != null) {
				httpurlconnection.disconnect();
			}
			MyLogger.log("抛出MalformedURLException异常:" + url);
			return null;
		} catch (IOException ioexception) {
			MyLogger.log("unable to connect: " + ioexception);
			if (httpurlconnection != null) {
				httpurlconnection.disconnect();
			}
			return null;
		}

		return httpurlconnection;
	}

	/**
	 * Determines the page encoding. Algorithm:
	 * 1) sniff the first bytes of the body — valid UTF-8 wins outright;
	 * 2) otherwise trust the page's own charset declaration unless it is
	 *    absent or claims utf-8 (already ruled out by the sniff);
	 * 3) otherwise fall back to the Content-Type header; if that too is
	 *    missing or says utf-8, assume "gbk".
	 * This reliably classifies gbk and utf8 sites; others are best-effort.
	 *
	 * NOTE(review): isUtf8() consumes up to 2KB of {@code conn}'s input
	 * stream before getEncodingFromCharSet() reads from the same stream —
	 * preserved from the original; confirm the meta tag is still reachable.
	 *
	 * @param conn open connection to the page
	 * @return charset name, e.g. "utf8" or "gbk"
	 */
	private String judgePageCharSet(HttpURLConnection conn) {

		if (isUtf8(conn)) {
			return "utf8";
		}

		String encodefheader = getEncodingFromHeader(conn);
		String encodefcharset = getEncodingFromCharSet(conn);

		if (encodefcharset != null
				&& !encodefcharset.equalsIgnoreCase("utf-8")
				&& !encodefcharset.equalsIgnoreCase("utf8")) {
			return encodefcharset;
		}
		if (encodefheader == null
				|| encodefheader.equalsIgnoreCase("utf-8")
				|| encodefheader.equalsIgnoreCase("utf8")) {
			return "gbk";
		}
		return encodefheader;
	}

	/**
	 * Divides {@code v1 / v2} rounded half-up to {@code scale} decimals.
	 *
	 * @throws IllegalArgumentException if {@code scale} is negative
	 */
	private double div(double v1, double v2, int scale) {
		if (scale < 0) {
			throw new IllegalArgumentException(
					"The scale must be a positive integer or zero");
		}
		BigDecimal b1 = new BigDecimal(Double.toString(v1));
		BigDecimal b2 = new BigDecimal(Double.toString(v2));
		// RoundingMode.HALF_UP replaces the deprecated BigDecimal.ROUND_HALF_UP.
		return b1.divide(b2, scale, RoundingMode.HALF_UP).doubleValue();
	}

	/**
	 * Scans the page's {@code <head>} for a {@code charset} declaration and
	 * matches it against the charsets listed in {@link GlobleVar#PAGE_CHARSET}.
	 *
	 * @param conn open connection to the page
	 * @return the matched charset name, or {@code null} if none was found
	 */
	private String getEncodingFromCharSet(HttpURLConnection conn) {

		String[] pagecharset = GlobleVar.PAGE_CHARSET.split(" ");
		String head = null;
		BufferedReader in = null;
		try {
			conn.connect();
			StringBuffer sb = new StringBuffer();
			// ASCII is enough to locate the charset= token in the markup.
			in = new BufferedReader(new InputStreamReader(
					conn.getInputStream(), "ASCII"));
			String inputLine;
			while ((inputLine = in.readLine()) != null) {
				sb.append(inputLine);
				if (sb.indexOf("</head>") != -1) {
					break; // charset declarations live inside <head>
				}
			}
			head = sb.toString();
		} catch (IOException ioexception) {
			// BUG FIX: the original swallowed this silently.
			MyLogger.log(ioexception);
		} finally {
			// BUG FIX: the original never closed the reader.
			if (in != null) {
				try {
					in.close();
				} catch (IOException ignored) {
					// best effort
				}
			}
		}

		if (head == null) { // connection could not be read at all
			conn.disconnect();
			return null;
		}
		int indexOfcharset = head.toLowerCase().indexOf("charset");
		if (indexOfcharset == -1) {
			return null; // no declaration; caller falls back to defaults
		}
		int indexOfend = head.indexOf(">", indexOfcharset);
		if (indexOfend == -1) {
			System.out.println("检查<head></head>中是否正确");
			return null;
		}

		String chstr = head.substring(indexOfcharset, indexOfend);
		for (String str : pagecharset) {
			if (chstr.toLowerCase().indexOf(str) != -1) {
				return str;
			}
		}
		return null;
	}

	/**
	 * Extracts the charset from the Content-Type response header, e.g.
	 * {@code text/html;charset=GB2312}, matched against
	 * {@link GlobleVar#PAGE_CHARSET}.
	 *
	 * @param conn open connection to the page
	 * @return the matched charset name, or {@code "utf8"} when the header is
	 *         absent or names no known charset
	 */
	public String getEncodingFromHeader(HttpURLConnection conn) {

		String str = conn.getContentType();
		if (str == null || str.toLowerCase().indexOf("charset") == -1) {
			return "utf8"; // no declaration in the header
		}

		String[] pagecharset = GlobleVar.PAGE_CHARSET.split(" ");
		for (String cond : pagecharset) {
			if (str.toLowerCase().indexOf(cond) != -1) {
				return cond;
			}
		}
		return "utf8"; // header named a charset we do not recognise
	}

	/**
	 * Sniffs up to 2KB of the response body and reports whether those bytes
	 * form valid UTF-8.
	 *
	 * NOTE(review): this advances {@code conn}'s input stream; later readers
	 * of the same connection see the remainder only.
	 *
	 * @param conn open connection to the page
	 * @return {@code true} if the sampled bytes are valid UTF-8
	 */
	private boolean isUtf8(HttpURLConnection conn) {

		byte[] buf = new byte[2048];
		try {
			BufferedInputStream in = new BufferedInputStream(conn.getInputStream());
			int read = in.read(buf);
			if (read <= 0) {
				return false; // empty body: nothing to classify
			}
			// BUG FIX: validate only the bytes actually read; the original
			// always passed buf.length, including unfilled zero bytes.
			return CharTools.isValidUtf8(buf, read);
		} catch (IOException e) {
			// BUG FIX: the original fell through and validated an all-zero
			// buffer here, misreporting unreadable pages as UTF-8.
			MyLogger.log(e);
			return false;
		}
	}

	/**
	 * Strips a single trailing "/" from a URL, if present.
	 *
	 * @param str URL, may be {@code null}
	 * @return the URL without its trailing slash, or the input unchanged
	 */
	private String cut(String str) {
		if (str != null && str.endsWith("/")) {
			return str.substring(0, str.length() - 1);
		}
		return str;
	}
}