package com.itcast.catchfiveeight;

import com.itcast.util.DataClean;
import org.apache.commons.io.FileUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

import java.io.File;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;

/**
 * Created by Administrator on 2016/2/22 0022.
 * Crawls page attributes and generates the final output files.
 */
public class CatchWeb {

    /** Part of the public interface; kept for compatibility (not used within this class). */
    public static List<String> list = new ArrayList<String>();

    /**
     * Entry point: reads the link files under {@code Contents.linkPath} and
     * crawls every URL they contain, writing results to per-city CSV files.
     */
    public static void main(String[] args) throws Exception {
        wirteFiles(Contents.linkPath);
    }

    /*
     * Reads every link file under filePath, de-duplicates the URLs it contains,
     * crawls each page, and appends the extracted keywords to a per-city CSV
     * under Contents.tmpPath.
     * (The "wirteFiles" typo is kept so existing callers keep compiling.)
     */
    static void wirteFiles(String filePath) throws Exception {
        File root = new File(filePath);
        File[] files = root.listFiles();
        if (files == null) {
            // filePath is not a directory (or an I/O error occurred) — nothing to do.
            return;
        }
        for (File file : files) {
            // Each line is expected to be "city,url"; files are encoded in GB2312.
            List<String> lines = FileUtils.readLines(file, "GB2312");
            if (lines.isEmpty()) {
                continue; // skip empty link files instead of crashing on get(0)
            }
            String city = lines.get(0).split(",")[0]; // city name from the first line
            File fileFruit = new File(Contents.tmpPath, city + ".csv");
            CatchLinks.writeTitle(fileFruit);

            // Collect the URLs into a set first to eliminate duplicates.
            HashSet<String> urls = new HashSet<String>();
            for (String line : lines) {
                String[] parts = line.split(",");
                if (parts.length > 1) { // guard against malformed lines with no URL column
                    urls.add(parts[1]);
                }
            }

            int written = 0;
            for (String strUrl : urls) {
                Page entity = GetPage.get58Data(strUrl);
                // Extract the matching keywords from the cleaned page text.
                String keyWord = getKeyWords(strUrl, Contents.getkeyWordList());
                if (!entity.getPaiqian()) {
                    CatchLinks.writeProperty(fileFruit, strUrl, keyWord, city);
                    System.out.println(written + " " + city + " " + strUrl);
                    written++;
                }
            }
        }
    }

    /**
     * Fetches the page at {@code url} and returns every entry of
     * {@code keyWordList} found in its cleaned text, each followed by a comma
     * (e.g. {@code "a,b,"} — trailing comma preserved for caller compatibility).
     *
     * @param url         page URL to fetch
     * @param keyWordList keywords to search for in the page text
     * @return matched keywords joined with trailing commas; empty string if the
     *         fetch or parse fails
     */
    public static String getKeyWords(String url, List<String> keyWordList) {
        String result = "";
        try {
            // Mobile User-Agent so the site serves its lightweight mobile page.
            String userAgent = "Mozilla/5.0（Linux；U；Android2.2；en-us；NexusOneBuild/FRF91）AppleWebKit/533.1（KHTML，likeGecko）Version/4.0MobileSafari/533";
            Document doc = Jsoup.connect(url).userAgent(userAgent).get();
            String cleanedData = DataClean.clean(doc.text());
            StringBuilder matched = new StringBuilder();
            for (String keyword : keyWordList) {
                if (cleanedData.contains(keyword)) {
                    matched.append(keyword);
                    matched.append(",");
                }
            }
            result = matched.toString();
        } catch (Exception e) {
            // Best-effort crawl: log and return "" so one bad URL does not
            // abort processing of the remaining links.
            e.printStackTrace();
        }
        return result;
    }
}
