package com.hbgc.service.spider;


import org.apache.commons.io.IOUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.seimicrawler.xpath.JXDocument;
import org.seimicrawler.xpath.JXNode;
import org.springframework.stereotype.Component;

import java.io.*;
import java.net.MalformedURLException;
import java.net.URL;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.List;

/**
 * Spider for the HongNiu video resource site, built on the Jsoup crawler.
 *
 * <p>Supports collecting data under three scopes: today (TODAY), this week
 * (WEEK) and everything (ALL).
 *
 * <p>Strategy: query the site API for the total record count to derive the
 * number of listing pages, crawl every page and persist each entry as one
 * comma-separated row in a local file, then answer {@link #collectToday},
 * {@link #collectWeek} and {@link #collectAll} by filtering that file.
 */
@Component
public class HongNiuYunSpider implements SuperSpider {

    /** API endpoint that reports the total record count. */
    static final String API = "http://www.hongniuzy.com/inc/api.php";
    /** Listing-page URL template; "#" is replaced with the page number. */
    static final String PageURL = "https://hongniuziyuan.com/?m=vod-index-pg-#.html";

    /** UTF-8 byte-order mark as a single char, stripped from the head of file rows. */
    final static String UTF8_BOM = "\uFEFF";

    int recordcount = 0; // total records reported by the API
    int pageSize = 50;   // entries per listing page
    int pageCount = 0;   // number of listing pages, derived in init()

    /**
     * Queries the site API and computes {@link #pageCount} from the total
     * record count. Network failures are logged and leave the counters at 0.
     */
    @Override
    public void init() {
        try {
            Document document = Jsoup.parse(new URL(API), 200 * 1000);
            JXDocument jxDocument = JXDocument.create(document);
            List<JXNode> jxNodes = jxDocument.selN("//list/@recordcount");
            recordcount = Integer.parseInt(jxNodes.get(0).toString());
            // Ceiling division: a partially filled last page still counts as a page.
            // (The original `quotient == 0 ? quotient : quotient + 1` produced 0 pages
            // for fewer than pageSize records and one page too many for exact multiples.)
            pageCount = (recordcount + pageSize - 1) / pageSize;
            System.out.println("总记录数:" + recordcount);
            System.out.println("总页数" + pageCount);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Crawls every listing page and writes one movie per line to {@code dirPath}.
     * Row format: {@code movieName,movieType,updateTime,movieUrl}.
     *
     * @param dirPath path of the local data file to (over)write
     */
    @Override
    public void getAllData(String dirPath) {
        // try-with-resources closes the writer even when a page fails to parse
        // (the original leaked the writer on any exception).
        try (BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(new File(dirPath)))) {
            for (int i = 1; i <= pageCount; i++) {
                String pageUrl = PageURL.replace("#", i + "");
                Document parse = Jsoup.parse(new URL(pageUrl), 200 * 1000);
                Elements es = parse.select("body > strong > div.xing_vb > ul >li");
                for (Element e : es) {
                    // Only <li> elements carrying class="tt" are real data rows;
                    // the rest are header/pager decoration.
                    if (!e.toString().contains("class=\"tt\"")) {
                        continue;
                    }
                    String movieName = normalizeName(e.selectFirst(".xing_vb4 a").text());
                    String movieUrl = e.selectFirst(".xing_vb4 a").attr("href");
                    String movieType = e.selectFirst(".xing_vb5").text();
                    // The update-time cell is .xing_vb6 on some pages and .xing_vb7 on others.
                    Element updateElement = e.selectFirst(".xing_vb6");
                    if (null == updateElement) {
                        updateElement = e.selectFirst(".xing_vb7");
                    }
                    String updateTime = updateElement.text();
                    String row = movieName + "," + movieType + "," + updateTime + "," + movieUrl;
                    System.out.println(row);
                    bufferedWriter.write(row);
                    bufferedWriter.newLine();
                }
            }
            bufferedWriter.flush();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Re-joins the space-separated parts of a raw title with '-', keeping only
     * the trailing token (usually the episode/status marker) after a single
     * space, so the stored row has at most one space in the name field.
     */
    private static String normalizeName(String rawName) {
        String[] parts = rawName.split(" ");
        if (parts.length <= 1) {
            // Single token: nothing to join. (The original duplicated the
            // name in this case, producing "Foo Foo".)
            return rawName;
        }
        StringBuilder joined = new StringBuilder(parts[0]);
        for (int j = 1; j < parts.length - 1; j++) {
            joined.append('-').append(parts[j]);
        }
        return joined + " " + parts[parts.length - 1];
    }

    /**
     * Extracts the rows whose update time falls on today's date.
     *
     * @param dirPath the local data file produced by {@link #getAllData}
     * @return matching rows; empty when none match or the file is unreadable
     */
    @Override
    public List collectToday(String dirPath) {
        List<String> todays = new ArrayList<>();
        DateFormat format = new SimpleDateFormat("yyyy-MM-dd");
        String today = format.format(new Date());
        for (String row : loadRows(dirPath)) {
            String[] fields = row.split(",");
            if (fields.length < 3) {
                continue; // skip malformed rows instead of throwing AIOOBE
            }
            if (fields[2].contains(today)) {
                System.out.println(fields[2]);
                todays.add(row);
            }
        }
        return todays;
    }

    /**
     * Extracts the rows updated during the current Monday-based week
     * (from this week's Monday up to and including today).
     *
     * @param dirPath the local data file produced by {@link #getAllData}
     * @return matching rows; empty when none match or the file is unreadable
     */
    @Override
    public List collectWeek(String dirPath) {
        List<String> weeks = new ArrayList<>();
        DateFormat format = new SimpleDateFormat("yyyy-MM-dd");
        Calendar calendar = Calendar.getInstance();
        // Days elapsed in a Monday-based week: Mon=1 ... Sat=6. Sunday (0)
        // closes the week, so it spans all 7 days. (The original collected
        // nothing at all on Sundays.)
        int daysElapsed = calendar.get(Calendar.DAY_OF_WEEK) - 1;
        if (daysElapsed == 0) {
            daysElapsed = 7;
        }
        List<String> weekDates = new ArrayList<>();
        for (int i = 0; i < daysElapsed; i++) {
            // Format through the calendar so days are zero-padded and
            // month/year borders are handled. (The original built
            // "yyyy-MM-<d>" strings by hand: days below 10 were never
            // zero-padded, so they never matched the file's "yyyy-MM-dd"
            // dates, and `day - i` went negative across month starts.)
            weekDates.add(format.format(calendar.getTime()));
            calendar.add(Calendar.DATE, -1);
        }
        for (String row : loadRows(dirPath)) {
            String[] fields = row.split(",");
            if (fields.length < 3) {
                continue; // skip malformed rows
            }
            String updateDate = fields[2].split(" ")[0];
            if (weekDates.contains(updateDate)) {
                weeks.add(row);
            }
        }
        return weeks;
    }

    /**
     * Returns every row of the local data file.
     *
     * @param dirPath the local data file produced by {@link #getAllData}
     * @return all rows; empty when the file is unreadable
     */
    @Override
    public List collectAll(String dirPath) {
        return loadRows(dirPath);
    }

    /**
     * Reads the data file and returns its rows with any leading UTF-8 BOM
     * stripped. I/O failures are logged and yield an empty list.
     */
    private List<String> loadRows(String dirPath) {
        List<String> rows = new ArrayList<>();
        // try-with-resources: the original FileInputStreams were never closed.
        try (FileInputStream in = new FileInputStream(new File(dirPath))) {
            for (String row : IOUtils.readLines(in)) {
                if (row.startsWith(UTF8_BOM)) {
                    row = row.substring(1);
                }
                rows.add(row);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return rows;
    }

}
