package com.book.common.utils;

import com.book.book.NovelSiteEnum;
import com.book.book.configuration.Configuration;
import com.book.chapter.common.utils.ChapterDetailSpiderFactory;
import com.book.chapter.common.utils.ChapterSpiderFactory;
import com.book.chapter.entity.Chapter;
import com.book.chapter.entity.ChapterDetail;
import com.book.chapter.service.ChapterDetailSpider;
import com.book.chapter.service.ChapterSpider;

import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.*;
import java.util.concurrent.*;

/***
 * @Author Lin
 * @Date Create By 12:58 2017/10/5
 * @Description
 * Multi-threaded novel downloader:
 * 1. Fetch the complete chapter list of the novel from the site.
 * 2. Partition the chapters across worker threads; each worker parses its
 *    slice and writes it to its own text file.
 * 3. Merge the partial files into one big file, then delete the fragments.
 */
public class NovelDownloadImpl implements NovelDownload {
    /**
     * Downloads the whole novel whose index page is {@code url}.
     *
     * @param url    index page of the novel
     * @param config settings: chapters per thread, save path, retry count
     * @return path of the merged output file ({@code <savePath>/merge.txt})
     */
    @Override
    public String download(String url, Configuration config) {

        ChapterSpider chapterSpider = ChapterSpiderFactory.getChapterSpider(url);
        List<Chapter> chapters = chapterSpider.getChapter(url);
        int size = config.getSize();
        // Thread count = ceil(totalChapters / chaptersPerThread); at least 1 so
        // newFixedThreadPool() does not throw on an empty chapter list.
        int maxThreadSize = Math.max(1, (int) Math.ceil(chapters.size() * 1.0 / size));
        // LinkedHashMap preserves chapter order so tasks are submitted (and
        // logged) deterministically instead of in HashMap hash order.
        Map<String, List<Chapter>> downloadTaskAlloc = new LinkedHashMap<>();
        // Allocate each worker its slice of the chapter list.
        for (int i = 0; i < maxThreadSize && i * size < chapters.size(); i++) {
            //i = 0 0-99 0-99.txt
            //i = 1 100-199 100-199.txt
            //i = 2 200-299 200-299.txt
            int fromIndex = i * size;
            // Last worker absorbs the remainder up to the final chapter.
            int toIndex = i == maxThreadSize - 1 ? chapters.size() : fromIndex + size;

            downloadTaskAlloc.put(fromIndex + "-" + toIndex, chapters.subList(fromIndex, toIndex));
        }
        // Thread pool with one thread per slice.
        ExecutorService service = Executors.newFixedThreadPool(maxThreadSize);
        List<Future<String>> tasks = new ArrayList<>();
        /**
         * Save directory: <configuredPath>/<siteUrl>
         */
        String savePath = config.getPath() + "/" + NovelSiteEnum.getEnumByUrl(url).getUrl();
        new File(savePath).mkdirs();

        for (Map.Entry<String, List<Chapter>> entry : downloadTaskAlloc.entrySet()) {
            tasks.add(service.submit(new DownloadCallable(
                    savePath + "/" + entry.getKey() + ".txt", entry.getValue(), config.getTryTimes())));
        }
        service.shutdown();
        for (Future<String> future : tasks) {
            try {
                System.out.println(future.get() + ",下载完成");
            } catch (InterruptedException e) {
                // Restore the interrupt flag instead of swallowing it.
                Thread.currentThread().interrupt();
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        }
        NovelSpiderUtil.multiFileMerge(savePath, null, true);
        return savePath + "/merge.txt";
    }
}

/**
 * Worker task that downloads one slice of chapters and writes them, in order,
 * to a single UTF-8 text file. Returns the path of the file it wrote.
 */
class DownloadCallable implements Callable<String> {
    private final List<Chapter> chapters;
    private final String path;
    private final int tryTimes;

    public DownloadCallable(String path, List<Chapter> chapters, int tryTimes) {
        this.chapters = chapters;
        this.path = path;
        this.tryTimes = tryTimes;
    }

    /**
     * Downloads every chapter of this slice, retrying each chapter up to
     * {@code tryTimes} times, appending title + content to the output file.
     *
     * @return the path of the file that was written
     */
    @Override
    public String call() throws Exception {
        try (
                PrintWriter out = new PrintWriter(new File(path), "UTF-8");
        ) {
            for (Chapter chapter : chapters) {
                ChapterDetailSpider spider = ChapterDetailSpiderFactory.getChapterDetailSpider(chapter.getChapterUrl());
                // Retry loop: attempt the download up to tryTimes times.
                for (int i = 0; i < tryTimes; i++) {
                    try {
                        ChapterDetail detail = spider.getChapterDetail(chapter.getChapterUrl());
                        out.println(detail.getTitle());
                        out.println(detail.getContent());
                        break;
                    } catch (RuntimeException e) {
                        // BUGFIX: the old message read detail.getTitle(), but
                        // detail is still null when the fetch failed -> NPE.
                        // BUGFIX: "…"+i+1 concatenated "0"+"1" instead of
                        // adding — parenthesize the arithmetic.
                        System.out.println(chapter.getChapterUrl() + ":第[" + (i + 1) + "/" + tryTimes + "]次下载失败了！");
                    }
                }
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        return path;
    }
}
