package edu.zut.cs.network.dao.service.impl;

import java.sql.SQLException;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import edu.zut.cs.network.dao.BookDao;
import edu.zut.cs.network.dao.impl.BookDaoImpl;
import edu.zut.cs.network.dao.service.BookDaoManager;
import edu.zut.cs.network.dao.service.runnable.GetAllRunnable;
import edu.zut.cs.network.dao.service.runnable.InnerReader;
import edu.zut.cs.network.dao.service.runnable.InnerWriter;
import edu.zut.cs.network.dao.service.runnable.InsertRunnable;
import edu.zut.cs.network.entity.Book;

public class BookDaoManagerImpl extends BookDaoImpl implements BookDaoManager{

	/** Capacity of the hand-off queues (and chunk hint) used by the multi-threaded CSV jobs. */
	private static final int DEFAULT_QUEUE_SIZE = 10;

	/** Shared worker pool for the multi-threaded import/export jobs. */
	static ThreadPoolExecutor pools = getThreadPool();

	/**
	 * Builds the shared pool.
	 *
	 * ThreadPoolExecutor(corePoolSize,    max number of core threads
	 *                    maximumPoolSize, max number of threads overall
	 *                    keepAliveTime,   idle timeout for non-core threads
	 *                    unit,            unit of keepAliveTime
	 *                    workQueue,       bounded task queue
	 *                    handler)         saturation policy
	 *
	 * CallerRunsPolicy makes an overflowing submit run in the caller's thread,
	 * which throttles the producer instead of throwing.
	 */
	private static ThreadPoolExecutor getThreadPool() {
		ThreadPoolExecutor executor = new ThreadPoolExecutor(10, 10, 5, TimeUnit.SECONDS,
				new LinkedBlockingDeque<>(10), new ThreadPoolExecutor.CallerRunsPolicy());
		// BUGFIX: the mulit* methods used to call pools.shutdown() on this
		// SHARED static pool, so every call after the first submitted tasks to
		// a dead executor. The shutdown calls are gone; instead we let idle
		// core threads time out so the pool does not pin the JVM alive.
		executor.allowCoreThreadTimeOut(true);
		return executor;
	}

	/** @throws SQLException if the underlying DAO cannot open its connection */
	public BookDaoManagerImpl() throws SQLException {
		super();
	}

	// Delegate that performs the actual JDBC work.
	// NOTE(review): this class also *extends* BookDaoImpl, so this second
	// instance (and its connection) is redundant — confirm before removing.
	BookDao book = new BookDaoImpl();

	/** Finds all books whose name matches {@code name}. Delegates to the DAO. */
	@Override
	public List<Book> findByName(String name) {
		return book.findByName(name);
	}

	/** Inserts {@code bookList} into {@code tableName}; returns the affected row count. */
	@Override
	public int insert(List<Book> bookList, String tableName) {
		return book.insert(bookList, tableName);
	}

	/** Returns every row of {@code tableName} as Book entities. */
	@Override
	public List<Book> getALl(String tableName) {
		return book.getALl(tableName);
	}

	/** Deletes books by name; returns the affected row count. */
	@Override
	public int delete(String name) {
		return book.delete(name);
	}

	/** Updates the given books; returns the affected row count. */
	@Override
	public int upDate(List<Book> bookList) {
		return book.upDate(bookList);
	}

	/** Looks a single book up by its primary key. */
	@Override
	public Book findById(Long id) {
		return book.findById(id);
	}

	/** Single-threaded CSV export of {@code tableName} to {@code filepath}. */
	@Override
	public void exportToCSV(String filepath, String tableName) {
		book.exportToCSV(filepath, tableName);
	}

	/** Creates the table named {@code tableName}. */
	@Override
	public void createTable(String tableName) {
		book.createTable(tableName);
	}

	/** Single-threaded CSV import from {@code filepath} into {@code tableName}. */
	@Override
	public void importFromCSV(String filepath, String tableName) {
		book.importFromCSV(filepath, tableName);
	}

	/** Single-threaded JSON export of {@code tableName} to {@code filepath}. */
	@Override
	public void exportToJson(String filepath, String tableName) {
		book.exportToJson(filepath, tableName);
	}

	/** Single-threaded JSON import from {@code filepath} into {@code tableName}. */
	@Override
	public void importFromJson(String filepath, String tableName) {
		book.importFromJson(filepath, tableName);
	}

	/**
	 * Multi-threaded CSV import: an InnerReader task chunks the file onto a
	 * bounded queue; this thread drains the queue and fans each chunk out to
	 * an InsertRunnable. An EMPTY list on the queue is the end-of-file
	 * sentinel (established by the break on {@code bookList.isEmpty()}).
	 *
	 * @param filePath  CSV file to read
	 * @param tableName destination table
	 */
	@Override
	public void mulitImportFromCSV(String filePath, String tableName) {
		BlockingQueue<List<Book>> blockingQueue = new ArrayBlockingQueue<>(DEFAULT_QUEUE_SIZE);
		pools.execute(new InnerReader(blockingQueue, filePath, DEFAULT_QUEUE_SIZE));
		while (true) {
			if (blockingQueue.isEmpty()) {// queue empty: back off for a second
				try {
					TimeUnit.SECONDS.sleep(1);
				} catch (InterruptedException e) {
					// Restore the interrupt flag and stop consuming instead of
					// swallowing the interrupt and looping on.
					Thread.currentThread().interrupt();
					break;
				}
			}

			List<Book> bookList = blockingQueue.poll();// take and remove the head
			if (bookList == null) {
				continue;// producer not ready yet
			}
			if (bookList.isEmpty()) {// empty list = EOF sentinel
				logger.info("process the big file done.");
				break;
			}
			try {
				// BUGFIX: insert into the requested table; the original passed
				// the hard-coded literal "test" and ignored tableName.
				pools.execute(new InsertRunnable(bookList, filePath, tableName));
			} catch (SQLException e) {
				e.printStackTrace();// NOTE(review): prefer logger.error(msg, e) — logger type not visible here
			}
		}
		// Deliberately no pools.shutdown(): the pool is static and shared with
		// mulitExportToCSV; idle threads expire via allowCoreThreadTimeOut.
	}

	/**
	 * Multi-threaded CSV export: a GetAllRunnable task pages rows of
	 * {@code tableName} onto a bounded queue; InnerWriter tasks drain the
	 * queue into {@code filePath}. An EMPTY list peeked at the head is the
	 * end-of-data sentinel.
	 *
	 * @param filePath  destination CSV file
	 * @param tableName table to export
	 */
	@Override
	public void mulitExportToCSV(String filePath, String tableName) {
		BlockingQueue<List<Book>> blockingQueue = new ArrayBlockingQueue<>(DEFAULT_QUEUE_SIZE);
		try {
			pools.execute(new GetAllRunnable(blockingQueue, tableName));
		} catch (SQLException e1) {
			e1.printStackTrace();// NOTE(review): prefer logger.error(msg, e1) — logger type not visible here
			// BUGFIX: without a producer the loop below would spin forever.
			return;
		}
		while (true) {
			if (blockingQueue.isEmpty()) {// queue empty: back off for a second
				try {
					TimeUnit.SECONDS.sleep(1);
				} catch (InterruptedException e) {
					// Restore the interrupt flag and stop instead of looping on.
					Thread.currentThread().interrupt();
					break;
				}
			}

			List<Book> bookList = blockingQueue.peek();// inspect the head without removing it
			if (bookList == null) {
				// BUGFIX: the original submitted an InnerWriter even when the
				// queue was still empty, spinning and flooding the pool.
				continue;
			}
			if (bookList.isEmpty()) {// empty list = end-of-data sentinel
				logger.info("export the big data done.");
				break;
			}
			pools.execute(new InnerWriter(blockingQueue, filePath));
		}
		// Deliberately no pools.shutdown(): see mulitImportFromCSV.
	}

}
