package cn.wx.read.kafka.service;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.csvreader.CsvReader;

import cn.hutool.core.lang.Singleton;
import cn.wx.common.utils.TypeConversion;
import cn.wx.read.core.entity.Paper;
import cn.wx.read.core.service.PaperService;
import cn.wx.read.kafka.pool.ExecutorPool;
import cn.wx.read.kafka.pool.Gone;
import cn.wx.read.neo4j.entity.Neo4jPaper;
import cn.wx.read.neo4j.service.Neo4jImportDataService;

/**
 * Parses paper CSV shards and bulk-inserts the rows into Neo4j or MySQL.
 * Author: 张剑
 */
@Service
public class ConsumePaperService {
	
	Logger logger = LoggerFactory.getLogger(ConsumePaperService.class);
	
	@Autowired
	Neo4jImportDataService neo4jImportDataService;
	
	@Autowired
	PaperService paperService;
	
	ExecutorPool executorPool = Singleton.get(ExecutorPool.class);
	
	/** File-name prefix shared by every shard produced by {@code zoneCsv}. */
	private final static String zoneName = "ZonePaper_";
	
	/**
	 * Splits the source CSV into shards and dispatches them to the worker pool.
	 * Each task processes a contiguous, inclusive range of shard indices; every
	 * shard is handled exactly once (the previous chunking scheme both duplicated
	 * boundary shards and skipped one shard in the tail chunk).
	 *
	 * @param filePath directory containing the source file and the shards
	 * @param fileName name of the source CSV file
	 * @param gone     destination of the parsed rows (Neo4j or MySQL)
	 */
	public void consumeZone(String filePath,String fileName,Gone gone) {
		
		// Number of shard files produced by the splitter.
		Integer size = executorPool.zoneCsv(filePath, fileName, zoneName);
		
		executorPool.initPool();
		
		if (size != null && size > 0) {
			// chunk is at least 1 so small inputs (size < tc) still make progress;
			// [start, end] ranges are disjoint and cover 0 .. size-1 exactly.
			int chunk = Math.max(1, size / ExecutorPool.tc);
			for (int start = 0; start < size; start += chunk) {
				int end = Math.min(start + chunk, size) - 1;
				executorPool.execute(new ThreadMultiReader(filePath, start, end, gone));
			}
		}
		
		executorPool.poolShutdown();
	}
	
	/**
	 * Worker task: consumes the shard files whose indices lie in the
	 * inclusive range [s_, e_].
	 */
	class ThreadMultiReader implements Runnable {

		private Integer s_;
		
		private Integer e_;
		
		private String fp_;
		
		private Gone gone_;
		
		/**
		 * @param fp_ directory containing the shard files
		 * @param s_ first shard index (inclusive)
		 * @param e_ last shard index (inclusive)
		 * @param gone_ destination of the parsed rows
		 */
		public ThreadMultiReader(String fp_,Integer s_,Integer e_,Gone gone_) {
			this.s_ = s_;
			this.e_ = e_;
			this.fp_ = fp_;
			this.gone_ = gone_;
		}
		
		@Override
		public void run() {
			// Process every shard assigned to this task, one file at a time.
			for (int i = s_; i <= e_; i++) {
				consumePaper(fp_ + zoneName + i + ".csv", gone_);
			}
		}

		public Integer getS_() {
			return s_;
		}

		public void setS_(Integer s_) {
			this.s_ = s_;
		}

		public Integer getE_() {
			return e_;
		}

		public void setE_(Integer e_) {
			this.e_ = e_;
		}

		public String getFp_() {
			return fp_;
		}

		public void setFp_(String fp_) {
			this.fp_ = fp_;
		}

		public Gone getGone_() {
			return gone_;
		}

		public void setGone_(Gone gone_) {
			this.gone_ = gone_;
		}
		
	}
	
	/**
	 * Parses one shard file and bulk-inserts its rows into the destination
	 * selected by {@code go}, flushing in batches of {@code go.getIc()} rows.
	 *
	 * Expected CSV header:
	 * "nId","title","citations","year","url","fAbstract","publisher","Authors","docType"
	 *
	 * @param filePath full path of the shard CSV file
	 * @param go       destination of the parsed rows (Neo4j or MySQL)
	 */
	public void consumePaper(String filePath,Gone go){
		
		List<Neo4jPaper> nes = new ArrayList<>();
		List<Paper> bos = new ArrayList<>();
		
		CsvReader csvReader = null;
		try {
			csvReader = executorPool.getCsvReader(filePath);
			csvReader.readHeaders();
			
			while (csvReader.readRecord()) {
				
				// Skip physically empty records.
				String line = csvReader.getRawRecord();
				if (StringUtils.isBlank(line)) {
					continue;
				}
				
				String fId = csvReader.get("nId");
				String publishDate = csvReader.get("year");
				Integer year = TypeConversion.converYear(publishDate);
				String publisher = csvReader.get("publisher");
				String title = csvReader.get("title");
				String authors = csvReader.get("Authors");
				Integer citations = TypeConversion.converInteger(csvReader.get("citations"));
				Integer docType = TypeConversion.converInteger(csvReader.get("docType"));
				String fAbstract = csvReader.get("fAbstract");
				String url = csvReader.get("url");
				
				// Graph node: only the subset of fields Neo4j needs.
				Neo4jPaper ne = new Neo4jPaper();
				ne.setfId(fId);
				ne.setfPublishDate(publishDate);
				ne.setYear(year);
				ne.setDocType(docType);
				nes.add(ne);
				
				// Relational row: the full record.
				Paper bo = new Paper();
				bo.setfId(Long.valueOf(fId));
				bo.setTitle(title);
				bo.setPublisher(publisher);
				bo.setfPublishDate(publishDate);
				bo.setYear(year);
				bo.setfAuthors(authors);
				bo.setCitations(citations);
				bo.setfAbstract(fAbstract);
				bo.setUrl(url);
				bos.add(bo);
				
				// Flush a full batch (batch size comes from the destination config).
				if (nes.size() >= go.getIc()) {
					flushBatch(nes, bos, go);
				}
			}
			
			// Flush the trailing partial batch, with the same error handling
			// as the in-loop flushes (previously an unguarded duplicate).
			if (!nes.isEmpty()) {
				flushBatch(nes, bos, go);
			}
		} catch (IOException e) {
			logger.error("Failed to read shard file: {}", filePath, e);
		} finally {
			// Always release the reader, even when parsing fails mid-file.
			if (csvReader != null) {
				csvReader.close();
			}
		}
	}
	
	/**
	 * Inserts one batch into the configured destination and clears both
	 * buffers. Failures are logged and swallowed on purpose: a bad batch
	 * must not abort the rest of the shard.
	 *
	 * @param nes graph-side rows (used when go is Neo4j)
	 * @param bos relational rows (used when go is Mysql)
	 * @param go  destination selector
	 */
	private void flushBatch(List<Neo4jPaper> nes, List<Paper> bos, Gone go) {
		try {
			long st = System.currentTimeMillis();
			if (go.equals(Gone.Neo4j)) {
				neo4jImportDataService.importBetchPaper(nes);
			} else if (go.equals(Gone.Mysql)) {
				paperService.insertBatchPaper(bos);
			}
			long et = System.currentTimeMillis();
			logger.info("一组论文耗时：{}", (float) (et - st) / 1000);
		} catch (Exception e) {
			// Previously an empty catch — the failure was invisible.
			logger.error("Failed to insert paper batch", e);
		} finally {
			nes.clear();
			bos.clear();
		}
	}
}
