package cn.edu.scau.cmi.crawler.service;

import static org.hamcrest.CoreMatchers.nullValue;

import java.io.File;
import java.io.IOException;
import java.lang.reflect.Field;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.swing.filechooser.FileNameExtensionFilter;

import org.apache.commons.collections4.map.HashedMap;
import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.ss.usermodel.Sheet;
import org.apache.poi.ss.usermodel.Workbook;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import com.alibaba.fastjson.JSONObject;

import cn.edu.scau.cmi.crawler.saver.CrawlerdetectSaver;
import cn.edu.scau.cmi.crawler.saver.CrawlereventSaver;
import cn.edu.scau.cmi.ema.controller.base.CmiEmaController;
import cn.edu.scau.cmi.ema.domain.*;
import cn.edu.scau.cmi.ema.service.base.EntityFactoryService;
import cn.edu.scau.cmi.ema.util.CmiSetUtil;
import cn.edu.scau.cmi.ema.util.CmiTimeUtil;
import cn.edu.scau.cmi.ema.util.ExcelUtil;
@Service()
public class CrawlerdetectImportServiceImpl extends CmiEmaController implements CrawlerdetectImportService{
	
	// Target file for rows that failed the event import (named "<file>-事件导入异常-<timestamp>.xlsx").
	protected File nonImportEventFile =null;
	// Target file for rows that failed the detect import (named "<file>-检测导入异常-<timestamp>.xlsx").
	protected File nonImportDetectFile =null;

	@Autowired EntityFactoryService entityFactory;   // factory producing initialized domain entities
	@Autowired CrawlerdetectSaver crawlerdetectSaver; // persists one split detect row
	@Autowired CrawlereventSaver crawlereventSaver;   // persists one split event row
	@Autowired CrawlerImportService importService;    // shared workbook/title-row/row-splitting helpers

	/**
	 * Imports all detect rows of the given crawler file's spreadsheet.
	 *
	 * @param crawelfile crawler file record pointing at the source spreadsheet
	 * @return true when the rows were read and imported; false when the file is
	 *         missing, the title row cannot be located, or the import fails
	 * @throws Exception when reading or closing the workbook fails
	 */
	@Override
	public boolean importCrawlerdetect(Crawlerfile crawelfile) throws Exception {

		// A null workbook means the physical file is gone: flag the record and bail out.
		final Workbook workbook = importService.getWorkbook(crawelfile, samrQualifyDetect);
		if (workbook == null) {
			crawelfile.setExist(false);
			crawlerfileService.saveCrawlerfile(crawelfile);
			return false;
		}

		final Sheet firstSheet = workbook.getSheetAt(0);
		final int totalRows = ExcelUtil.getRows(firstSheet, 0).size();

		final Row titleRow = importService.getTitleRow(firstSheet);
		if (titleRow == null) {
			System.out.println(crawelfile.getId() + crawelfile.getName() + "没有成功获取title行，导入数据不成功");
			return false;
		}

		// Data begins on the row right after the title row.
		final int firstDataRow = titleRow.getRowNum() + 1;
		final List<Row> rawRows = ExcelUtil.getRows(firstSheet, firstDataRow, totalRows);
		// One raw row may describe several records; split into one row per record.
		final List<Row> splitRows = importService.convertXlsRow2EventRows(rawRows);
		System.out.println("读取表格成功，共有" + splitRows.size() + "条实际需要处理的数据");

		final boolean imported = importActualDetectRowsIntoCrwalerdetect(crawelfile, firstSheet, splitRows);
		workbook.close();

		if (!imported) {
			System.out.println(crawelfile.getName() + "文件暂时还没有对应的匹配算法，请完成后再运行！！！");
			return false;
		}
		System.out.println("导入事件成功，请查看数据库和控制台输出的信息！！！");
		return true;
	}
	
	/**
	 * Hands every split detect row of the given file to the saver.
	 *
	 * @param crawlerfile     source file whose standard is announced on the console
	 * @param sheet           sheet the rows came from
	 * @param actualEventRows split rows ready to persist
	 * @return true once all rows have been handed to the saver.
	 *         BUGFIX: the previous version initialized {@code isSuccess} to false
	 *         and never set it, so the caller always reported a missing matching
	 *         algorithm even after every row was saved.
	 */
	private boolean importActualDetectRowsIntoCrwalerdetect(Crawlerfile crawlerfile, Sheet sheet, List<Row> actualEventRows) {

		System.out.println("拆分后，总共有" + actualEventRows.size() + "条数据待匹配并插入");
		Crawlerfilestandard standard = crawlerfile.getCrawlerfilestandard();
		System.out.println("当前输入的记录的标准是：" + standard.getName());

		for (Row row : actualEventRows) {
			crawlerdetectSaver.saveActualDetectRowIntoCrawlerdetect(crawlerfile, sheet, row);
		}
		return true;
	}
	
	
//	导入电子表格：获取所有的数据，转换成为准备导入的事件列表
	@Transactional(timeout = 300000000)
	public boolean importEvent(Crawlerfile excel) throws Exception {

		String timeStamp = CmiTimeUtil.getYMDHMSString(Calendar.getInstance());
		
		//description封装了相关的爬取信息，使用json格式封装。目前封装了crawlTime，sourceTime，reportor，好像暂时没用到
		String description=excel.getDescription();
		JSONObject descriptionJson=JSONObject.parseObject(description);
		//{'crawlTime':'2021-08-16 22:14:28','reportor':'北京市市场监督管理局','sourceTime':'2021-07-23'}

		//北京市市场监督管理局
		//D:\EMA工作目录\CrawlerFile\北京市
		String siteName=excel.getUrl().getSite().getName();
		//城市或者省份的名称
		String samrAddressName=siteName.substring(0,siteName.length()-7);
		
		nonImportEventFile = new File(samrDirectory + samrAddressName +"/" + samrNonImportDetect +excel.getName().replace(".xlsx", "-事件导入异常-" + timeStamp + ".xlsx"));
		Workbook workbook = ExcelUtil.getWorkbook(samrDirectory + samrAddressName + "/" + samrNonQualifyDetect + excel.getName());
		Sheet sheet = workbook.getSheetAt(0);
		
		int lastRowNum = ExcelUtil.getRows(sheet, 0).size();
		int startRowNum =2;

		// 导入的表格数据，电子表格中的每一行可能包含几个事件，需要将每一行事件转换为真正的事件(集合)
		List<Row> xlsEventRows = ExcelUtil.getRows(sheet, startRowNum, lastRowNum);

		// 有的数据是的地址等有多个，把这些记录分解成多条记录。
		List<Row> actualEventRows = importService.convertXlsRow2EventRows(xlsEventRows);
		System.out.println("读取表格成功，共有" + actualEventRows.size() + "条实际需要处理的数据");
		// 把实际分解出来的事件行导入到数据库中，不能导入的数据行就保存到不能保存的文件中。
		importActualEventRows(excel, sheet, actualEventRows);
		
//		关闭文件
		workbook.close();
		System.out.println("不合格检测文件导入事件成功，请查看数据库和控制台输出的信息！！！");
		return true;
	}
//	Imports the split event rows; rows that were not imported are collected into
//	nonImportEventRowList (the export of that list is currently commented out).
//	NOTE(review): @Transactional on a private method is not intercepted by
//	Spring's proxy-based AOP, so this timeout likely has no effect — confirm.
	@Transactional(timeout = 30000000)
	private void importActualEventRows(Crawlerfile excel, Sheet sheet, List<Row> eventRowList) throws Exception {
		System.out.println("拆分后，总共有" + eventRowList.size() + "条数据待匹配并插入");
		// Header rows, kept to export failed rows together with their titles.
		List<Row> headList = ExcelUtil.getRows(sheet, sheet.getFirstRowNum(), 1);//starts at row 0; for the Shenzhen files the head is on row 1.
		List<Row> nonImportEventRowList = new ArrayList<>();
		//		Iterate the events; events that fail to import are added to nonImportEventRowList.
		for (Row row : eventRowList) {
//			Whether this row was imported (possibly earlier), to avoid duplicate data.
			
			boolean isImported = false;
			
			short lastCellNum = row.getLastCellNum();
			// Dispatch on the 0-based last cell index. 'l' - 97 == 11, i.e. a
			// 12-column layout; the handler is commented out, so currently every
			// row falls through with isImported == false.
			switch (lastCellNum -1 ) {
			case 'l' -97:
//				isImported = importActualEventLrow(excel, row);
				break;
				
			default:
				break;
			}
			
			if(!isImported) {
				nonImportEventRowList.add(row);
			}
		}
		
		//		Export of the rows left in their initial (non-imported) state — disabled.
		/*
		 * try { ExcelUtil.exportXlsx(nonImportEventFile, headList,
		 * nonImportEventRowList); } catch (IOException e) { e.printStackTrace(); }
		 */
	}
	
//	??? The rule a subclass derives from the excel file; subclass build methods
//	rely on it. NOTE(review): how/where this field gets populated looks
//	unresolved — confirm before depending on it.
	protected Rule rule = null;
//	Some spreadsheet columns hold several values per cell; such rows need to be
//	split into multiple rows.

	

	
//	Match failure mode: the underlying object saves fine, but linking via the
//	reference table fails, so the overall save fails.
//	Rows needing no adaptation are read whole and marked red, without splitting.
//	No further splitting is required; the separators were handled earlier.

	/**
	 * Links an event to an adulter type through the event2adultertype join table,
	 * inserting the pair only when it is not already present.
	 *
	 * <p>Plain JDBC is used here because saving the association through the
	 * service layer failed on lazy loading.
	 *
	 * @param con         open JDBC connection (owned by the caller, not closed here)
	 * @param adultertype adulter type side of the association
	 * @param event       event side of the association
	 * @throws SQLException on any JDBC failure
	 * @throws Exception    when the INSERT does not report exactly one affected row
	 */
	protected void insertEvent2adultertype(Connection con, Adultertype adultertype, Event event)
			throws SQLException, Exception {
		boolean exist = false;

		// Check whether the pair is already linked. try-with-resources fixes the
		// statement/result-set leak in the previous version.
		String querySql = "select * from event2adultertype where event = ? and adultertype = ?";
		try (PreparedStatement ps = con.prepareStatement(querySql)) {
			ps.setObject(1, event.getId());
			ps.setObject(2, adultertype.getId());
			try (ResultSet rs = ps.executeQuery()) {
				if (rs.next())
					exist = true;
			}
		}

		if (!exist) {
			String insertSql = "INSERT INTO event2adultertype(event, adultertype) VALUES(?, ?);";
			try (PreparedStatement ps = con.prepareStatement(insertSql)) {
				ps.setObject(1, event.getId());
				ps.setObject(2, adultertype.getId());
				int executeUpdate = ps.executeUpdate();
				if (executeUpdate != 1)
					throw new Exception("关联event和adultertype失败");
			}
		}
	}
	
	//	TODO field mapping via reflection is incomplete: only String fields are handled.
	/**
	 * Populates {@code event}'s String fields from the spreadsheet row, using the
	 * column mapping described by {@link #rule}.
	 *
	 * <p>For every declared String field of the event class, all rule columns
	 * whose rule-field name matches the field name (case-insensitively) are
	 * concatenated in iteration order and written into the field via reflection.
	 *
	 * @param event entity to fill; mutated in place
	 * @param row   spreadsheet row supplying the cell values
	 */
	protected void getEventByReflection(Event event, Row row) {
		Set<Rulefield> ruledetails = rule.getRulefieldsForRule();

		// Inspect every declared field and apply the mapping rules by type.
		Field[] eventFields = event.getClass().getDeclaredFields();

		for (Field field : eventFields) {
			field.setAccessible(true);

			switch (field.getType().getCanonicalName()) {
			case "java.lang.String":
				// Concatenate the mapped spreadsheet cells for this field.
				StringBuilder fieldString = new StringBuilder();
				for (Rulefield ruledetail : ruledetails) {
					if (ruledetail.getEntityclassname().equalsIgnoreCase(field.getName())) {
						Set<Rulecolumn> xlscolumns = ruledetail.getRulecolumnsForRulefield();
						for (Rulecolumn xlscolumn : xlscolumns) {
							fieldString.append(ExcelUtil.getCellString(row, xlscolumn.getExcelcolumn()));
						}
					}
				}

				// Write the assembled value back onto the event.
				try {
					field.set(event, fieldString.toString());
				} catch (IllegalArgumentException | IllegalAccessException e) {
					// setAccessible(true) above should prevent access errors; log and continue.
					e.printStackTrace();
				}
				break;

			case "cn.edu.scau.cmi.Address":
				// TODO Address-typed fields are not mapped yet.
				break;
			}
		}
	}
	
	
//	Determines the spreadsheet-format standard of a crawler file. Qualified and
//	non-qualified spreadsheets live in different directories, so both are probed.
	/**
	 * Resolves the {@link Crawlerfilestandard} of {@code file} by reading its
	 * title row and matching the column layout against the known standards.
	 *
	 * @param file crawler file whose spreadsheet layout should be classified
	 * @return the matched (or newly created) standard; the standard with id 1
	 *         when the title row cannot be found; the placeholder standard
	 *         (id -2) when a column name is unknown; {@code null} when the file
	 *         cannot be opened as a workbook
	 */
	public Crawlerfilestandard updateCrawlerfilestandard(Crawlerfile file) {

		Map<String, Integer> toMatchStandardColumnNumberMap = new HashedMap<String, Integer>();
		String siteName=file.getUrl().getSite().getName();
		String samrAddressName=siteName.substring(0,siteName.length()-7);
		Workbook workbook = null;
		
		// Probe the non-qualified directory first, then fall back to the qualified one.
		String fileName = samrDirectory + samrAddressName + "/" + samrNonQualifyDetect + file.getName();
		File detectFile = new File(fileName);
		if(detectFile.exists()) {
			workbook = ExcelUtil.getWorkbook(fileName);
		}else {
			workbook = ExcelUtil.getWorkbook(samrDirectory + samrAddressName + "/" + samrQualifyDetect + file.getName());
		}
//		No workbook could be opened: flag the record and give up.
		if(workbook == null) {
			file.setDescription(file.getDescription() + "，文件转换为workbook对象失败，请查找原因");
			file.setCrawlerfilestandard(crawlerfilestandardDAO.findCrawlerfilestandardById(-2));
			crawlerfileService.saveCrawlerfile(file);
			return null;
		}
		
		// Placeholder standard (id -2), returned when a column cannot be resolved below.
		Crawlerfilestandard crawlerfilestandard = crawlerfilestandardDAO.findCrawlerfilestandardById(-2);
		

		
		
		Sheet sheet = workbook.getSheetAt(0);
//		Title row may be missing; in that case record a fallback standard.
//		NOTE(review): the original comment said id 49 ("无法确定的标准") but the code
//		uses id 1 — confirm which id is the intended "undetermined" standard.
		Row titleRow = importService.getTitleRow(sheet);
		if(titleRow == null) {
			file.setCrawlerfilestandard(crawlerfilestandardDAO.findCrawlerfilestandardById(1));
			crawlerfileService.saveCrawlerfile(file);
			System.out.println("---" + file.getId() + " " + file.getName() + "抬头行获取失败，该文件的标准没有正确获取---");
			return crawlerfilestandardDAO.findCrawlerfilestandardById(1);
		}
		
		short lastCellNum = titleRow.getLastCellNum();
		int mapIndex = 0;
//		Number of non-empty title columns actually present; empty cells are skipped.
		int actualColumn = 0;
		
		for(int cellNum = 0 ; cellNum < lastCellNum; cellNum++) {
			Cell cell = titleRow.getCell(cellNum);
			String cellValue = ExcelUtil.getCellValue(cell);
//			Trailing cells can be read even though they hold no content; skip them.
			if(cellValue.equals("")) {
				continue;
			}
			
			actualColumn++;
			
			Crawlerfilecolumn crawlerfilecolumn = CmiSetUtil.getSoleInstance(crawlerfilecolumnDAO.findCrawlerfilecolumnByName(cellValue));
			if(crawlerfilecolumn == null) {
				System.out.println("____________" + file.getId() + file.getName() + "" + cellValue + "列在crawelfilecolumn表中没有找到，请先添加后在执行标准的更新");
				return crawlerfilestandard;
				
			}
			
			Set<Crawlerfilestandardcolumn> crawlerfilestandardcolumns = crawlerfilecolumn.getCrawlerfilestandardcolumnsForCrawlerfilecolumn();
			
			// A spreadsheet column may map to one or several standard columns; when
			// several, they are inserted in their natural (Comparable) order.
			if(crawlerfilestandardcolumns.size() == 1) {
				String mapName = CmiSetUtil.getSoleInstance(crawlerfilestandardcolumns).getName();
				toMatchStandardColumnNumberMap.put(mapName, mapIndex);
				mapIndex++;
			}else {
				List<Crawlerfilestandardcolumn> list = new ArrayList<Crawlerfilestandardcolumn>(crawlerfilestandardcolumns);
//				Crawlerfilestandardcolumn must implement Comparable for this sort.
				Collections.sort(list);
				for(Crawlerfilestandardcolumn c: list) {
					toMatchStandardColumnNumberMap.put(c.getName(), mapIndex); 
					mapIndex++; 
					}
				}
			}
		
		crawlerfilestandard = matchCrawlerfilestandard(file, actualColumn, toMatchStandardColumnNumberMap);
		
		file.setCrawlerfilestandard(crawlerfilestandard);
		file.setCrawlerfilestatus(crawlerfilestatusDAO.findCrawlerfilestatusById(8));//id = 8: import standard has been determined
		file = crawlerfileService.saveCrawlerfile(file);
		
		// NOTE(review): the workbook is never closed on any path of this method —
		// consider a try/finally; left unchanged here.
		return crawlerfilestandard;
	}



//	Matching algorithm: find the standard describing the spreadsheet layout; create
//	a new one when no existing standard matches. toMatchStandardColumnNumberMap
//	looks like: {标识生产企业名称=4, 被抽样单位名称=6, 被抽样单位地址=7, 食品类别=9, ...}
	/**
	 * Finds the standard whose detail columns exactly match the given
	 * column-name → position map; creates and persists a new standard when none
	 * matches.
	 *
	 * @param fCrawlerfile file being classified (used when a new standard is created)
	 * @param actualColumn number of non-empty spreadsheet columns
	 * @param toMatchStandardColumnNumberMap column name → 0-based position map
	 * @return a matching or newly created standard, never {@code null}
	 */
	private Crawlerfilestandard matchCrawlerfilestandard(Crawlerfile fCrawlerfile, int actualColumn, Map<String, Integer> toMatchStandardColumnNumberMap) {

		Set<Crawlerfilestandard> allStandards = crawlerfilestandardDAO.findAllCrawlerfilestandards();
		Crawlerfilestandard crawlerfilestandard = null;

		for (Crawlerfilestandard standard : allStandards) {
			boolean isThisStandard = true;
			// Each standard is described by a set of detail rows (column name + position).
			Set<Crawlerfilestandardetail> standardetails = standard.getCrawlerfilestandardetailsForCrawlerfilestandard();
			// Different column counts can never match; try the next standard.
			if (standardetails.size() != toMatchStandardColumnNumberMap.size()) {
				continue;
			}

			// Flatten the details into a map for easy comparison, e.g.
			// {标识生产企业名称=4, 被抽样单位名称=6, 被抽样单位地址=7, 食品类别=9, ...}
			Map<String, Integer> standardetailColumnNumberMap = new HashedMap<String, Integer>();
			for (Crawlerfilestandardetail standardetail : standardetails) {
				standardetailColumnNumberMap.put(standardetail.getCrawlerfilestandardcolumn().getName(), standardetail.getNumber());
			}

			// Every standard column must exist in the file's map at the same
			// position; a single mismatch rules the standard out.
			for (String standardColumnName : standardetailColumnNumberMap.keySet()) {
				Integer toMatchStandardColumNumber = toMatchStandardColumnNumberMap.get(standardColumnName);
				// BUGFIX: a column present in the standard but absent from the file
				// used to be silently skipped via `continue`. Since the sizes are
				// equal, a missing key implies an extra, unchecked key elsewhere,
				// so the old code could accept a wrong standard. Treat it as a
				// mismatch instead.
				if (toMatchStandardColumNumber == null
						|| toMatchStandardColumNumber.intValue() != standardetailColumnNumberMap.get(standardColumnName)) {
					isThisStandard = false;
					break;
				}
			}
			// The first fully matching standard wins.
			if (isThisStandard) {
				crawlerfilestandard = standard;
				break;
			}
		}

		// No standard matched: create one and persist its details (the creation
		// method saves both the master row and the detail rows, including links).
		if (crawlerfilestandard == null) {
			crawlerfilestandard = newCrawlerfileStandard(fCrawlerfile, actualColumn, toMatchStandardColumnNumberMap);
		}
		return crawlerfilestandard;
	}

	/**
	 * Creates and persists a brand-new standard (plus its detail rows) for a
	 * layout that matched no existing standard.
	 *
	 * <p>The generated name is {@code <Detect|Event><actual><logical><seq>}, e.g.
	 * {@code DetectJK2}: the two letters encode the physical and logical column
	 * counts via a +64 offset (10 → 'J', 11 → 'K', ...); the trailing number is
	 * one past the highest sequence number already used with that prefix.
	 *
	 * @param file         file the standard originates from (recorded in the description)
	 * @param actualColumn number of non-empty spreadsheet columns
	 * @param toMatchStandardcolumnNumberMap column name → 0-based position map
	 * @return the persisted standard with its detail set and file link attached
	 */
	private Crawlerfilestandard newCrawlerfileStandard(Crawlerfile file, int actualColumn, Map<String, Integer> toMatchStandardcolumnNumberMap) {
		Crawlerfilestandard crawlerfilestandard = new Crawlerfilestandard();
		// File types: "检测合格文件" (qualified) vs "检测不合格文件" (non-qualified).
		String fileTypeName = file.getFiletype().getName();

		// Column counts rendered as letters: 10 -> 'J', 11 -> 'K', ...
		char logicStandardcolumnChar = (char) (toMatchStandardcolumnNumberMap.size() + 64);
		char actualStandardcolumnChar = (char) (actualColumn + 64);

		String standardNamePrefix = "";
		switch (fileTypeName) {
		case "检测合格文件":
			standardNamePrefix = "Detect" + actualStandardcolumnChar + logicStandardcolumnChar;
			break;

		case "检测不合格文件":
			standardNamePrefix = "Event" + actualStandardcolumnChar + logicStandardcolumnChar;
			break;

		default:
			break;
		}

		// Highest numeric suffix already in use for this prefix (e.g. EventJK1,
		// EventJK2 -> 2); the new standard gets that value + 1.
		int standardNamePostfix = -1;
		Set<Crawlerfilestandard> standards = crawlerfilestandardDAO.findCrawlerfilestandardByNameContaining(standardNamePrefix);

		for (Crawlerfilestandard standard : standards) {
			String suffix = standard.getName().replace(standardNamePrefix, "").trim();
			try {
				int num = Integer.parseInt(suffix);
				if (num > standardNamePostfix) {
					standardNamePostfix = num;
				}
			} catch (NumberFormatException e) {
				// BUGFIX: "containing" matches can leave non-numeric remainders
				// (e.g. a longer prefix sharing this one); skip them instead of
				// letting NumberFormatException abort the whole creation.
			}
		}
		standardNamePostfix++;

		crawlerfilestandard.setName(standardNamePrefix + standardNamePostfix);
		crawlerfilestandard.setDescription("文件Id号：" + file.getId() + "文件名：" +file.getName() +"。新建的标准，还没有完成这个标准的导入部分，请完善！！！");
		crawlerfilestandard = crawlerfilestandardService.saveCrawlerfilestandard(crawlerfilestandard);

		// Persist one detail row per logical column and collect them on the standard.
		Set<Crawlerfilestandardetail> toMatchStandardetails = new HashSet<Crawlerfilestandardetail>();

		for (String toMatchkeyStringstandardColumName : toMatchStandardcolumnNumberMap.keySet()) {
			Integer standardColumNumber = toMatchStandardcolumnNumberMap.get(toMatchkeyStringstandardColumName);

			Crawlerfilestandardetail crawlerfilestandardetail = new Crawlerfilestandardetail();
			crawlerfilestandardetail.setCrawlerfilestandard(crawlerfilestandard);

			// The standard column this spreadsheet column corresponds to; fall back
			// to the placeholder column (id -2) when the name is unknown.
			Crawlerfilestandardcolumn standardColumn = CmiSetUtil.getLastInstance(crawlerfilestandardcolumnDAO.findCrawlerfilestandardcolumnByName(toMatchkeyStringstandardColumName));
			if (standardColumn == null) {
				standardColumn = crawlerfilestandardcolumnDAO.findCrawlerfilestandardcolumnById(-2);
			}

			crawlerfilestandardetail.setCrawlerfilestandardcolumn(standardColumn);
			crawlerfilestandardetail.setDescription(toMatchkeyStringstandardColumName);
			crawlerfilestandardetail.setName(toMatchkeyStringstandardColumName);
			crawlerfilestandardetail.setNumber(standardColumNumber);

			crawlerfilestandardetail = crawlerfilestandardetailService.saveCrawlerfilestandardetail(crawlerfilestandardetail);

			toMatchStandardetails.add(crawlerfilestandardetail);
		}

		crawlerfilestandard.setCrawlerfilestandardetailsForCrawlerfilestandard(toMatchStandardetails);

		// Link the originating file to the standard and save once more so the
		// association is persisted.
		Set<Crawlerfile> crawlerfiles = new HashSet<Crawlerfile>();
		crawlerfiles.add(file);
		crawlerfilestandard.setCrawlerfilesForCrawlerfilestandard(crawlerfiles);
		crawlerfilestandard = crawlerfilestandardService.saveCrawlerfilestandard(crawlerfilestandard);
		return crawlerfilestandard;
	}

	/**
	 * Imports the qualified-detect rows of the given crawler file's spreadsheet.
	 *
	 * @param excel crawler file pointing at a qualified-detect spreadsheet
	 * @return true when the sheet was read and processed; false when the file
	 *         cannot be opened as a workbook
	 * @throws Exception when reading the spreadsheet or importing a row fails
	 */
	@Override
	public boolean importDetectFromCrawlerfile(Crawlerfile excel) throws Exception {
		String timeStamp = CmiTimeUtil.getYMDHMSString(Calendar.getInstance());
		// Site name minus its 7-character suffix is the province/city directory.
		String siteName = excel.getUrl().getSite().getName();
		String samrAddressName = siteName.substring(0, siteName.length() - 7);
		nonImportDetectFile = new File(samrDirectory + samrAddressName +"/" + samrNonImportDetect +excel.getName().replace(".xlsx", "-检测导入异常-" + timeStamp + ".xlsx"));
		Workbook workbook = ExcelUtil.getWorkbook(samrDirectory + samrAddressName +"/" + samrQualifyDetect + excel.getName());
		// BUGFIX: guard against a missing/unreadable file instead of an NPE below.
		if (workbook == null) {
			System.out.println(excel.getId() + " " + excel.getName() + "文件转换为workbook对象失败，导入检测不成功");
			return false;
		}
		try {
			Sheet sheet = workbook.getSheetAt(0);
			int lastRowNum = ExcelUtil.getRows(sheet, 0).size();
			int startRowNum = 2;
			List<Row> xlsEventRows = ExcelUtil.getRows(sheet, startRowNum, lastRowNum);
			// Split multi-valued rows into one row per record before importing.
			List<Row> actualEventRows = importService.convertXlsRow2EventRows(xlsEventRows);
			System.out.println("读取表格成功，共有" + actualEventRows.size() + "条实际需要处理的数据");

			importActualDetectRows(excel, sheet, actualEventRows);
		} finally {
			// Always release the workbook handle, even when a row import throws.
			workbook.close();
		}
		System.out.println("导入事件成功，请查看数据库和控制台输出的信息！！！");
		return true;
	}

	/**
	 * Not implemented yet; always reports failure.
	 *
	 * @return false unconditionally (TODO: implement the event import)
	 */
	@Override
	public boolean importEventFromCrawlerfile(Crawlerfile excel) throws Exception {
		return false;
	}

	
//	Imports the split detect rows; rows left in their initial crawl state are
//	meant to be collected and exported (the handlers are currently commented
//	out, so this method only logs and iterates for now).
//	NOTE(review): @Transactional on a private method is not intercepted by
//	Spring's proxy-based AOP, so this timeout likely has no effect — confirm.
	@Transactional(timeout = 30000000)
	private void importActualDetectRows(Crawlerfile excel, Sheet sheet, List<Row> eventRowList) throws Exception {
		System.out.println("拆分后，总共有" + eventRowList.size() + "条数据待匹配并插入");
		// Header rows, kept to export failed rows together with their titles (unused for now).
		List<Row> headList = ExcelUtil.getRows(sheet, sheet.getFirstRowNum(), 1);//starts at row 0; for the Shenzhen files the head is on row 1.
		List<Row> nonImportEventRowList = new ArrayList<>();
		//		Iterate the rows; rows that fail to import would be collected here.
		for (Row row : eventRowList) {
//			Whether this row was imported earlier, to avoid duplicate data.
			boolean isImported = false;
			short lastCellNum = row.getLastCellNum();
//			Dispatch on the 0-based last cell index; 'h' - 97 == 7, i.e. an
//			8-column layout. The handler is commented out, so nothing happens yet.
//			Consider moving this switch into a try block.
			switch (lastCellNum -1 ) {
			case 'h' -97:
//				isImported = importActualDetectHrow(excel, row);
				break;}
			}
		}


	

	
	/**
	 * Ensures every non-empty title-row column of the file's spreadsheet exists
	 * in the crawlerfilecolumn table, then records the title row number and marks
	 * the file as "columns imported" (status id 7).
	 *
	 * <p>Both the qualified and the non-qualified directory are probed, because
	 * some crawled files mix both kinds of information (e.g.
	 * 河南省关于5批次食品不合格情况的通告 filed under 合格信息).
	 */
	@Override
	public void importColumn(Crawlerfile crawlerfile) {

		String siteName = crawlerfile.getUrl().getSite().getName();
		String samrAddressName = siteName.substring(0, siteName.length() - 7);
		Workbook workbook = null;

		String detectFileName = samrDirectory + samrAddressName + "/" + samrQualifyDetect + crawlerfile.getName();
		File detectFile = new File(detectFileName);

		String eventFileName = samrDirectory + samrAddressName + "/" + samrNonQualifyDetect + crawlerfile.getName();

		if (detectFile.exists()) {
			workbook = ExcelUtil.getWorkbook(detectFileName);
		} else {
			workbook = ExcelUtil.getWorkbook(eventFileName);
		}

		if (workbook == null) {
			System.out.println("电子表格文件WorkBook读取错误，可能是文件格式错误");
			crawlerfile.setDescription("电子表格文件WorkBook读取错误，可能是文件格式错误");
			crawlerfile.setCrawlerfilestatus(crawlerfilestatusDAO.findCrawlerfilestatusById(9));
			crawlerfileService.saveCrawlerfile(crawlerfile);
			return;
		}

		try {
			Sheet sheet = workbook.getSheetAt(0);
			Row titleRow = importService.getTitleRow(sheet);
			if (titleRow == null) {
				System.out.println(crawlerfile.getId() + "  " + crawlerfile.getName() + "文件的抬头行没有正确获取");
				return;
			}

			short lastCellNum = titleRow.getLastCellNum();

			for (int cellNum = 0; cellNum < lastCellNum; cellNum++) {
				Cell cell = titleRow.getCell(cellNum);
				String cellValue = ExcelUtil.getCellValue(cell);
				// Cells can come back as empty strings; skip them.
				if (cellValue.equals("")) {
					continue;
				}
				Set<Crawlerfilecolumn> crawlerfilecolumns = crawlerfilecolumnDAO.findCrawlerfilecolumnByName(cellValue);

				// Unknown column title: persist it (foreign keys are added later in
				// the back end). The entity is now only allocated when needed.
				if (crawlerfilecolumns.size() == 0) {
					Crawlerfilecolumn crawlerfilecolumn = new Crawlerfilecolumn();
					crawlerfilecolumn.setName(cellValue);
					crawlerfilecolumn.setDescription("列的初始来源：" + crawlerfile.getName());
					crawlerfilecolumnService.saveCrawlerfilecolumn(crawlerfilecolumn);
				}
			}
			Crawlerfilestatus filestatustype = crawlerfilestatusDAO.findCrawlerfilestatusById(7);// id 7: all column titles imported into crawlerfilecolumn
			crawlerfile.setCrawlerfilestatus(filestatustype);
			crawlerfile.setTitlerownum(titleRow.getRowNum());
			crawlerfile = crawlerfileService.saveCrawlerfile(crawlerfile);
		} finally {
			// BUGFIX: the workbook used to leak on every path; release it here.
			try {
				workbook.close();
			} catch (IOException e) {
				e.printStackTrace();
			}
		}
	}

	

	
	
	
	
	/**
	 * Builds and persists the full Detect object graph (producer, detect
	 * department, subject, feature, brand, food type, food, food batch, detect,
	 * feature standard, detect detail, detect feature) from one crawled record.
	 *
	 * <p>Referenced entities are resolved and saved before the entities that
	 * point at them. Missing optional parts fall back to the placeholder entity
	 * with id -2; missing mandatory parts (subject, food) abort the import.
	 *
	 * @param crawlerdetect crawled record to import; its description/status are
	 *                      updated on failure
	 * @return true when the whole graph was saved, false when a mandatory part is
	 *         missing
	 */
	@Override
	public boolean importDetect(Crawlerdetect crawlerdetect) {
//		(1) Plain attributes need no wrapping; (2) crawlerdetect is unwrapped
//		into the related entities below, referenced entities saved first.
		Producer producer = buildProducer(crawlerdetect);
		if(producer == null) {
			producer = producerDAO.findProducerById(-2);
		}else if(producer.getId() == null) {
			producer = producerService.saveProducer(producer);
		}

		Detectdepartment detectdepartment = buildDetectdepartment(crawlerdetect);
		if(detectdepartment == null) {
			detectdepartment = detectdepartmentDAO.findDetectdepartmentById(-2);
		}else if(detectdepartment.getId() == null) {
			detectdepartment = detectdepartmentService.saveDetectdepartment(detectdepartment);
		}
//		The subject is mandatory for a detect.
		Subject subject = buildSubject(crawlerdetect);
		if(subject == null) {
			System.out.println("导入Detect的主体不能为空，这一行记录没有被保存");
			crawlerdetect.setDescription("导入Detect的主体不能为空，这一行记录没有被保存");
			return false;
		}else if(subject.getId() == null) {
			subject = subjectService.saveSubject(subject);
		}

		Feature feature = buildFeature(crawlerdetect);
		if (feature == null) {
			feature = featureDAO.findFeatureById(-2);
		}else if(feature.getId() == null) {
			feature = featureService.saveFeature(feature);
		}

		Brand brand = bulidBrand(crawlerdetect);
		if(brand == null) {
			brand = brandDAO.findBrandById(-2);
		}else if(brand.getId() == null) {
			brand = brandService.saveBrand(brand);
		}

//		Food types are reference data and must not be created here.
		Foodtype foodtype = buildFoodtype(crawlerdetect);
		if(foodtype == null) {
			foodtype = foodtypeDAO.findFoodtypeById(-2);
		}

//		Food's logical key: name, productcode, brand. Mandatory as well.
		Food food = buildFood(crawlerdetect);

		if(food == null) {
			System.out.println("Detect的Food是空，该数据没有意义");
			crawlerdetect.setDescription("Detect的Food是空，该数据没有意义");
			crawlerdetect.setCrawlerdetectstatus(crawlerdetectstatusDAO.findCrawlerdetectstatusById(5));// id 5: import failed
			crawlerdetectService.saveCrawlerdetect(crawlerdetect);
			return false;
		}

		food.setBrand(brand);
		food.setProductcode(productcodeDAO.findProductcodeById(-2));
		food.setProducer(producer);
		food.setFoodtype(foodtype);
		food.setDescription("名称：" + food.getName() + "，产品编码：" + food.getProductcode().getName() + "，生产商：" + food.getProducer().getName() + "，品牌：" + food.getBrand().getName());
		food = foodService.saveFood(food);

//		foodbatch == null: the record carries no batch info at all → create a
//		placeholder. foodbatch.getId() == null: a new batch that still needs its
//		derived fields filled in before saving.
		Foodbatch foodbatch = buildFoodbatch(crawlerdetect);
		if(foodbatch == null) {
			foodbatch = entityFactory.initFoodbatch();
			foodbatch.setName("检测报告没有标明食物的批次");
			foodbatch.setBatch("检测报告没有标明食物的批次");
			foodbatch.setDescription("检测报告没有标明食物的批次");
		}

		if(foodbatch.getId() == null) {
			if(crawlerdetect.getFoodbatch() != null) {
				foodbatch.setBatch(crawlerdetect.getFoodbatch());
			}
			foodbatch.setDescription("食物：" + food.getName() + "，生产商：" + producer.getName() + ", 品牌：" + brand.getName() + "生产日期：" + "   ");
		}
		foodbatch.setFood(food);
		foodbatch.setUnit(unitDAO.findUnitById(-2));

//		TODO !!!!!! Proper production-date extraction is still pending: use the
//		batch string when it parses as yyyy-MM-dd, otherwise fall back to "now".
		Calendar date = Calendar.getInstance();
		String crawlerDetectFoodBatch = crawlerdetect.getFoodbatch();// e.g. "2021-09-28"
		if (crawlerDetectFoodBatch != null) {
			SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
			try {
				Date parsedDate = dateFormat.parse(crawlerDetectFoodBatch);
				// BUGFIX: the old code unconditionally called date.setTime(...) with
				// a null Date — an NPE — whenever the batch string was missing or
				// not a date (e.g. the free-text placeholder batch above).
				date.setTime(parsedDate);
			} catch (ParseException e) {
				// Free-text batch (not a date): keep the current time.
			}
		}
		foodbatch.setDate(date);
		foodbatch = foodbatchService.saveFoodbatch(foodbatch);

//		The main object to persist: the Detect itself.
		Detect detect = buildDetect(crawlerdetect);

		detect.setName(subject.getName() + "，检测批次：" + foodbatch.getName());
		detect.setFoodbatch(foodbatch);
		detect.setDetectdepartment(detectdepartment);
		detect.setSubject(subject);
		detect = detectService.saveDetect(detect);

		Featurestandard featurestandard = buildFeaturestandard(crawlerdetect);
//		The detect standard still needs to be confirmed.
		if(featurestandard == null) {
			featurestandard = featurestandardDAO.findFeaturestandardById(-2);
		}

		if(featurestandard.getId() == null) {
			featurestandard.setName(feature.getName() + "的标准");
			featurestandard.setFeature(feature);
			featurestandard.setDescription("");
			featurestandard = featurestandardService.saveFeaturestandard(featurestandard);
		}

		Detectdetail detectdetail = buildDetectdetail(crawlerdetect);
		detectdetail.setDetect(detect);
		detectdetail.setFeature(feature);

//		NOTE(review): the guard checks getRedetectresult() but stores
//		getDetectresult() — confirm this cross-check is intentional.
		if(crawlerdetect.getRedetectresult() != null) {
			detectdetail.setValue(crawlerdetect.getDetectresult());
		}
		detectdetail.setName(detect.getName() + feature.getName());
		detectdetail = detectdetailService.saveDetectdetail(detectdetail);

		Detectfeature detectfeature = bulidDetectfeature(crawlerdetect);
		if(detectfeature == null) {
			detectfeature = detectfeatureDAO.findDetectfeatureById(-2);
		}

		if(detectfeature.getId() == null) {
			detectfeature.setFeature(feature);
			detectfeature.setDetect(detect);
			detectfeature = detectfeatureService.saveDetectfeature(detectfeature);
		}

		System.out.println("检测成功保存，事件的ID是：" + detect.getId() + "，检测的名称是：" + detect.getName());

		return true;
	}


	/** Creates a fresh Detect carrying over the crawled record's description. */
	private Detect buildDetect(Crawlerdetect crawlerdetect) {
		final Detect fresh = entityFactory.initDetect();
		fresh.setDescription(crawlerdetect.getDescription());
		return fresh;
	}

	/**
	 * Looks up a Detectfeature by the crawled feature name; returns null when the
	 * record has none, or an unsaved new instance when no match exists.
	 */
	private Detectfeature bulidDetectfeature(Crawlerdetect crawlerdetect) {
		final String featureName = crawlerdetect.getDetectfeature();
		if (featureName == null) {
			return null;
		}
		Detectfeature match = CmiSetUtil.getSoleInstance(detectfeatureDAO.findDetectfeatureByName(featureName));
		if (match == null) {
			match = entityFactory.initDetectfeature();
			match.setName(featureName);
		}
		return match;
	}


	/** Creates a fresh, empty Detectdetail; no lookup by name is performed. */
	private Detectdetail buildDetectdetail(Crawlerdetect crawlerdetect) {
		return entityFactory.initDetectdetail();
	}


	// Looks up a Featurestandard by the crawled standard *value*; returns null
	// when the record has none, or an unsaved new instance when no match exists.
	// NOTE(review): the lookup key is getDetectstandardvalue() but the new
	// entity's name comes from getDetectstandard() — confirm this asymmetry is
	// intentional.
	private Featurestandard buildFeaturestandard(Crawlerdetect crawlerdetect) {
		if(crawlerdetect.getDetectstandardvalue() == null) {
			return null;
		}
		
		Featurestandard instance = CmiSetUtil.getSoleInstance(featurestandardDAO.findFeaturestandardByName(crawlerdetect.getDetectstandardvalue()));
		if(instance == null) {
			instance = entityFactory.initFeaturestandard();
			instance.setName(crawlerdetect.getDetectstandard());
		}
		return instance;
	}


	/**
	 * Looks up a Foodbatch by the crawled batch string; returns null when the
	 * record has none, or an unsaved new instance when no match exists.
	 */
	private Foodbatch buildFoodbatch(Crawlerdetect crawlerdetect) {
		final String batch = crawlerdetect.getFoodbatch();
		if (batch == null) {
			return null;
		}
		Foodbatch match = CmiSetUtil.getSoleInstance(foodbatchDAO.findFoodbatchByName(batch));
		if (match == null) {
			match = entityFactory.initFoodbatch();
			match.setName(batch);
		}
		return match;
	}


	/**
	 * Looks up a Brand by the crawled brand name; returns null when the record
	 * has none, or an unsaved new instance when no match exists.
	 */
	private Brand bulidBrand(Crawlerdetect crawlerdetect) {
		final String brandName = crawlerdetect.getBrand();
		if (brandName == null) {
			return null;
		}
		Brand match = CmiSetUtil.getSoleInstance(brandDAO.findBrandByName(brandName));
		if (match == null) {
			match = entityFactory.initBrand();
			match.setName(brandName);
		}
		return match;
	}


	/**
	 * Looks up a Food by the crawled food name; returns null when the record has
	 * none, or an unsaved new instance when no match exists.
	 */
	private Food buildFood(Crawlerdetect crawlerdetect) {
		final String foodName = crawlerdetect.getFood();
		if (foodName == null) {
			return null;
		}
		Food match = CmiSetUtil.getSoleInstance(foodDAO.findFoodByName(foodName));
		if (match == null) {
			match = entityFactory.initFood();
			match.setName(foodName);
		}
		return match;
	}

	/**
	 * Resolves the Foodtype by the crawled name. Food types are reference data,
	 * so an unknown or missing name falls back to the placeholder type (id -2)
	 * instead of creating a new row.
	 */
	private Foodtype buildFoodtype(Crawlerdetect crawlerdetect) {
		final String typeName = crawlerdetect.getFoodtype();
		if (typeName != null) {
			final Foodtype known = CmiSetUtil.getSoleInstance(foodtypeDAO.findFoodtypeByName(typeName));
			if (known != null) {
				return known;
			}
		}
		return foodtypeDAO.findFoodtypeById(-2);
	}

	/**
	 * Looks up a Feature by the crawled detect-feature name; returns null when
	 * the record has none, or an unsaved new instance when no match exists.
	 */
	private Feature buildFeature(Crawlerdetect crawlerdetect) {
		final String featureName = crawlerdetect.getDetectfeature();
		if (featureName == null) {
			return null;
		}
		Feature match = CmiSetUtil.getSoleInstance(featureDAO.findFeatureByName(featureName));
		if (match == null) {
			match = entityFactory.initFeature();
			match.setName(featureName);
		}
		return match;
	}

	/**
	 * Looks up a Subject by the crawled subject name; returns null when the
	 * record has none, or an unsaved new instance when no match exists.
	 */
	private Subject buildSubject(Crawlerdetect crawlerdetect) {
		final String subjectName = crawlerdetect.getSubject();
		if (subjectName == null) {
			return null;
		}
		Subject match = CmiSetUtil.getSoleInstance(subjectDAO.findSubjectByName(subjectName));
		if (match == null) {
			match = entityFactory.initSubject();
			match.setName(subjectName);
		}
		return match;
	}

	/**
	 * Looks up a Detectdepartment by the crawled department name; returns null
	 * when the record has none, or an unsaved new instance when no match exists.
	 */
	private Detectdepartment buildDetectdepartment(Crawlerdetect crawlerdetect) {
		final String departmentName = crawlerdetect.getDetectdepartment();
		if (departmentName == null) {
			return null;
		}
		Detectdepartment match = CmiSetUtil.getSoleInstance(detectdepartmentDAO.findDetectdepartmentByName(departmentName));
		if (match == null) {
			match = entityFactory.initDetectdepartment();
			match.setName(departmentName);
		}
		return match;
	}


	/**
	 * Looks up a Producer by the crawled producer name; returns null when the
	 * record has none, or an unsaved new instance (carrying the crawled producer
	 * address as its description) when no match exists.
	 */
	private Producer buildProducer(Crawlerdetect crawlerdetect) {
		final String producerName = crawlerdetect.getProducer();
		if (producerName == null) {
			return null;
		}
		Producer match = CmiSetUtil.getSoleInstance(producerDAO.findProducerByName(producerName));
		if (match == null) {
			match = entityFactory.initProducer();
			match.setName(producerName);
			match.setDescription(crawlerdetect.getProduceraddress());
		}
		return match;
	}

}