package cn.edu.scau.cmi.crawler.controller;

import java.io.File;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

import javax.servlet.http.HttpServletRequest;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.bind.annotation.ModelAttribute;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.servlet.ModelAndView;

import cn.edu.scau.cmi.crawler.dao.ScauCmiCrawlerdetectDAO;
import cn.edu.scau.cmi.crawler.service.CrawlerdetectImportService;
import cn.edu.scau.cmi.ema.domain.Crawlerfile;
import cn.edu.scau.cmi.ema.domain.Crawlerfilestandard;
import cn.edu.scau.cmi.ema.domain.base.CmiPagination;
import cn.edu.scau.cmi.front.controller.FrontCmiEmaController;

/*
 * Handles (1) crawled files and (2) the crawlerfile table.
 *
 * The crawled files cover failed detections, qualified detections and
 * explanatory documents, and must be imported appropriately.
 */

@Controller
public class CrawlerfileImportController extends FrontCmiEmaController {

	@Autowired CrawlerdetectImportService crawlerdetectImportService;

	@Autowired ScauCmiCrawlerdetectDAO scauCmiCrawlerdetectDAO;

	/**
	 * Dashboard listing crawled .xlsx files still in the initial crawl state
	 * (status id 1 = not imported yet); each row offers an "import column" action.
	 */
	@RequestMapping(value = "/importCrawlerfilecolumnDashboard", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView importCrawlerfilecolumnDashboard(HttpServletRequest request, @ModelAttribute CmiPagination pagination) {

		ModelAndView mav = new ModelAndView();

		Set<Crawlerfile> allCrawlerfiles = crawlerfileDAO.findAllCrawlerfiles();
		Set<Crawlerfile> crawlerfiles = new HashSet<Crawlerfile>();
		// Keep only .xlsx files whose status id is 1 ("not imported yet").
		for (Crawlerfile file : allCrawlerfiles) {
			if (file.getCrawlerfilestatus().getId() == 1 && file.getName().contains(".xlsx")) {
				crawlerfiles.add(file);
			}
		}

		mav.addObject("crawlerfiles", crawlerfiles);
		mav.addObject("entityName", "crawlerfile");
		mav.setViewName("adapter/importCrawlerfilecolumnDashboard.jsp");
		return mav;
	}

	/**
	 * Imports the column headers of a single crawled excel file.
	 * (Originally tested with crawlerfile id 329.)
	 */
	@RequestMapping(value = "/importCrawlerfilecolumn/{excelId}", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView importCrawlerfilecolumn(@PathVariable Integer excelId) {
		ModelAndView mav = new ModelAndView();
		crawlerdetectImportService.importColumn(crawlerfileDAO.findCrawlerfileById(excelId));
		mav.setViewName("redirect:/listCrawlerfilesDashboard");
		return mav;
	}

	/**
	 * Batch import of column headers for up to {fileQuantity} crawled files,
	 * triggered from the dashboard list.
	 *
	 * FIX: removed the @ResponseBody annotation — this method returns a
	 * ModelAndView redirect, and @ResponseBody would serialize the return value
	 * into the response body instead of resolving the redirect (all sibling
	 * batch handlers are plain @RequestMapping methods).
	 */
	@RequestMapping(value = "/batchImportCrawlerfilecolumn/{fileQuantity}", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView batchImportCrawlerfilecolumn(@PathVariable Integer fileQuantity) {
		ModelAndView mav = new ModelAndView();
		// Filetype 2 + exist=true + import status 1 ("not imported yet"); -1 = no offset.
		Set<Crawlerfile> files = scauCmiCrawlerdetectDAO.findCrawlerfileByFiletypeExistCrawlerfilestatus(2, true, 1, -1, fileQuantity);

		System.out.println("总共有" + files.size() + "个文件需要导入");
		int i = 1;
		for (Crawlerfile file : files) {
			System.out.println();
			System.out.println("开始导入第" + i + "个文件，还有" + (files.size() - i) + "需要导入");
			// FIX: the counter was also incremented inside the success branch below,
			// so every imported file advanced the progress log by two.
			i++;
			System.out.println("文件名：" + file.getName());
			if (file.getName().contains(".xlsx") && file.getExist()) {
				System.out.println("************** " + file.getName() + " **************准备导入");
				crawlerdetectImportService.importColumn(file);
				System.out.println("************** " + file.getName() + " **************的列已经导入");
				System.out.println();
			} else {
				System.out.println("文件不存在或者不是Xlsx文件：" + file.getName() + "");
			}
		}
		System.out.println("！！！！！！！！！！批量导入结束，请查看数据！！！！！！！！！！");
		mav.setViewName("redirect:/batchCrawlerfilesDashboard");
		return mav;
	}

	/**
	 * Dashboard listing crawled .xlsx files that have no standard assigned yet;
	 * each row offers an "assign standard" action.
	 */
	@RequestMapping(value = "/updateCrawlerfilestandardDashboard", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView updateCrawlerfilestandardDashboard(HttpServletRequest request, @ModelAttribute CmiPagination pagination) {

		ModelAndView mav = new ModelAndView();

		Set<Crawlerfile> allCrawlerfiles = crawlerfileDAO.findAllCrawlerfiles();
		Set<Crawlerfile> crawlerfiles = new HashSet<Crawlerfile>();
		// Keep only .xlsx files without an assigned standard.
		for (Crawlerfile file : allCrawlerfiles) {
			if (file.getCrawlerfilestandard() == null && file.getName().contains(".xlsx")) {
				crawlerfiles.add(file);
			}
		}

		mav.addObject("crawlerfiles", crawlerfiles);
		mav.addObject("entityName", "crawlerfile");
		mav.setViewName("adapter/updateCrawlerfilestandardDashboard.jsp");
		return mav;
	}

	/** Paginated listing of crawled files. (Currently of little use per the original author.) */
	@RequestMapping(value = "/listCrawlerfilesDashboard", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView listCrawlerfilesDashboard(HttpServletRequest request, @ModelAttribute CmiPagination pagination) {
		ModelAndView mav = new ModelAndView();

		pagination = crawlerfilePaginationService.updatePagination(request, pagination);
		// Only files in the initial crawl state are of interest here; extra
		// property criteria were considered but are not applied at the moment.
		Set<Crawlerfile> excels = crawlerfilePaginationService.getPaginationCrawlerfiles(pagination);

		mav.addObject("excels", excels);
		mav.addObject("entityName", "excel");
		mav.addObject("pagination", pagination);
		mav.setViewName("adapter/listCrawlerfilesDashboard.jsp");
		return mav;
	}

	/**
	 * Assigns a standard to a single spreadsheet file.
	 * (Originally tested with crawlerfile id 329.)
	 */
	@RequestMapping(value = "/updateCrawlerfilestandard/{excelId}", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView updateCrawlerfilestandard(@PathVariable Integer excelId) {
		ModelAndView mav = new ModelAndView();
		Crawlerfile file = crawlerfileDAO.findCrawlerfileById(excelId);
		crawlerdetectImportService.updateCrawlerfilestandard(file);
		mav.setViewName("redirect:/listCrawlerfilesDashboard");
		return mav;
	}

	/**
	 * Batch-assigns standards to up to {fileQuantity} existing .xlsx files that
	 * have no standard yet. On failure the file is tagged with the sentinel
	 * standard id -2; on success it advances to status id 8.
	 */
	@RequestMapping(value = "/batchUpdateCrawlerfilestandard/{fileQuantity}", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView batchUpdateCrawlerfilestandard(@PathVariable Integer fileQuantity) {
		ModelAndView mav = new ModelAndView();
		Set<Crawlerfile> allFiles = crawlerfileDAO.findAllCrawlerfiles();
		Set<Crawlerfile> files = new HashSet<Crawlerfile>();

		// Collect existing .xlsx files whose standard is still unset,
		// stopping once fileQuantity candidates have been gathered.
		// (fileQuantity of -1 intentionally never matches, meaning "no limit".)
		for (Crawlerfile file : allFiles) {
			if (file.getExist() && file.getName().contains(".xlsx") && file.getCrawlerfilestandard() == null) {
				files.add(file);
			}
			if (files.size() == fileQuantity) {
				break;
			}
		}

		for (Crawlerfile file : files) {
			System.out.println("准备标准化的文件id是：" + file.getId() + "，文件名是" + file.getName());
			if (file.getExist()) {
				Crawlerfilestandard standard = crawlerdetectImportService.updateCrawlerfilestandard(file);

				if (standard == null) {
					System.out.println("需要标准化的文件：" + file.getId() + "  " + file.getName() + "没有成功，请仔细查看原因！！！");
					file.setDescription(file.getDescription() + "，标准化没有成功");
					// -2 is the sentinel "standardization failed" standard.
					file.setCrawlerfilestandard(crawlerfilestandardDAO.findCrawlerfilestandardById(-2));
					crawlerfileService.saveCrawlerfile(file);
				} else {
					System.out.println("已经标准化的文件：" + file.getName());
					System.out.println("***标准Id是：" + standard.getId() + "，名称是：" + standard.getName());
					file.setCrawlerfilestandard(standard);
					// Status 8: standard assigned.
					file.setCrawlerfilestatus(crawlerfilestatusDAO.findCrawlerfilestatusById(8));
					crawlerfileService.saveCrawlerfile(file);
				}
			}
		}
		System.out.println("！！！！！！！！！！！！批量导入结束，请查看数据库的结果！！！！！！！！！！！！");
		System.out.println();
		mav.setViewName("redirect:/batchCrawlerfilesDashboard");
		return mav;
	}

	/**
	 * Deletes every crawlerfile record that shares {@code toClearFile}'s name,
	 * keeping {@code toClearFile} itself. Shared by the single and batch
	 * duplicate-cleanup handlers.
	 */
	private void deleteDuplicateRecordsOf(Crawlerfile toClearFile) {
		String filename = toClearFile.getName();
		Integer fileId = toClearFile.getId();

		Set<Crawlerfile> crawlerfiles = crawlerfileDAO.findCrawlerfileByName(filename);

		if (crawlerfiles.size() == 1) {
			System.out.println("id是" + fileId + "，文件名是" + filename + "没有重复的记录");
		} else {
			System.out.println("id是 " + fileId + "，名字是" + toClearFile.getName() + " 的文件重复，需要删除重复的记录");
			System.out.println();

			for (Crawlerfile file : crawlerfiles) {
				// FIX: log printed the whole entity object after "id是 " instead of its id.
				System.out.println("id是 " + file.getId() + "，名字是" + file.getName() + " 的文件重复，删除重复的记录");
				// FIX: `file.getId() != fileId` compared boxed Integers by reference,
				// so equal ids outside the autobox cache (> 127) looked "different"
				// and the record meant to be kept could be deleted too.
				if (!file.getId().equals(fileId)) {
					crawlerfileService.deleteCrawlerfile(file);
					System.out.println("id是 " + file.getId() + "，名字是" + file.getName() + " 的文件已经删除");
				}
			}
		}
	}

	/** Deletes duplicate crawlerfile records sharing the given record's file name. */
	@RequestMapping(value = "/deleteDuplicateCrawlerfile/{crawlerfileId}", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView deleteDuplicateCrawlerfile(HttpServletRequest request, @PathVariable Integer crawlerfileId) {
		ModelAndView mav = new ModelAndView();
		Crawlerfile toClearFile = crawlerfileDAO.findCrawlerfileById(crawlerfileId);
		deleteDuplicateRecordsOf(toClearFile);
		mav.setViewName("redirect:/listCrawlerfilesDashboard");
		return mav;
	}

	/** Deletes duplicate crawlerfile records across the whole table. */
	@RequestMapping(value = "/batchDeleteDuplicateCrawlerfile", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView batchDeleteDuplicateCrawlerfile(HttpServletRequest request) {
		ModelAndView mav = new ModelAndView();

		Set<Crawlerfile> allFiles = crawlerfileDAO.findAllCrawlerfiles();
		for (Crawlerfile toClearFile : allFiles) {
			deleteDuplicateRecordsOf(toClearFile);
		}
		mav.setViewName("redirect:/batchCrawlerfilesDashboard");
		return mav;
	}

	/**
	 * Batch-assigns a filetype to every crawlerfile that does not have one yet:
	 * 3 = word document, 2 = failed-detect excel ("不合格" in the name),
	 * 1 = qualified-detect excel, -2 = unknown.
	 */
	@RequestMapping(value = "/batchUpdatFiletype", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView batchUpdatFiletype(HttpServletRequest request) {
		ModelAndView mav = new ModelAndView();
		Set<Crawlerfile> crawlerfiles = crawlerfileDAO.findAllCrawlerfiles();
		for (Crawlerfile crawlerfile : crawlerfiles) {
			if (crawlerfile.getFiletype() == null) {
				System.out.println("文件Id和名称：" + crawlerfile.getId() + crawlerfile.getName());

				// NOTE(review): ".doc" also matches ".docx", and the excel checks use
				// "xlsx" without a dot — preserved as-is; confirm whether intended.
				if (crawlerfile.getName().contains(".doc")) {
					crawlerfile.setFiletype(filetypeDAO.findFiletypeById(3));
				} else if (crawlerfile.getName().contains("不合格") && crawlerfile.getName().contains("xlsx")) {
					crawlerfile.setFiletype(filetypeDAO.findFiletypeById(2));
				} else if (crawlerfile.getName().contains("xlsx")) {
					crawlerfile.setFiletype(filetypeDAO.findFiletypeById(1));
				} else {
					crawlerfile.setFiletype(filetypeDAO.findFiletypeById(-2));
				}
				crawlerfile = crawlerfileService.saveCrawlerfile(crawlerfile);
			}
		}
		mav.setViewName("redirect:/batchCrawlerfilesDashboard");
		System.out.println();
		System.out.println("!!!!!!!!!!!!!文件类型已经更新完毕!!!!!!!!!!!!!!!");
		return mav;
	}

	/**
	 * Checks whether the physical file behind a crawlerfile record exists in any
	 * of the three download folders (qualified / unqualified / knowledge base).
	 * Shared by the single and batch exist-flag handlers.
	 */
	private boolean physicalFileExists(Crawlerfile crawlerfile) {
		String siteName = crawlerfile.getUrl().getSite().getName();
		// The last 7 characters of the site name are a suffix; the rest names the
		// download directory. NOTE(review): assumes siteName has at least 7 chars.
		String samrAddressName = siteName.substring(0, siteName.length() - 7);

		File qualifiedFile = new File(samrDirectory + samrAddressName + "/合格信息/" + crawlerfile.getName());
		File unqualifiedFile = new File(samrDirectory + samrAddressName + "/不合格信息/" + crawlerfile.getName());
		File knowledgeFile = new File(samrDirectory + samrAddressName + "/知识库/" + crawlerfile.getName());

		return qualifiedFile.exists() || unqualifiedFile.exists() || knowledgeFile.exists();
	}

	/**
	 * Batch-updates the "exist" flag of every crawlerfile record.
	 * NOTE(review): the fileQuantity path variable is accepted but ignored —
	 * every record is scanned; confirm whether a limit was intended.
	 */
	@RequestMapping(value = "/batchUpdatExist/{fileQuantity}", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView batchUpdatExist(HttpServletRequest request, @PathVariable Integer fileQuantity) {

		ModelAndView mav = new ModelAndView();

		Set<Crawlerfile> crawlerfiles = crawlerfileDAO.findAllCrawlerfiles();
		for (Crawlerfile crawlerfile : crawlerfiles) {
			if (physicalFileExists(crawlerfile)) {
				crawlerfile.setExist(true);
				System.out.println(crawlerfile.getId() + crawlerfile.getName() + " 的文件存在");
			} else {
				crawlerfile.setExist(false);
				crawlerfile.setDescription("文件不存在，请检查重新下载");
				System.out.println(crawlerfile.getId() + crawlerfile.getName() + " 的文件不存在");
			}
			crawlerfileService.saveCrawlerfile(crawlerfile);
		}

		mav.setViewName("redirect:/batchCrawlerfilesDashboard");
		return mav;
	}

	/** Updates the "exist" flag of a single crawlerfile record. */
	@RequestMapping(value = "/updatExist/{fileId}", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView updatExist(HttpServletRequest request, @PathVariable Integer fileId) {

		ModelAndView mav = new ModelAndView();

		Crawlerfile crawlerfile = crawlerfileDAO.findCrawlerfileById(fileId);
		crawlerfile.setExist(physicalFileExists(crawlerfile));
		crawlerfileService.saveCrawlerfile(crawlerfile);

		mav.setViewName("redirect:/listCrawlerfilesDashboard");
		return mav;
	}

	/**
	 * Lists importable excel files per city. (Marked obsolete by the original
	 * author; the address set is empty pending the original TODO.)
	 */
	@RequestMapping(value = "/selectImportXls", method = RequestMethod.GET)
	public ModelAndView selectImportXls() {
		ModelAndView mav = new ModelAndView();

		Map<String, String[]> cityExcelsMap = new LinkedHashMap<String, String[]>();

		// TODO: populate the address list (empty in the original code as well).
		Set<String> addresses = new HashSet<String>();
		for (String address : addresses) {
			String baseDirectory = samrDirectory;
			addExcelFiles(baseDirectory, cityExcelsMap, address);
		}

		mav.addObject("cityExcelsMap", cityExcelsMap);
		mav.setViewName("adapter/selectImportXls.jsp");
		return mav;
	}

	/**
	 * Puts the file names found under the city's pending "不合格检测" directory
	 * into the map (rebuilt on every call so deleted files drop out).
	 */
	private void addExcelFiles(String baseDirectory, Map<String, String[]> cityExcelsMap, String cityName) {
		File cityFileDir = new File(baseDirectory + "待处理/" + cityName + "/不合格检测");
		String[] files = cityFileDir.list();
		// FIX: File.list() returns null when the directory is missing or unreadable;
		// the original overwrote its {} initializer and could store null in the map.
		if (files == null) {
			files = new String[0];
		}
		cityExcelsMap.put(cityName, files);
	}

	/** Control panel for multi-machine concurrent import into crawlerevent/crawlerdetect/event/detect. */
	@RequestMapping(value = "/batchCrawlerfilesImportDashboard", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView batchCrawlerfilesImportDashboard(HttpServletRequest request, @ModelAttribute CmiPagination pagination) {
		ModelAndView mav = new ModelAndView();
		mav.setViewName("adapter/batchCrawlerfilesImportDashboard.jsp");
		return mav;
	}

	/** Main dashboard for batch operations; shows files whose exist flag is still undetermined. */
	@RequestMapping(value = "/batchCrawlerfilesDashboard", method = { RequestMethod.POST, RequestMethod.GET })
	public ModelAndView batchCrawlerfilesDashboard() throws Exception {
		ModelAndView mav = new ModelAndView();
		Set<Crawlerfile> notSureExistCrawlerfiles = scauCmiCrawlerdetectDAO.findDetectCrawlerfileByNullExistStatus(-1, -1);
		mav.addObject("notSureExistCrawlerfiles", notSureExistCrawlerfiles);
		mav.setViewName("adapter/batchCrawlerfilesDashboard.jsp");
		return mav;
	}
}