package com.infinitus.nginxInfo.task;

import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.annotation.Resource;

import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;

import com.alibaba.fastjson.JSONObject;
import com.infinitus.nginxInfo.config.KafkaConfig;
import com.infinitus.nginxInfo.domain.KafkaStartupLog;
import com.infinitus.nginxInfo.domain.nginxinfo.NginxInfo;
import com.infinitus.nginxInfo.domain.switchinfo.SwitchInfo;
import com.infinitus.nginxInfo.service.KafkaLogService;
import com.infinitus.nginxInfo.service.RedisService;
import com.infinitus.nginxInfo.service.util.AES;
import com.infinitus.nginxInfo.service.util.Base64;


@Component
public class Cleaner {

	private static final Logger LOG = LoggerFactory.getLogger(Cleaner.class);

	// Compiled once: matching runs per record, so recompiling per call in
	// dataHandle() was wasted work. Regex strings are unchanged.
	private static final Pattern ACCESS_TOKEN_PATTERN = Pattern.compile("(\\?|\\&)access_token=([^\\&]+)");
	private static final Pattern USER_ID_PATTERN = Pattern.compile("#+([\\d]+)");

	@Autowired
	private KafkaLogService kafkaLogService;

	@Resource(name = "redisServiceDB0")
	private RedisService redisService;

	@Autowired
	private KafkaConfig kafkaConfig;

	// Global shutdown flag flipped from another thread; volatile so the
	// polling loop in clean() is guaranteed to observe the update.
	public static volatile Boolean SHUTDOWN = false;

	/**
	 * Consumes raw log records from {@code sourceTopic}, cleans them on a small
	 * worker pool and republishes the encrypted results to
	 * {@code targetTopic + "_etl"}. Runs until {@link #SHUTDOWN} becomes
	 * {@code true} or the thread is interrupted; offsets are committed manually
	 * after each dispatched batch.
	 *
	 * @param sourceTopic topic the raw records are consumed from
	 * @param targetTopic logical target topic; selects the nginx or switch
	 *                    cleaning path via {@link KafkaConfig}
	 */
	@Async
	public void clean(String sourceTopic, final String targetTopic) {

		LOG.info("启动kafka消费端接收数据");
		KafkaConsumer<String, String> consumer = createConsumer(sourceTopic);
		LOG.info("启动kafka生产者发送数据");
		Properties props = kafkaConfig.getProperties(true, false);
		final Producer<String, String> producer = new KafkaProducer<String, String>(props);

		// Shared with the worker tasks. The record counter is an AtomicInteger:
		// the previous plain-Integer get/put was a non-atomic read-modify-write
		// across the 3 worker threads (lost updates).
		final Map<String, Object> mainMap = new ConcurrentHashMap<String, Object>();
		mainMap.put("topic", targetTopic);
		mainMap.put("totalCount", new AtomicInteger(0));

		ExecutorService fixedThreadPool = Executors.newFixedThreadPool(3);
		final CloseableHttpClient httpclient = HttpClients.createDefault();
		try {
			while (!SHUTDOWN) {
				try {
					Thread.sleep(10);
					ConsumerRecords<String, String> records = consumer.poll(200);
					// Only the first future of each batch is kept: it serves purely
					// as a progress probe whose event date is logged periodically.
					Future<String> firstFuture = null;
					for (final ConsumerRecord<String, String> record : records) {
						final String value = record.value();
						Future<String> submitted = fixedThreadPool.submit(new Callable<String>() {
							@Override
							public String call() throws Exception {
								try {
									if (kafkaConfig.getCleanNginxTopic().equals(targetTopic)) {
										return multiThreadhandNginx(mainMap, producer, httpclient, value);
									} else if (kafkaConfig.getCleanSwitchTopic().equals(targetTopic)) {
										return multiThreadhandSwitch(mainMap, producer, httpclient, value);
									}
								} catch (Exception e) {
									LOG.error("清洗数据失败", e);
								}
								return null;
							}
						});
						if (firstFuture == null) {
							firstFuture = submitted;
						}
					}

					String appprotimestamp = null;
					if (firstFuture != null) {
						try {
							appprotimestamp = firstFuture.get();
						} catch (InterruptedException e) {
							// Preserve interrupt status and leave the loop instead
							// of swallowing the interruption.
							Thread.currentThread().interrupt();
							break;
						} catch (Exception e) {
							LOG.error("获取清洗结果失败", e);
						}
					}

					// Progress marker: log the probe record's event date roughly
					// every 100 cleaned records.
					int totalCount = ((AtomicInteger) mainMap.get("totalCount")).get();
					if (totalCount % 100 == 0 && appprotimestamp != null) {
						LOG.info("{}", appprotimestamp);
					}
					// Manual commit only after the batch has been dispatched.
					consumer.commitSync();
				} catch (InterruptedException e) {
					Thread.currentThread().interrupt();
					break;
				} catch (Exception e) {
					LOG.error("拉取日志数据失败", e);
				}
			}
		} finally {
			// Release everything on shutdown; previously all four resources leaked.
			fixedThreadPool.shutdown();
			try { producer.close(); } catch (Exception e) { LOG.warn("关闭producer失败", e); }
			try { consumer.close(); } catch (Exception e) { LOG.warn("关闭consumer失败", e); }
			try { httpclient.close(); } catch (Exception e) { LOG.warn("关闭httpclient失败", e); }
		}
	}

	/**
	 * Cleans one raw nginx record on a worker thread: parses, filters, enriches
	 * and republishes it to {@code targetTopic + "_etl"}.
	 *
	 * @return the record's event date (used by the caller as a progress marker)
	 *         even when the record is filtered out, or {@code null} if the raw
	 *         value could not be parsed or handling failed
	 */
	private String multiThreadhandNginx(Map<String, Object> mainMap, Producer<String, String> producer, CloseableHttpClient httpclient, String value) {
		// Fixed per run, read from the shared map.
		String targetTopic = (String) mainMap.get("topic");
		NginxInfo one = NginxInfo.converts(value);

		if (one != null) {
			try {
				// Apply the filter rules; a null result means the record passed.
				String checkResult = FilterRules.checkAll(one);
				if (checkResult == null) {
					dataHandle(httpclient, one, targetTopic);
					// Atomic increment — several worker threads run this concurrently.
					((AtomicInteger) mainMap.get("totalCount")).incrementAndGet();
					String encrypt = NginxInfo.encrypt(targetTopic, one);
					if (encrypt != null) {
						producer.send(new ProducerRecord<String, String>(targetTopic + "_etl", encrypt));
					}
				}
				return one.getEventDate();
			} catch (Exception e) {
				// Previously swallowed silently; at least record why a record was dropped.
				LOG.error("清洗nginx日志失败", e);
			}
		}
		return null;
	}

	/**
	 * Cleans one raw switch record on a worker thread: parses, filters, enriches
	 * and republishes it to {@code targetTopic + "_etl"}.
	 *
	 * @return the record's event date (used by the caller as a progress marker)
	 *         even when the record is filtered out, or {@code null} if the raw
	 *         value could not be parsed or handling failed
	 */
	private String multiThreadhandSwitch(Map<String, Object> mainMap, Producer<String, String> producer, CloseableHttpClient httpclient, String value) {
		// Fixed per run, read from the shared map.
		String targetTopic = (String) mainMap.get("topic");
		SwitchInfo one = SwitchInfo.converts(value);

		if (one != null) {
			try {
				// Apply the filter rules; a null result means the record passed.
				String checkResult = FilterRules.checkAll(one);
				if (checkResult == null) {
					dataHandle(httpclient, one, targetTopic);
					// Atomic increment — several worker threads run this concurrently.
					((AtomicInteger) mainMap.get("totalCount")).incrementAndGet();
					String encrypt = SwitchInfo.encrypt(targetTopic, one);
					if (encrypt != null) {
						producer.send(new ProducerRecord<String, String>(targetTopic + "_etl", encrypt));
					}
				}
				return one.getEventDate();
			} catch (Exception e) {
				// Previously swallowed silently; at least record why a record was dropped.
				LOG.error("清洗switch日志失败", e);
			}
		}
		return null;
	}

	/**
	 * Enriches a cleaned nginx record: resolves the (non-natural) year-week for
	 * its event date from Redis and, where possible, extracts the user id —
	 * either from the gateway access token in the request, or by AES-decrypting
	 * the ident field for non-gateway applications.
	 *
	 * @throws Exception on decode/decrypt failures; the caller logs and drops the record
	 */
	public void dataHandle(CloseableHttpClient httpclient, NginxInfo one, String targetTopic) throws Exception {
		// 日期对应周(非自然周): map the event date to its week via Redis.
		String date = one.getEventDate();
		if (date != null && !"".equals(date.trim())) {
			if (redisService.exists(date)) {
				one.setYearWeek(redisService.get(date));
			}
		}
		String request = one.getRequest();
		String ident = one.getIdent();
		if ("gateway".equals(one.getFields().getApplication_name())) {
			if (FilterRules.isNotBlank(request)) {
				Matcher tkMec = ACCESS_TOKEN_PATTERN.matcher(request);
				if (tkMec.find()) {
					// Token payload is URL-encoded base64; decode bytes explicitly
					// as UTF-8 instead of relying on the platform default charset.
					String decStr = new String(Base64.decode(URLDecoder.decode(tkMec.group(2), "UTF-8")), "UTF-8");
					Matcher usMec = USER_ID_PATTERN.matcher(decStr);
					if (usMec.find()) {
						applyUserId(one, usMec.group(1));
					}
				}
			}
		} else {
			if (FilterRules.isNotBlank(request) && !"-".equals(ident)) {
				// Strip escaped-quote artifacts before decrypting.
				ident = ident.replaceAll("\"?\\\\x22\"?", "");
				String jsonStr = AES.decrypt(ident, kafkaConfig.getUserDecryKey());
				JSONObject json = JSONObject.parseObject(jsonStr);
				applyUserId(one, json.getString("userName"));
			}
		}
	}

	// Splits "id-att" user strings into userID/userAtt; plain ids go to userID only.
	private void applyUserId(NginxInfo one, String userid) {
		if (FilterRules.isNotBlank(userid) && userid.indexOf("-") > -1) {
			one.setUserID(userid.split("\\-")[0]);
			one.setUserAtt(userid.split("\\-")[1]);
		} else {
			one.setUserID(userid);
		}
	}

	/**
	 * Enriches a cleaned switch record: resolves the (non-natural) year-week for
	 * its event date from Redis.
	 *
	 * @throws Exception propagated from the Redis lookup; the caller logs and drops the record
	 */
	public void dataHandle(CloseableHttpClient httpclient, SwitchInfo one, String targetTopic) throws Exception {
		// 日期对应周(非自然周): map the event date to its week via Redis.
		String date = one.getEventDate();
		if (date != null && !"".equals(date.trim())) {
			if (redisService.exists(date)) {
				one.setYearWeek(redisService.get(date));
			}
		}
	}

	/**
	 * Creates a manually-committed consumer explicitly assigned to every
	 * partition of {@code topic}. On the very first run (no committed offset for
	 * a partition yet) the current position is persisted as a startup log and
	 * used as the starting offset.
	 *
	 * @param topic topic whose partitions are assigned
	 * @return the configured consumer; the caller owns and must close it
	 */
	private KafkaConsumer<String, String> createConsumer(String topic) {
		LOG.info("订阅：{}", topic);
		Properties props = kafkaConfig.getProperties(false, true);
		LOG.info("kafka config:" + props);
		KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);

		List<TopicPartition> topicPartitions = new ArrayList<TopicPartition>();
		List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
		for (PartitionInfo partition : partitionInfos) {
			topicPartitions.add(new TopicPartition(topic, partition.partition()));
		}
		consumer.assign(topicPartitions);

		for (TopicPartition topicPartition : topicPartitions) {
			// Initialize the offset position; only true on the very first run.
			if (consumer.committed(topicPartition) == null) {
				long position = consumer.position(topicPartition);
				LOG.info("topic partition " + topicPartition.topic() + ":" + topicPartition.partition() + ", offset: " + position);
				kafkaLogService.save(new KafkaStartupLog(topic, "" + topicPartition.partition(), "" + position));
				consumer.seek(topicPartition, position);
			}
		}
		return consumer;
	}

}
