import os
import json
import chardet
from datetime import datetime, timedelta
import zipfile
import tempfile
from log_uploader import upload_log_file
from dingtalk_bot import DingTalkBot
from logging_utils import Logger


class LogFilter:
	"""Filter a serial-port log file by time range and per-board keywords.

	On a match the lines inside the (extended) time window can be packaged
	into a ZIP, uploaded via HTTP, and a summary pushed to a DingTalk group.

	NOTE: both JSON config files are read at class-definition (import) time,
	so a missing or unreadable config fails fast when the module is imported.
	"""
	
	LAYOUT_CONFIG_PATH = os.path.join("..", "config", "layout_config.json")  # keyword (layout) config file path
	DINGTALK_CONFIG_PATH = os.path.join("..", "config", "dingtalk_config.json")  # DingTalk push config file path
	
	if not os.path.isfile(LAYOUT_CONFIG_PATH):
		raise FileNotFoundError(f"关键字配置文件不存在: {LAYOUT_CONFIG_PATH}")
	if not os.path.isfile(DINGTALK_CONFIG_PATH):
		# BUG FIX: this message used to claim the *keyword* config was missing
		# (copy-paste from the check above); corrected to name the DingTalk config.
		raise FileNotFoundError(f"钉钉推送配置文件不存在: {DINGTALK_CONFIG_PATH}")
	
	with open(LAYOUT_CONFIG_PATH, "r", encoding="utf-8") as f:
		layout_data = json.load(f)  # {board: {keyword_name: pattern}} — consumed by _load_keywords_from_layout_config
	with open(DINGTALK_CONFIG_PATH, "r", encoding="utf-8") as f:
		dingtalk_data = json.load(f)  # presumably a list of {"group_name": ..., ...} bot configs — verify against DingTalkBot
	
	def __init__(self, config: dict):
		"""
		Initialize the log filter from *config* plus layout_config.json.

		:param config: configuration dict, for example::

			{
				"port": "COM4",
				"board": "T41N",
				"log_file": "D:\\log\\串口_2025-07-18_9_19_49.log",
				"push": "test",
				"enabled": true,
				"case_sensitive": false
			}
		:raises FileNotFoundError: if the initial log file (or a newer rollover) cannot be found
		:raises ValueError: if the board has no keywords or the filename prefix cannot be extracted
		"""
		self.port = config.get("port", "")
		self.board = config.get("board", "")
		self.push = config.get("push", "")
		self.enabled = config.get("enabled", False)
		self.file_path = config.get("log_file")  # user-supplied starting log file path
		self.keyword_dict = {}  # {keyword_name: pattern}
		self.case_sensitive = config.get("case_sensitive", False)
		self.encoding = None  # detected once the actual log file is resolved
		
		# Load the keyword set for this board.
		self._load_keywords_from_layout_config()
		
		# Resolve the newest log file (logs may roll over daily).
		self._find_latest_log_file()
		
		# DingTalk bot for the configured push group (passes None if the group is not found).
		self.bot = DingTalkBot(next((item for item in self.dingtalk_data if item["group_name"] == self.push), None))
	
	def _load_keywords_from_layout_config(self):
		"""Load the keyword dict for the current board from layout_config.json.

		:raises ValueError: if the board has no entry in the config.
		"""
		board_config = self.layout_data.get(self.board)
		if not board_config:
			raise ValueError(f"layout_config.json 中未找到 board: {self.board}")
		
		self.keyword_dict = board_config
		Logger.info(f"从 layout_config.json 加载了 {len(self.keyword_dict)} 个关键字。")
	
	def _extract_prefix_from_filename(self, filename: str) -> str:
		"""
		Extract the stable filename prefix, supporting several naming schemes:
		- 串口_2025-07-18_9_19_49.log → "串口_2025-07-18_"
		- serial-com33-2600-00-00.log → "serial-com33"
		"""
		base_name, ext = os.path.splitext(filename)
		
		# Scheme 1: underscore-separated name with an embedded ISO date.
		parts = base_name.split('_')
		if len(parts) >= 3:
			date_part = parts[1]
			if '-' in date_part and len(date_part) == 10:  # e.g. 2025-07-18
				return '_'.join(parts[:2]) + '_'
		
		# Scheme 2: dash-separated serial-port name.
		parts = base_name.split('-')
		if len(parts) >= 2:
			# Keep the first two segments, e.g. serial-com33.
			return '-'.join(parts[:2])
		
		# Fallbacks: first token before a separator, else the first 5 characters.
		# (The old unreachable `return ""` after this chain has been removed.)
		if '_' in base_name:
			return base_name.split('_')[0] + '_'
		elif '-' in base_name:
			return base_name.split('-')[0]
		else:
			return base_name[:5]
	
	def _search_for_file(self, directory: str, prefix: str) -> str:
		"""
		Return the newest (by mtime) file in *directory* whose name starts
		with *prefix*, or "" if the directory or a match does not exist.
		"""
		if not os.path.isdir(directory):
			return ""
		
		candidates = [
			os.path.join(directory, name)
			for name in os.listdir(directory)
			if name.startswith(prefix) and os.path.isfile(os.path.join(directory, name))
		]
		if not candidates:
			return ""
		
		# max() avoids sorting the whole list just to take one element.
		return max(candidates, key=os.path.getmtime)
	
	def _find_latest_log_file(self):
		"""
		Using the prefix of the configured log file, locate the newest log in
		today's directory, falling back to yesterday's dated directory.

		:raises FileNotFoundError: if the initial file or any prefixed file is missing
		:raises ValueError: if no prefix can be extracted from the filename
		"""
		if not self.file_path or not os.path.isfile(self.file_path):
			raise FileNotFoundError(f"初始日志文件不存在: {self.file_path}")
		
		directory = os.path.dirname(self.file_path)
		filename = os.path.basename(self.file_path)
		
		# Strip the timestamp suffix to get the stable prefix.
		prefix = self._extract_prefix_from_filename(filename)
		if not prefix:
			# BUG FIX: the message previously printed a literal "(unknown)"
			# placeholder instead of the offending filename.
			raise ValueError(f"无法从文件名提取前缀: {filename}")
		
		# Prefer today's directory.
		today = datetime.now().date()
		latest_file = self._search_for_file(directory, prefix)
		
		if not latest_file:
			# Fall back to yesterday's dated sibling directory.
			yesterday = today - timedelta(days=1)
			yesterday_dir = os.path.join(os.path.dirname(directory), yesterday.strftime("%Y-%m-%d"))
			if os.path.isdir(yesterday_dir):
				latest_file = self._search_for_file(yesterday_dir, prefix)
		
		if not latest_file:
			raise FileNotFoundError(f"未找到以 '{prefix}' 开头的日志文件")
		
		self.file_path = latest_file
		self.encoding = self._detect_encoding()
		Logger.info(f"✅ 使用最新日志文件: {self.file_path}")
	
	def _detect_encoding(self, sample_size=100000):
		"""Detect the log file's encoding from its first *sample_size* bytes."""
		Logger.info(f"开始检测文件编码：{self.file_path}")
		with open(self.file_path, "rb") as f:
			raw_data = f.read(sample_size)
		result = chardet.detect(raw_data)
		encoding = result["encoding"]  # may be None if detection fails; open() then uses the default
		Logger.info(f"检测到的编码为: {encoding}")
		return encoding
	
	def filter_by_time_range(self, start_time, end_time, output_zip=None, upload_config=None):
		"""
		Filter log lines inside [start_time, end_time] against the board
		keywords, grouped per keyword. When matches exist and *output_zip* is
		given, package the extended window (±5 minutes) into a ZIP, upload it,
		and push a summary message to DingTalk.

		:param start_time: window start (datetime)
		:param end_time: window end (datetime)
		:param output_zip: ZIP output path (e.g. "output.zip"); None disables packaging
		:param upload_config: upload settings; must contain "server_url" when packaging runs
		:return: tuple (matched_logs, result). matched_logs maps keyword name to its
			matching lines; result is the upload response dict ({} when no upload ran).
			On any error returns ({}, {"error": message}).
		"""
		Logger.info(f"开始处理日志文件：{self.file_path}")
		
		# Per-keyword match buckets.
		matched_logs = {key: [] for key in self.keyword_dict}
		
		# Every line in the extended window — this is what gets zipped.
		full_time_range_logs = []
		
		# BUG FIX: previously `result` was only bound inside the upload branch,
		# so `return matched_logs, result` raised NameError whenever matches
		# existed but no upload ran; the broad except then discarded all matches.
		result = {}
		
		# Extend the window by 5 minutes on each side for packaging context.
		extended_start = start_time - timedelta(minutes=5)
		extended_end = end_time + timedelta(minutes=5)
		
		capture = False  # True while the last parsed timestamp is inside the extended window
		log_time = None  # timestamp of the most recent successfully parsed line
		
		try:
			with open(self.file_path, "r", encoding=self.encoding, errors="ignore") as f:
				for line_number, line in enumerate(f):
					line = line.strip()
					if not line:
						continue
					
					# Timestamped lines start with "[YYYY/MM/DD HH:MM:SS]".
					if line.startswith("["):
						try:
							time_str = line[1:line.index("]")]
							log_time = datetime.strptime(time_str, "%Y/%m/%d %H:%M:%S")
							
							# In/out of the extended window in one expression.
							capture = extended_start <= log_time <= extended_end
							
							# Everything in the extended window is packaged,
							# whether or not a keyword matches.
							if capture:
								full_time_range_logs.append(line)
							
							# Keyword matching only applies inside the original window.
							if not (start_time <= log_time <= end_time):
								continue
						
						except (ValueError, IndexError) as e:
							# Unparseable timestamp: skip the line and stop capturing.
							Logger.info(f"第 {line_number + 1} 行解析失败，跳过。错误信息：{e}")
							capture = False
					
					# Lines without a timestamp inherit the previous line's time;
					# `capture` short-circuits before `log_time` is ever None here.
					if capture and (start_time <= log_time <= end_time):
						search_line = line if self.case_sensitive else line.lower()
						for key, pattern in self.keyword_dict.items():
							target_pattern = pattern if self.case_sensitive else pattern.lower()
							if target_pattern in search_line:
								matched_logs[key].append(line)
			
			total_count = sum(len(logs) for logs in matched_logs.values())
			Logger.info(f"共匹配到 {total_count} 条日志。")
			
			# Package + upload only when requested AND something matched.
			if output_zip and full_time_range_logs and total_count > 0:
				self._package_logs(full_time_range_logs, output_zip)
				Logger.info(f"已生成日志压缩包（含前后5分钟日志）：{output_zip}")
				
				if not upload_config:
					raise ValueError("upload_config 不能为空")
				
				server_url = upload_config.get("server_url")
				if not server_url:
					raise ValueError("upload_config 中缺少 server_url 字段")
				
				Logger.info(f"准备通过 HTTP 接口上传日志压缩包到：{server_url}")
				result = upload_log_file(file_path=output_zip, server_url=server_url)
				
				if "error" in result:
					Logger.info(f"❌ 上传失败: {result['error']}")
				else:
					Logger.info(f"✅ 上传成功！下载链接：{result['download_url']}")
					
					# Push a per-keyword summary plus the download link.
					summary = "\n".join([f"{key}: {len(logs)}条" for key, logs in matched_logs.items() if logs])
					message = f"串口{self.port} 匹配日志：\n{summary}\n下载地址：{result.get('download_url')}"
					self.bot.send_text(message)
			
			return matched_logs, result
		
		except Exception as e:
			Logger.info(f"❌ 处理日志时发生错误：{e}")
			return {}, {"error": str(e)}
	
	@staticmethod
	def _package_logs(logs, zip_path):
		"""
		Package the given log lines into a ZIP file.

		:param logs: list of log lines
		:param zip_path: output ZIP file path
		"""
		with tempfile.TemporaryDirectory() as temp_dir:
			log_file_path = os.path.join(temp_dir, "filtered_log.txt")
			with open(log_file_path, "w", encoding="utf-8") as f:
				f.write("\n".join(logs))
			
			with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
				zipf.write(log_file_path, arcname="filtered_log.txt")
		
		Logger.info(f"日志已打包至：{zip_path}")


if __name__ == "__main__":
	# Demo run: filter one day's serial-port log for the T41N board,
	# zip + upload anything that matched, and print the grouped hits.
	demo_config = {
		"port": "COM4",
		"board": "T41N",
		"log_file": "D:\\log\\串口_2025-07-18_9_19_49.log",
		"push": "test",
		"enabled": True
	}
	upload_settings = {
		'server_url': 'http://xrsdev.easysvn.cn:5557/'
	}
	
	window_start = datetime(2025, 7, 18, 8, 0, 0)
	window_end = datetime(2025, 7, 18, 18, 0, 0)
	
	log_filter = LogFilter(demo_config)
	matched_logs, result = log_filter.filter_by_time_range(
		start_time=window_start,
		end_time=window_end,
		output_zip="filtered_log_20250718.zip",
		upload_config=upload_settings
	)
	
	# Dump each keyword bucket with its hit count.
	for keyword_name, hits in matched_logs.items():
		Logger.info(f"\n【{keyword_name}】({len(hits)}条)")
		for hit in hits:
			Logger.info(hit)
