package org.example.rzfx.service;

import cn.hutool.core.util.StrUtil;
import cn.hutool.crypto.digest.MD5;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import lombok.extern.slf4j.Slf4j;
import org.example.rzfx.dto.LogQueryDTO;
import org.example.rzfx.dto.LogStatDTO;
import org.example.rzfx.dto.PageResult;
import org.example.rzfx.entity.Log;
import org.example.rzfx.mapper.LogMapper;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.time.LocalDateTime;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

/**
 * Log service: querying, statistics, deduplication and persistence of {@link Log} entities.
 *
 * <p>Write path: logs are first staged in Redis via {@link LogCacheService} and flushed to
 * MySQL asynchronously by a scheduled task that calls {@link #batchSaveLogsToDB(List)}.
 * Read path: paged queries go through a Redis read-through cache keyed by the query hash.</p>
 */
@Service
@Slf4j
public class LogService extends ServiceImpl<LogMapper, Log> {

    /** Key prefix for cached paged-query results. */
    private static final String QUERY_CACHE_KEY_PREFIX = "log:query:";

    /**
     * TTL of cached query results.
     * NOTE(review): cache entries are never invalidated on write, so readers may
     * observe results up to this old — confirm that staleness window is acceptable.
     */
    private static final long QUERY_CACHE_TTL_HOURS = 1;

    @Resource
    private RedisTemplate<String, Object> redisTemplate;

    @Resource
    private LogMapper logMapper;

    @Resource
    private LogCacheService logCacheService;

    /**
     * Pages through logs matching the given filter, with a Redis read-through cache.
     *
     * @param queryDTO filter, paging and ordering parameters; blank/null filters are skipped
     * @return one page of matching logs (cached for {@link #QUERY_CACHE_TTL_HOURS} hours)
     */
    public PageResult<Log> queryLogs(LogQueryDTO queryDTO) {
        // Identical queries share a cache entry: key is the MD5 of the serialized DTO.
        String cacheKey = QUERY_CACHE_KEY_PREFIX + MD5.create().digestHex(JSON.toJSONString(queryDTO));

        Object cached = redisTemplate.opsForValue().get(cacheKey);
        // instanceof guard: a serializer mismatch could yield a different type (e.g. a Map
        // from a JSON serializer); fall through to the DB instead of throwing ClassCastException.
        if (cached instanceof PageResult) {
            @SuppressWarnings("unchecked")
            PageResult<Log> hit = (PageResult<Log>) cached;
            return hit;
        }

        LambdaQueryWrapper<Log> wrapper = buildQueryWrapper(queryDTO);

        Page<Log> page = new Page<>(queryDTO.getPageNum(), queryDTO.getPageSize());
        IPage<Log> result = this.page(page, wrapper);

        PageResult<Log> pageResult = new PageResult<>(
            queryDTO.getPageNum(),
            queryDTO.getPageSize(),
            result.getTotal(),
            result.getRecords()
        );

        redisTemplate.opsForValue().set(cacheKey, pageResult, QUERY_CACHE_TTL_HOURS, TimeUnit.HOURS);

        return pageResult;
    }

    /**
     * Translates the query DTO into a MyBatis-Plus wrapper.
     * Uses the conditional overloads (first boolean argument) so null/blank filters are skipped.
     */
    private LambdaQueryWrapper<Log> buildQueryWrapper(LogQueryDTO queryDTO) {
        LambdaQueryWrapper<Log> wrapper = new LambdaQueryWrapper<>();
        wrapper.ge(queryDTO.getStartTime() != null, Log::getTimestamp, queryDTO.getStartTime())
               .le(queryDTO.getEndTime() != null, Log::getTimestamp, queryDTO.getEndTime())
               .eq(StrUtil.isNotBlank(queryDTO.getLogLevel()), Log::getLogLevel, queryDTO.getLogLevel())
               .eq(StrUtil.isNotBlank(queryDTO.getSystemType()), Log::getSystemType, queryDTO.getSystemType())
               .eq(queryDTO.getSourceId() != null, Log::getSourceId, queryDTO.getSourceId())
               .like(StrUtil.isNotBlank(queryDTO.getKeyword()), Log::getContent, queryDTO.getKeyword());

        // Anything other than (case-insensitive) "asc" sorts newest-first.
        if ("asc".equalsIgnoreCase(queryDTO.getOrderDirection())) {
            wrapper.orderByAsc(Log::getTimestamp);
        } else {
            wrapper.orderByDesc(Log::getTimestamp);
        }
        return wrapper;
    }

    /**
     * Saves a log via the Redis staging cache (asynchronously flushed to MySQL).
     *
     * <p>Flow:
     * <ol>
     *   <li>compute a dedup hash from content + timestamp,</li>
     *   <li>skip if a row with that hash already exists in MySQL,</li>
     *   <li>stage in Redis; a scheduled task writes it to MySQL later,</li>
     *   <li>if Redis is unavailable, degrade to a direct MySQL write.</li>
     * </ol>
     *
     * <p>NOTE(review): the exists-check followed by save is not atomic; a unique index
     * on unique_hash is still required to guarantee dedup under concurrency — confirm
     * the schema has one.</p>
     *
     * @param logEntry log to persist (its uniqueHash field is set by this method)
     */
    public void saveLog(Log logEntry) {
        logEntry.setUniqueHash(computeUniqueHash(logEntry));

        if (existsByHash(logEntry.getUniqueHash())) {
            // Duplicate log: ignore silently (dedup is the intended behavior).
            return;
        }

        boolean cached = logCacheService.saveToCache(logEntry);
        if (!cached) {
            // Redis unavailable: write straight to MySQL so the log is not lost.
            log.warn("Redis缓存失败，降级直接写MySQL: hash={}", logEntry.getUniqueHash());
            this.save(logEntry);
        }
    }

    /**
     * Dedup hash over content + timestamp.
     * NOTE(review): a null content concatenates as the string "null" — presumably
     * acceptable, but confirm upstream always sets content.
     */
    private static String computeUniqueHash(Log logEntry) {
        // A fresh MD5 digester per call: underlying MessageDigest is not thread-safe.
        return MD5.create().digestHex(logEntry.getContent() + logEntry.getTimestamp());
    }

    /**
     * @param uniqueHash dedup hash to look up
     * @return true if a log with this hash already exists in MySQL
     */
    private boolean existsByHash(String uniqueHash) {
        LambdaQueryWrapper<Log> wrapper = new LambdaQueryWrapper<>();
        wrapper.eq(Log::getUniqueHash, uniqueHash);
        return this.count(wrapper) > 0;
    }

    /**
     * Batch-writes logs to MySQL (used by the Redis write-back task).
     *
     * <p>Already-persisted logs are filtered out with a single IN query (previously
     * one COUNT query per log). Remaining logs are saved one by one so a single bad
     * row does not fail the whole batch; rows whose fields column is not valid JSON
     * have it cleared before saving.</p>
     *
     * @param logs logs to persist; null/empty is a no-op success
     * @return true only if every non-duplicate log was saved successfully
     */
    public boolean batchSaveLogsToDB(List<Log> logs) {
        if (logs == null || logs.isEmpty()) {
            return true;
        }

        try {
            // Dedup: fetch all already-existing hashes in one round trip.
            // NOTE(review): assumes batch sizes keep the IN list reasonable — confirm
            // the write-back task caps its batch size.
            Set<String> batchHashes = logs.stream()
                .map(Log::getUniqueHash)
                .collect(Collectors.toSet());
            LambdaQueryWrapper<Log> existsWrapper = new LambdaQueryWrapper<>();
            existsWrapper.select(Log::getUniqueHash)
                         .in(Log::getUniqueHash, batchHashes);
            Set<String> existingHashes = this.list(existsWrapper).stream()
                .map(Log::getUniqueHash)
                .collect(Collectors.toSet());

            List<Log> logsToSave = logs.stream()
                .filter(entry -> !existingHashes.contains(entry.getUniqueHash()))
                .collect(Collectors.toList());

            if (logsToSave.isEmpty()) {
                return true;
            }

            int successCount = 0;
            int failCount = 0;

            for (Log logEntity : logsToSave) {
                try {
                    // Clear the fields column if it is not valid JSON, so the insert
                    // does not fail on a JSON-typed column.
                    if (logEntity.getFields() != null && !logEntity.getFields().isEmpty()) {
                        try {
                            JSONObject.parseObject(logEntity.getFields());
                        } catch (Exception e) {
                            log.warn("日志fields字段不是有效JSON，已清空: hash={}", logEntity.getUniqueHash());
                            logEntity.setFields(null);
                        }
                    }

                    if (this.save(logEntity)) {
                        successCount++;
                    } else {
                        failCount++;
                        log.warn("日志保存失败（未知原因）: hash={}", logEntity.getUniqueHash());
                    }
                } catch (Exception e) {
                    failCount++;
                    // Truncate content in the log line to avoid dumping huge payloads.
                    log.error("日志保存异常: hash={}, content={}",
                        logEntity.getUniqueHash(),
                        logEntity.getContent() != null ? logEntity.getContent().substring(0, Math.min(100, logEntity.getContent().length())) : "null",
                        e);
                }
            }

            log.info("批量保存完成: 总数={}, 成功={}, 失败={}", logsToSave.size(), successCount, failCount);
            return failCount == 0;

        } catch (Exception e) {
            log.error("批量保存日志到数据库失败", e);
            return false;
        }
    }

    /**
     * Persists a log straight to MySQL, bypassing the Redis staging cache.
     * For logs that must be durable immediately (e.g. alert logs).
     *
     * <p>NOTE(review): same non-atomic check-then-save as {@link #saveLog(Log)};
     * relies on a unique index on unique_hash for concurrent dedup.</p>
     *
     * @param logEntry log to persist (its uniqueHash field is set by this method)
     */
    public void saveLogDirectly(Log logEntry) {
        logEntry.setUniqueHash(computeUniqueHash(logEntry));

        if (existsByHash(logEntry.getUniqueHash())) {
            // Duplicate log: ignore.
            return;
        }

        this.save(logEntry);
    }

    /**
     * Aggregates log statistics over a time window.
     *
     * @param startTime window start (inclusive)
     * @param endTime   window end (inclusive)
     * @return level distribution, system distribution, volume trend, total and error counts
     */
    public LogStatDTO statLogs(LocalDateTime startTime, LocalDateTime endTime) {
        LogStatDTO statDTO = new LogStatDTO();

        // Mapper methods take ISO-8601 strings (LocalDateTime.toString()).
        String startTimeStr = startTime.toString();
        String endTimeStr = endTime.toString();

        // Distribution by log level. Long::sum merge guards against the mapper ever
        // returning duplicate group keys (two-arg toMap would throw).
        List<Map<String, Object>> levelDist = logMapper.statLogLevelDistribution(startTimeStr, endTimeStr);
        Map<String, Long> levelMap = levelDist.stream()
            .collect(Collectors.toMap(
                m -> (String) m.get("logLevel"),
                m -> ((Number) m.get("count")).longValue(),
                Long::sum
            ));
        statDTO.setLevelDistribution(levelMap);

        // Distribution by originating system.
        List<Map<String, Object>> systemDist = logMapper.statSystemDistribution(startTimeStr, endTimeStr);
        Map<String, Long> systemMap = systemDist.stream()
            .collect(Collectors.toMap(
                m -> (String) m.get("systemType"),
                m -> ((Number) m.get("count")).longValue(),
                Long::sum
            ));
        statDTO.setSystemDistribution(systemMap);

        // Log volume over time.
        List<Map<String, Object>> trendData = logMapper.statLogTrend(startTimeStr, endTimeStr);
        List<LogStatDTO.TimeSeriesItem> trendList = trendData.stream()
            .map(m -> {
                LogStatDTO.TimeSeriesItem item = new LogStatDTO.TimeSeriesItem();
                item.setTime((String) m.get("time"));
                item.setCount(((Number) m.get("count")).longValue());
                return item;
            })
            .collect(Collectors.toList());
        statDTO.setLogTrend(trendList);

        // Total and ERROR-level counts within the window.
        LambdaQueryWrapper<Log> totalWrapper = new LambdaQueryWrapper<>();
        totalWrapper.ge(Log::getTimestamp, startTime).le(Log::getTimestamp, endTime);
        statDTO.setTotalCount(this.count(totalWrapper));

        LambdaQueryWrapper<Log> errorWrapper = new LambdaQueryWrapper<>();
        errorWrapper.ge(Log::getTimestamp, startTime).le(Log::getTimestamp, endTime)
                   .eq(Log::getLogLevel, "ERROR");
        statDTO.setErrorCount(this.count(errorWrapper));

        return statDTO;
    }

    /**
     * Deletes logs older than the retention window.
     *
     * <p>NOTE(review): expiry is based on createTime, while queries and stats use
     * timestamp — if ingestion lags, a log can expire by a different clock than the
     * one it is queried by. Confirm this is intentional.</p>
     *
     * @param retentionDays number of days to keep
     */
    public void deleteExpiredLogs(int retentionDays) {
        LocalDateTime expireTime = LocalDateTime.now().minusDays(retentionDays);
        LambdaQueryWrapper<Log> wrapper = new LambdaQueryWrapper<>();
        wrapper.lt(Log::getCreateTime, expireTime);
        this.remove(wrapper);
    }
}

