import os
import pandas as pd
import csv
import threading
from datetime import datetime
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from collections import defaultdict
from utils.logger import setup_logging, app_logger, log_function
import json
import requests
import time
import hashlib
from .statistics import StatisticsService  # statistics service (periodic reporting)

class ModelCheckService:
    """Watches an input directory for JSON/CSV files, classifies each record
    through a text-detection API (LLM vs HUMAN), writes results to CSV files,
    and periodically aggregates per-model statistics.
    """

    def __init__(self, config):
        """
        Args:
            config: dict containing at least the 'model_check', 'global'
                and 'api' sections.
        """
        self.config = config
        self.model_config = config['model_check']
        self.observer = None      # watchdog Observer, created in start()
        self.running = False
        self.stats_timer = None   # threading.Timer for periodic statistics

        # Set up the per-service loggers.
        self.loggers = setup_logging(
            log_dir=config['global']['log_dir'],
            service_name='model_check'
        )
        # Attach the service name to the logging context.
        app_logger.set_context(service='model_check')

        # Logger used by the log_function decorator below.
        self.process_logger = app_logger.get_logger(
            name='model_check.info',
            log_dir=config['global']['log_dir']
        )

        # Wrap the bound methods with the logging decorator directly.
        self.process_file = log_function(logger=self.process_logger)(self.process_file)
        self.generate_statistics = log_function(logger=self.process_logger)(self.generate_statistics)
        self.statistics_service = StatisticsService(config, self.loggers)

    def process_file(self, file_path):
        """Process a single input file and return processing statistics.

        Args:
            file_path: absolute path of the file to process; must live under
                the configured input directory.

        Returns:
            dict with 'processed_lines', 'success_count' and 'file_count',
            or None when the file is skipped or processing fails.
        """
        with app_logger.set_context(file=file_path):
            try:
                # Only handle files under the configured input directory.
                if not file_path.startswith(self.model_config['input_dir']):
                    self.loggers['error'].error(f"文件不在输入目录中: {file_path}")
                    return

                # Never re-process our own output/statistics artifacts.
                if any(x in file_path for x in ['output', 'statistics']):
                    self.loggers['warn'].warning(f"跳过输出或统计目录中的文件: {file_path}")
                    return

                # Count total lines up front so progress can be reported.
                # Context manager closes the handle deterministically
                # (the original leaked it to the garbage collector).
                with open(file_path, 'r', encoding='utf-8') as f:
                    total_lines = sum(1 for _ in f)
                file_name = os.path.basename(file_path)
                self.loggers['info'].info(
                    f"开始处理任务:\n"
                    f"- 文件总数: 1\n"
                    f"- 总行数: {total_lines}"
                )

                results = []
                processed_lines = 0
                success_count = 0
                file_count = 0
                last_progress = 0
                start_time = time.time()

                def print_progress():
                    """Log a textual progress bar with elapsed/remaining time."""
                    if total_lines == 0:
                        # Empty file: nothing to report, and avoids ZeroDivisionError.
                        return
                    progress = processed_lines / total_lines * 100
                    bar_length = 50
                    filled_length = int(bar_length * processed_lines / total_lines)
                    bar = '=' * filled_length + '-' * (bar_length - filled_length)

                    # Elapsed time and (once progress is non-zero) a linear ETA.
                    elapsed_time = time.time() - start_time
                    minutes = int(elapsed_time // 60)
                    seconds = int(elapsed_time % 60)

                    if progress > 0:
                        total_time = elapsed_time / (progress / 100)
                        remaining_time = total_time - elapsed_time
                        remaining_minutes = int(remaining_time // 60)
                        remaining_seconds = int(remaining_time % 60)
                        time_info = f"用时: {minutes}分{seconds}秒 预计剩余: {remaining_minutes}分{remaining_seconds}秒"
                    else:
                        time_info = f"用时: {minutes}分{seconds}秒"

                    self.loggers['info'].info(
                        f"总进度: [{bar}] {progress:.1f}% ({processed_lines}/{total_lines}行) "
                        f"成功: {success_count} 文件: {file_count}/1 {time_info}\n"
                        f"当前处理: {file_name}"
                    )

                def save_results(results_to_save, current_file_count):
                    """Write a batch of results to a timestamped CSV file."""
                    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
                    output_file = os.path.join(
                        self.model_config['output_dir'],
                        f"检测结果_{timestamp}_{current_file_count}.csv"
                    )

                    os.makedirs(os.path.dirname(output_file), exist_ok=True)
                    with open(output_file, 'w', encoding='utf-8', newline='') as f:
                        writer = csv.DictWriter(f, fieldnames=self.model_config['result_fields'])
                        writer.writeheader()
                        writer.writerows(results_to_save)

                    self.loggers['info'].info(f"保存结果文件: {output_file}")
                    return []  # empty list so the caller can reset its buffer

                # Choose the reader based on the configured file type.
                if self.model_config['file_type'] == 'json':
                    # JSON-lines: one JSON object per line.
                    with open(file_path, 'r', encoding='utf-8') as f:
                        for line in f:
                            # NOTE: the original incremented total_lines here,
                            # double-counting lines already counted above and
                            # halving the reported progress percentage.
                            try:
                                item = json.loads(line.strip())
                                if self._process_item(item, results):
                                    processed_lines += 1
                                    success_count += 1  # was never incremented before

                                    # Roll over to a new output file when full.
                                    if len(results) >= self.model_config['max_rows_per_file']:
                                        file_count += 1
                                        results = save_results(results, file_count)

                                    # Log progress roughly once per 1% of data.
                                    current_progress = processed_lines / total_lines * 100
                                    if current_progress - last_progress >= 1:
                                        print_progress()
                                        last_progress = current_progress

                            except json.JSONDecodeError as e:
                                self.loggers['error'].error(f"JSON解析失败: 行 {str(e)}")
                                continue
                            except Exception as e:
                                self.loggers['error'].error(f"处理行失败: {str(e)}")
                                continue
                else:  # csv
                    try:
                        data = pd.read_csv(file_path).to_dict('records')
                        # Recompute: the raw line count above includes the header.
                        total_lines = len(data)
                        for item in data:
                            try:
                                if self._process_item(item, results):
                                    processed_lines += 1
                                    success_count += 1  # was never incremented before

                                    # Roll over to a new output file when full.
                                    if len(results) >= self.model_config['max_rows_per_file']:
                                        file_count += 1
                                        results = save_results(results, file_count)

                                    # Log progress roughly once per 1% of data.
                                    current_progress = processed_lines / total_lines * 100
                                    if current_progress - last_progress >= 1:
                                        print_progress()
                                        last_progress = current_progress

                            except Exception as e:
                                self.loggers['error'].error(f"处理记录失败: {str(e)}")
                                continue
                    except Exception as e:
                        self.loggers['error'].error(f"读取CSV文件失败: {str(e)}")
                        return

                # Flush any remaining buffered results.
                if results:
                    file_count += 1
                    save_results(results, file_count)

                # Final progress snapshot (the original gated this on
                # processed_lines % 1000 == 0, which was dead code and
                # crashed on empty files).
                print_progress()

                # Summary of the whole run.
                elapsed_time = time.time() - start_time
                minutes = int(elapsed_time // 60)
                seconds = int(elapsed_time % 60)
                speed = processed_lines / elapsed_time if elapsed_time > 0 else 0

                self.loggers['info'].info(
                    f"任务完成:\n"
                    f"- 总文件数: 1\n"
                    f"- 总行数: {total_lines}\n"
                    f"- 成功处理: {success_count}\n"
                    f"- 总用时: {minutes}分{seconds}秒\n"
                    f"- 平均速度: {speed:.1f}行/秒"
                )

                # Return the statistics for the caller to aggregate.
                return {
                    'processed_lines': processed_lines,
                    'success_count': success_count,
                    'file_count': file_count
                }

            except Exception as e:
                self.loggers['error'].error(f"处理文件失败: {file_path}, 错误: {str(e)}")
                return None

    def _process_item(self, item, results):
        """Classify a single record; append the result row on success.

        Returns:
            True when a result row was appended, False when the record was
            filtered out, empty, or failed.
        """
        try:
            # Apply the configured filter conditions first.
            if not self._check_filter_conditions(item):
                self.loggers['info'].info(f"跳过不符合过滤条件的记录: {item}")
                return False

            # Resolve the configured field names.
            text_field = self.model_config['fields']['text']
            model_field = self.model_config['fields']['model']

            text = item.get(text_field, '')
            model_id = str(item.get(model_field, ''))

            if not text:
                self.loggers['warn'].warning(f"跳过空文本: {item}")
                return False

            # Call the detection API.
            response = self._call_api(text)
            if not response or 'data' not in response or 'fake_conf' not in response['data']:
                self.loggers['error'].error(f"API响应格式错误: {response}")
                return False

            fake_conf = response['data']['fake_conf']

            # Classify by threshold; confidence is expressed for the chosen class.
            if fake_conf >= self.model_config['fake_conf_threshold']:
                category = "LLM"
                confidence = fake_conf * 100
            else:
                category = "HUMAN"
                confidence = (1 - fake_conf) * 100

            # Map the model id to a human-readable name.
            model_name = self.model_config['model_mapping'].get(
                model_id,
                f"未知模型({model_id})"
            )

            # Build the result row using the configured column order:
            # [content, model name, category, score].
            result_fields = self.model_config['result_fields']
            result = {
                result_fields[0]: text,
                result_fields[1]: model_name,
                result_fields[2]: category,
                result_fields[3]: f"{confidence:.2f}%"
            }
            results.append(result)
            self.loggers['info'].info(f"处理成功: {result}")
            return True
        except Exception as e:
            self.loggers['error'].error(f"处理记录失败: {str(e)}")
            return False

    def _call_api(self, text):
        """Call the detection API with retries.

        Raises:
            requests.exceptions.RequestException: after exhausting retries.
            ValueError: when the response payload is malformed.
        """
        for attempt in range(self.config['api']['max_retries']):
            try:
                self.loggers['debug'].debug(f"调用API, 尝试次数: {attempt + 1}")
                response = requests.post(
                    self.config['api']['url'],
                    json={"text": text},
                    timeout=self.config['api']['timeout']
                )
                response.raise_for_status()

                # Validate the response shape before trusting it.
                data = response.json()
                if not data or 'data' not in data or 'fake_conf' not in data['data']:
                    raise ValueError(f"API响应格式错误: {data}")

                self.loggers['debug'].debug(f"API调用成功: {data}")
                return data

            except requests.exceptions.RequestException as e:
                self.loggers['error'].error(f"API调用失败: {str(e)}")
                if attempt == self.config['api']['max_retries'] - 1:
                    raise
                # Linearly increasing backoff: 1s, 2s, 3s, ...
                # (the original comment claimed exponential backoff).
                time.sleep(1 * (attempt + 1))
            except Exception as e:
                self.loggers['error'].error(f"API调用异常: {str(e)}")
                raise

        # Defensive: the loop always returns or raises before reaching here.
        raise Exception("API调用超过最大重试次数")

    def _check_filter_conditions(self, item):
        """Return True when the record passes the configured filters."""
        # auto_evaluation must be one of the allowed values.
        auto_eval = item.get('auto_evaluation', '')
        if auto_eval not in self.model_config['filter_conditions']['auto_evaluation']:
            return False

        # Reject records whose question contains any blocked keyword.
        question = item.get(self.model_config['fields']['question'], '')
        for keyword in self.model_config['filter_conditions']['questions_content']:
            if keyword in question:
                return False

        return True

    def generate_statistics(self):
        """Aggregate all result CSVs into a statistics report, then reschedule
        itself while the service is running.
        """
        try:
            stats = defaultdict(lambda: {'LLM': 0, 'HUMAN': 0})

            # Walk the output directory and accumulate per-model counts.
            for root, _, files in os.walk(self.model_config['output_dir']):
                for file in files:
                    if file.endswith('.csv'):
                        self._process_file_for_stats(
                            os.path.join(root, file),
                            stats
                        )

            # Write the aggregated report.
            self._save_statistics(stats)

        except Exception as e:
            self.loggers['error'].error(f"生成统计报告失败: {str(e)}")
        finally:
            # Schedule the next run while the service is still active.
            if self.running:
                self.stats_timer = threading.Timer(
                    self.model_config['interval'],
                    self.generate_statistics
                )
                self.stats_timer.start()

    def _process_file_for_stats(self, file_path, stats):
        """Accumulate LLM/HUMAN counts per model from one result CSV."""
        try:
            df = pd.read_csv(file_path)
            model_field = self.model_config['result_fields'][1]     # model name column
            category_field = self.model_config['result_fields'][2]  # category column

            for _, row in df.iterrows():
                model = row[model_field]
                category = row[category_field]
                if category in ['LLM', 'HUMAN']:
                    stats[model][category] += 1
        except Exception as e:
            self.loggers['error'].error(f"处理文件失败: {file_path}, 错误: {str(e)}")

    def _save_statistics(self, stats):
        """Write the aggregated statistics to a timestamped CSV report."""
        try:
            # Nothing to report when no data was collected.
            if not stats:
                self.loggers['warn'].warning("没有数据需要统计，跳过生成报告")
                return

            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            stats_dir = self.model_config['statistics_dir']
            report_path = os.path.join(
                stats_dir,
                f'统计报告_{timestamp}.csv'
            )

            # Path-containment check: commonpath on normalized paths instead
            # of the character-based commonprefix the original used
            # (commonprefix would accept e.g. '/a/stats_evil' for '/a/stats').
            if os.path.commonpath(
                [os.path.abspath(report_path), os.path.abspath(stats_dir)]
            ) != os.path.abspath(stats_dir):
                self.loggers['error'].error("统计报告路径不在指定目录中")
                return

            os.makedirs(os.path.dirname(report_path), exist_ok=True)

            # Column headers come from the configuration.
            stat_fields = self.model_config['statistics']['fields']

            rows = []
            for model, counts in stats.items():
                total = counts['LLM'] + counts['HUMAN']
                llm_ratio = f"{(counts['LLM'] / total * 100):.2f}%" if total > 0 else "0%"
                rows.append([
                    model,
                    counts['LLM'],
                    counts['HUMAN'],
                    total,
                    llm_ratio
                ])

            with open(report_path, 'w', encoding='utf-8', newline='') as f:
                writer = csv.writer(f)
                writer.writerow(stat_fields)
                writer.writerows(rows)

            self.loggers['info'].info(f"统计报告已生成: {report_path}")

        except Exception as e:
            self.loggers['error'].error(f"保存统计报告失败: {str(e)}")

    def start(self):
        """Start the service: watch the input directory, process existing
        files, and (optionally) kick off periodic statistics.
        """
        self.loggers['info'].info("启动模型检查服务")
        self.running = True

        # Custom event handler that deduplicates files by content hash.
        class FileHandler(FileSystemEventHandler):
            def __init__(self, service):
                self.service = service
                self.processed_files = set()   # MD5 hashes of processed files
                self.total_files = 0           # number of files to process
                self.total_lines = 0           # total lines across all files
                self.processed_lines = 0       # lines processed so far
                self.success_count = 0         # successfully processed records
                self.start_time = time.time()  # overall start time
                self.last_progress = 0         # last reported progress (%)
                self.last_print_time = time.time()  # last progress report time

                # Pre-compute the total file and line counts for progress.
                for file_name in os.listdir(service.model_config['input_dir']):
                    if file_name.endswith(('.json', '.csv')):
                        self.total_files += 1
                        file_path = os.path.join(service.model_config['input_dir'], file_name)
                        if service.model_config['file_type'] == 'json':
                            # Close the handle deterministically (the original leaked it).
                            with open(file_path, 'r', encoding='utf-8') as f:
                                self.total_lines += sum(1 for _ in f)
                        else:  # csv
                            df = pd.read_csv(file_path)
                            self.total_lines += len(df)

                self.service.loggers['info'].info(
                    f"开始处理任务:\n"
                    f"- 文件总数: {self.total_files}\n"
                    f"- 总行数: {self.total_lines}"
                )

            def print_progress(self, current_file=None):
                """Log the overall progress bar across all watched files."""
                if self.total_lines == 0:
                    return

                progress = self.processed_lines / self.total_lines * 100

                # Throttle: only report after another 1% or after 30 seconds.
                if progress - self.last_progress < 1 and time.time() - self.last_print_time < 30:
                    return

                bar_length = 50
                filled_length = int(bar_length * self.processed_lines / self.total_lines)
                bar = '=' * filled_length + '-' * (bar_length - filled_length)

                # Throughput so far.
                elapsed_time = time.time() - self.start_time
                speed = self.processed_lines / elapsed_time if elapsed_time > 0 else 0

                # ETA based on current throughput.
                if progress > 0:
                    remaining_lines = self.total_lines - self.processed_lines
                    remaining_time = remaining_lines / speed if speed > 0 else 0
                    remaining_minutes = int(remaining_time // 60)
                    remaining_seconds = int(remaining_time % 60)
                    time_info = f"预计剩余: {remaining_minutes}分{remaining_seconds}秒"
                else:
                    time_info = "计算中..."

                status = (
                    f"[{bar}] {progress:.1f}% "
                    f"({self.processed_lines}/{self.total_lines}) "
                    f"速度: {speed:.1f}行/秒 "
                    f"{time_info}"
                )

                self.service.loggers['info'].info(status)
                self.last_progress = progress
                self.last_print_time = time.time()

            def process_if_needed(self, file_path):
                """Process a file unless an identical one was already handled."""
                file_hash = self._get_file_hash(file_path)
                if file_hash not in self.processed_files:
                    file_name = os.path.basename(file_path)
                    self.service.loggers['info'].info(f"开始处理文件: {file_name}")

                    # Process the file and merge its statistics.
                    result = self.service.process_file(file_path)
                    if result:
                        self.processed_lines += result['processed_lines']
                        self.success_count += result['success_count']

                    self.processed_files.add(file_hash)
                    self.print_progress()

                    # Print the overall summary once the last file is done.
                    if len(self.processed_files) == self.total_files:
                        elapsed_time = time.time() - self.start_time
                        # Guard against a (near-)zero elapsed time.
                        speed = self.processed_lines / elapsed_time if elapsed_time > 0 else 0
                        self.service.loggers['info'].info(
                            f"处理完成 - 总行数: {self.total_lines} "
                            f"成功: {self.success_count} "
                            f"用时: {int(elapsed_time//60)}分{int(elapsed_time%60)}秒 "
                            f"平均速度: {speed:.1f}行/秒"
                        )

            def _get_file_hash(self, file_path):
                """MD5 of the file contents, used for deduplication only."""
                with open(file_path, 'rb') as f:
                    return hashlib.md5(f.read()).hexdigest()

            def on_created(self, event):
                if event.is_directory:
                    return
                if event.src_path.endswith(('.json', '.csv')):
                    self.process_if_needed(event.src_path)

            def on_modified(self, event):
                if event.is_directory:
                    return
                if event.src_path.endswith(('.json', '.csv')):
                    self.process_if_needed(event.src_path)

        # Start watching the input directory.
        event_handler = FileHandler(self)
        self.observer = Observer()
        self.observer.schedule(
            event_handler,
            self.model_config['input_dir'],
            recursive=False
        )
        self.observer.start()
        self.loggers['info'].info(f"开始监控目录: {self.model_config['input_dir']}")

        # Process files already present in the input directory.
        for file_name in os.listdir(self.model_config['input_dir']):
            if file_name.endswith(('.json', '.csv')):
                file_path = os.path.join(self.model_config['input_dir'], file_name)
                event_handler.process_if_needed(file_path)

        # Kick off the statistics loop only when enabled in realtime mode.
        if (self.model_config['statistics']['enabled'] and
            self.model_config['statistics']['mode'] == 'realtime'):
            self.generate_statistics()
            # Original logged this unconditionally, even when the task
            # was never started.
            self.loggers['info'].info("统计任务已启动")

        # Start the companion statistics service.
        self.statistics_service.start()

    def stop(self):
        """Stop the watcher, the statistics timer, and the statistics service."""
        self.loggers['info'].info("停止模型检查服务")
        self.running = False
        if self.observer:
            self.observer.stop()
            self.observer.join()
        if self.stats_timer:
            self.stats_timer.cancel()
        self.statistics_service.stop()