#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
批量处理文本文件进行命名实体识别
支持处理任意文本文件并生成详细报告
"""

import requests
import json
import time
import argparse
import os
from typing import List, Dict, Any
from datetime import datetime

class BatchNERProcessor:
    """Batch-process a text file through a remote NER HTTP service.

    Each non-empty line of the input file is analyzed twice (once with the
    "bert" method and once with the "advanced" method); raw results are saved
    as JSON and an aggregate entity-type/error report is written as text.
    """

    def __init__(self, server_url: str = "http://192.168.224.255:5000"):
        """
        Args:
            server_url: Base URL of the NER service (no trailing slash).
        """
        self.server_url = server_url
        self.session = requests.Session()
        # BUG FIX: `requests.Session` has no functional `timeout` attribute --
        # assigning `self.session.timeout = 30` was silently ignored, so every
        # request ran with NO timeout.  Store it and pass it per request instead.
        self.timeout = 30

    def read_text_file(self, file_path: str) -> List[str]:
        """
        Read all non-empty lines from a UTF-8 text file.

        Args:
            file_path: Path of the file to read.

        Returns:
            List of stripped, non-empty lines; an empty list on any read error
            (the error is printed, not raised).
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                # Iterate the file object directly; readlines() would build an
                # intermediate list for no benefit.
                return [line.strip() for line in f if line.strip()]
        except FileNotFoundError:
            print(f"错误: 文件 {file_path} 未找到")
            return []
        except Exception as e:
            print(f"读取文件时出错: {e}")
            return []

    def analyze_text(self, text: str, method: str = "bert", domain: str = "general") -> Dict[str, Any]:
        """
        Send one text to the NER service and return the parsed response.

        Args:
            text: Text to analyze.
            method: Analysis method ("bert" or "advanced").
            domain: Domain hint forwarded to the service.

        Returns:
            The service's JSON payload on HTTP 200; otherwise a dict with a
            single "error" key describing the failure.  Never raises.
        """
        try:
            response = self.session.post(
                f"{self.server_url}/nlp/analysisText",
                json={
                    "systemCode": "NLP_SYSTEM_001",
                    "text": text,
                    "domain": domain,
                    "method": method
                },
                # Per-request timeout (see __init__): without this a hung
                # server would block the whole batch forever.
                timeout=self.timeout,
            )

            if response.status_code == 200:
                return response.json()
            else:
                return {"error": f"HTTP {response.status_code}: {response.text}"}
        except Exception as e:
            return {"error": str(e)}

    def process_file(self, input_file: str, output_dir: str = ".", domain: str = "general"):
        """
        Analyze every line of ``input_file`` with both methods and save outputs.

        Writes three files into ``output_dir``:
        ``<name>_<ts>_bert.json``, ``<name>_<ts>_advanced.json`` and
        ``<name>_<ts>_report.txt``.

        Args:
            input_file: Path of the input text file.
            output_dir: Directory for the output files (created if missing).
            domain: Domain hint forwarded to the service.
        """
        lines = self.read_text_file(input_file)
        if not lines:
            print("没有读取到任何文本行")
            return

        print(f"共读取到 {len(lines)} 行文本")

        # Ensure the output directory exists.
        os.makedirs(output_dir, exist_ok=True)

        bert_results = []
        advanced_results = []

        total_lines = len(lines)
        processed_lines = 0

        for i, line in enumerate(lines):
            print(f"处理第 {i+1}/{total_lines} 行: {line[:50]}...")

            # BERT-method analysis.
            bert_result = self.analyze_text(line, "bert", domain)
            bert_results.append({
                "line_number": i + 1,
                "text": line,
                "result": bert_result,
                "timestamp": datetime.now().isoformat()
            })

            # Advanced-method analysis.
            advanced_result = self.analyze_text(line, "advanced", domain)
            advanced_results.append({
                "line_number": i + 1,
                "text": line,
                "result": advanced_result,
                "timestamp": datetime.now().isoformat()
            })

            processed_lines += 1

            # Progress message every 10 lines.
            if (i + 1) % 10 == 0:
                print(f"已处理 {i+1}/{total_lines} 行")

            # Throttle to avoid hammering the service.
            time.sleep(0.1)

        # Build an output-file prefix from the input name and a timestamp.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        base_name = os.path.splitext(os.path.basename(input_file))[0]
        file_prefix = f"{base_name}_{timestamp}"

        # Use os.path.join so paths are correct on every platform
        # (the original hard-coded "/" separators).
        bert_path = os.path.join(output_dir, f"{file_prefix}_bert.json")
        advanced_path = os.path.join(output_dir, f"{file_prefix}_advanced.json")
        report_path = os.path.join(output_dir, f"{file_prefix}_report.txt")

        self._save_results(bert_results, bert_path)
        self._save_results(advanced_results, advanced_path)
        self._generate_report(bert_results, advanced_results, report_path)

        print(f"\n处理完成!")
        print(f"- 总行数: {total_lines}")
        print(f"- BERT结果保存至: {bert_path}")
        print(f"- 增强版结果保存至: {advanced_path}")
        print(f"- 统计报告保存至: {report_path}")

    def _save_results(self, results: List[Dict], file_path: str):
        """
        Dump ``results`` to ``file_path`` as pretty-printed UTF-8 JSON.

        Errors are printed, not raised, so one failed save does not abort
        the rest of the run.

        Args:
            results: Per-line result records.
            file_path: Destination JSON file path.
        """
        try:
            with open(file_path, "w", encoding="utf-8") as f:
                json.dump(results, f, ensure_ascii=False, indent=2)
        except Exception as e:
            print(f"保存结果到 {file_path} 时出错: {e}")

    def _generate_report(self, bert_results: List[Dict], advanced_results: List[Dict], report_path: str):
        """
        Write an aggregate text report: entity-type counts per method and
        per-method error counts.

        Args:
            bert_results: Records produced by the "bert" method.
            advanced_results: Records produced by the "advanced" method.
            report_path: Destination text file path.
        """
        try:
            with open(report_path, "w", encoding="utf-8") as f:
                f.write("NER批量处理统计报告\n")
                f.write("=" * 50 + "\n")
                f.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"总行数: {len(bert_results)}\n\n")

                # Count entities by type ("nerKey") for each method.  Records
                # whose result lacks a "ners" key (e.g. errors) are skipped.
                bert_entity_types: Dict[str, int] = {}
                advanced_entity_types: Dict[str, int] = {}

                for result in bert_results:
                    if "ners" in result["result"]:
                        for entity in result["result"]["ners"]:
                            entity_type = entity.get("nerKey", "Unknown")
                            bert_entity_types[entity_type] = bert_entity_types.get(entity_type, 0) + 1

                for result in advanced_results:
                    if "ners" in result["result"]:
                        for entity in result["result"]["ners"]:
                            entity_type = entity.get("nerKey", "Unknown")
                            advanced_entity_types[entity_type] = advanced_entity_types.get(entity_type, 0) + 1

                f.write("BERT方法识别的实体类型统计:\n")
                for entity_type, count in sorted(bert_entity_types.items()):
                    f.write(f"  {entity_type}: {count}\n")

                f.write("\n增强版方法识别的实体类型统计:\n")
                for entity_type, count in sorted(advanced_entity_types.items()):
                    f.write(f"  {entity_type}: {count}\n")

                # Error counts: a record is an error if its result carries
                # an "error" key (set by analyze_text on any failure).
                bert_errors = sum(1 for r in bert_results if "error" in r["result"])
                advanced_errors = sum(1 for r in advanced_results if "error" in r["result"])

                f.write(f"\n错误统计:\n")
                f.write(f"  BERT方法错误数: {bert_errors}\n")
                f.write(f"  增强版方法错误数: {advanced_errors}\n")

        except Exception as e:
            print(f"生成报告 {report_path} 时出错: {e}")

def main():
    """Command-line entry point: parse arguments, validate the input file,
    and run the batch processor."""
    arg_parser = argparse.ArgumentParser(description="批量处理文本文件进行命名实体识别")
    arg_parser.add_argument("input_file", help="输入文本文件路径")
    arg_parser.add_argument("-o", "--output_dir", default=".", help="输出目录 (默认为当前目录)")
    arg_parser.add_argument("-d", "--domain", default="general", help="领域 (默认为 general)")
    arg_parser.add_argument(
        "-u", "--url",
        default="http://192.168.224.255:5000",
        help="NER服务URL (默认为 http://192.168.224.255:5000)",
    )
    opts = arg_parser.parse_args()

    # Bail out early with a message rather than letting the processor fail.
    if not os.path.exists(opts.input_file):
        print(f"错误: 输入文件 {opts.input_file} 不存在")
        return

    BatchNERProcessor(opts.url).process_file(opts.input_file, opts.output_dir, opts.domain)

# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()