#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
批量流域划分工具
基于 standardized_delineate.py 实现批量处理多个流域
"""

import os
import sys
import re
import csv
from datetime import datetime
from typing import List, Dict, Any, Tuple
from loguru import logger

# 使用相对导入
from ..core.standardized_delineate import StandardizedDelineator


class BatchDelineator:
    """Batch watershed delineator.

    Parses a tab-separated list of watershed outlets, delineates each one
    with ``StandardizedDelineator``, and appends every outcome (success,
    validation failure, or exception) to a CSV result log.
    """

    # WKT-style outlet point, e.g. "POINT (116.5 39.9)" -> groups (lng, lat).
    # Compiled once at class level because it is matched against every data line.
    _POINT_PATTERN = re.compile(r'POINT\s*\(\s*([\d.-]+)\s+([\d.-]+)\s*\)')

    def __init__(self, output_dir: str = None, result_log_file: str = None):
        """
        Initialize the batch delineator.

        Args:
            output_dir: Output directory; the delineator's default
                configuration is used when not given.
            result_log_file: Path of the CSV result log; defaults to a
                timestamped file in the current working directory.
        """
        self.delineator = StandardizedDelineator(output_dir)
        self.results = []       # successful runs of the most recent batch
        self.failed_items = []  # failed runs of the most recent batch

        if result_log_file:
            self.result_log_file = result_log_file
        else:
            # Default: timestamped CSV in the current working directory.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            self.result_log_file = f"batch_delineate_results_{timestamp}.csv"

        # Create the log file and write its header up front.
        self._initialize_result_log()

    def _initialize_result_log(self):
        """Create the CSV result log and write its header row.

        Failures are logged but not raised: the batch can still run, it
        just loses the persisted report.
        """
        try:
            with open(self.result_log_file, 'w', newline='', encoding='utf-8') as f:
                writer = csv.writer(f)
                writer.writerow([
                    '序号', '流域编码', '处理时间', '状态', '纬度', '经度', 
                    '面积估算', '输出文件数', '错误信息', '处理耗时(秒)'
                ])
            logger.info(f"结果清单文件已创建: {self.result_log_file}")
        except Exception as e:
            logger.error(f"创建结果清单文件失败: {e}")

    def _log_result_to_file(self, index: int, stcd: str, status: str, 
                           lat: float, lng: float, area: float, 
                           output_files_count: int = 0, error_msg: str = "", 
                           processing_time: float = 0.0):
        """
        Append one processing outcome to the CSV result log.

        Write errors are logged and swallowed so one bad append cannot
        abort the whole batch.

        Args:
            index: 1-based position of the watershed in the batch.
            stcd: Watershed code.
            status: Outcome label written to the CSV (成功/失败/异常).
            lat: Outlet latitude.
            lng: Outlet longitude.
            area: Estimated watershed area.
            output_files_count: Number of output files produced.
            error_msg: Error description; empty on success.
            processing_time: Elapsed processing time in seconds.
        """
        try:
            current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            with open(self.result_log_file, 'a', newline='', encoding='utf-8') as f:
                writer = csv.writer(f)
                writer.writerow([
                    index, stcd, current_time, status, lat, lng, 
                    area, output_files_count, error_msg, round(processing_time, 2)
                ])
        except Exception as e:
            logger.error(f"写入结果清单文件失败: {e}")

    def _parse_line(self, line: str, line_num: int) -> Dict[str, Any]:
        """Parse one tab-separated record into a watershed dict.

        Expected format: ``stcd<TAB>area<TAB>POINT (lng lat)``, where the
        area may contain thousands separators (commas).

        Args:
            line: Stripped data line.
            line_num: 1-based file line number (for diagnostics).

        Returns:
            Dict with keys stcd/area/lat/lng/line_num, or ``None`` when the
            line is malformed (a warning is logged).

        Raises:
            ValueError: When the area or a coordinate is not a number;
                callers are expected to catch and skip the line.
        """
        parts = line.split('\t')
        if len(parts) != 3:
            logger.warning(f"第 {line_num} 行格式不正确，跳过: {line}")
            return None

        stcd = parts[0].strip()
        # Strip thousands separators before converting the area.
        area = float(parts[1].strip().replace(',', ''))
        outlet_point = parts[2].strip()

        point_match = self._POINT_PATTERN.match(outlet_point)
        if not point_match:
            logger.warning(f"第 {line_num} 行坐标格式不正确，跳过: {outlet_point}")
            return None

        # WKT order is (lng, lat); store both explicitly.
        return {
            'stcd': stcd,
            'area': area,
            'lat': float(point_match.group(2)),
            'lng': float(point_match.group(1)),
            'line_num': line_num
        }

    def parse_watershed_file(self, file_path: str) -> List[Dict[str, Any]]:
        """
        Parse a watershed data file.

        The first line is treated as a header and skipped; blank and
        malformed lines are skipped with a warning.

        Args:
            file_path: Path of the watershed data file.

        Returns:
            List of watershed dicts (see ``_parse_line``).

        Raises:
            FileNotFoundError: When the file does not exist.
            Exception: Re-raised on any other read failure.
        """
        watersheds = []

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                lines = f.readlines()

            # Skip the header row; line_num reflects the 1-based file position.
            for line_num, line in enumerate(lines[1:], start=2):
                line = line.strip()
                if not line:
                    continue

                try:
                    record = self._parse_line(line, line_num)
                except (ValueError, IndexError) as e:
                    logger.warning(f"第 {line_num} 行解析失败，跳过: {line} - 错误: {e}")
                    continue

                if record is not None:
                    watersheds.append(record)

        except FileNotFoundError:
            logger.error(f"文件不存在: {file_path}")
            raise
        except Exception as e:
            logger.error(f"读取文件失败: {file_path} - 错误: {e}")
            raise

        logger.info(f"成功解析 {len(watersheds)} 个流域数据")
        return watersheds

    def process_watersheds(self, watersheds: List[Dict[str, Any]], 
                          high_res: bool = True) -> Dict[str, Any]:
        """
        Process a batch of watersheds.

        Each watershed is delineated in turn; every outcome is recorded in
        the CSV result log, and an exception in one watershed does not stop
        the rest of the batch.

        Args:
            watersheds: Watershed dicts from ``parse_watershed_file``.
            high_res: Whether to use high-resolution data.

        Returns:
            Summary dict with total/success/failed counts, the per-item
            result lists, and the result-log path.
        """
        # Reset per-batch state. Without this, a second call on the same
        # instance would return stale entries in success_results /
        # failed_items that disagree with this call's counters.
        self.results = []
        self.failed_items = []

        total_count = len(watersheds)
        success_count = 0
        failed_count = 0

        logger.info(f"开始批量处理 {total_count} 个流域")

        for i, watershed in enumerate(watersheds, 1):
            stcd = watershed['stcd']
            area = watershed['area']
            lat = watershed['lat']
            lng = watershed['lng']
            line_num = watershed['line_num']

            logger.info(f"[{i}/{total_count}] 处理流域 {stcd} (第 {line_num} 行)")
            logger.info(f"  坐标: ({lat}, {lng}), 面积: {area}")

            # Wall-clock start for the per-watershed elapsed time.
            start_time = datetime.now()

            try:
                # Delegate the actual delineation to StandardizedDelineator.
                results = self.delineator.delineate_watershed(
                    lat=lat,
                    lng=lng,
                    outlet_name=stcd,
                    outlet_id=stcd,
                    area_estimate=area,
                    high_res=high_res
                )

                processing_time = (datetime.now() - start_time).total_seconds()

                if results['validation_result']['is_valid']:
                    logger.success(f"流域 {stcd} 处理成功")
                    success_count += 1
                    self.results.append({
                        'stcd': stcd,
                        'status': 'success',
                        'results': results
                    })

                    # Record the success in the CSV result log.
                    output_files_count = len(results.get('output_files', {}))
                    self._log_result_to_file(
                        index=i,
                        stcd=stcd,
                        status='成功',
                        lat=lat,
                        lng=lng,
                        area=area,
                        output_files_count=output_files_count,
                        error_msg="",
                        processing_time=processing_time
                    )
                else:
                    # Delineation ran but its outputs failed validation.
                    logger.error(f"流域 {stcd} 验证失败")
                    logger.error(f"  无效文件: {results['validation_result']['invalid_files']}")
                    logger.error(f"  缺失文件: {results['validation_result']['missing_files']}")
                    failed_count += 1

                    error_details = f"无效文件: {results['validation_result']['invalid_files']}, 缺失文件: {results['validation_result']['missing_files']}"
                    self.failed_items.append({
                        'stcd': stcd,
                        'line_num': line_num,
                        'error': '输出文件验证失败',
                        'details': results['validation_result']
                    })

                    # Record the validation failure in the CSV result log.
                    self._log_result_to_file(
                        index=i,
                        stcd=stcd,
                        status='失败',
                        lat=lat,
                        lng=lng,
                        area=area,
                        output_files_count=0,
                        error_msg=error_details,
                        processing_time=processing_time
                    )

            except Exception as e:
                # Delineation itself raised; record and continue the batch.
                processing_time = (datetime.now() - start_time).total_seconds()

                logger.error(f"流域 {stcd} 处理失败: {e}")
                failed_count += 1
                self.failed_items.append({
                    'stcd': stcd,
                    'line_num': line_num,
                    'error': str(e),
                    'details': None
                })

                # Record the exception in the CSV result log.
                self._log_result_to_file(
                    index=i,
                    stcd=stcd,
                    status='异常',
                    lat=lat,
                    lng=lng,
                    area=area,
                    output_files_count=0,
                    error_msg=str(e),
                    processing_time=processing_time
                )

        # Batch summary.
        logger.info(f"批量处理完成:")
        logger.info(f"  总数: {total_count}")
        logger.info(f"  成功: {success_count}")
        logger.info(f"  失败: {failed_count}")
        logger.info(f"  结果清单文件: {self.result_log_file}")

        if self.failed_items:
            logger.warning("失败的流域:")
            for item in self.failed_items:
                logger.warning(f"  {item['stcd']} (第 {item['line_num']} 行): {item['error']}")

        return {
            'total_count': total_count,
            'success_count': success_count,
            'failed_count': failed_count,
            'success_results': self.results,
            'failed_items': self.failed_items,
            'result_log_file': self.result_log_file
        }

    def batch_process_from_file(self, file_path: str, high_res: bool = True) -> Dict[str, Any]:
        """
        Parse a watershed data file and process its watersheds in one call.

        Args:
            file_path: Path of the watershed data file.
            high_res: Whether to use high-resolution data.

        Returns:
            Summary dict from ``process_watersheds``.
        """
        logger.info(f"开始从文件批量处理流域: {file_path}")

        # Parse, then process.
        watersheds = self.parse_watershed_file(file_path)
        return self.process_watersheds(watersheds, high_res)


def main():
    """Command-line entry point: parse arguments and run one batch job.

    Exits with status 1 when the batch fails; otherwise returns the
    summary dict produced by the batch processor.
    """
    import argparse

    arg_parser = argparse.ArgumentParser(description='批量流域划分工具')
    arg_parser.add_argument('input_file', help='输入的流域数据文件路径')
    arg_parser.add_argument('--output-dir', help='输出目录')
    arg_parser.add_argument('--result-log', help='结果清单文件路径')
    arg_parser.add_argument('--low-res', action='store_true', help='使用低分辨率模式')
    options = arg_parser.parse_args()

    # Build the batch processor with user-supplied (or default) paths.
    runner = BatchDelineator(
        output_dir=options.output_dir,
        result_log_file=options.result_log
    )

    try:
        summary = runner.batch_process_from_file(
            file_path=options.input_file,
            high_res=not options.low_res
        )
        logger.success("批量处理完成！")
        logger.success(f"详细结果请查看: {summary['result_log_file']}")
        return summary
    except Exception as e:
        logger.error(f"批量处理失败: {e}")
        sys.exit(1)


# Run the CLI entry point only when this file is executed as a script.
if __name__ == "__main__":
    main()