import csv
import json
import logging
from typing import List, Dict, Any, Optional, Union
from datetime import datetime
import re
from urllib.parse import urlparse, parse_qs

try:
    import pandas as pd
except ImportError:
    pd = None

from .database import DatabaseManager
from .models import CloudResource, CloudType, ShareStatus, ImportResult

class ShareCodeImporter:
    """Share-code importer.

    Normalizes share-code records coming from several sources (single calls,
    dict batches, CSV/JSON/Excel files, free-form text and share URLs) into
    ``CloudResource`` objects and persists them through ``DatabaseManager``.
    """

    # Every accepted CSV header spelling mapped to its canonical field name.
    # Hoisted to class level: the original rebuilt this dict once per CSV
    # *cell*, which was pure waste.
    _CSV_FIELD_MAPPING = {
        '云盘类型': 'clouds_type',
        'clouds_type': 'clouds_type',
        'type': 'clouds_type',
        '分享码': 'share_code',
        'share_code': 'share_code',
        'code': 'share_code',
        '访问码': 'access_code',
        'access_code': 'access_code',
        'password': 'access_code',
        '分享名称': 'share_name',
        'share_name': 'share_name',
        'name': 'share_name',
        'title': 'share_name',
        '分享链接': 'full_url',
        'full_url': 'full_url',
        'url': 'full_url',
        'link': 'full_url',
        '分享时间': 'share_time',
        'share_time': 'share_time',
        'time': 'share_time',
        'date': 'share_time'
    }

    def __init__(self, db_manager: DatabaseManager):
        self.db = db_manager
        self.logger = logging.getLogger(__name__)

    def import_single(self, clouds_type: str, share_code: str, access_code: str = "",
                     share_name: str = "", full_url: str = "",
                     share_time: Optional[datetime] = None,
                     force_update: bool = False) -> ImportResult:
        """Import a single share code.

        Args:
            clouds_type: Cloud-drive type identifier (see ``CloudType``).
            share_code: The share code itself (required).
            access_code: Optional extraction password.
            share_name: Optional human-readable share name.
            full_url: Optional full share URL.
            share_time: Optional original share timestamp.
            force_update: When True, insert even if the record already exists.

        Returns:
            ImportResult carrying exactly one success, skip, or error.
        """
        result = ImportResult()

        try:
            resource = CloudResource(
                clouds_type=clouds_type,
                share_code=share_code,
                access_code=access_code,
                share_name=share_name,
                full_url=full_url,
                share_time=share_time
            )

            # Model-level validation; collect every message before bailing out.
            errors = resource.validate()
            if errors:
                for error in errors:
                    result.add_error(f"验证失败: {error}")
                return result

            # Skip duplicates unless the caller asked for an overwrite.
            if not force_update and self.db.check_resource_exists(clouds_type, share_code):
                self.logger.info(f"分享码已存在，跳过: {clouds_type} - {share_code}")
                result.add_skip(resource)
                return result

            # Insert (or update, when force_update brought us past the check).
            success = self.db.insert_resource(
                clouds_type=resource.clouds_type,
                share_code=resource.share_code,
                access_code=resource.access_code,
                share_name=resource.share_name,
                full_url=resource.full_url,
                share_status=resource.share_status,
                share_time=resource.share_time,
                file_info_json=resource.file_info_json
            )

            if success:
                result.add_success(resource)
                self.logger.info(f"分享码导入成功: {clouds_type} - {share_code}")
            else:
                result.add_error(f"数据库插入失败: {clouds_type} - {share_code}")

        except Exception as e:
            error_msg = f"导入分享码时发生错误: {str(e)}"
            result.add_error(error_msg)
            self.logger.error(error_msg)

        return result

    def _parse_share_time(self, value: Union[str, datetime]) -> Optional[datetime]:
        """Coerce a raw share_time value to a datetime.

        Accepts a datetime (returned unchanged) or a string in ISO-8601 or
        ``%Y-%m-%d %H:%M:%S`` format. Unparseable strings are logged and
        yield None; any other type yields None.
        """
        if isinstance(value, datetime):
            return value
        if isinstance(value, str):
            try:
                return datetime.fromisoformat(value)
            except ValueError:
                try:
                    return datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
                except ValueError:
                    self.logger.warning(f"无法解析分享时间: {value}")
        return None

    def import_batch(self, resources: List[Dict[str, Any]], force_update: bool = False) -> ImportResult:
        """Import a batch of share-code dicts, aggregating per-item results.

        Each dict may carry clouds_type / share_code / access_code /
        share_name / full_url / share_time; missing fields default to ''.
        """
        result = ImportResult()

        for i, resource_data in enumerate(resources):
            try:
                share_time = None
                if resource_data.get('share_time'):
                    share_time = self._parse_share_time(resource_data['share_time'])

                single_result = self.import_single(
                    clouds_type=resource_data.get('clouds_type', ''),
                    share_code=resource_data.get('share_code', ''),
                    access_code=resource_data.get('access_code', ''),
                    share_name=resource_data.get('share_name', ''),
                    full_url=resource_data.get('full_url', ''),
                    share_time=share_time,
                    force_update=force_update
                )

                # Fold the single-item outcome into the aggregate result.
                result.success_count += single_result.success_count
                result.skip_count += single_result.skip_count
                result.error_count += single_result.error_count
                result.errors.extend(single_result.errors)
                result.imported_resources.extend(single_result.imported_resources)
                result.skipped_resources.extend(single_result.skipped_resources)

            except Exception as e:
                error_msg = f"处理第 {i+1} 条记录时发生错误: {str(e)}"
                result.add_error(error_msg)
                self.logger.error(error_msg)

        return result

    def import_from_csv(self, csv_file_path: str, force_update: bool = False) -> ImportResult:
        """Import share codes from a CSV file.

        The delimiter is auto-detected with ``csv.Sniffer``; headers are
        stripped, lower-cased and mapped through ``_CSV_FIELD_MAPPING``.
        Rows without a share_code are ignored.
        """
        result = ImportResult()

        try:
            with open(csv_file_path, 'r', encoding='utf-8') as file:
                # Sniff the delimiter from a small sample, then rewind.
                sample = file.read(1024)
                file.seek(0)
                delimiter = csv.Sniffer().sniff(sample).delimiter

                reader = csv.DictReader(file, delimiter=delimiter)
                resources = []

                for row in reader:
                    # Normalize headers and map to canonical field names;
                    # unrecognized headers pass through unchanged.
                    normalized_row = {}
                    for key, value in row.items():
                        if key:
                            clean_key = key.strip().lower()
                            mapped_key = self._CSV_FIELD_MAPPING.get(clean_key, clean_key)
                            normalized_row[mapped_key] = value.strip() if value else ''

                    if normalized_row.get('share_code'):  # share_code is mandatory
                        resources.append(normalized_row)

                if resources:
                    result = self.import_batch(resources, force_update)
                else:
                    result.add_error("CSV文件中没有找到有效的分享码数据")

        except FileNotFoundError:
            result.add_error(f"CSV文件不存在: {csv_file_path}")
        except Exception as e:
            result.add_error(f"读取CSV文件时发生错误: {str(e)}")

        return result

    def import_from_json(self, json_file_path: str, force_update: bool = False) -> ImportResult:
        """Import share codes from a JSON file.

        Accepts a bare list, an object with a ``resources`` or ``data`` list,
        or a single resource object.
        """
        result = ImportResult()

        try:
            with open(json_file_path, 'r', encoding='utf-8') as file:
                data = json.load(file)

                # Normalize the supported top-level shapes to a list.
                resources = []
                if isinstance(data, list):
                    resources = data
                elif isinstance(data, dict):
                    if 'resources' in data:
                        resources = data['resources']
                    elif 'data' in data:
                        resources = data['data']
                    else:
                        # A single resource object.
                        resources = [data]

                if resources:
                    result = self.import_batch(resources, force_update)
                else:
                    result.add_error("JSON文件中没有找到有效的资源数据")

        except FileNotFoundError:
            result.add_error(f"JSON文件不存在: {json_file_path}")
        except json.JSONDecodeError as e:
            result.add_error(f"JSON文件格式错误: {str(e)}")
        except Exception as e:
            result.add_error(f"读取JSON文件时发生错误: {str(e)}")

        return result

    @staticmethod
    def _excel_cell(row, actual_columns: Dict[str, str], name: str) -> str:
        """Return the stripped string value of an optional Excel cell.

        A missing column, a NaN cell, or the literal string 'nan' (which
        ``str()`` produces for float NaN) all collapse to ''.
        """
        if name in actual_columns and pd.notna(row[actual_columns[name]]):
            value = str(row[actual_columns[name]]).strip()
            if value != 'nan':
                return value
        return ''

    def import_from_excel(self, file_path: str, force_update: bool = False,
                         sheet_name: Optional[str] = None) -> ImportResult:
        """Import share codes from an Excel workbook (requires pandas).

        Args:
            file_path: Path to the Excel file.
            force_update: Overwrite records that already exist.
            sheet_name: Worksheet name; None means the first sheet.

        Expected columns (several header spellings accepted per column):
        - 类别 (defaults to 天翼云盘 when absent)
        - 分享码 (required)
        - 访问码
        - 解析结果 file_info_json (optional)
        """
        result = ImportResult()

        if pd is None:
            result.add_error("缺少pandas依赖，无法读取Excel文件。请安装: pip install pandas openpyxl")
            return result

        try:
            if sheet_name:
                df = pd.read_excel(file_path, sheet_name=sheet_name)
            else:
                df = pd.read_excel(file_path)

            self.logger.info(f"Excel文件读取成功，共 {len(df)} 行数据")

            # Accepted header spellings for each canonical column name.
            column_mapping = {
                '类别': ['类别', '云盘类型', 'clouds_type', 'type'],
                '分享码': ['分享码', 'share_code', 'code', '提取码'],
                '访问码': ['访问码', 'access_code', 'password', '密码'],
                '解析结果': ['解析结果', 'file_info_json', 'file_info', 'result', '结果'],
                '分享名称': ['分享名称', 'share_name', 'name', '名称'],
                '分享链接': ['分享链接', 'full_url', 'url', '链接']
            }

            # Resolve which DataFrame column backs each canonical name.
            actual_columns = {}
            for standard_name, possible_names in column_mapping.items():
                for col in df.columns:
                    if col in possible_names:
                        actual_columns[standard_name] = col
                        break

            # The share-code column is the only hard requirement.
            if '分享码' not in actual_columns:
                result.add_error("Excel文件中未找到分享码列，支持的列名: " +
                               ", ".join(column_mapping['分享码']))
                return result

            processed_count = 0

            for index, row in df.iterrows():
                try:
                    share_code = str(row[actual_columns['分享码']]).strip() if pd.notna(row[actual_columns['分享码']]) else ''

                    # index + 2 = spreadsheet row number (header + 1-based).
                    if not share_code or share_code == 'nan':
                        result.add_error(f"第 {index + 2} 行分享码为空")
                        continue

                    # Cloud type: default 天翼云盘, with alias normalization.
                    clouds_type = CloudType.TIANYI.value
                    if '类别' in actual_columns and pd.notna(row[actual_columns['类别']]):
                        clouds_type = str(row[actual_columns['类别']]).strip()
                        if clouds_type in ['天翼云', '天翼云盘', '189云盘']:
                            clouds_type = CloudType.TIANYI.value
                        elif clouds_type in ['百度云', '百度网盘', '百度云盘']:
                            clouds_type = CloudType.BAIDU.value
                        elif clouds_type in ['阿里云', '阿里云盘', 'aliyun']:
                            clouds_type = CloudType.ALIYUN.value
                        elif clouds_type in ['115', '115网盘']:
                            clouds_type = CloudType.YUN115.value

                    access_code = self._excel_cell(row, actual_columns, '访问码')
                    share_name = self._excel_cell(row, actual_columns, '分享名称')
                    full_url = self._excel_cell(row, actual_columns, '分享链接')

                    # Parse-result column. NOTE: the original also checked a
                    # '结果' key here, but actual_columns only ever holds the
                    # canonical names above, so that branch was dead code —
                    # '解析结果' is the only live key.
                    file_info_json = None
                    share_status = ShareStatus.UNPROCESSED.value

                    if '解析结果' in actual_columns and pd.notna(row[actual_columns['解析结果']]):
                        file_info_str = str(row[actual_columns['解析结果']]).strip()
                        if file_info_str and file_info_str != 'nan':
                            try:
                                # JSON-looking values are parsed; anything
                                # else is kept as the raw string.
                                if file_info_str.startswith(('{', '[')):
                                    file_info_json = json.loads(file_info_str)
                                else:
                                    file_info_json = file_info_str
                            except json.JSONDecodeError:
                                # Not valid JSON after all — store it raw.
                                file_info_json = file_info_str
                            # Either way the row counts as processed.
                            share_status = ShareStatus.PROCESSED.value

                    resource = CloudResource(
                        clouds_type=clouds_type,
                        share_code=share_code,
                        access_code=access_code,
                        share_name=share_name,
                        full_url=full_url,
                        share_status=share_status,
                        file_info_json=json.dumps(file_info_json, ensure_ascii=False) if file_info_json else None
                    )

                    # Skip existing records unless force_update is set.
                    existing = self.db.check_resource_exists(clouds_type, share_code)
                    if existing and not force_update:
                        result.add_skip(resource)
                        self.logger.debug(f"分享码已存在，跳过: {share_code}")
                        continue

                    success = self.db.insert_resource(
                        clouds_type=resource.clouds_type,
                        share_code=resource.share_code,
                        access_code=resource.access_code,
                        share_name=resource.share_name,
                        full_url=resource.full_url,
                        share_status=resource.share_status,
                        share_time=resource.share_time,
                        file_info_json=resource.file_info_json
                    )

                    if success:
                        result.add_success(resource)
                        processed_count += 1
                        self.logger.info(f"导入资源: {share_code} ({clouds_type}) (第{processed_count}条)")
                    else:
                        result.add_error(f"第 {index + 2} 行数据库操作失败: {share_code}")

                except Exception as e:
                    error_msg = f"第 {index + 2} 行处理失败: {str(e)}"
                    result.add_error(error_msg)
                    self.logger.error(error_msg)

            self.logger.info(f"Excel文件导入完成: {result.get_summary()}")

        except Exception as e:
            error_msg = f"导入Excel文件失败: {str(e)}"
            self.logger.error(error_msg)
            result.add_error(error_msg)

        return result

    def import_from_text(self, text_content: str, clouds_type: str = CloudType.TIANYI.value,
                        force_update: bool = False) -> ImportResult:
        """Import share codes from plain text (one record per line).

        Blank lines and lines starting with '#' are skipped; each remaining
        line is parsed by ``_parse_text_line``.
        """
        result = ImportResult()

        try:
            lines = text_content.strip().split('\n')
            resources = []

            for line in lines:
                line = line.strip()
                if not line or line.startswith('#'):  # skip blanks and comments
                    continue

                resource_data = self._parse_text_line(line, clouds_type)
                if resource_data:
                    resources.append(resource_data)

            if resources:
                result = self.import_batch(resources, force_update)
            else:
                result.add_error("文本中没有找到有效的分享码")

        except Exception as e:
            result.add_error(f"解析文本内容时发生错误: {str(e)}")

        return result

    def _parse_text_line(self, line: str, default_clouds_type: str) -> Optional[Dict[str, str]]:
        """Parse one text line into a resource dict, or None on failure.

        Recognized formats, tried in order:
        1. ``code|access|name[|url]``  (pipe-separated)
        2. ``code access name...``     (whitespace-separated)
        3. a full https://cloud.189.cn share URL
        4. a bare share code (length > 5)
        """
        try:
            # Format 1: pipe-separated fields.
            if '|' in line:
                parts = line.split('|')
                return {
                    'clouds_type': default_clouds_type,
                    'share_code': parts[0].strip(),
                    'access_code': parts[1].strip() if len(parts) > 1 else '',
                    'share_name': parts[2].strip() if len(parts) > 2 else '',
                    'full_url': parts[3].strip() if len(parts) > 3 else ''
                }

            # Format 2: whitespace-separated fields.
            elif ' ' in line:
                parts = line.split()
                return {
                    'clouds_type': default_clouds_type,
                    'share_code': parts[0],
                    'access_code': parts[1] if len(parts) > 1 else '',
                    'share_name': ' '.join(parts[2:]) if len(parts) > 2 else ''
                }

            # Format 3: full URL (Tianyi cloud only for now).
            elif line.startswith('http'):
                parsed_url = urlparse(line)
                if 'cloud.189.cn' in parsed_url.netloc:
                    # Heuristic: the share code is the first long path segment.
                    path_parts = parsed_url.path.split('/')
                    share_code = None
                    for part in path_parts:
                        if part and len(part) > 10:
                            share_code = part
                            break

                    if share_code:
                        return {
                            'clouds_type': CloudType.TIANYI.value,
                            'share_code': share_code,
                            'access_code': '',
                            'share_name': '',
                            'full_url': line
                        }

            # Format 4: bare share code (assumed longer than 5 chars).
            elif len(line) > 5:
                return {
                    'clouds_type': default_clouds_type,
                    'share_code': line,
                    'access_code': '',
                    'share_name': ''
                }

        except Exception as e:
            self.logger.warning(f"解析文本行失败: {line}, 错误: {str(e)}")

        return None

    def extract_share_codes_from_urls(self, urls: List[str]) -> List[Dict[str, str]]:
        """Extract resource dicts from a list of share URLs.

        Currently only https://cloud.189.cn URLs are recognized; unparseable
        URLs are logged and skipped.
        """
        resources = []

        for url in urls:
            try:
                parsed_url = urlparse(url)

                if 'cloud.189.cn' in parsed_url.netloc:
                    # Heuristic: the share code is the first long path segment.
                    path_parts = parsed_url.path.split('/')
                    share_code = None
                    for part in path_parts:
                        if part and len(part) > 10:
                            share_code = part
                            break

                    # The access code travels as the accessCode query param.
                    query_params = parse_qs(parsed_url.query)
                    access_code = query_params.get('accessCode', [''])[0]

                    if share_code:
                        resources.append({
                            'clouds_type': CloudType.TIANYI.value,
                            'share_code': share_code,
                            'access_code': access_code,
                            'share_name': '',
                            'full_url': url
                        })

                # Other cloud providers can be added here, e.g.:
                # elif 'pan.baidu.com' in parsed_url.netloc: ...

            except Exception as e:
                self.logger.warning(f"解析URL失败: {url}, 错误: {str(e)}")

        return resources

    def get_import_statistics(self) -> Dict[str, Any]:
        """Return aggregate counts from the cloud_resources table.

        Returns a dict with total / by_type / by_status / processed /
        unprocessed keys, or {} when the query fails.
        """
        try:
            with self.db.connection.cursor() as cursor:
                # Overall row count.
                cursor.execute("SELECT COUNT(*) as total FROM cloud_resources")
                total = cursor.fetchone()[0]

                # Per cloud-type counts.
                cursor.execute("""
                    SELECT clouds_type, COUNT(*) as count 
                    FROM cloud_resources 
                    GROUP BY clouds_type
                """)
                by_type = {row[0]: row[1] for row in cursor.fetchall()}

                # Per share-status counts.
                cursor.execute("""
                    SELECT share_status, COUNT(*) as count 
                    FROM cloud_resources 
                    GROUP BY share_status
                """)
                by_status = {row[0]: row[1] for row in cursor.fetchall()}

                # Processed = has a non-empty file_info_json payload.
                cursor.execute("""
                    SELECT 
                        SUM(CASE WHEN file_info_json IS NOT NULL AND file_info_json != '' THEN 1 ELSE 0 END) as processed,
                        SUM(CASE WHEN file_info_json IS NULL OR file_info_json = '' THEN 1 ELSE 0 END) as unprocessed
                    FROM cloud_resources
                """)
                processed_stats = cursor.fetchone()

                return {
                    'total': total,
                    'by_type': by_type,
                    'by_status': by_status,
                    'processed': processed_stats[0] or 0,
                    'unprocessed': processed_stats[1] or 0
                }

        except Exception as e:
            self.logger.error(f"获取导入统计信息失败: {str(e)}")
            return {}


def create_sample_csv(file_path: str):
    """Write a two-row example CSV demonstrating the expected import format."""
    header = ['clouds_type', 'share_code', 'access_code', 'share_name', 'full_url', 'share_time']
    rows = [
        ['天翼云', 'example_code_1', 'password123', '示例分享1',
         'https://cloud.189.cn/web/share?code=example_code_1', '2024-01-01 12:00:00'],
        ['天翼云', 'example_code_2', '', '示例分享2',
         'https://cloud.189.cn/web/share?code=example_code_2', '2024-01-02 12:00:00'],
    ]

    with open(file_path, 'w', newline='', encoding='utf-8') as out:
        writer = csv.writer(out)
        writer.writerow(header)
        writer.writerows(rows)


if __name__ == "__main__":
    # Manual smoke test for the importer; requires a reachable database.
    logging.basicConfig(level=logging.INFO)

    from .database import init_database

    # BUG FIX: the original called the undefined name init_global_session();
    # the factory actually imported above is init_database.
    db = init_database()
    if db:
        importer = ShareCodeImporter(db)

        # Exercise the single-record import path.
        result = importer.import_single(
            clouds_type=CloudType.TIANYI.value,
            share_code="test_code_123",
            access_code="test_password",
            share_name="测试分享"
        )

        print(result.get_summary())

        # Show aggregate statistics for the table.
        stats = importer.get_import_statistics()
        print(f"导入统计: {stats}")

        db.disconnect()