import json
import os
from datetime import datetime
from pathlib import Path
from typing import Dict, Any, List
from urllib.parse import urlparse

from sqlalchemy import create_engine, Column, Integer, String, Text, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

from states import CrawlerState, is_info_complete, merge_extracted_info
from utils.logger import logger
from tools import (
    get_original_html,
    call_ai_api,
    clean_html,
    extract_basic_contact_info,
    _crawl_html_with_selenium_direct
)

# Database model
Base = declarative_base()

class Contact(Base):
    """SQLAlchemy model for one row of the ``contacts`` table.

    Backed by the SQLite file ``data/uae_contacts.db``. The row ``id`` doubles
    as the index of the cached HTML file (``data/html_cache/<id>.html``) used
    by the crawler nodes below. The ``comment=`` strings are Chinese column
    descriptions stored in the database schema.
    """
    __tablename__ = 'contacts'
    
    # Primary key; reused as the HTML cache index by start_node / get_next_file_index.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Source URL of the contact page; unique per record.
    url = Column(String(500), unique=True, nullable=False, comment='联系人URL')
    # Comma-joined phone numbers (see save_data_node).
    url_phone = Column(String(50), comment='电话号码')
    # "Yes" when a WhatsApp entry exists in social_media, empty otherwise.
    has_whatsapp = Column(String(10), comment='是否有WhatsApp')
    # General remarks (not written by save_data_node in this file).
    remarks = Column(Text, comment='备注')
    # Comma-joined e-mail addresses.
    email = Column(String(200), comment='邮箱地址')
    # JSON-encoded social-media mapping (json.dumps with ensure_ascii=False).
    social_media = Column(String(500), comment='社交媒体')
    # Free-text remarks produced during extraction.
    contact_remarks = Column(Text, comment='联系备注')
    # JSON-encoded company/business info from the agent.
    other_remarks = Column(Text, comment='其他备注')
    # Maintained automatically on insert and on every update.
    updated = Column(DateTime, default=datetime.now, onupdate=datetime.now, comment='更新时间')

    def __repr__(self):
        return f"<Contact(id={self.id}, url='{self.url}')>"


def start_node(state: CrawlerState) -> CrawlerState:
    """Start node: initialise the crawl state and resolve the source URL.

    Builds the HTML cache file path from ``state['html_index']``, looks up the
    matching URL in the ``contacts`` table (the row id doubles as the cache
    index), and returns a fresh state with empty extraction containers.
    A database failure is logged but not fatal — downstream nodes can still
    fall back to the local HTML file.
    """
    html_cache_dir = "data/html_cache"
    html_file_path = f"{html_cache_dir}/{state['html_index']}.html"

    url = None
    session = None
    try:
        db_path = "data/uae_contacts.db"
        engine = create_engine(f'sqlite:///{db_path}', echo=False)
        Session = sessionmaker(bind=engine)
        session = Session()

        contact = session.query(Contact).filter_by(id=state['html_index']).first()
        if contact:
            url = contact.url
            logger.info(f"从数据库获取URL: {url}")
        else:
            logger.warning(f"数据库中未找到ID为{state['html_index']}的联系人记录")
    except Exception as e:
        logger.error(f"从数据库获取URL失败: {e}")
    finally:
        # BUGFIX: the original closed the session inside the try body, leaking
        # it whenever the query raised. Always release it here.
        if session is not None:
            session.close()

    # Logged only on initial startup of a processing run.
    logger.info(f"开始处理HTML文件索引: {state['html_index']}")
    return {
        **state,
        "url": url,  # URL resolved from the database (or None)
        "html_file_path": html_file_path,
        "extracted_info": {
            "emails": [],
            "phones": [],
            "social_media": {},
            "company_info": {},
            "remarks": ""
        },
        "pending_indices": [],
        "processed_indices": [],
        "current_count": 0,
        "messages": [{"role": "system", "content": f"开始处理HTML文件: {html_file_path}"}]
    }


def fetch_html_node(state: CrawlerState) -> CrawlerState:
    """Fetch HTML content: prefer the local cache file, fall back to Selenium.

    On success, returns the state with ``html_content`` set and a success
    message appended. On failure, returns the state with ``html_content=None``
    and an ``error_message``.
    """
    def _failure(error_msg: str) -> CrawlerState:
        # Shared failure path: log, record the error, append to the message log.
        logger.error(error_msg)
        return {
            **state,
            "html_content": None,
            "error_message": error_msg,
            "messages": state["messages"] + [{"role": "system", "content": error_msg}]
        }

    # 1) Try the locally cached HTML file first.
    html_content = get_original_html(state["html_index"], state.get("url"))
    if html_content:
        return {
            **state,
            "html_content": html_content,
            "messages": state["messages"] + [{"role": "system", "content": "HTML文件读取成功"}]
        }

    # 2) No local file — the Selenium fallback needs a URL.
    if not state.get("url"):
        return _failure(f"无法读取HTML文件且未提供URL: {state['html_file_path']}")

    # Restrict the dynamic crawl to the page's own domain ('www.' stripped).
    allowed_domain = urlparse(state["url"]).netloc.lower()
    if allowed_domain.startswith('www.'):
        allowed_domain = allowed_domain[4:]

    html_content = _crawl_html_with_selenium_direct(state["url"], allowed_domain=allowed_domain)
    if html_content:
        return {
            **state,
            "html_content": html_content,
            "messages": state["messages"] + [{"role": "system", "content": "HTML内容动态爬取成功"}]
        }
    return _failure(f"动态爬取失败: {state['url']}")


def preprocess_html_node(state: CrawlerState) -> CrawlerState:
    """Pre-extraction node: clean the HTML and pull out contact information.

    Runs the contact/social-media/link extraction on the ORIGINAL HTML, then
    hands the cleaned HTML to the agent. Always sets ``skip_agent_processing``
    in the returned state: True when the pre-extraction found nothing at all
    (treated as a bad URL), False otherwise.
    """
    html_content = state.get("html_content")
    if not html_content:
        return {
            **state,
            "error_message": "没有HTML内容可供分析"
        }

    logger.info("[🔍] 开始前置信息提取...")

    # Step 1: strip CSS/style/script and other noise tags.
    logger.info("[🧹] 执行HTML预处理，去除垃圾标签...")
    cleaned_html = clean_html(html_content)

    # Step 2: pre-extract contact info, social media and key page links.
    logger.info("[📋] 前置提取联系信息、社交媒体和关键链接...")
    pre_extracted = extract_basic_contact_info(html_content, state.get("url", ""))

    # An entirely empty result (no contacts AND no candidate links) marks the
    # page as a dead end: skip the agent and save directly.
    is_empty_result = not (
        pre_extracted.get("emails")
        or pre_extracted.get("phones")
        or pre_extracted.get("social_media")
        or pre_extracted.get("page_links")
    )

    if is_empty_result:
        logger.info("[⚠️] 前置提取结果为空且无相关链接，将跳过Agent处理直接保存")
    elif pre_extracted.get("page_links"):
        logger.info(f"[🔗] 发现 {len(pre_extracted.get('page_links', []))} 个相关链接，继续Agent处理")

    preprocessed_info = {
        "emails": pre_extracted.get("emails", []),
        "phones": pre_extracted.get("phones", []),
        "social_media": pre_extracted.get("social_media", {}),
        "page_links": pre_extracted.get("page_links", []),
        "company_info": {},  # business info is filled in later by the agent
        "remarks": "Bad Url, skip" if is_empty_result else ""
    }

    logger.info(f"[✅] 前置提取完成: {len(preprocessed_info['emails'])}个邮箱, {len(preprocessed_info['phones'])}个电话, {len(preprocessed_info['social_media'])}个社交媒体, {len(preprocessed_info['page_links'])}个关键链接")

    return {
        **state,
        "html_content": cleaned_html,  # hand the cleaned HTML to the agent
        "extracted_info": preprocessed_info,
        # BUGFIX: the original mutated `state` in place and only set the flag
        # in two of three cases, so a stale True from a previous file could
        # survive when the result was non-empty but had no page links.
        "skip_agent_processing": is_empty_result,
        "messages": state["messages"] + [
            {"role": "system", "content": f"前置信息提取完成: 邮箱{len(preprocessed_info['emails'])}个, 电话{len(preprocessed_info['phones'])}个, 社交媒体{len(preprocessed_info['social_media'])}个, 关键链接{len(preprocessed_info['page_links'])}个"}
        ]
    }


def extract_business_info_node(state: CrawlerState) -> CrawlerState:
    """Agent node: extract company/business information via the AI API.

    Contact details were already pre-extracted; the agent may additionally
    return emails/phones/social media, which are merged (de-duplicated,
    order-preserving) into ``extracted_info`` together with its business info.
    """
    def _as_list(value) -> list:
        # The AI result may carry a scalar or a list for email/phone fields.
        if not value:
            return []
        return list(value) if isinstance(value, list) else [value]

    html_content = state.get("html_content")
    extracted_info = state.get("extracted_info", {})

    if not html_content:
        return {
            **state,
            "error_message": "没有HTML内容可供分析"
        }

    logger.info("[🤖] Agent开始专注提取公司商业信息...")

    ai_result = call_ai_api(html_content, state.get("url", ""), extracted_info)
    if not ai_result:
        logger.error("[❌] Agent商业信息提取失败")
        return {
            **state,
            "error_message": "Agent商业信息提取失败"
        }

    company_info = ai_result.get("business_info", {})
    remarks = ai_result.get("remarks", "")
    additional_emails = _as_list(ai_result.get("email"))
    additional_phones = _as_list(ai_result.get("phone"))
    additional_social_media = ai_result.get("social_media", {})

    # Merge the agent's findings into the pre-extracted info. dict.fromkeys
    # de-duplicates while keeping first-seen order (the original list(set(...))
    # produced a nondeterministic order that later ends up in the database).
    updated_info = {
        "emails": list(dict.fromkeys(extracted_info.get("emails", []) + additional_emails)),
        "phones": list(dict.fromkeys(extracted_info.get("phones", []) + additional_phones)),
        "social_media": {**extracted_info.get("social_media", {}), **additional_social_media},
        "page_links": extracted_info.get("page_links", []),  # keep page links as-is
        "company_info": company_info,  # business info extracted by the agent
        "remarks": remarks
    }

    logger.info(f"[✅] Agent商业信息提取完成: 公司信息{len(company_info)}项, 补充邮箱{len(additional_emails)}个, 补充电话{len(additional_phones)}个")

    return {
        **state,
        "extracted_info": updated_info,
        "messages": state["messages"] + [
            {"role": "system", "content": f"Agent商业信息提取完成: 公司信息{len(company_info)}项, 总邮箱{len(updated_info['emails'])}个, 总电话{len(updated_info['phones'])}个"}
        ]
    }


def find_related_files_node(state: CrawlerState) -> CrawlerState:
    """Discover additional cached HTML files to queue (batch mode only).

    In single-file mode the node is a no-op. Otherwise it scans
    ``data/html_cache`` for numbered ``*.html`` files not yet processed or
    pending, caps them at the remaining ``max_files`` budget, and appends
    them to ``pending_indices``.
    """
    html_content = state.get("html_content")
    if not html_content:
        return state

    # Single-file mode: max_files == 1, or nothing is pending and exactly one
    # file has been counted so far (only the explicitly requested index).
    is_single_file_mode = (
        state["max_files"] == 1
        or (len(state["pending_indices"]) == 0 and state["current_count"] == 1)
    )
    if is_single_file_mode:
        logger.info("[📁] 单文件处理模式，跳过查找相关文件")
        return {
            **state,
            "messages": state["messages"] + [
                {"role": "system", "content": "单文件处理模式，跳过查找相关文件"}
            ]
        }

    potential_indices = []
    html_cache_dir = Path("data/html_cache")
    if html_cache_dir.exists():
        for file_path in html_cache_dir.glob("*.html"):
            try:
                index = int(file_path.stem)
            except ValueError:
                continue  # skip files whose stem is not a numeric index
            if (index != state["html_index"] and
                    index not in state["processed_indices"] and
                    index not in state["pending_indices"]):
                potential_indices.append(index)

        # Sort for a deterministic processing order — glob order depends on
        # the filesystem.
        potential_indices.sort()

        # Cap at the remaining budget (minus the file currently in flight).
        # BUGFIX: the original skipped the truncation when the budget was
        # already exhausted (max_additional <= 0) and queued the FULL list.
        max_additional = state["max_files"] - state["current_count"] - 1
        potential_indices = potential_indices[:max(max_additional, 0)]

    logger.info(f"[📁] 发现{len(potential_indices)}个待处理文件")

    return {
        **state,
        "pending_indices": state["pending_indices"] + potential_indices,
        "messages": state["messages"] + [
            {"role": "system", "content": f"发现{len(potential_indices)}个待处理文件"}
        ]
    }


def save_data_node(state: CrawlerState) -> CrawlerState:
    """Persist the extracted info to SQLite, upserting on the contact URL.

    Updates the existing row for the URL when one exists (only the fields
    that actually carry data), otherwise inserts a new row. On success the
    current index is appended to ``processed_indices`` and the counter is
    incremented; on failure an ``error_message`` is recorded.
    """
    extracted_info = state["extracted_info"]

    session = None
    try:
        db_path = "data/uae_contacts.db"
        engine = create_engine(f"sqlite:///{db_path}")
        Base.metadata.create_all(engine)
        Session = sessionmaker(bind=engine)
        session = Session()

        # BUGFIX: start_node stores url=None when the DB lookup fails, so the
        # key is present and dict.get's default never fired in the original —
        # inserting NULL into the non-nullable unique 'url' column. Use `or`
        # so a file:// pseudo-URL is substituted for any falsy value.
        url = state.get("url") or f"file:///{state['html_file_path']}"

        existing_contact = session.query(Contact).filter_by(url=url).first()

        if existing_contact:
            # Update only the fields for which extraction produced data.
            if extracted_info.get("phones"):
                existing_contact.url_phone = ", ".join(extracted_info["phones"])
            if extracted_info.get("emails"):
                existing_contact.email = ", ".join(extracted_info["emails"])
            if extracted_info.get("social_media"):
                existing_contact.social_media = json.dumps(extracted_info["social_media"], ensure_ascii=False)
            if extracted_info.get("social_media", {}).get("whatsapp"):
                existing_contact.has_whatsapp = "Yes"
            if extracted_info.get("remarks"):
                existing_contact.contact_remarks = extracted_info["remarks"]
            if extracted_info.get("company_info"):
                existing_contact.other_remarks = json.dumps(extracted_info["company_info"], ensure_ascii=False)
            existing_contact.updated = datetime.now()
        else:
            # Insert a fresh contact record.
            new_contact = Contact(
                url=url,
                url_phone=", ".join(extracted_info.get("phones", [])),
                email=", ".join(extracted_info.get("emails", [])),
                social_media=json.dumps(extracted_info.get("social_media", {}), ensure_ascii=False),
                has_whatsapp="Yes" if extracted_info.get("social_media", {}).get("whatsapp") else "",
                contact_remarks=extracted_info.get("remarks", ""),
                other_remarks=json.dumps(extracted_info.get("company_info", {}), ensure_ascii=False)
            )
            session.add(new_contact)

        session.commit()

        return {
            **state,
            "processed_indices": state["processed_indices"] + [state["html_index"]],
            "current_count": state["current_count"] + 1,
            "messages": state["messages"] + [{"role": "system", "content": "数据保存成功"}]
        }

    except Exception as e:
        # Roll back any partial transaction before reporting the failure.
        if session is not None:
            session.rollback()
        error_msg = f"数据保存失败: {str(e)}"
        logger.error(error_msg)
        return {
            **state,
            "error_message": error_msg,
            "messages": state["messages"] + [{"role": "system", "content": error_msg}]
        }
    finally:
        # BUGFIX: the original only closed the session on the success path.
        if session is not None:
            session.close()


def finalize_result_node(state: CrawlerState) -> CrawlerState:
    """Assemble the final run summary and store it as ``final_result``."""
    info = state["extracted_info"]

    # Per-run processing details nested under the main result.
    summary = {
        "html_index": state["html_index"],
        "processed_indices": state["processed_indices"],
        "error_message": state.get("error_message"),
    }

    final_result = {
        "processed_files": len(state["processed_indices"]),
        "total_emails": len(info.get("emails", [])),
        "total_phones": len(info.get("phones", [])),
        "social_media_platforms": len(info.get("social_media", {})),
        "has_company_info": bool(info.get("company_info")),
        "url": state.get("url"),  # carry the source URL into the result
        "extracted_data": info,
        "processing_summary": summary,
    }

    logger.info("=== 处理完成 ===")
    logger.info(f"处理文件数: {final_result['processed_files']}")
    logger.info(f"提取邮箱数: {final_result['total_emails']}")
    logger.info(f"提取电话数: {final_result['total_phones']}")
    logger.info(f"社交媒体平台数: {final_result['social_media_platforms']}")
    logger.info(f"是否有公司信息: {final_result['has_company_info']}")

    updated = dict(state)
    updated["final_result"] = final_result
    updated["messages"] = state["messages"] + [{"role": "system", "content": "处理完成"}]
    return updated


# Conditional routing functions
def should_skip_agent_processing(state: CrawlerState) -> str:
    """Route: 'skip_to_save' when agent processing is flagged off, else 'continue_agent'."""
    if state.get("skip_agent_processing", False):
        logger.info("[⚡] 跳过Agent处理，直接保存数据")
        return "skip_to_save"
    logger.info("[🤖] 继续Agent商业信息提取")
    return "continue_agent"


def should_continue_processing(state: CrawlerState) -> str:
    """Route: 'continue' with the next pending file, or 'finish' the run.

    Processing continues only while all three hold: files are still pending,
    the max_files budget is not exhausted, and the collected info is not yet
    complete (per is_info_complete).
    """
    if (state["pending_indices"]
            and state["current_count"] < state["max_files"]
            and not is_info_complete(state["extracted_info"])):
        return "continue"
    return "finish"


def html_fetch_success(state: CrawlerState) -> str:
    """Route on whether HTML content was obtained: 'success' or 'error'."""
    return "success" if state.get("html_content") else "error"


def get_next_file_index(state: CrawlerState) -> CrawlerState:
    """Pop the next pending HTML index and resolve its URL from the database.

    Returns the state unchanged when nothing is pending. Otherwise switches
    ``html_index``/``html_file_path`` to the next file, bumps the processed
    counter, and clears ``html_content`` so the fetch node re-runs.
    """
    if not state["pending_indices"]:
        return state

    next_index = state["pending_indices"][0]
    remaining_indices = state["pending_indices"][1:]

    html_cache_dir = "data/html_cache"
    html_file_path = f"{html_cache_dir}/{next_index}.html"

    # Resolve the URL for the next index; a failure is logged, not fatal.
    url = None
    session = None
    try:
        db_path = "data/uae_contacts.db"
        engine = create_engine(f'sqlite:///{db_path}', echo=False)
        Session = sessionmaker(bind=engine)
        session = Session()

        contact = session.query(Contact).filter_by(id=next_index).first()
        if contact:
            url = contact.url
    except Exception as e:
        logger.error(f"获取文件{next_index}对应URL失败: {e}")
    finally:
        # BUGFIX: the original closed the session inside the try body, leaking
        # it whenever the query raised. Always release it here.
        if session is not None:
            session.close()

    logger.info(f"切换到下一个文件: {next_index} ({state['current_count'] + 1}/{len(state['pending_indices']) + state['current_count'] + 1})")

    return {
        **state,
        "html_index": next_index,
        "html_file_path": html_file_path,
        "url": url,  # URL for the new index (or None)
        "pending_indices": remaining_indices,
        "current_count": state["current_count"] + 1,  # one more file counted
        "html_content": None,  # force the fetch node to load the new file
        "messages": state["messages"] + [{"role": "system", "content": f"切换到文件索引: {next_index}"}]
    }