import os
from datetime import datetime
import re
from typing import Any

import scrapy
from playwright.async_api import Page
from scrapy.http import Response
from scrapy_playwright.page import PageMethod

from scrapy import signals

from eolcrawl.items import BaseDetailItem, MoelistItem
from eolcrawl.settings import PRO_SPIDER_ROOT,LOG_PATH
from eolcrawl.database.detailcontent_models import DetailContentDB
from eolcrawl.database.downloadlink_models import DownloadLinkDB
from eolcrawl.spiderutils.common import build_url, format_str, is_support_suffix, get_json_items,is_spider_debug
# from eolcrawl.utils.log_helper import get_logger
from eolcrawl.database.roslink_models import RosLinkDB

import logging
logging = logging.getLogger(__name__)


class ComlistSpider(scrapy.Spider):
    """List-page spider rendered through Playwright.

    Crawls list pages, optionally walks a left-hand navigation bar,
    yields one MoelistItem per list entry, and follows "下一页"
    (next page) links when not running in debug mode.
    """

    name = "comlist"
    type = 'listspider'
    # allowed_domains = ["jw.beijing.gov.cn"]
    start_urls = []
    site_name = None   # site label stamped onto every yielded item
    category = None    # optional category prefix applied to every item
    is_debug = is_spider_debug()
    requrls = None     # optional list of {"url": ..., "category": ...} request configs

    def req_urls(self):
        """Return the bare URLs from ``requrls`` ([] plus an error log when unset)."""
        if self.requrls is None:
            self.logger.error("requrls is None")
            return []
        return [entry["url"] for entry in self.requrls]

    def get_category(self, url):
        """Return the category configured in ``requrls`` for *url*, or ""."""
        for entry in self.requrls:
            if entry["url"] == url:
                return entry["category"]
        return ""

    async def scroll_page(self, page: Page) -> str:
        """Scroll *page* to the bottom (triggers lazy-loaded content) and return its URL.

        Bug fix: the original also constructed ``PageMethod("wait_for_load_state", ...)``
        objects here without registering or awaiting them — pure no-ops — so they
        have been removed.
        """
        await page.evaluate("window.scrollBy(0, document.body.scrollHeight)")
        return page.url

    def __init__(self, name=None, **kwargs):
        super().__init__(name, **kwargs)
        self.start_request_callback = self.parse_leftList
        self.stats = {
            'success_count': 0,
            'failed_count': 0,
            'start_time': datetime.now(),
        }
        # If no start_urls were provided, fall back to the requrls config.
        # Bug fix: the class default is [] (never None), so the original
        # ``if self.start_urls is None`` check could never fire; the original
        # comment ("如果start_urls为空" — "if start_urls is empty") shows a
        # truthiness check was intended.
        if not self.start_urls:
            self.start_urls = self.req_urls()

    def start_requests(self):
        """Issue a Playwright-rendered request for every start URL."""
        self.logger.info(f"Starting {self.name} spider...")
        for url in self.start_urls:
            self.logger.debug(f"start_requests:{url}")
            yield scrapy.Request(
                url,
                meta={
                    "playwright": True,
                    'download_timeout': 120,
                    "playwright_page_methods": [
                        # More reliable load strategy: wait for the body,
                        # then for network idle, then scroll to the bottom.
                        PageMethod("wait_for_selector", "body"),
                        PageMethod("wait_for_load_state", "networkidle", timeout=30000),
                        PageMethod(self.scroll_page),
                    ],
                },
                callback=self.start_request_callback,
                errback=self.errback_httpbin,  # request-failure handler
            )

    async def errback_httpbin(self, failure):
        """Log a failed request (errback for start_requests)."""
        self.logger.error(f'Request failed: {failure.request.url}')
        self.logger.error(f'Failure: {failure.value}')

    async def parse(self, response, **kwargs):
        """Parse one list page: yield a MoelistItem per <li>, then follow pagination.

        kwargs may carry ``current_category`` (set when arriving via the left
        navigation in parse_leftList).
        """
        try:
            self.logger.info(f"start parse url:{response.url}")

            current_category = kwargs.get("current_category")
            content = response.xpath("//div[@class='moe-list']/ul/li")
            if current_category == '教育部文件':
                # This section uses a different list markup.
                content = response.xpath("//div[@class='scy_lbsj-right-nr']/ul/li")
            category = format_str(response.xpath("//div[@class='moe-list-title']/h2/text()").get())
            if current_category:
                category = current_category + '|' + category

            self.logger.debug(f"start parse url: {response.url},category: {category}")

            if not content:  # nothing matched the list selectors
                self.logger.error(f"No content found for URL: {response.url}")
                return

            for li in content:
                _item = MoelistItem()
                href = format_str(li.xpath("a[@href]/@href").get())

                _item["father_url"] = response.url
                _item["site_name"] = self.site_name
                _item["url"] = build_url(response.url, href)
                _item["title"] = format_str(li.xpath("a[@href]/text()").get())
                _item["release_date"] = format_str(li.xpath("span/text()").get())
                _item["category"] = self.category + "|" + category if self.category else category

                yield _item

            # Outside debug mode, follow the "next page" link.
            if not self.is_debug:
                next_page = format_str(response.xpath("//div[@class='scy_tylb_fy-nr']//a[text()='下一页']/@href").get())
                # Skip javascript pseudo-links and the self-referencing index page.
                if next_page is not None and (not next_page.startswith("javascript:") and next_page != 'index.html'):
                    self.logger.info(f"next_page =======url:{response.url} , next_page：{next_page}++++++++++")
                    yield response.follow(next_page,
                                        meta={
                                            "playwright": True,
                                            "playwright_page_methods": [
                                                PageMethod(self.scroll_page),
                                            ],
                                        },
                                        cb_kwargs={"current_category": current_category},
                                        callback=self.parse)
                else:
                    self.logger.info(f"no next_page url:{response.url}")

        except Exception as e:
            self.logger.error(f"Error parsing {response.url}: {str(e)}")
            raise

    async def parse_leftList(self, response, **kwargs):
        """Entry callback: walk the left-hand navigation when present, else parse directly."""
        # Two known sidebar layouts.
        left_list1 = response.xpath("//div[@class='sidebar mhide']")
        left_list2 = response.xpath("//div[@class='scy_lbsj-left']")
        if len(left_list1) or len(left_list2):
            left_list = left_list1.xpath("./ul/li/a") if len(left_list1) > 0 else left_list2.xpath("./ul/li/a")
            for link in left_list:
                in_category = format_str(link.xpath("./text()").get())
                href = format_str(link.xpath("@href").get())
                href = build_url(response.url, href)
                self.logger.debug(f"call parse url:{href}, category{in_category}")

                if href:
                    yield response.follow(href,
                                          meta={
                                              "playwright": True,
                                              "playwright_page_methods": [
                                                  PageMethod("wait_for_load_state", "load"),
                                              ],
                                          },
                                          cb_kwargs={"current_category": in_category},
                                          callback=self.parse)

            # The landing page itself belongs to the currently highlighted entry.
            current_category = left_list.xpath("../../li[@class='showon']/a/text()").get()
            async for item in self.parse(response, **{'current_category': current_category}):
                yield item
        else:
            async for item in self.parse(response):
                yield item

    async def spider_error(self, failure, response, spider):
        """spider_error signal handler: log the failure and release Playwright resources.

        NOTE(review): the original passed no handler to
        close_playwright_resources even though the parameter was required
        (TypeError at runtime); the parameter now defaults to None and this
        call degrades to a log message instead of crashing.
        """
        self.logger.error(f'Spider error: {failure}')
        await self.close_playwright_resources()

    async def spider_closed(self, spider):
        """spider_closed signal handler: shut down Playwright and report run statistics."""
        self.logger.info(f'End {spider.name} , Spider closing')
        try:
            # Locate the scrapy-playwright handler via the downloader middlewares.
            playwright_middleware = next(
                (mw for mw in spider.crawler.engine.downloader.middleware.middlewares
                 if hasattr(mw, 'browser_handler')),
                None)
            if playwright_middleware:
                await self.close_playwright_resources(playwright_middleware.browser_handler)
            else:
                self.logger.info('Playwright middleware not found')

        except Exception as e:
            self.logger.error(f'Error closing Playwright: {e}', exc_info=True)

        duration = datetime.now() - self.stats['start_time']

        self.logger.info(f"""
        Spider Statistics:
        - Success: {self.stats['success_count']}
        - Failed: {self.stats['failed_count']}
        - Duration: {duration}
        """)

    async def close_playwright_resources(self, handler=None):
        """Close Playwright contexts, browser, and driver owned by *handler*.

        Bug fix: *handler* now defaults to None (spider_error invoked this
        method without arguments, raising TypeError); a missing handler is
        logged and ignored.
        """
        import asyncio
        if handler is None:
            self.logger.info('No Playwright handler supplied; nothing to close')
            return
        try:
            # Close every open browser context concurrently.
            if handler.contexts:
                tasks = []
                for context in handler.contexts:
                    try:
                        tasks.append(context.close())
                    except Exception as e:
                        self.logger.error(f'Error closing context: {e}', exc_info=True)
                await asyncio.gather(*tasks, return_exceptions=True)
                self.logger.info('Successfully closed all Playwright contexts')

            # Close the browser.
            if handler.browser:
                try:
                    await handler.browser.close()
                    self.logger.info('Successfully closed Playwright browser')
                except Exception as e:
                    self.logger.error(f'Error closing browser: {e}', exc_info=True)

            # Stop the Playwright driver itself.
            if handler.playwright:
                try:
                    await handler.playwright.stop()
                    self.logger.info('Successfully stopped Playwright')
                except Exception as e:
                    self.logger.error(f'Error stopping Playwright: {e}', exc_info=True)

            self.logger.info('Successfully closed Playwright resources')
        except Exception as e:
            self.logger.error(f'Error closing Playwright resources: {e}', exc_info=True)

    def update_stats(self, success=True):
        """Increment the success or failure counter."""
        if success:
            self.stats['success_count'] += 1
        else:
            self.stats['failed_count'] += 1


class ComdetailSpider(scrapy.Spider):
    """Detail-page spider: fetches article pages collected by a list spider
    (from the DB or exported JSONL files) and yields one BaseDetailItem per page."""

    name = "comdetail"
    type = 'detailspider'
    listfile_name = ""   # name of the list spider whose exported links feed this spider
    # allowed_domains = ["jw.beijing.gov.cn"]
    start_urls = []
    is_dbg = False       # debug mode: use the pre-set class-level start_urls as-is
    list_datas = []      # per-URL metadata, aligned index-for-index with start_urls

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        # In debug mode keep whatever start_urls was pre-set on the class
        # (the original re-assigned start_urls to itself, a no-op, now removed);
        # otherwise load URLs and their metadata from the list spider's output.
        if not self.is_dbg:
            self.start_urls, self.list_datas = self.get_list_items(self.listfile_name)

        # Detailed run statistics.
        self.stats = {
            'success_count': 0,
            'failed_count': 0,
            'start_time': datetime.now(),
            'attachment_count': 0,      # number of attachments found
            'categories': {},           # item count per category
            'error_urls': [],           # failed URLs with error messages
            'total_content_length': 0,  # cumulative content length
        }

    def get_parse_kwargs(self, response, kwargs):
        """Unpack the per-URL metadata (category, site_name, father_url, brief)
        that start_requests passed through cb_kwargs; all default to None."""
        category = None
        site_name = None
        father_url = None
        brief = None

        # These keys must match the field list used in get_list_items.
        if kwargs:
            category = kwargs.get('category')
            site_name = kwargs.get('site_name')
            father_url = kwargs.get('father_url')
            brief = kwargs.get("brief")
        else:
            self.logger.warning(f"the kwargs is None, url:{response.url}")

        return category, site_name, father_url, brief

    def start_requests(self):
        """Request every start URL, forwarding its aligned list_datas entry via cb_kwargs."""
        if len(self.start_urls) == 0:
            self.logger.warning("the start_urls is empty,not download url.")

        self.logger.info(f"The number {len(self.start_urls) if self.start_urls else 0} urls start requests,spider name:{self.name}")

        for index, url in enumerate(self.start_urls):
            # Merge the list of single-key dicts into one kwargs dict.
            kwargs = {}
            if len(self.list_datas) > index:
                for item in self.list_datas[index]:
                    kwargs.update(item)

            yield scrapy.Request(
                url,
                callback=self.parse,
                cb_kwargs=kwargs if kwargs else None
            )

    def get_title(self, response):
        """Extract and normalize the page title from div.title text nodes.

        NOTE(review): the original applied format_str twice; one call is kept
        on the assumption that format_str is idempotent — confirm if unsure.
        """
        parts = response.xpath('//div[@class="title"]/text()[not(ancestor::script) and not(ancestor::style)]').getall()
        return format_str(''.join(parts))

    def get_content(self, response):
        """Extract and normalize the article body text from the TRS editor div."""
        contents = response.xpath('//div[@class="TRS_Editor"]//text()[not(ancestor::script)]').getall()
        return format_str(''.join(contents))

    def get_info_content(self, response):
        """Return the metadata info block (div.info) selector."""
        return response.xpath('//div[@class="info"]')

    def get_release_date(self, response):
        """Extract the release date from the info block."""
        info = self.get_info_content(response)
        return format_str(info.xpath("./div[@class='time']/text()").get())

    def get_source(self, response):
        """Extract the publication source from the info block."""
        info = self.get_info_content(response)
        return format_str(info.xpath("./div[@class='origin']/text()").get())

    def get_source_url(self, response):
        """Extract the original-source link from the info block."""
        info = self.get_info_content(response)
        return format_str(info.xpath("./div[@id='originLink']/a/@href").get())

    def get_category(self, response):
        """Return the second-to-last breadcrumb entry as the page category ("" if absent)."""
        navs = response.xpath('//div[@class="nav"]//text()[not(ancestor::script) and not(ancestor::style)]').getall()
        navs_len = len(navs)
        if navs_len > 2:
            return format_str(navs[navs_len - 2])
        return ""

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Parse one detail page into a BaseDetailItem and update run statistics."""
        category, site_name, father_url, brief = self.get_parse_kwargs(response, kwargs)

        try:
            self.logger.info("Processing item page: %s", response.url)

            _details = BaseDetailItem()
            _details['url'] = response.url
            _details['title'] = self.get_title(response)
            _details['content'] = self.get_content(response)
            _details['release_date'] = self.get_release_date(response)
            _details['source'] = self.get_source(response)
            _details['source_url'] = self.get_source_url(response)

            # Bug fix: the original evaluated get_category() twice and raised
            # TypeError (None + str) when the cb_kwargs category was missing
            # but the page carried a breadcrumb category; it also produced a
            # leading "|" when category was "".
            page_category = self.get_category(response)
            if page_category:
                _details['category'] = category + "|" + page_category if category else page_category
            else:
                _details['category'] = category

            _details['site_name'] = site_name if site_name else ""
            _details['father_url'] = father_url if father_url else ""
            _details['affix_url'], _details['affix_name'] = self.get_affixs(response)
            _details['brief'] = brief if brief else ""
            self.update_stats(item=_details, success=True)
            yield _details

        except Exception as e:
            error_msg = f"Error parsing {response.url}: {str(e)}"
            self.logger.error(error_msg)
            self.update_stats(success=False, error_url=response.url, error_msg=error_msg)
            raise

    def get_affixs(self, response):
        """Collect attachment links from the article body; returns (urls, names)."""
        affixs = response.xpath('//div[@class="TRS_Editor"]//a[not(ancestor::script)]')
        return self.get_affixs_content(response.url, affixs)

    def get_affixs_content(self, resq_url, affixs):
        """Filter anchor selectors down to supported attachment URLs and their names."""
        if affixs is None or len(affixs) == 0:
            self.logger.debug(f"not exist div affix, url: {resq_url}")
            return [], []

        affix_urls = []
        affix_names = []

        for affix in affixs:
            url = format_str(affix.xpath("./@href").get())
            if is_support_suffix(url):
                href = build_url(resq_url, url)
                # Skip mail links and malformed double-scheme URLs.
                if href.startswith("mailto:") or href.startswith("http://http"):
                    continue
                affix_urls.append(href)

                affix_name = format_str(affix.xpath(".//text()").get())
                affix_names.append(affix_name)
            else:
                self.logger.error(f"the url is not support suffix, cur url {resq_url} , affix_urls:{url}")

        return affix_urls, affix_names

    def update_stats(self, item=None, success=True, error_url=None, error_msg=None):
        """Update run statistics for one parsed (or failed) item."""
        if success:
            self.stats['success_count'] += 1
            if item:
                # Per-category counts.
                category = item.get('category', 'unknown')
                self.stats['categories'][category] = self.stats['categories'].get(category, 0) + 1

                # Attachment count.
                if item.get('affix_url'):
                    self.stats['attachment_count'] += len(item['affix_url'])

                # Cumulative content length.
                if item.get('content'):
                    self.stats['total_content_length'] += len(item['content'])
        else:
            self.stats['failed_count'] += 1
            if error_url:
                self.stats['error_urls'].append({
                    'url': error_url,
                    'error': error_msg
                })

    async def spider_closed(self, spider):
        """Emit a detailed statistics report when the spider closes."""
        duration = datetime.now() - self.stats['start_time']
        avg_content_length = (
            self.stats['total_content_length'] / self.stats['success_count']
            if self.stats['success_count'] > 0 else 0
        )

        # Renamed from the misleading local ``success_count``: this is a rate.
        total = self.stats['success_count'] + self.stats['failed_count']
        success_rate = (self.stats['success_count'] / total * 100) if total > 0 else 0
        stats_report = f"""
        ============ Spider Statistics ============
        Spider Name: {self.name}
        Run Duration: {duration}
        
        Success Count: {self.stats['success_count']}
        Failed Count: {self.stats['failed_count']}
        Success Rate: {success_rate}%
        
        Total Attachments: {self.stats['attachment_count']}
        Average Content Length: {avg_content_length:.2f} characters
        
        Category Distribution:
        {'-' * 40}
        """

        for category, count in self.stats['categories'].items():
            stats_report += f"\n{category}: {count}"

        if self.stats['error_urls']:
            stats_report += f"\n\nFailed URLs ({len(self.stats['error_urls'])}):\n"
            for error in self.stats['error_urls'][:10]:  # show at most 10 errors
                stats_report += f"\n- {error['url']}: {error['error']}"

            if len(self.stats['error_urls']) > 10:
                stats_report += f"\n... and {len(self.stats['error_urls']) - 10} more errors"

        stats_report += "\n==========================================="

        self.logger.info(stats_report)

        # Mark detail_content rows as downloaded (-1), awaiting upload.
        # update_detail_content_status(self.name, -1,0)

    async def spider_error(self, failure, response, spider):
        """spider_error signal handler: log the failure."""
        self.logger.error(f'Spider error: {failure}')

    def get_list_items(self, name, data_from='db'):
        """Load (urls, items) for *name* from the DB (default) or exported JSON files."""
        fields = ['category', 'site_name', 'father_url', 'brief']
        if data_from == 'db':
            urls, items = get_list_items_fromdb(name, fields)
        else:
            urls, items = get_list_items_fromjson(name, fields)

        logging.info(f"get detail urls number:{len(items) if items else None }")
        return urls, items


def get_list_items_fromjson(name, fields):
    """Load detail URLs and their per-URL field items from today's exported
    JSONL files for spider *name*; returns ([], []) when no export exists."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    export_dir = os.path.join(
        base_dir,
        "../../",
        f'{PRO_SPIDER_ROOT}/export_data/{name}/{datetime.today().date()}',
    )
    if not os.path.exists(export_dir):
        return [], []

    all_urls = []
    all_items = []
    for entry in os.listdir(export_dir):
        path = os.path.join(export_dir, entry)
        if not os.path.isfile(path):
            continue
        file_urls, file_items = get_json_items(path, fields=fields)
        all_items.extend(file_items)
        all_urls.extend(file_urls)

    return all_urls, all_items


def get_list_items_fromdb(name, fields, dbClass=RosLinkDB):
    """Fetch pending (status 0) download links for spider *name* from the DB.

    Returns (urls, items): items[i] is a list of one-key dicts, one per
    requested field, aligned with urls. On any DB error returns ([], []).
    """
    try:
        rows = dbClass.get_download_link_by_spider_name_and_status(name, 0) if dbClass else []
        urls = []
        items = []
        for row in rows or []:
            if "url" in row:
                urls.append(row["url"])
            items.append([{field: row[field]} for field in fields])
        return urls, items
    except Exception as e:
        logging.error(f"Error getting detail urls from db: {e}")
        return [], []


def update_detail_content_status(name=None, qstatus=None, status=0):
    """Update detail_content rows for spider *name* from *qstatus* to *status*.

    Intended to run when a spider finishes; re-raises on any DB failure.
    """
    try:
        db = DetailContentDB()
        db.update_status_by_spider_name(name, qstatus, status)
        logging.info(f"update {name} spider status {status},  rows update ,query status {qstatus}")
    except Exception as e:
        logging.exception(f"Error updating detail_content status: {e}")
        raise


def exist_in_downloadlin_db(id, spider_name):
    """Return True if a downloadlink row exists for (spider_name, id)."""
    downloadlink_db = DownloadLinkDB()
    ret = downloadlink_db.get_download_link(spider_name, id)
    # Idiom fix: bool() covers both a None result and an empty sequence,
    # replacing the verbose ``if ret and len(ret) > 0`` ladder.
    return bool(ret)


def exist_in_detail_content_db(title_id, spider_name):
    """Return True if a detail_content row exists for (title_id, spider_name)."""
    detail_content_db = DetailContentDB()
    ret = detail_content_db.get_detail_content(title_id, spider_name)
    # Idiom fix: bool() covers both a None result and an empty sequence,
    # replacing the verbose ``if ret and len(ret) > 0`` ladder.
    return bool(ret)


class MyCustomFilter:
    """Feed item filter: items are exported unless they carry an
    "export" field whose value is anything other than "true"."""

    def __init__(self, feed_options):
        self.feed_options = feed_options

    def accepts(self, item):
        """Accept when "export" is absent or explicitly set to "true"."""
        if "export" not in item:
            return True
        return item["export"] == "true"


def set_spider_feeds(spider, crawler):
    """Wire spider close/error signals and configure feed export plus the
    per-spider daily log file for a detail spider."""
    # Register spider lifecycle signal handlers.
    crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
    crawler.signals.connect(spider.spider_error, signal=signals.spider_error)

    out_file_cfg = crawler.settings.get('OUT_FILE')
    feeds_cfg = out_file_cfg['feeds'] if out_file_cfg else None
    if feeds_cfg:
        out_file = f'{PRO_SPIDER_ROOT}/export_data/{spider.name}/{datetime.today().date()}/%(name)s_%(batch_id)03d.jsonl'
        crawler.settings.set('FEEDS', {out_file: feeds_cfg['com']})
        crawler.settings.set('FEED_EXPORT_ENCODING', "utf-8")
        # Per-spider daily log file.
        crawler.settings.set('LOG_FILE', f"{LOG_PATH}/{spider.name}_{datetime.today().date()}.log")


def set_listspider_feeds(spider, crawler):
    """Wire spider close/error signals and configure feed export, user agent,
    timeouts, and the per-spider daily log file for a list spider."""
    # Register spider lifecycle signal handlers.
    crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
    crawler.signals.connect(spider.spider_error, signal=signals.spider_error)

    # Read the feed configuration from settings.
    out_file_cfg = crawler.settings.get('OUT_FILE')
    out_file_setting = out_file_cfg['feeds'] if out_file_cfg else None
    if out_file_setting:
        out_file = f'{PRO_SPIDER_ROOT}/export_data/{spider.name}/{datetime.today().date()}/%(name)s_%(batch_id)03d.jsonl'
        crawler.settings.set('FEEDS', {out_file: out_file_setting['com']})
        # Bug fix: the original indexed spider.start_urls[0] after only an
        # ``is not None`` check (via a dangling conditional expression), so an
        # empty start_urls list raised IndexError; guard with truthiness.
        if spider.start_urls:
            crawler.settings.set('USER_AGENT', f"comlist (+{spider.start_urls[0]})")
        crawler.settings.set('FEED_EXPORT_ENCODING', "utf-8")

        # Timeout settings.
        crawler.settings.set('DOWNLOAD_TIMEOUT', 60)  # seconds
        crawler.settings.set('PLAYWRIGHT_DEFAULT_NAVIGATION_TIMEOUT', 60000)  # milliseconds
        # Per-spider daily log file.
        crawler.settings.set('LOG_FILE', f"{LOG_PATH}/{spider.name}_{datetime.today().date()}.log")


def is_valid_issue_number(issue_number):
    """Check whether a string looks like a Chinese government document number.

    Accepted shapes (examples):
      残联发〔2009〕22号        — "<org>发〔YYYY〕N号"
      中华人民共和国教育部令第26号 — "<org>令第N号"
      教师〔2009〕1号 / 国发〔2015〕64号 — "<org>〔YYYY〕N号"
    """
    if not issue_number:
        return False
    normalized = format_str(issue_number).replace(" ", "")
    pattern = (
        r'^[\u4e00-\u9fa5]+发\〔\d{4}〕\d+号$'
        r'|^[\u4e00-\u9fa5]+令第\d+号$'
        r'|^[\u4e00-\u9fa5]+〔\d{4}〕\d+号$'
    )
    return bool(re.match(pattern, normalized))


if __name__ == '__main__':
    # Manual debugging entry point. Historically used for ad-hoc calls to
    # get_list_items_fromjson / get_list_items_fromdb /
    # update_detail_content_status; intentionally a no-op now.
    pass