
from datetime import datetime
import os
from typing import Any
from eolcrawl.spiders.comspider import ComdetailSpider
from eolcrawl.spiders.comspider import set_spider_feeds
from eolcrawl.database.roslink_models import RosLinkDB
from eolcrawl.items import RosReposItem

class RosPkgDownloadSpider(ComdetailSpider):
    """Scrape ROS package download listings from packages.ros.org.

    Each crawled page is an Apache-style directory index; one
    ``RosReposItem`` is yielded per file row (source name, download URL,
    release date, size). In non-debug mode the start URLs come from the
    ``RosLinkDB`` table, filtered by spider name, status 0 and branch.
    """

    name = "ros_pkg_download"
    allowed_domains = ["packages.ros.org"]
    start_urls = ["https://packages.ros.org/ros2/ubuntu/pool/main/r/ros-humble-webots-ros2-tesla/"]
    listfile_name = "ros_source"
    # Debug mode: crawl only the hard-coded start_urls and cap DB batches at 10.
    is_dbg = True
    # ROS distro branch used to filter DB rows; overridable via the environment.
    download_branch = os.getenv("ROS_BRANCH", "humble")

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Standard Scrapy factory hook; additionally wires per-spider feeds."""
        spider = super().from_crawler(crawler, *args, **kwargs)
        set_spider_feeds(spider, crawler)
        return spider

    def get_list_items_branch_fromdb(self, name, fields):
        """Fetch pending (status == 0) download links for *name* and branch.

        Args:
            name: spider/list name used as the DB lookup key.
            fields: field names to project out of each DB row.

        Returns:
            ``(urls, items)`` where *urls* is a flat list of row URLs and
            *items* is a list of ``[{field: value}, ...]`` records (one
            single-key dict per requested field, preserving the original
            record shape expected by callers). On any error an empty pair
            is returned and the error is logged.
        """
        try:
            results = RosLinkDB.get_download_link_by_spider_name_and_status(
                name, 0, self.download_branch
            )
            urls = []
            items = []
            for row in results or []:
                if "url" in row:
                    urls.append(row["url"])
                # .get() instead of row[field]: one row missing a field must
                # not abort the whole batch with a KeyError.
                items.append([{field: row.get(field)} for field in fields])
            return urls, items
        except Exception as e:
            self.logger.error(f"Error getting detail urls from db: {e}")
            return [], []

    def get_list_items(self, name, data_from='db'):
        """Return ``(urls, items)`` for the crawl; capped at 10 in debug mode."""
        fields = ['_id', 'url', 'name', 'branch']
        urls, items = self.get_list_items_branch_fromdb(name, fields)

        if self.is_dbg:
            items = items[:10]

        self.logger.info(f"get detail urls number:{len(items) if items else None }")
        return urls, items

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        if self.is_dbg:
            # Debug mode keeps the class-level start_urls; still define
            # list_datas so the attribute exists in both modes.
            self.list_datas = []
        else:
            self.start_urls, self.list_datas = self.get_list_items(name=self.listfile_name)

        # Detailed crawl statistics.
        self.stats = {
            'success_count': 0,
            'failed_count': 0,
            'start_time': datetime.now(),
            'attachment_count': 0,      # number of attachments seen
            'categories': {},           # per-category counters
            'error_urls': [],           # URLs that produced errors
            'total_content_length': 0,  # cumulative content length
        }

    def get_parse_kwargs(self, response, kwargs):
        """Unpack cb_kwargs matching get_list_items' fields list.

        Returns:
            ``(_id, url, name, branch)``; any key absent from *kwargs*
            (or *kwargs* itself empty) yields ``None`` for that slot.
        """
        _id = url = name = branch = None

        if kwargs:
            _id = kwargs.get('_id')
            url = kwargs.get('url')
            name = kwargs.get('name')
            branch = kwargs.get("branch")
        else:
            self.logger.warning(f"the kwargs is None, url:{response.url}")

        return _id, url, name, branch

    def parse(self, response, **kwargs: Any) -> Any:
        """Parse an Apache directory index page, yielding one item per file row."""
        # 'id' renamed to '_id' so the builtin is not shadowed.
        _id, url, name, branch = self.get_parse_kwargs(response, kwargs)

        # BUG FIX: CSS attribute selectors have no glob syntax, so the old
        # [src="/icons/*.gif"] matched only that literal string and the spider
        # yielded nothing. Use a prefix match and skip navigation rows.
        for icon in response.css('td>img[src^="/icons/"]'):
            icon_src = icon.attrib.get('src', '')
            if icon_src.endswith(('back.gif', 'blank.gif')):
                continue  # "Parent Directory" / header rows, not package files

            tds = icon.xpath("../../td")
            if len(tds) < 4:
                continue  # malformed row: missing the date/size columns

            src_name = tds[1].xpath('a/text()').get()
            # BUG FIX: the download link is the href attribute, not the
            # anchor text ('a[@href]/text()' re-extracted the filename).
            href = tds[1].xpath('a/@href').get()
            if not src_name or not href:
                continue
            src_url = response.urljoin(href)

            release_date = tds[2].xpath('text()').get()
            pkg_size = tds[3].xpath('text()').get()

            item = RosReposItem()
            item['name'] = name
            item['url'] = url
            item['branch'] = branch
            item['src_name'] = src_name.strip()
            item['src_url'] = src_url.strip()
            # Guard against None before strip(): missing cells must not crash.
            item['src_release'] = (release_date or "").strip()
            item['src_size'] = (pkg_size or "").strip()

            # Attachment lists currently carry just the source file itself.
            item['affix_name'] = [src_name]
            item['affix_url'] = [src_url]

            self.logger.debug(f"yielding item: {item}")
            yield item