from eolcrawl.spiders.comspider import ComlistSpider
from eolcrawl.spiderutils.common import build_url
import os
from scrapy_playwright.page import PageMethod
from eolcrawl.spiders.comspider import set_spider_feeds
import scrapy
from scrapy import Spider, Request

# https://gitee.com/login?redirect_to_url=https%3A%2F%2Fgitee.com%2Forganizations%2Fagiros%2Fprojects
class AgirosRealseCodeSpider(ComlistSpider):
    """Crawl the AGI ROS organization's project list on gitee.com.

    Logs into Gitee through Playwright using credentials from the
    environment, then walks the paginated project list in a single
    browser page (clicking "next" in-page) and yields one dict per
    repository: name, org_name, url, status.
    """

    # NOTE(review): "realse" in name/class looks like a typo for "release";
    # kept unchanged because the spider name is an external identifier.
    name = "agiros_realse_code"
    allowed_domains = ["gitee.com"]
    start_urls = ["https://gitee.com/organizations/agiros/projects"]
    site_name = "gitee.com"
    category = ""
    type = 'query_spider'  # spider classification consumed by the surrounding project
    # ROS branch and repo-name prefix used to build each exported item's "name".
    branch = os.getenv("ROS_BRANCH", "humble")
    ros_prefix = os.getenv("ROS_REPOS_NAME", "ros")
    # Gitee credentials; defaulting to "" means a missing env var fails at
    # login time rather than at import time.
    user_name = os.getenv("GITEE_USER_NAME", "")
    pw = os.getenv("GITEE_USER_PW", "")
    is_debug = False  # when True, stop crawling after page 2

    def __init__(self, name=None, **kwargs):
        super(AgirosRealseCodeSpider, self).__init__(name, **kwargs)
        self.logger.info(f"======{self.ros_prefix}======{self.branch}=========")
        # Entry callback used by the ComlistSpider machinery.
        self.start_request_callback = self.parse_loop

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider and attach the project's feed-export settings."""
        spider = super(AgirosRealseCodeSpider, cls).from_crawler(crawler, *args, **kwargs)
        set_spider_feeds(spider, crawler)
        return spider

    def start_requests(self):
        """Open the Gitee login page and submit the credential form via Playwright."""
        yield Request(
            url="https://gitee.com/login",
            meta={
                "playwright": True,
                "playwright_include_page": True,
                "playwright_page_methods": [
                    PageMethod("fill", 'input#user_login', self.user_name.strip()),
                    PageMethod("fill", 'input#user_password', self.pw.strip()),
                    PageMethod("click", 'input[type="submit"]'),  # submit the login form
                    PageMethod("wait_for_load_state", "networkidle"),
                ],
            },
            callback=self.after_login,
        )

    async def after_login(self, response):
        """After a successful login, request each start URL in a fresh page.

        BUGFIX: the login request was made with ``playwright_include_page``
        set but the page was never closed, leaking one browser page per
        crawl.  Close it here before fanning out to the start URLs.
        """
        login_page = response.meta.get("playwright_page")
        if login_page and not login_page.is_closed():
            await login_page.close()

        for url in self.start_urls:
            yield scrapy.Request(
                url=url,
                meta={
                    "playwright": True,
                    "playwright_include_page": True,
                    "playwright_page_methods": [
                        PageMethod("wait_for_selector", ".project-title .repository"),
                    ],
                },
                callback=self.parse_loop,
                errback=self.errback,
            )

    async def scroll_page(self, page):
        """Scroll to the bottom of *page* and give lazy content time to load."""
        await page.evaluate('window.scrollTo(0, document.body.scrollHeight)')
        await page.wait_for_timeout(1000)

    async def parse_loop(self, response):
        """Walk the paginated list by clicking "next" inside one Playwright page.

        Yields the items produced by :meth:`parse_items` for every page,
        and always closes the Playwright page on exit.
        """
        self.logger.debug(f"开始处理页面: {response.url}")
        page = response.meta.get('playwright_page')

        if not page:
            self.logger.error("未获取到页面对象")
            return

        try:
            while True:
                # Snapshot the live DOM into a Scrapy response so the usual
                # CSS selectors can be used on it.
                content = await page.content()
                current_response = scrapy.http.TextResponse(
                    url=page.url,
                    body=content.encode('utf-8'),
                    encoding='utf-8',
                )

                current_page = current_response.css('.pagination .active.item::text').get()
                self.logger.debug(f"正在处理页码: {current_page}")

                # BUGFIX: current_page is None when no pagination widget is
                # present (single-page lists); int(None) raised TypeError here
                # whenever is_debug was enabled.
                if self.is_debug and current_page and int(current_page) > 2:
                    break

                for item in self.parse_items(current_response):
                    yield item

                next_button = await page.query_selector('a.icon.item[rel="next"]')
                if not next_button:
                    self.logger.info(f"已到达最后一页 {current_page}")
                    break

                self.logger.debug("点击下一页按钮")
                # no_wait_after: don't block on post-click navigation events;
                # we wait explicitly for the DOM and the list selector below.
                await next_button.click(no_wait_after=True)
                await page.wait_for_load_state("domcontentloaded")
                await page.wait_for_selector(".project-title .repository")

        except Exception as e:
            self.logger.error(f"处理页面时出错: {str(e)}")
        finally:
            # Always release the browser page, even on error.
            if page and not page.is_closed():
                await page.close()
                self.logger.debug("页面已关闭")

    def parse_items(self, response):
        """Yield one item dict per repository link found in *response*."""
        for pro in response.css(".project-title .repository"):
            release_name = pro.css("::text").get()
            # BUGFIX: anchors without a text node yielded None here, which
            # crashed the string concatenation below; skip them instead.
            if not release_name:
                continue
            full_name = self.ros_prefix + "-" + self.branch + "-" + release_name
            release_url = build_url(response.url, pro.css("::attr(href)").get())

            yield {
                "name": full_name,
                "org_name": release_name,
                "url": release_url,
                "status": 0,
            }

    async def errback(self, failure):
        """Log a failed request and close its Playwright page, if any."""
        self.logger.error(f"请求失败: {failure.value}")
        page = failure.request.meta.get("playwright_page")
        if page:
            await page.close()