import scrapy, hashlib, datetime
import pymongo
import re
from items import MyprojectItem
from mongo_collection import U_MongoDB



class TestSpiderTwo(scrapy.Spider):
    """Spider that reads stored section URLs from MongoDB and crawls their
    detail pages to extract per-chapter link lists.
    """

    name = 'Detail'

    def __init__(self) -> None:
        # BUG FIX: the original never called super().__init__(); scrapy.Spider
        # needs it to set up the spider name, logger, and crawler plumbing.
        super().__init__()
        # Handle to the 'spider' database; collections are looked up per-method.
        self.mongo = U_MongoDB(host='127.0.0.1', user='shilang', pwd='shilang').get(db_name='spider')

    def start_requests(self):
        """Seed one request per 'section_url' stored in the scrapy_list collection.

        BUG FIX: the original opened a new pymongo client via self.mongo_uri /
        self.mongo_db / self.mongo_collection — attributes that were never
        defined anywhere, so this method raised AttributeError on first call.
        Reuse the database handle built in __init__ instead (same collection
        parse_new_url already reads).
        """
        collection = self.mongo['scrapy_list']

        # Project only the field we need; skip the ObjectId.
        for url_doc in collection.find({}, {'_id': 0, 'section_url': 1}):
            yield scrapy.Request(url=url_doc['section_url'], callback=self.parse_new_url)
            break  # NOTE(review): only the first URL is crawled — presumably a debug limit; confirm

    def parse_new_url(self, response=None):
        """Debug callback: dump every stored section URL.

        BUG FIX: scrapy always passes the downloaded response to a request
        callback, but the original signature accepted no argument, so every
        scheduled request died with TypeError. `response` defaults to None so
        the direct no-argument call in the __main__ block keeps working.
        """
        sources = list(self.mongo['scrapy_list'].find({}, {'section_url': True}))
        print('these url-------', sources)

        # new_url = response.xpath("//a[@class='btn-more ly-fr']/@href").get()
        # if new_url:
        #     print("----------------------",new_url)
        #     yield scrapy.Request(
        #         url=new_url,
        #         callback=self.parse_detail
        #         )

    def parse_detail(self, response):
        """Extract the chapter list from a detail page, then yield an item and
        a summary record destined for MongoDB.
        """
        print("successful  -------parse_detail")
        list_section = []
        # Markers that identify a "chapter 1" link; the first pattern that
        # matches anchors the <li> list we scrape. NOTE(review): these are fed
        # into re:test() verbatim, so '.' matches any character — confirm that
        # is intended.
        role = [
            '1章', 'NO.1', '1.', '1:', '1话', '1、',
            '一章', 'NO.ONE', '一.', '一:', '一话', '一、',
        ]
        result = []
        for marker in role:
            result = response.xpath(f'//li/a[re:test(text(), "{marker}")]/../../li')
            if result:  # SelectorList is truthy when non-empty
                break

        for li in result:
            # BUG FIX: the original stored the raw SelectorList object as the
            # title; extract the actual text with .get() so the stored record
            # holds a plain string (None when the <li> has no anchor text).
            title = li.xpath('a/text()').get()
            if title is None:
                continue
            print('----------->>>>>>>>>', title)
            list_section.append({
                'title': title,
                'detail_url': li.xpath('./a/@href').get(),
                'crawled': 0,
            })

        print('======================', response.url)
        item = MyprojectItem()
        item['section_url'] = response.url
        yield item

        # BUG FIX: the original dict literal repeated the key 'f1' four times;
        # later duplicates silently overwrite earlier ones, so only a single
        # 'f1' entry ever existed. Keep exactly one.
        yield {
            'section_url': response.url,
            '章节列表': list_section,
            'url_md5': hashlib.md5(response.url.encode()).hexdigest(),
            'crawled': 2,
            'f1': '',
        }

if __name__ == '__main__':
    # Ad-hoc smoke test: build the spider and dump the URLs stored in MongoDB.
    spider = TestSpiderTwo()
    spider.parse_new_url()