import scrapy
from acad_horizon_spiders.utils import jsonbd_util
from urllib import parse
from twisted.internet import defer
from acad_horizon_spiders.items import AssayDetailItem
import json
class SearchAssaySpider(scrapy.Spider):
    """Scrape paper listings from the CVF open-access site.

    Crawls a conference index page (e.g. CVPR 2024, all days) and collects,
    for one page (``pageIndex``/``pageSize``) of papers: title, conference
    name, year, authors, PDF link and abstract link.  The collected items
    are written to a JSON file via ``jsonbd_util.update_list``.
    """

    name = "search_assay"

    # Scrapy expects bare domain names here; a full URL entry is rejected
    # by the offsite middleware with a warning.
    allowed_domains = ["openaccess.thecvf.com"]

    # start_urls = ["https://openaccess.thecvf.com/menu",'https://openaccess.thecvf.com/menu_other.html']
    start_urls = ["https://openaccess.thecvf.com/CVPR2024?day=all"]
    # start_urls = ["https://openaccess.thecvf.com/ICCV2021?day=all"]

    def __init__(self, pageSize=10, pageIndex=1, time=2024, meetting='CVPR',
                 theme='', authors='', *args, **kwargs):
        """Store pagination and default filter parameters.

        :param pageSize: number of papers per page (coerced to int).
        :param pageIndex: 1-based page number (coerced to int).
        :param time, meetting, theme, authors: default filter values;
            ``time`` and ``meetting`` are overwritten per paper in parse().
        """
        super(SearchAssaySpider, self).__init__(*args, **kwargs)
        self.pageSize = int(pageSize)
        self.pageIndex = int(pageIndex)
        self.time = time
        self.meetting = meetting
        self.theme = theme
        self.authors = authors
        # Accumulator for scraped items.  Must be an instance attribute:
        # as a mutable CLASS attribute it was shared across all spider
        # instances, so repeated runs in one process would append to the
        # same list.
        self.assay_detail = []

    def parse(self, response):
        """Parse a conference listing page and store one item per paper.

        Slices the scraped lists according to pageIndex/pageSize, builds an
        ``AssayDetailItem`` per paper and persists the accumulated list as
        JSON.  Links are resolved against the response URL.
        """
        theme = response.xpath('//dt[@class="ptitle"]/a/text()').getall()
        # The "link2" div holds a brace-wrapped, newline-separated blob with
        # per-paper metadata (conference name and year among its lines).
        assaydetail = response.xpath('//dd/div[@class="link2"]/div/text()').getall()
        pdf_link = response.xpath("//dd[position()>1]/a[1]/@href").getall()
        abstract_link = response.xpath("//dt[@class='ptitle']/a/@href").getall()

        start_index = (self.pageIndex - 1) * self.pageSize
        # Clamp to the data actually present so an out-of-range page does
        # not raise IndexError (the original indexed unchecked).
        end_index = min(self.pageIndex * self.pageSize,
                        len(theme), len(assaydetail),
                        len(pdf_link), len(abstract_link))

        for i in range(start_index, end_index):
            # Strip the surrounding "{ ... }," and split into lines; by the
            # page's fixed layout, line 3 ends with "(<CONF>)" and line 5
            # ends with the four-digit year -- TODO confirm this layout
            # still holds for other conference years.
            temp = assaydetail[i]
            temp = temp.replace(' {', '')
            temp = temp.replace('},', '')
            temp = temp.split('\n')
            meetting = temp[3][-5:-1]
            time = temp[5][-4:]

            # //dt[i+1] selects the (i+1)-th <dt> among its siblings; its
            # first following <dd> carries hidden author-query inputs.
            authors = response.xpath(
                f'//dt[{i+1}]/following-sibling::dd[1]/form/input[@name="query_author"]/@value'
            ).getall()

            item = AssayDetailItem()
            item['id'] = i + 1
            item['theme'] = theme[i]
            item['time'] = time
            item['meetting'] = meetting
            item['pdf_link'] = response.urljoin(pdf_link[i])
            item['authors'] = authors
            item['abstract_link'] = response.urljoin(abstract_link[i])
            self.assay_detail.append(dict(item))

        # Persist everything collected so far for the front-end to read.
        jsonbd_util.update_list('../acad_horizon_front_platform/public/assay_detail1.json', 'assay_detail', self.assay_detail)

    def parse_getAssayDetail(self, response):
        """Callback for a single paper's detail page (currently unused).

        Fills in the authors and abstract on the item carried via
        ``response.meta`` and, once all expected detail pages have been
        collected, sorts the accumulated list by id and writes it to JSON.
        """
        authors = response.xpath('//i/text()').get()
        abstract = response.xpath("//div[@id='abstract']/text()").get()

        item = response.meta['item']
        item['authors'] = authors
        item['abstract'] = abstract
        self.assay_detail.append(dict(item))
        # Requests complete asynchronously, so persist only when the last
        # expected item has arrived.
        if len(self.assay_detail) == response.meta['assay_size']:
            self.assay_detail = sorted(self.assay_detail, key=lambda x: x['id'])
            jsonbd_util.update_list('assay_detail1.json', 'assay_detail', self.assay_detail)
            yield self.assay_detail
