""" 
计算各年份，各会议的assay数量，并存于cvf_main_table

"""
import scrapy
from urllib import parse
from acad_horizon_spiders.utils import jsonbd_util
class CountTotalAssaySpider(scrapy.Spider):
    """Count the number of papers (assays) per conference and per year on
    openaccess.thecvf.com and persist the counts into ``cvf_main_table``.

    Each entry of ``cvf_main_table`` is expected to carry at least the keys
    ``'meeting'``, ``'time'`` and ``'mainlink'``; this spider adds/updates an
    ``'assay_num'`` key on each entry it processes.
    """

    name = "count_total_assay"
    allowed_domains = ["openaccess.thecvf.com"]
    start_urls = ["https://openaccess.thecvf.com"]

    # Loaded once at class-creation time; mutated in place by the callbacks
    # below and written back through jsonbd_util.update().
    cvf_main_table = jsonbd_util.get_value('cvf_main_table')

    def start_requests(self):
        """Dispatch one request per table entry, choosing the callback that
        matches how the CVF site lays out that conference/year's page."""
        for i, entry in enumerate(self.cvf_main_table):
            meeting = entry['meeting']
            time = entry['time']
            meta = {'index': i}
            # CVPR 2018-2020 and ICCV 2019: the main page only links to
            # per-day sub-pages, so the count must be summed across them.
            if (meeting == 'CVPR' and 2018 <= time <= 2020) or \
               (meeting == 'ICCV' and time == 2019):
                yield scrapy.Request(
                    url=parse.urljoin(self.start_urls[0], entry['mainlink']),
                    callback=self.parse_other,
                    meta=meta,
                )
            # ICCV/CVPR after 2020: a single page lists every paper when the
            # '?day=all' query parameter is appended.
            elif meeting in ('ICCV', 'CVPR') and time > 2020:
                yield scrapy.Request(
                    url=parse.urljoin(self.start_urls[0],
                                      entry['mainlink'] + '?day=all'),
                    callback=self.parse,
                    meta=meta,
                )
            # Every other conference/year lists all papers on the main page.
            else:
                yield scrapy.Request(
                    url=parse.urljoin(self.start_urls[0], entry['mainlink']),
                    callback=self.parse,
                    meta=meta,
                )

    def parse(self, response):
        """Count the <dt> nodes (one per paper) on a single listing page and
        persist the count for the table entry given by meta['index']."""
        index = response.meta['index']
        count = len(response.xpath('//dt').getall())
        self.logger.info('%s -> %d papers', response.url, count)
        self.cvf_main_table[index].update({'assay_num': count})
        jsonbd_util.update('cvf_main_table', self.cvf_main_table)

    def parse_other(self, response):
        """Handle main pages that only link to per-day sub-pages: collect the
        sub-page links and start a sequential crawl over them."""
        urls = response.xpath('//dd/a/@href').getall()
        if not urls:
            # No sub-pages found: record zero instead of crashing on urls[0]
            # and leaving this entry without an 'assay_num' value.
            self.cvf_main_table[response.meta['index']].update({'assay_num': 0})
            jsonbd_util.update('cvf_main_table', self.cvf_main_table)
            return
        yield scrapy.Request(
            url=parse.urljoin(self.start_urls[0], urls[0]),
            callback=self.parse_countAssay,
            meta={
                'sum': 0,                       # running paper count
                'urls': urls,                   # all sub-page links
                'index': response.meta['index'],
                'urls_i': 0,                    # position within urls
            },
        )

    def parse_countAssay(self, response):
        """Accumulate the paper count across per-day sub-pages; on the last
        sub-page, persist the running total for this table entry."""
        # Local renamed from `sum` to avoid shadowing the builtin; the meta
        # key stays 'sum' for compatibility with parse_other.
        total = response.meta['sum'] + len(response.xpath('//dt').getall())
        urls = response.meta['urls']
        index = response.meta['index']
        urls_i = response.meta['urls_i']
        self.logger.info('%s (%d/%d)', response.url, urls_i + 1, len(urls))
        if urls_i == len(urls) - 1:
            self.cvf_main_table[index].update({'assay_num': total})
            jsonbd_util.update('cvf_main_table', self.cvf_main_table)
        else:
            yield scrapy.Request(
                url=parse.urljoin(self.start_urls[0], urls[urls_i + 1]),
                callback=self.parse_countAssay,
                meta={
                    'sum': total,
                    'urls': urls,
                    'index': index,
                    'urls_i': urls_i + 1,
                },
            )