import scrapy
from myabandonware.items import MyabandonwareItem
m=0 # module-level running counter: incremented per game in parse_page, stored as item['cursor']
class CategorySpider(scrapy.Spider):
    """Crawl myabandonware.com's alphabetical game index.

    Flow: ``parse`` reads the pagination <select> and queues every listing
    page; ``parse_page`` extracts each game entry on a listing page and
    queues its detail page; ``parse_detail`` scrapes the full record and
    yields a ``MyabandonwareItem``.
    """
    name = 'category'
    allowed_domains = ['myabandonware.com']
    start_urls = ['https://www.myabandonware.com/browse/name/']

    # Running counter over all games seen by this spider instance.
    # Replaces the old module-level ``global m`` so the state is owned by
    # the spider rather than the module.
    cursor = 0

    # Maps a gameInfo <th> header to (item field, xpath under the <tr>,
    # take-all flag). Genre/Theme keep every <a> text; the rest take the
    # first match only — mirrors the original per-header if-chain.
    _INFO_FIELDS = {
        "Year": ('YEAR', './td/a/text()', False),
        "Platform": ('PLATFORM', './td/a/text()', False),
        "Released in": ('RELEASED_IN', './td/text()', False),
        "Genre": ('GENRE', './td/a/text()', True),
        "Theme": ('THEME', './td/a/text()', True),
        "Publisher": ('PUBLISHER', './td/a/text()', False),
        "Developer": ('DEVELOPER', './td/a/text()', False),
    }

    def parse(self, response):
        """Queue one request per pagination option.

        The listing page exposes all page URLs as <option value="..."> in
        the #paginationSelect box; the page URL is carried to the next
        callback inside an item under meta["pageUrl"].
        """
        options = response.xpath('//select[@id="paginationSelect"]/option')
        for option in options:
            page_url = response.urljoin(option.xpath('./@value').extract_first())
            item = MyabandonwareItem()
            item['page'] = page_url
            yield scrapy.Request(
                url=page_url,
                callback=self.parse_page,
                meta={"pageUrl": item},
            )

    def parse_page(self, response):
        """Extract every game on one listing page and queue its detail page."""
        for div in response.xpath('//div[@class="items games"]/div'):
            name_link = div.xpath('.//a[@class="name c-item-game__name"]')
            # Rows without a game anchor (spacers/ads) are skipped.
            if name_link.xpath('./text()').extract_first() is None:
                continue
            detail_url = response.urljoin(name_link.xpath('./@href').extract_first())
            thumb_src = response.urljoin(name_link.xpath('..//img/@src').extract_first())

            self.cursor += 1
            item = MyabandonwareItem()
            item['thumbSrc'] = thumb_src
            item['cursor'] = self.cursor
            item['page'] = response.meta["pageUrl"]['page']
            yield scrapy.Request(
                url=detail_url,
                callback=self.parse_detail,
                meta={"item": item},
            )

    def parse_detail(self, response):
        """Scrape the full game record from a detail page and yield it."""
        carried = response.meta["item"]
        item = MyabandonwareItem()
        item['page'] = carried['page']
        item['thumbSrc'] = carried['thumbSrc']
        item['cursor'] = carried['cursor']

        # Title lives under #content on most layouts, #gaff on some.
        item['name'] = response.xpath('//*[@id="content"]/div[1]/h2/text()').extract_first()
        if item['name'] is None:
            item['name'] = response.xpath('//*[@id="gaff"]/div[1]/h2/text()').extract_first()

        # Game-info table: evaluate the <th> header ONCE per row (the
        # original re-ran the same xpath for each of the seven headers).
        for tr in response.xpath('//table[@class="gameInfo"]//tr'):
            header = tr.xpath('./th/text()').extract_first()
            spec = self._INFO_FIELDS.get(header)
            if spec is None:
                continue
            field, td_xpath, take_all = spec
            cell = tr.xpath(td_xpath)
            item[field] = cell.extract() if take_all else cell.extract_first()

        # Screenshot gallery; always present as a (possibly empty) list.
        # The original's `!= None` guard on a SelectorList was always true.
        item['Screenshot'] = [
            response.urljoin(href)
            for href in response.xpath('//div[@class="items screens"]/a/@href').extract()
        ]

        rater = response.xpath('//div[@class="gameRater"]')
        # Rating is encoded as an inline "width:NN%;" style. Guard against a
        # missing style attribute — the original called .replace() on the
        # raw extract_first() result and crashed with AttributeError here.
        rate_style = rater.xpath('.//div[@id="grRaA"]/@style').extract_first()
        if rate_style is not None:
            item['rate'] = rate_style.replace('width:', '').replace('%;', '')
        item["download"] = response.urljoin(
            rater.xpath('.//a[@class="button download"]/@href').extract_first())
        item['descripts'] = response.xpath('//div[@class="gameDescription dscr"]').getall()
        yield item


