# -*- coding: utf-8 -*-
import scrapy
import copy
from carnews.items import CategoryItem,NewsItem
from urllib.parse import urljoin
import re



class AutohomeSpider(scrapy.Spider):
    """Crawl news categories and per-category article lists from autohome.com.cn.

    Flow:
      parse           -> one request per navigation category
      parse_category  -> yields a NewsItem per article, then follows pagination
    """

    name = 'autohome'
    allowed_domains = ['autohome.com.cn']
    start_urls = ['https://www.autohome.com.cn/news/1/#liststart']

    def parse(self, response):
        """Parse the top navigation bar and schedule one request per category.

        The slice [1:-1] skips the first and last <li>, which are not real
        category links.
        """
        nav_list = response.xpath('//*[@id="ulNav"]/ul/li')[1:-1]
        for li in nav_list:
            href = li.xpath("./a/@href").extract_first()
            if not href:
                # Defensive: skip malformed nav entries instead of crashing
                # on "https:" + None.
                continue
            item = {
                'url': "https:" + href,
                'catname': li.xpath("./a/text()").extract_first(),
            }
            # deepcopy so each scheduled request owns an independent dict
            # (Scrapy requests are processed asynchronously).
            yield scrapy.Request(
                item['url'],
                callback=self.parse_category,
                meta={"item": copy.deepcopy(item)},
            )

    def parse_category(self, response):
        """Parse one category listing page.

        Yields one NewsItem per article <li>, then follows the "next page"
        link (re-entering this callback) until the last page, whose next
        link is "javascript:void(0);".
        """
        item = response.meta['item']

        # Pages whose URL contains 'liststart' (the seed listing pages) use
        # plain <li> entries; other pages tag real articles with the
        # data-artidanchor attribute.  The choice depends only on the page
        # URL, so compute it once instead of per-<ul>.
        if re.search('liststart', item['url']):
            li_selector = "./li"
        else:
            li_selector = "./li[@data-artidanchor]"

        # Iterating the SelectorList covers both the single-<ul> and the
        # multi-<ul> page layouts with one code path.
        ullist = response.xpath("//div[@id='auto-channel-lazyload-article']/ul")
        for ul in ullist:
            for li in ul.xpath(li_selector):
                listurl = li.xpath("./a/@href").extract_first()
                pic = li.xpath(
                    "./a/div[@class='article-pic']/img/@src"
                ).extract_first()

                newsitem = NewsItem()
                newsitem['caturl'] = item['url']
                newsitem['catname'] = item['catname']
                newsitem['ll_title'] = li.xpath("./a/h3/text()").extract_first()
                # Guard missing attributes: "https:" + None raises TypeError
                # and would abort the whole page.
                newsitem['ll_url'] = "https:" + listurl if listurl else None
                newsitem['ll_pics'] = "https:" + pic if pic else None
                yield newsitem

        # Pagination: extract_first() returns None when there is no next
        # link at all; the last page links to "javascript:void(0);".
        next_url = response.xpath(
            "//*[@id='channelPage']/a[@class='page-item-next']/@href"
        ).extract_first()
        if next_url and next_url != "javascript:void(0);":
            next_item = {
                'url': urljoin(item['url'], next_url),
                'catname': item['catname'],
            }
            yield scrapy.Request(
                next_item['url'],
                callback=self.parse_category,
                meta={"item": next_item},
            )