# -*- coding: utf-8 -*-
import scrapy
from myspider.items import areaimgitem

class JingQuSpider(scrapy.spiders.Spider):
    """Collect album pictures for Chinese administrative areas from Baidu Baike.

    Flow: for each area code in the hard-coded table, request the area's
    Baike page (`parse` -> `parse_area`), follow the summary picture into
    the photo album (`parse_area` -> `parse_picpage`), then fetch up to
    ``MAX_PICS_PER_AREA`` unique picture pages and emit one ``areaimgitem``
    per image found (`parse_pic`).
    """

    name = "quyutu1"
    allowed_domains = ["baike.baidu.com"]
    start_urls = ["https://baike.baidu.com/item/%E9%87%8D%E5%BA%86/23586"]

    # Maximum number of album pictures fetched per area (was hard-coded 3).
    MAX_PICS_PER_AREA = 3

    def parse(self, response):
        """Schedule one Baike area-page request per administrative code.

        Keys are 6-digit area codes; values are Baike item paths of the
        form "name/id" (one entry has no id suffix, see note below).
        The initial ``response`` for ``start_urls`` is not inspected.
        """
        area_pages = {
            # Tianjin
            '120102': '河东区/31195',
            # Hebei
            '130802': '双桥区/5344248',
            '130702': '桥东区/1465101',
            '130703': '桥西区/1357431',
            '130726': '蔚县/1840432',
            '130102': '长安区/10697495',
            '130503': '桥西区/1357417',
            '130502': '桥东区/1465085',
            # Liaoning ('210404' skipped: no Baike page)
            '211204': '清河区/10100481',
            '210303': '铁西区/8594573',
            # Jilin
            '220302': '铁西区/8594589',
            '220402': '龙山区/10565039',
            # Jiangsu
            '320302': '鼓楼区/10574900',
            '320612': '通州区/3947600',
            # Zhejiang
            '330903': '普陀区/8706680',
            # Fujian
            '350102': '鼓楼区/10574917',
            '360825': '永丰县/5113648',
            '360803': '青原区/8473792',
            '360102': '东湖区/11005834',
            '360103': '西湖区/10860641',
            # Shandong
            '370402': '市中区/2758321',
            '371312': '河东区/3149612',
            # NOTE(review): '410322' has no "/id" suffix unlike the rest —
            # confirm the bare-name URL still resolves to the intended page.
            '410322': '孟津县',
            '410204': '鼓楼区/10574910',
            '411121': '舞阳县/1517657',
            '410402': '新华区/7646740',
            '430802': '永定区/781705',
        }

        for code, path in area_pages.items():
            url = "https://baike.baidu.com/item/" + path
            # Bind `code` as a lambda default so each callback keeps its own
            # value (avoids the late-binding closure pitfall).
            yield scrapy.Request(
                url,
                callback=lambda response, code=code: self.parse_area(response, code),
                dont_filter=True,
            )

    def parse_area(self, response, code):
        """Follow the summary picture of an area page into its photo album.

        Skips the area silently when the page has no summary picture
        (previously this crashed with AttributeError on ``None``).
        """
        link = response.xpath('//div[@class="summary-pic"]/a/@href').extract_first()
        if not link:
            return
        # The href looks like "/pic/<name>/<id>/..."; keep only the first two
        # path segments to build the album landing URL.
        first_slash = link.find('/', 5)
        second_slash = link.find('/', first_slash + 1)
        album_url = 'https://baike.baidu.com' + link[:second_slash]
        yield scrapy.Request(
            album_url,
            callback=lambda response, code=code: self.parse_picpage(response, code),
            dont_filter=True,
        )

    def parse_picpage(self, response, code):
        """Request the first few unique picture pages of an album.

        Picture indices passed to ``parse_pic`` are 1-based.
        """
        baseurl = 'https://baike.baidu.com'
        links = response.xpath('//div[@class="pic-list"]/a/@href').extract()
        # Order-preserving de-duplication (dicts keep insertion order).
        unique_links = list(dict.fromkeys(links))
        for pic_index, href in enumerate(unique_links[:self.MAX_PICS_PER_AREA], start=1):
            yield scrapy.Request(
                baseurl + href,
                callback=lambda response, code=code, index=pic_index: self.parse_pic(response, code, index),
                dont_filter=True,
            )

    def parse_pic(self, response, code, index):
        """Emit one ``areaimgitem`` for a picture page, if an image src exists."""
        imgsrc = response.xpath('//img[@id="imgPicture"]/@src').extract_first()
        # Covers both a missing node (None) and an empty src attribute.
        if not imgsrc:
            return
        item = areaimgitem()
        item['code'] = code
        item['index'] = index
        item['imgurl'] = imgsrc
        yield item