# -*- coding: utf-8 -*-
import scrapy
import re
from myspider.items import areaitem

class AreaSpider(scrapy.spiders.Spider):
    """Crawl Baidu Baike pages for administrative areas.

    ``parse`` schedules one request per (area code, page path) pair; for each
    page, ``parse_area`` yields an ``areaitem`` with the area code, lemma
    title, plain-text summary, and the key/value pairs of the basic-info box.
    """

    name = "quyu2"
    allowed_domains = ["baike.baidu.com"]
    start_urls = ["https://baike.baidu.com/item/%E9%87%8D%E5%BA%86/23586"]

    # Citation markers like "[1]" or "[1-12]" that Baike embeds in text.
    # Compiled once instead of re-compiling inside the extraction loop.
    _CITE_RE = re.compile(r"\[\d*-?\d*\]")

    def parse(self, response):
        """Schedule one ``parse_area`` request per configured area.

        The mapping below is area code -> Baike item path; entries are
        commented out here and are filled in before a real run.
        """
        areas = {
            #'361129': '万年县/1325067'
            #'321281': '兴化/74502'
            #'150581':'霍林郭勒'
        }
        for code, path in areas.items():
            url = "https://baike.baidu.com/item/" + path
            # cb_kwargs forwards the area code to the callback without the
            # late-binding lambda workaround the old code needed.
            yield scrapy.Request(
                url,
                callback=self.parse_area,
                cb_kwargs={'code': code},
                dont_filter=True,
            )

    def parse_area(self, response, code):
        """Extract title, summary, and basic-info table from one Baike page.

        :param response: the downloaded page.
        :param code: the administrative area code passed via ``cb_kwargs``.
        :yields: a populated ``areaitem``.
        """
        name = response.xpath(
            '//dd[@class="lemmaWgt-lemmaTitle-title"]//h1/text()'
        ).extract_first()

        summary_parts = response.xpath('//div[@class="lemma-summary"]/*').extract()
        # Join the summary fragments, then drop citation markers.
        desc = self._CITE_RE.sub("", ''.join(summary_parts))

        keys = response.xpath(
            '//div[@class="basic-info cmn-clearfix"]//dt[@class="basicInfo-item name"]'
        ).extract()
        values = response.xpath(
            '//div[@class="basic-info cmn-clearfix"]//dd[@class="basicInfo-item value"]'
        ).extract()
        # zip stops at the shorter list; the old while-loop indexed the name
        # list with the value list's length and could raise IndexError when
        # the two lists differ in size.
        info = [
            {self.clear_html_re(k): self.clear_html_re(self._CITE_RE.sub("", v))}
            for k, v in zip(keys, values)
        ]

        item = areaitem()
        item['code'] = code
        item['name'] = name
        item['desc'] = self.clear_html_re(desc)
        item['info'] = info
        yield item

    def clear_html_re(self, src_html):
        """Return *src_html* with HTML tags and all whitespace removed."""
        no_tags = re.sub(r"</?(.+?)>", "", src_html)  # strip tags
        return re.sub(r"\s+", "", no_tags)  # strip whitespace