import scrapy
from scrapy.spiders import Spider
from scrapy.selector import Selector

from tutorial.items import DmozItem
from pprint import pprint
import json
import urllib
from urllib.parse import urljoin
import jieba

# Province/city names loaded once at import time, one name per line.
# Use a context manager so the file handle is closed deterministically
# (the original `open(...).readlines()` leaked the handle).
with open("pro_list.txt", mode='r', encoding='UTF-8') as _pro_file:
    pro_list = [line.strip() for line in _pro_file]

class DmozSpider(scrapy.Spider):
    """Crawl the CAE (Chinese Academy of Engineering) member index and
    extract each member's name and home province/city into a DmozItem.
    """
    name = "dmoz"
    allowed_domains = ["cae.cn"]
    start_urls = [
        "http://www.cae.cn/cae/html/main/col48/column_48_1.html"
    ]

    def parse(self, response):
        """Parse the member index page.

        Yields one scrapy.Request per member link, carrying a
        partially-filled item (name only) through the request meta so
        the detail-page callback can complete it.
        """
        for sel in response.xpath('//li[@class="name_list"]/a'):
            items = DmozItem()
            items['name'] = sel.xpath("./text()").extract_first()  # unicode
            urlhref = sel.xpath('./@href').extract_first()
            # Links are relative; resolve against the index page URL.
            newurl = urljoin(response.url, urlhref)
            yield scrapy.Request(newurl, callback=self.getplaces,
                                 meta={'items': items})

    def getplaces(self, response):
        """Parse a member detail page and fill in the 'place' field.

        Takes the second sentence of the intro paragraph (the birthplace
        is normally mentioned there), segments it with jieba, and keeps
        the first token that — after stripping the administrative
        suffixes 省/市 — matches a known name in pro_list.  Falls back
        to the string 'None' when nothing matches.
        """
        items = response.meta['items']
        items['place'] = 'None'  # default when no province is found

        # First paragraph of the biography; extract_first() returns None
        # when the page lacks the expected structure (original crashed
        # with AttributeError on .split in that case).
        ptext = response.selector.xpath(
            '//div[@class="intro"]/p[1]/text()').extract_first()
        if ptext:
            sentences = ptext.split(u'。')
            # Guard against intros with fewer than two sentences
            # (original raised IndexError on [1] here).
            if len(sentences) > 1:
                for place in jieba.cut(sentences[1]):
                    # Strip 省 (province) / 市 (city) suffixes so tokens
                    # match the bare names loaded into pro_list.
                    place = place.replace(u"省", '')
                    place = place.replace(u"市", '')
                    print("place:", place)
                    if place in pro_list:
                        items['place'] = place
                        break
        pprint(items)
        yield items