import scrapy
import re
import json
from kafka import KafkaProducer
# Module-level Kafka producer shared by the spider below; JSON-serializes
# every payload. NOTE(review): this connects to the broker at import time —
# the module fails to import if Kafka at localhost:9092 is unreachable.
producer = KafkaProducer(bootstrap_servers='localhost:9092',value_serializer=lambda v: json.dumps(v).encode('utf-8'))
class Huangye88Spider(scrapy.Spider):
    """Crawl b2b.huangye88.com by region and by industry, follow listing
    pages to each company's detail page, and publish one JSON record per
    company to the ``huangye88`` Kafka topic via the module-level
    ``producer``.
    """

    name = "huangye88"
    start_urls = ["http://b2b.huangye88.com/region/"]

    def start_requests(self):
        # Issue the first request explicitly (instead of relying on
        # start_urls) so a cookiejar can be attached; every follow-up
        # request re-uses the same jar via response.meta.
        return [scrapy.Request(
            'http://b2b.huangye88.com/region/',
            meta={'cookiejar': 1},
            callback=self.parse,
        )]

    def _follow_links(self, response, selection):
        """Yield a follow-up Request for each <a> under *selection*.

        Skips empty and '#' hrefs; propagates the cookiejar.
        """
        for link in selection.css("a"):
            # extract_first(default='') avoids the IndexError the original
            # extract()[0] raised on an <a> without an href attribute.
            url = link.xpath('@href').extract_first(default='')
            print("parse", url)
            if url and url != '#':
                yield scrapy.Request(
                    url, self.parse,
                    meta={'cookiejar': response.meta['cookiejar']})

    def _follow_companies(self, response):
        """Yield a Request for the company-detail page of every listing."""
        for link in response.css('#jubao').css("h4").css("a"):
            href = link.xpath('@href').extract_first(default='')
            url = href + "/company_detail.html"
            print("parse", url)
            if url and url != '#':
                yield scrapy.Request(
                    url, self.parse,
                    meta={'cookiejar': response.meta['cookiejar']})

    def parse(self, response):
        print("loading=", response.url)

        # By province
        yield from self._follow_links(response, response.css('#clist'))

        # By industry
        yield from self._follow_links(response, response.css('.tag_tx'))

        # Listing page (has pagination header .tit2)
        if response.css('.tit2'):
            if response.url.find("qn") > 0:
                # BUG FIX: the original xpath "//em/text()" was absolute and
                # searched the whole document; ".//em" is relative to .tit2.
                totalnum = response.css('.tit2').xpath(".//em/text()").extract_first()
                print(totalnum)
            # Both branches of the original extracted the same company list;
            # the duplicated loop is collapsed into one helper call.
            yield from self._follow_companies(response)

        # Company detail page: scrape fields and publish to Kafka.
        if response.url.find("/company_detail.html") > 0:
            print("load company info:", response.url)
            # renamed from `id` to avoid shadowing the builtin
            company_id = response.url.split("/")[-2]
            print(company_id)
            companyname = response.css(".com-name").xpath("text()").extract_first()
            print(companyname)
            company_detail = {"ID": company_id, "companyname": companyname}
            for el in response.css('.con-txt').css("li"):
                label = el.css("label").xpath("text()").extract_first()
                # Value lives in an <a> when the field links somewhere,
                # otherwise in the <li>'s own text node.
                if el.css("a"):
                    value = el.css("a").xpath("text()").extract_first()
                else:
                    value = el.xpath("text()").extract_first()
                print(label, value)
                company_detail[label] = value
            producer.send('huangye88', company_detail)
            producer.flush()