# -*- coding: utf-8 -*-
import scrapy




class XueSpider(scrapy.Spider):
    """Spider for company search-result pages on b2b.11467.com.

    Currently exploratory: parse() prints extracted HTML fragments to
    help locate the pagination ("next page") element and the listing
    container.  It does not yet yield items or follow-up requests.
    """

    name = 'xue'
    allowed_domains = ['b2b.11467.com']
    # Search-result listing page (page 3 of this query).
    start_urls = ['http://b2b.11467.com/search/-51fa53e38d386613-pn3.htm']

    def parse(self, response):
        """Inspect one search-result page and print candidate fragments.

        :param response: downloaded page (scrapy ``Response``).
        :returns: ``None`` — debugging output only for now.
        """
        # Raw HTML of every <div> under the listing container.
        url_list = response.xpath("//*[@id='il']//div[1]//div//div").extract()

        # Find the fragment(s) containing the "next page" label.
        # enumerate() replaces list.index(), which rescans the list on
        # every hit and reports the FIRST occurrence only — wrong when
        # identical fragments repeat.
        for idx, fragment in enumerate(url_list):
            if "下一页" in fragment:  # "next page" pagination label
                print(idx)

        # NOTE(review): index 119 matched the first-page layout, 200 the
        # later pages — the structures differ.  Guard so shorter result
        # lists don't raise IndexError.
        if len(url_list) > 200:
            print(url_list[200]
                  .replace(u'\xa9', u' ')
                  .replace(u'\xa0', u' ')
                  .replace(u'\u200b', u' '))

        # Full <ul> blocks holding the listing entries.
        url_a = response.xpath("//*[@id='il']/div[1]/div/ul").extract()

        # One separator per extracted <ul>, as before.
        for _ in url_a:
            print("__________________________________________")

        # Guard: pages without the expected <ul> would raise IndexError.
        if url_a:
            print(url_a[0]
                  .replace(u'\xa9', u' ')
                  .replace(u'\xa0', u' ')
                  .replace(u'\u200b', u' '))