# -*- coding: utf-8 -*-
import random
import time

import scrapy
import logging

from scrapydemo.items import ScrapydemoItem

from scrapydemo.User_AgentList import getRedomAnget

# Module-level logger for this spider module.
logger = logging.getLogger(__name__)

# Running total of items scraped across all listing pages (shared mutable state).
dataTotal = 0
class SoulewangSpider(scrapy.Spider):
    """Spider for union.51sole.com (搜了网).

    Crawl flow: start page -> province pages (``parse``) -> city listing
    pages (``parseCity``) -> paginated company lists (``parseList``),
    yielding one ``ScrapydemoItem`` per company that has a phone number.
    """
    name = 'soulewang'  # 搜了网
    allowed_domains = ["www.51sole.com"]
    start_urls = ["http://union.51sole.com/"]

    def parse(self, response):
        """Parse the start page and follow every province link.

        The first 37 anchors in the box are skipped — presumably
        non-province navigation links (TODO: confirm against the page).
        """
        shenfen_url_list = response.xpath("/html/body//div[@class='box w_c']/a/@href").extract()[37:]
        logger.info('省份数据' + str(shenfen_url_list))
        for province_url in shenfen_url_list:
            # Crude throttle. NOTE(review): time.sleep blocks Scrapy's
            # reactor; DOWNLOAD_DELAY in settings is the idiomatic fix.
            time.sleep(0.5)
            yield scrapy.Request(
                province_url,
                callback=self.parseCity,
                headers={"User-Agent": getRedomAnget()},  # rotate UA per request
                dont_filter=True,
            )

    def parseCity(self, response):
        """Parse a province page and follow each city/category link."""
        city_url_list = response.xpath("/html/body//div[@class='hy_include']/ul/li/a/@href").extract()
        logger.info('市区数据' + str(city_url_list))
        for city_path in city_url_list:
            time.sleep(0.5)  # crude throttle (see note in parse)
            yield scrapy.Request(
                "http://www.51sole.com" + city_path,
                callback=self.parseList,
                dont_filter=True,
                headers={"User-Agent": getRedomAnget()},
                # No previous page number yet on the first listing page.
                meta={"floor_url": None},
            )

    def parseList(self, response):
        """Parse one company-listing page: yield items, then follow 下一页.

        ``response.meta["floor_url"]`` carries the page number of the page
        that scheduled this request; it is used to detect when the site's
        "next page" link wraps back to an earlier page.
        """
        global dataTotal
        list_url_list = response.xpath("/html/body//div[@class='hy_companylist']/ul/li")
        # "下一页" == "next page"; None on the last page.
        next_url = response.xpath("/html/body//a[text()='下一页']/@href").extract_first()
        floor_url = response.meta["floor_url"]
        # Pagination loop guard. Original crashed when next_url was None;
        # guard both values before slicing/comparing.
        if floor_url is not None and next_url is not None:
            if next_url[-2:-1] < floor_url:  # page-digit comparison marks the wrap-around
                logger.info('分页结束当前页数:' + floor_url)
                return
        for company in list_url_list:
            phone = company.xpath(".//span[@class= 'tel']/text()").extract_first()
            if phone is None:
                # Entries without a phone number are not useful — skip.
                continue
            items = ScrapydemoItem()
            items['phone'] = phone
            items['compony_name'] = company.xpath(".//span[@class= 'fl']/a/text()").extract_first()
            items['sourcce'] = company.xpath(".//span[@class= 'fl']/a/@href").extract_first()
            items['compony_address'] = company.xpath("./dl/dd/text()").extract_first()
            items['business_scope'] = company.xpath("./p/text()").extract_first()
            dataTotal += 1
            logger.info("获取到的数据数量:%d", dataTotal)
            yield items
        # BUG FIX: the pagination request was built but never yielded, so
        # only the first page of each listing was crawled. Also skip when
        # there is no next page at all.
        if next_url is not None:
            yield scrapy.Request(
                "http://www.51sole.com" + next_url,
                callback=self.parseList,
                dont_filter=True,
                headers={"User-Agent": getRedomAnget()},
                # Record the currently highlighted page number for the guard above.
                meta={"floor_url": response.xpath("/html/body//div[@class='list-page']/ul//li[@class='on']/text()").extract_first()},
            )



