import scrapy
import re

from scrapy.selector import Selector
from people.items import PeopleItem

class PeoplesSpider(scrapy.Spider):
    """Spider for service listings on henan.baixing.com.

    ``parse`` walks the listing page, builds one :class:`PeopleItem` per
    entry and follows the detail link; ``ParsingDetail`` enriches the item
    from the detail page and yields it. Pagination follows the last link
    of the pager until no further page exists.
    """

    name = 'peoples'
    start_urls = [
        'https://henan.baixing.com/fuwu/',
    ]

    # Compiled once: extracts the URL inside a CSS ``url(...)`` token.
    _URL_IN_STYLE = re.compile(r"url\(([\s\S]*?)\)")

    def parse(self, response):
        """Parse one listing page.

        Yields a detail-page :class:`scrapy.Request` per service entry
        (item passed via ``meta``), then a request for the next listing
        page, if any.
        """
        selector = Selector(response)

        services = selector.xpath("//div[@class='main']/ul[@class='list-ad-items']/li")

        for service in services:
            # Detail page link; may be missing for placeholder rows.
            detail_link = service.xpath("a[@class='media-cap ']/@href").extract_first()

            # Membership badge: tooltip text and level parsed from the
            # badge's CSS class.
            member_desc = service.xpath("div[@class='media-body']/div[@class='media-body-title']/a[2]/@data-original-title").extract_first()
            member = self.ParsingMember(
                service.xpath("normalize-space(div[@class='media-body']/div[@class='media-body-title']/a[2]/@class)").extract_first()
            )

            # Category label.
            category = service.xpath("div[@class='media-body']/div[@class='media-body-title']/a[3]/text()").extract_first()

            # No VIP badge found at a[2]: the anchors are shifted by one
            # position, so retry one anchor further down.
            if member == 0:
                member = self.ParsingMember(
                    service.xpath("normalize-space(div[@class='media-body']/div[@class='media-body-title']/a[3]/@class)").extract_first()
                )
                member_desc = service.xpath("div[@class='media-body']/div[@class='media-body-title']/a[4]/@data-original-title").extract_first()
                category = service.xpath("div[@class='media-body']/div[@class='media-body-title']/a[4]/text()").extract_first()

            member_tag = [member, member_desc]

            # Merchant service title.
            merchant_service = service.xpath("div[@class='media-body']/div[@class='media-body-title']/a[1]/text()").extract_first()

            # District / area text.
            area = service.xpath("div[@class='media-body']/div[@class='ad-item-detail']/text()").extract_first()

            # Contact phone number.
            phone = service.xpath("div[@class='media-body']/div[@class='media-body-title']/span[@class='highlight']/button[@class='contact-button']/@data-contact").extract_first()

            item = PeopleItem()
            item['member_tag'] = member_tag
            item['merchant_service'] = merchant_service
            item['category'] = category
            item['area'] = area
            item['phone'] = phone
            item['link'] = detail_link

            if detail_link is not None:
                # Fetch the detail page; the partially-filled item rides
                # along in meta and is completed in ParsingDetail.
                yield scrapy.Request(
                    response.urljoin(detail_link),
                    callback=self.ParsingDetail,
                    meta={'item': item}
                )

        # Pagination: the pager's last <li> holds the "next page" link.
        # Guard against None (last page) — concatenating None previously
        # raised TypeError here.
        next_page = selector.xpath("//ul[@class='list-pagination']/li[last()]/a/@href").extract_first()

        if next_page is not None:
            yield scrapy.Request(response.urljoin(next_page))
        else:
            self.logger.info('Crawling finished: no further pages.')

    # Parse a detail page and complete the item carried in meta.
    def ParsingDetail(self, response):
        """Fill the remaining fields of the item from the detail page."""
        item = response.meta['item']
        selector = Selector(response)

        # Main category: breadcrumb trail with the leading city entries
        # stripped off.
        item['main_category'] = self.SplitCity(
            selector.xpath("//ul[@class='search-crumbs']/li/a/text()").extract()
        )

        # Average price.
        item['average_price'] = selector.xpath("//div[@class='viewad-actions']/span[@class='price']/text()").extract_first()

        # Gallery images.
        item['images'] = selector.xpath("//div[@class='swiper-wrapper']/div/img/@src").extract()

        # Company name: try the company-style layout first, then the
        # personal-poster layout.
        company = selector.xpath("//div[@class='viewad-meta-item'][1]/div[@class='content']/span/text()").extract_first()

        if company is not None:
            item['company'] = company
            item['is_company'] = 1
        else:
            company = selector.xpath("//li[@class='viewad-meta-item'][1]/div/text()").extract_first()
            if company is None:
                item['company'] = selector.xpath("//div[@class='poster-detail']/h3/a[@class='poster-name']/text()").extract_first()
                item['is_company'] = 0
            else:
                item['company'] = company
                item['is_company'] = 1

        # Service content tags.
        item['service_content'] = selector.xpath("//div[@class='viewad-meta-item'][2]/div[@class='content']/a[@class='tmeta-tag']/span/text()").extract_first()

        # Service address.
        item['service_address'] = selector.xpath("//div[@class='viewad-meta-item'][4]/div[@class='content']/span/text()").extract_first()

        # Contact person: two fallbacks for alternate page layouts.
        contact_person = selector.xpath("//div[@class='viewad-meta-item'][5]/div[@class='content']/span/text()").extract_first()

        if contact_person is None:
            contact_person = selector.xpath("//li[@class='viewad-meta-item'][3]/div/text()").extract_first()

            if contact_person is None:
                contact_person = item['service_address']

        item['contact_person'] = contact_person

        # Service introduction text.
        item['introduction'] = selector.xpath("//div[@class='viewad-detail']/div[@class='viewad-text']/text()").extract_first()

        # Introduction images embedded as CSS background styles.
        item['introduction_images'] = self.ParsingUrl(
            selector.xpath("//div[@class='featured-height']/div/a/@style").extract()
        )

        yield item

    # Parse the membership level out of a badge's CSS class string.
    def ParsingMember(self, member):
        """Return the second number found in *member* as an int, else 0.

        The badge class carries two numeric tokens; the second one is the
        VIP level. Returns 0 when *member* is None or has fewer than two
        numeric tokens.
        """
        if member is None:
            return 0
        # NOTE(review): int() would raise on a decimal token like "3.5";
        # observed badge classes appear to use integers only — confirm.
        numbers = re.findall(r"\d+\.?\d*", member)
        if len(numbers) > 1:
            return int(numbers[1])
        return 0

    # Strip the leading city entries off a breadcrumb list.
    def SplitCity(self, city):
        """Remove the first two breadcrumb entries in place and return it.

        Safe for lists of any length (the original double pop(0) raised
        IndexError on a single-element list).
        """
        del city[:2]
        return city

    # Extract image URLs from CSS ``style`` attribute strings.
    def ParsingUrl(self, images):
        """Return every URL found inside ``url(...)`` tokens of *images*.

        Entries without a ``url(...)`` token are skipped (the original
        raised AttributeError on a non-matching entry).
        """
        introduction_images = []
        for style in images:
            match = self._URL_IN_STYLE.search(style)
            if match is not None:
                introduction_images.append(match.group(1))
        return introduction_images