import scrapy
from ..items import PycharmScrapyItem
import time

class LiepingSpider(scrapy.Spider):
    """Scrape doctor listing pages from guahao.com.

    One item per doctor card is yielded, containing name, title, subject,
    hospital, rating, patient count, text/video consultation prices and
    a cleaned "skills" summary.
    """

    name = 'weiyi'
    allowed_domains = ['www.guahao.com']
    # Listing pages follow the pattern .../p<N>; pages 1-29 are crawled.
    base_url = 'https://www.guahao.com/expert/all/%E5%85%A8%E5%9B%BD/all/%E4%B8%8D%E9%99%90/p'
    start_urls = [base_url + '1']

    # Polite throttling belongs in Scrapy's scheduler, not in time.sleep():
    # sleeping inside parse() blocks the whole (single-threaded) reactor.
    custom_settings = {'DOWNLOAD_DELAY': 2}

    def start_requests(self):
        """Yield every listing page exactly once.

        Previously pages 2-29 were re-yielded from every parse() call
        (deduplicated only by the dupe filter) with a blocking
        time.sleep(2) per URL — roughly 56 s of dead time per page parsed.
        """
        for page in range(1, 30):
            yield scrapy.Request(self.base_url + str(page), callback=self.parse)

    def parse(self, response):
        """Extract one PycharmScrapyItem per doctor card on a listing page."""
        doctor_list = response.xpath('//div[@class="g-doctor-items to-margin"]/ul/li')
        for doctor in doctor_list:
            item = PycharmScrapyItem()
            item['doctor_name'] = doctor.xpath(
                './/a[@class="cover-bg seo-anchor-text"]/text()').get()

            # The professional title is the second <dt> text node; guard
            # against short lists instead of letting getall()[1] raise.
            titles = doctor.xpath('.//dl/dt/text()').getall()
            item['doctor_job'] = titles[1].strip() if len(titles) > 1 else "None"

            # `(... or "None")` avoids AttributeError when the node is
            # missing, matching the "None" placeholder used elsewhere.
            item['doctor_subject'] = (
                doctor.xpath('.//dd/p[1]/text()').get() or "None").strip()
            item['doctor_hospital'] = (
                doctor.xpath('.//span[@class="g-txt-ell"]/text()').get() or "None").strip()

            # Some doctors have no rating; normalise missing values to "None".
            item['doctor_star'] = doctor.xpath(
                './/dd/p[3]/span[1]/em/text()').get() or "None"
            item['doctor_number'] = doctor.xpath('.//dd/p[3]/span[2]/i/text()').get()

            # Text-chat and video consultation prices share one cleanup rule.
            item['doctor_image'] = self._price(
                doctor.xpath('.//a[@class="infos image"]//em[2]/text()').get())
            item['doctor_video'] = self._price(
                doctor.xpath('.//a[@class="infos video"]//em[2]/text()').get())

            # Strip layout noise and the "擅长：" ("specialises in:") prefix.
            skill = doctor.xpath('.//div[@class="skill"]/p/text()').get() or ""
            item['doctor_skill'] = (skill.strip()
                                    .replace("\n", "")
                                    .replace(" ", "")
                                    .replace("擅长：", "")
                                    .replace("...", ""))
            yield item

    @staticmethod
    def _price(raw):
        """Normalise a consultation price string.

        Returns "None" when the node is absent; otherwise strips whitespace,
        drops the ￥ currency sign, and maps the "暂无排班" (no schedule
        available) placeholder to "None".
        """
        if not raw:
            return "None"
        return raw.strip().replace("￥", "").replace("暂无排班", "None")