#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author:root
@file: xywy_ask_spider.py
@time: 2018/08/{DAY}
"""

import scrapy
from crawling.items import AskItem
from crawling.spiders.redis_spider import RedisSpider
import re
import uuid
from bs4 import BeautifulSoup

class XywyAskSpider(RedisSpider):
    """Crawl question/answer pages from club.xywy.com into ``AskItem`` records.

    Flow:
      1. ``parse`` reads the total page count from a department listing page
         and schedules a request for every listing page.
      2. ``parse_question_link`` extracts question-detail URLs from a listing
         page and follows them.
      3. ``parse_question_content`` scrapes a single question page.
    """

    name = "link"

    # Start URLs were once built from yesterday's date, e.g.:
    # yestody = datetime.date.today() - datetime.timedelta(days=1)
    # start_urls = ['http://club.xywy.com/keshi/{}/1.html'.format(yestody)]

    def parse(self, response):
        """Read the last page number from the pager and schedule all pages."""
        self.logger.info(response.text)
        # The pager text looks like "共N页" ("N pages in total").
        pages = re.findall(r'共(.*?)页', response.text)
        if not pages:
            # No pager found on this page -- nothing to schedule.
            self.logger.warning('no page count found on %s', response.url)
            return
        last_page = int(pages[0])
        self.logger.debug('last page: %s', last_page)
        for page in range(1, last_page + 1):
            ques_url = response.url.replace('1.html', '{}.html'.format(page))
            yield scrapy.Request(url=ques_url, callback=self.parse_question_link)

    def parse_question_link(self, response):
        """Extract question-detail links from a listing page and follow them."""
        link_list = response.xpath('//div[@class="club_dic"]/h4/em/a/@href').extract()
        for url in link_list:
            yield scrapy.Request(url=url, callback=self.parse_question_content)

    def parse_question_content(self, response):
        """Scrape one question page into an ``AskItem`` and yield it."""
        soup = BeautifulSoup(response.text, 'html.parser')
        item = AskItem()
        # Question URL.
        item['ask_link'] = response.url
        # Generated question id. uuid1 kept for compatibility with stored data;
        # NOTE(review): uuid1 embeds host MAC/timestamp -- consider uuid4.
        item['ask_id'] = str(uuid.uuid1())
        # Question title.
        item['ask_title'] = response.xpath('//p[@class="fl dib fb"]/@title').extract()[0]
        # Hospital department the question was filed under.
        item['ask_department'] = response.xpath('//p[@class="pt10 pb10 lh180 znblue normal-a"]/a/text()').extract()[-1]
        # Publication time.
        item['ask_time'] = response.xpath('//span[@class="User_newbg User_time"]/text()').extract()[0]
        # Patient info (spans 2-4 of the user-info block), with whitespace
        # and the publication time stripped out.
        item['ask_pat'] = ''.join(
            response.xpath('//div[@class="f12 graydeep Userinfo clearfix pl29"]/span/text()')[2:4].extract()
        ).replace('\t', '').replace('\r\n', '').replace(item['ask_time'], '')
        if item['ask_pat'] == item['ask_time']:
            item['ask_pat'] = None
        # Question body text.
        item['ask_content'] = response.xpath('//div[@id="qdetailc"]/text()').extract()[0]
        # Answer status, e.g. "已回复" ("answered").
        item['ask_sit'] = response.xpath('//span[@class="solve dib ml10 fl f12"]/text()').extract()[0]
        if item['ask_sit'] == u'已回复':
            # Doctor info block.
            item['doctor_info'] = soup.select('div.docall.clearfix div.zyhftop.pl29.pt20.clearfix.pr div.fl.docCon div.Doc_zytpmd')
            # Answer content.
            item['doctor_answer'] = soup.select('div.pt15.f14.graydeep.pl20.pr20')
            # Answer time.
            item['doctor_time'] = soup.select('span.User_newbg.User_time.Doc_time')
        else:
            # Not yet answered: no doctor fields.
            item['doctor_info'] = None
            item['doctor_answer'] = None
            item['doctor_time'] = None
        self.logger.debug('scraped item: %s', item)
        yield item
