import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
import numpy as np


class XywySpider(CrawlSpider):
    """Crawl disease information from jib.xywy.com.

    The crawl is a fixed chain of callbacks, accumulating one ``item``
    dict per disease through ``request.meta['data']``:

        parse (index) -> detail_page -> check_parse (symptoms)
        -> eat_parse (diet) -> recommand_drug_parse (drugs, yields item)
    """

    name = 'xywy'
    # NOTE(review): allowed_domains intentionally left unset (it was
    # commented out in the original) — confirm offsite filtering is not
    # needed before enabling it.
    start_urls = ['http://jib.xywy.com']

    # Site root used to absolutize the relative hrefs scraped from pages.
    BASE_URL = 'http://jib.xywy.com'

    def start_requests(self):
        """Kick off the crawl with a single request for the index page."""
        yield scrapy.Request(self.start_urls[0], callback=self.parse)

    def parse(self, response):
        """Parse the disease index and schedule one detail request per disease.

        Yields one ``scrapy.Request`` per disease link, carrying the
        disease name forward in ``meta['data']``.
        """
        sections = response.xpath('//*[@class="illness-main clearfix"]/div')
        # NOTE(review): this inner XPath starts with '//' so it searches the
        # whole document rather than only sections[0]; kept as-is because it
        # matches the page layout the spider was written against.
        disease_links = sections[0].xpath('//*[@class="illness-hot clearfix"]/li')
        for link in disease_links:
            name = link.xpath('./a/text()').get()
            href = link.xpath('./a/@href').get()
            if not href:
                # Skip malformed entries instead of crashing on None + str.
                continue
            yield scrapy.Request(
                url=self.BASE_URL + href,
                callback=self.detail_page,
                meta={'data': name},
            )

    def detail_page(self, response):
        """Scrape the disease detail page and follow the check-page link.

        Fills the item with name/description/common-sense/treatment fields
        and forwards it to :meth:`check_parse` via ``meta['data']``.
        """
        item = {'name': response.meta['data']}

        # Guard against a missing description node; strip the layout
        # whitespace the page embeds in the text.
        desc = response.xpath('//*[@class="mt15 lh200"]/text()').get() or ''
        item['desc'] = (desc.strip()
                        .replace('\t', '')
                        .replace('\n', '')
                        .replace('\r', ''))

        common_senses = response.xpath('//*[@class="fl jib-common-sense"]/p')
        item['easy_get'] = common_senses[0].xpath('./span[2]/text()').get()
        item['get_prob'] = common_senses[1].xpath('./span[2]/text()').get()
        item['get_way'] = common_senses[2].xpath('./span[2]/text()').get()
        item['acompany'] = common_senses[5].xpath('./span[2]/a/text()').getall()
        check_url = self.BASE_URL + common_senses[4].xpath('./span[3]/a/@href').get()

        jib_treats = response.xpath('//*[@class="fr jib-treat"]/p')
        departments = jib_treats[0].xpath('./span[2]/text()').get()
        # Whitespace-split the department/way strings into lists; empty list
        # when the node is missing (the original crashed on None).
        item['cure_department'] = departments.split() if departments else []
        cure_ways = jib_treats[1].xpath('./span[2]/text()').get()
        item['cure_way'] = cure_ways.split() if cure_ways else []
        item['cure_lasttime'] = jib_treats[2].xpath('./span[2]/text()').get()
        item['cured_prob'] = jib_treats[3].xpath('./span[2]/text()').get()
        item['common_drug'] = jib_treats[4].xpath('./span[2]/a/text()').getall()
        # NOTE(review): hard-coded placeholder list carried over from the
        # original spider — TODO replace with values scraped from check_url.
        item['check'] = ["耳、鼻、咽拭子细菌培养", "周围血白细胞计数及分类检验", "血常规", "酶联免疫吸附试验", "白细胞分类计数"]

        yield scrapy.Request(url=check_url, callback=self.check_parse,
                             meta={'data': item})

    def check_parse(self, response):
        """Collect symptom names, then follow the diet ("eat") page link."""
        item = response.meta['data']
        article = response.xpath('//*[@class="jib-articl fr f14 jib-lh-articl"]')
        item['symptom'] = article.xpath('.//*[@target="_blank"]/text()').getall()
        eat_href = response.xpath('//*[@class="pb5"]/ul/li[3]/a/@href').get()
        yield scrapy.Request(url=self.BASE_URL + eat_href,
                             callback=self.eat_parse, meta={'data': item})

    def eat_parse(self, response):
        """Scrape diet advice, then follow the recommended-drug page link."""
        item = response.meta['data']
        panels = '//*[@class="panels mt10"]'
        item['do_eat'] = response.xpath(
            panels + '/div[2]/div[2]/div/p/text()').getall()
        # BUG FIX: the original assigned do_eat here, silently discarding
        # the not_eat data it had just scraped.
        item['not_eat'] = response.xpath(
            panels + '/div[3]/div[2]/div/p/text()').getall()
        item['recommand_eat'] = response.xpath(
            panels + '/div[4]/div[2]/div/p/text()').getall()
        drug_href = response.xpath(
            '//*[@class="dep-nav f14 clearfix"]/li[5]/a/@href').get()
        yield scrapy.Request(url=self.BASE_URL + drug_href,
                             callback=self.recommand_drug_parse,
                             meta={'data': item})

    def recommand_drug_parse(self, response):
        """Scrape recommended drug names and emit the finished item."""
        item = response.meta['data']
        drugs = response.xpath('//*[@class="city-item"]/div/div[2]/p/a/text()').getall()
        item['recommand_drug'] = [drug.strip() for drug in drugs]
        item['drug_detail'] = "无"
        yield item
