import re
import scrapy
import urllib.parse
from drug_data.items import HealthyGuestItem


class HealthykSpider(scrapy.Spider):
    """Crawl jianke.com drug listings.

    Flow: category page (symptom links) -> per-symptom product list
    (with pagination) -> product detail page, yielding one
    ``HealthyGuestItem`` per drug.
    """

    name = 'healthyk'
    allowed_domains = ['jianke.com']
    start_urls = ['https://search.jianke.com/list-010315.html']

    def parse(self, response):
        """Parse the category page and request each symptom's product list."""
        lis = response.xpath('//div[@class="itemChooseBox"]/ul/li')  # Western/Chinese medicine categories
        for li in lis:
            symptom = li.xpath('./a/text()').extract_first()  # symptom name
            href_url = li.xpath('./a/@href').extract_first()  # link to the symptom's product list
            if not href_url:
                # Missing link node: nothing to follow for this entry.
                continue
            href_url = urllib.parse.urljoin(response.url, href_url)  # build absolute URL

            yield scrapy.Request(
                url=href_url, callback=self.get_detail_url, meta={"symptom": symptom}
            )

    # Extract product-detail links and handle pagination.
    def get_detail_url(self, response):
        """Parse one product-list page: request each product's detail page,
        then follow the "next page" link if there is a real one."""
        symptom = response.meta['symptom']  # symptom name

        lis = response.xpath('//ul[@class="pro-con"]/li')  # product entries
        for li in lis:
            title = li.xpath('.//div[@class="pro-botxt"]/p/a/text()[2]').extract_first()  # title
            # extract_first() may return None when the node is absent;
            # guard before running re.sub / string methods on it.
            title = re.sub(r'[\t\n\s]', '', title or '')
            discount_price = li.xpath('.//div[@class="pro-botxt"]/span/i/text()').extract_first()  # discounted price
            orginal_price = li.xpath('.//div[@class="pro-botxt"]/s/text()').extract_first()  # original price
            if orginal_price and orginal_price.startswith('￥'):
                orginal_price = orginal_price.strip('￥')  # drop the currency symbol

            detail_url = li.xpath('.//div[@class="pro-botxt"]/p/a/@href').extract_first()  # product detail link
            if not detail_url:
                # No detail link for this entry; skip rather than re-request the listing.
                continue
            detail_url = urllib.parse.urljoin(response.url, detail_url)

            yield scrapy.Request(
                url=detail_url, callback=self.get_detail_data,
                meta={'symptom': symptom, 'dprice': discount_price, 'oprice': orginal_price}
            )

        # Pagination: the last <a> in the pager is either a real URL or a
        # "javascript:void(0)" placeholder on the final page.
        next_url = response.xpath('//div[@class="pages"]/a[last()]/@href').extract_first()
        if next_url and not next_url.startswith('javascript'):
            next_url = urllib.parse.urljoin(response.url, next_url)  # absolute next-page URL
            yield scrapy.Request(
                url=next_url, callback=self.get_detail_url, meta={'symptom': symptom}
            )

    # Extract the product detail-page data.
    def get_detail_data(self, response):
        """Parse one product detail page and yield a populated item."""
        drug_id = response.url.split('/')[-1].split('.')[0]  # drug id from the URL filename
        sort = response.xpath('//div[contains(@class,"crumb_p")]/a[3]/text()').extract_first()  # category
        symptom = response.meta['symptom']  # symptom name
        discount_price = response.meta['dprice']  # discounted price
        orginal_price = response.meta['oprice']  # original price

        drug_name = response.xpath('//div[@class="widet"]/h1/text()').extract_first()  # drug name
        com_name = response.css('dl.tongyong dd a::text').extract_first()  # generic name
        product_num = response.xpath('//div[@class="detail_box"]/dl[2]/dd/text()').get()  # product number
        approve_num = response.xpath('//div[@class="detail_box"]/dl[3]/dd/span/text()[1]').get()  # approval number
        # Node may be missing; avoid re.sub(None) TypeError.
        approve_num = re.sub(r'[\t\n\s]', '', approve_num or '')

        info = response.xpath('//dl[@id="jk_syncdata"]//div[@class="Price_info"]')
        sell_price = info.xpath('./dl[@id="price_big"]/dd/em/text()').extract_first()  # selling price
        if sell_price:
            sell_price = sell_price.strip('￥')  # drop the currency symbol
        manufacturer = info.xpath('./dl[last()]/dd/a/text()').extract_first()  # manufacturer

        # Product introduction table
        div1 = response.xpath('//div[@id="decora_cons_div"]/div[1]')
        material = div1.xpath('.//tr[2]/td[2]/text()').extract_first()  # main ingredients
        effect = div1.xpath('.//tr[3]/td[2]/text()').extract_first()  # main effect
        specification = div1.xpath('.//tr[4]/td[2]/text()').extract_first()  # specification
        dosage = div1.xpath('.//tr[5]/td[2]/text()').extract_first()  # dosage and administration

        manual = ''.join(response.xpath('//div[@id="decora_cons_div"]/div[2]/div').extract())  # instruction manual
        # prove = '\n'.join(response.xpath('//div[@id="decora_cons_div"]/div[3]//p//text()').extract())       # certification
        drug_img = response.css('div#imgbig a::attr(href)').extract_first()  # drug image
        drug_img = urllib.parse.urljoin(response.url, drug_img) if drug_img else None
        item = HealthyGuestItem(
            _id=drug_id, sort=sort, symptom=symptom, discount_price=discount_price, orginal_price=orginal_price,
            drug_name=drug_name, com_name=com_name, product_num=product_num, approve_num=approve_num,
            sell_price=sell_price, manufacturer=manufacturer, material=material, effect=effect,
            specification=specification, dosage=dosage, manual=manual, drug_img=drug_img
        )  # prove=prove,

        yield item
