import re
import scrapy
import urllib.parse
from drug_data.items import OnemedicineItem

class OnemedicineSpider(scrapy.Spider):
    """Spider for 111.com.cn: crawls drug category pages, follows the
    product listings (with pagination) and scrapes each drug detail page
    into an ``OnemedicineItem``.
    """
    name = 'onemedicine'
    allowed_domains = ['111.com', '111.com.cn']
    start_urls = ['https://www.111.com.cn/categories/953710']
    # Collected detail-page URLs. Kept for backward compatibility with the
    # original attribute; requests are now yielded once per page instead of
    # re-yielding this whole accumulated list (see get_detail_page).
    detail_list = []

    def parse(self, response):
        """Follow every category link found on the start page."""
        href_urls = response.css('div.itemChooseBox ul.list_ul li a::attr(href)').extract()
        for url in href_urls:
            # Resolve relative hrefs against the current page URL.
            yield scrapy.Request(
                url=urllib.parse.urljoin(response.url, url),
                callback=self.get_detail_page,
            )

    def get_detail_page(self, response):
        """Extract detail-page links from one listing page and paginate.

        Bug fix: the original appended each link to the shared class-level
        ``detail_list`` and then re-yielded the ENTIRE list on every listing
        page, producing O(n^2) duplicate requests (masked only by Scrapy's
        dupe filter). Each detail request is now yielded exactly once.
        """
        for li in response.xpath('//ul[@id="itemSearchList"]/li'):
            href_url = li.xpath('./div[@class="itemSearchResultCon"]/a/@href').extract_first()
            if not href_url:
                # Skip listing entries without a product link (ads/placeholders).
                continue
            detail_url = urllib.parse.urljoin(response.url, href_url)
            self.detail_list.append(detail_url)  # kept for compatibility
            yield scrapy.Request(url=detail_url, callback=self.get_detail_data)

        # Pagination: the last pager anchor reads "下一页" ("next page")
        # unless we are already on the final page.
        next_text = response.xpath('//div[@class="turnPageBottom"]/a[last()]/text()').extract_first()
        if next_text == '下一页':
            next_url = response.xpath('//div[@class="turnPageBottom"]/a[last()]/@href').extract_first()
            if next_url:
                yield scrapy.Request(
                    url=urllib.parse.urljoin(response.url, next_url),
                    callback=self.get_detail_page,
                )

    def get_detail_data(self, response):
        """Scrape one drug detail page into an ``OnemedicineItem``.

        Bug fixes vs. the original:
        * every ``extract_first()``/``get()`` result is guarded against
          ``None``, so a page with a missing field no longer raises
          ``AttributeError``;
        * the product number is cleaned by removing the literal
          ``商品编码：`` label as a *prefix* instead of ``str.strip`` with a
          character set (which removes any matching characters from both ends).
        """
        drug_id = response.url.split('/')[-1].split('.')[0]  # drug id from URL
        # Breadcrumb navigation: 3rd entry is the category, 4th the symptom.
        sort = response.xpath('//div[@class="detailnav"]/span[not(@class="arrow")][3]/a/text()').extract_first()
        symptom = response.xpath('//div[@class="detailnav"]/span[not(@class="arrow")][4]/a/text()').extract_first()
        trs = response.xpath('//div[@class="goods_intro"]')
        drug_name = trs.xpath('.//tr[1]/td/text()').extract_first()

        sell_price = response.css('span.good_price::text').extract_first()
        if sell_price:
            sell_price = sell_price.strip('￥')  # drop the currency sign

        product_num = response.css('li.item_number::text').get()
        if product_num:
            product_num = product_num.strip()
            label = '商品编码：'
            if product_num.startswith(label):
                product_num = product_num[len(label):]

        brand = trs.xpath('.//tr[2]/td[1]/text()').extract_first()
        specification = trs.xpath('.//tr[2]/td[2]/text()').extract_first()
        weight = trs.xpath('.//tr[3]/td[1]/text()').extract_first()
        manufacturer = trs.xpath('.//tr[3]/td[2]/text()').extract_first()

        approve_num = trs.xpath('.//tr[4]/td[1]/text()').extract_first()
        if approve_num:
            # \s already covers \t and \n; also drop the stray '(' seen on site.
            approve_num = re.sub(r'[\s(]', '', approve_num)

        product_type = trs.xpath('.//tr[4]/td[2]/text()').extract_first()
        if product_type:
            product_type = re.sub(r'\s', '', product_type)

        drug_img = response.css('img#productImg::attr(src)').extract_first()
        yield OnemedicineItem(
            _id=drug_id, sort=sort, symptom=symptom, drug_name=drug_name,
            sell_price=sell_price, product_num=product_num, brand=brand,
            specification=specification, weight=weight,
            manufacturer=manufacturer, approve_num=approve_num,
            product_type=product_type, drug_img=drug_img,
        )