# -*- coding: utf-8 -*-
import scrapy
from jiage315.items import MedicineItem
import platform
import sys
import operator

# Python 2 compatibility: force UTF-8 as the process-wide default encoding.
# Bug fix: the original compared platform.python_version() with '3'
# lexicographically (via operator.lt), which is fragile for multi-digit
# versions; compare the numeric major version instead.
if sys.version_info[0] < 3:
    reload(sys)  # noqa: F821 -- `reload` is a builtin on Python 2 only
    sys.setdefaultencoding('utf-8')

# Maps the Chinese field labels scraped from a medicine detail page to the
# corresponding MedicineItem field names. The keys are matched verbatim
# against page text, so they must stay in Chinese.
# NOTE(review): "labe_dict" looks like a typo for "label_dict"; the name is
# kept as-is because the spider code below references it.
labe_dict = {
    '建议零售价格': 'price_retail',   # suggested retail price
    '产品名称': 'name',               # product name
    '包装规格': 'spec',               # packaging specification
    '产品剂型': 'dosage_form',        # dosage form
    '包装单位': 'package_unit',       # packaging unit
    '批准文号': 'approval_number',    # drug approval number
    '生产厂家': 'producer',           # manufacturer
    '商品条码': 'barcode',            # product barcode
    '主治疾病': 'attending'           # diseases treated / indications
}


class MedicineSpider(scrapy.Spider):
    """Crawl medicine list pages on 315jiage.cn and scrape every detail page
    into a MedicineItem.

    The crawl entry point is supplied via the ``start_url`` keyword argument
    (e.g. ``scrapy crawl medicine_spider -a start_url=...``).
    """

    # Spider name used by `scrapy crawl`.
    name = 'medicine_spider'
    # Restrict the crawl to this domain.
    allowed_domains = ['www.315jiage.cn']
    # Site root, used to build absolute image URLs.
    base_url = 'https://www.315jiage.cn/'

    def __init__(self, name=None, **kwargs):
        """Initialise the spider.

        Requires ``kwargs['start_url']``: the first list page to crawl.
        Raises KeyError when it is missing (unchanged from the original
        behaviour).
        """
        super().__init__(name=name, **kwargs)
        # Bug fix: the original appended to `self.start_urls`, which resolves
        # to the *class-level* default list on scrapy.Spider and therefore
        # leaks state across spider instances. Use an instance attribute.
        self.start_urls = [kwargs['start_url']]

    def start_requests(self):
        """Kick off the crawl with page 1 of the configured start URL."""
        yield self.__get_page(1, self.start_urls[0])

    def __build_request_url(self, url=None, meta=None, callback=None, errback=None):
        """Build a scrapy.Request carrying browser-like headers.

        `dont_filter=True` so repeated visits to list pages are not
        deduplicated by scrapy's request fingerprinting.
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
        }
        return scrapy.Request(url, cookies={}, meta=meta, headers=headers,
                              callback=callback, dont_filter=True, errback=errback)

    def __get_page(self, page, url):
        """Build the request for one list page; `page` travels in meta."""
        self.logger.info('!!!!!!!!!!!!!fetch page:{}'.format(page))
        return self.__build_request_url(url=url, meta={'page': page},
                                        callback=self.__parse_page)

    def __parse_page(self, response):
        """Parse a list page: yield one detail request per entry, then follow
        the "next page" ("后页") link if present.
        """
        node_list = response.xpath('//div[@class="mm sCard-list"]/div')
        if not node_list:
            return

        for node in node_list:
            href = str(node.xpath('.//a/@href').extract_first()).strip()
            yield self.__build_request_url(url=response.urljoin(href),
                                           meta={'detail_id': href},
                                           callback=self.__parse_detail)

        # Follow the next page. Bug fix: xpath() never returns None, and on
        # the last page extract_first() returns None, so the original
        # `.strip()` call raised AttributeError there.
        next_page_url = response.xpath(
            '//div[@class="pager"]/ul/li/a[text()="后页"]/@href').extract_first()
        if next_page_url is None:
            return
        next_page_url = next_page_url.strip()
        if not next_page_url:
            return
        yield self.__get_page(response.meta['page'] + 1, response.urljoin(next_page_url))

    def __parse_detail(self, response):
        """Parse one medicine detail page into a MedicineItem."""
        item = MedicineItem()
        item['source_url'] = response.url
        self.__extract_content(response, item)
        self.__extract_category(response, item)
        self.__extract_instructions(response, item)
        self.__extract_image(response, item)
        yield item

    def __extract_content(self, response, data):
        """Extract the key/value property block (e.g. "产品名称：... 包装规格：...").

        The block renders as one flat text run in which fields are separated
        by spaces and each label ends with a full-width colon. Split the text
        at the last space before each colon, then map known labels to item
        fields via labe_dict.
        """
        content_node = response.xpath('//div[@class="block-info-prop text-oneline"]')[0]
        # Normalise full-width spaces so the splitting below sees one kind.
        text = content_node.xpath("string(.)").extract_first().strip().replace('　', ' ')

        # For every full-width colon, record the preceding space: those are
        # the boundaries between "label：value" fields.
        split_index_list = []
        s_index = 0
        while True:
            colon_index = text.find('：', s_index)
            if colon_index < 0:
                break
            split_index = text.rfind(' ', s_index, colon_index)
            if split_index > 0:
                split_index_list.append(split_index)
            s_index = colon_index + 1
            if s_index >= len(text):
                break

        # Cut the text into "label：value" lines at the collected boundaries.
        s_index = 0
        line_list = []
        for split_index in split_index_list:
            line_list.append(text[s_index:split_index].strip())
            s_index = split_index
        if s_index < len(text):
            # Bug fix: the original sliced to len(text) - 1 and silently
            # dropped the last character of the final field's value.
            line_list.append(text[s_index:].strip())

        for line in line_list:
            index = line.find('：')
            if index < 0:
                continue
            label = line[0:index].strip()
            if label not in labe_dict:
                continue
            value = line[index + 1:].strip()
            if label == '批准文号' and value.find('本品为处方药，须凭处方购买') >= 0:
                # The prescription-drug marker is appended to the approval
                # number; strip it off and record the prescription flag.
                value = value.replace('本品为处方药，须凭处方购买', '').strip()
                data['prescription'] = '1'
            if label == '建议零售价格':
                # Price renders like '￥12.30 ...': keep the number between
                # the currency sign and the next space.
                value = line[line.find('￥') + 1:line.find(' ')].strip()
            data[labe_dict[label]] = value

    def __extract_instructions(self, response, data):
        """Extract the instruction-sheet bullets as one newline-joined string.

        Best effort: a missing or odd instructions section must not kill the
        item, so failures are logged instead of swallowing everything with a
        bare except as before.
        """
        try:
            node_list = response.xpath("//div[@id='tab1']/ul/li")
            text_list = [str(node.xpath('string(.)').extract_first().strip())
                         for node in node_list]
            data['instructions'] = '\n'.join(text_list)
        except Exception:
            self.logger.exception('failed to extract instructions from %s', response.url)

    def __extract_category(self, response, data):
        """Extract up to two breadcrumb category levels (best effort)."""
        try:
            category_list = response.xpath("//div[@class='text-lg margin-top']//a")
            if len(category_list) > 1:
                data['category1'] = category_list[1].xpath('string(.)').extract_first().strip()
            if len(category_list) > 2:
                # The second level's text carries a 2-character prefix that is
                # cut off — presumably a separator; preserved from the original.
                data['category2'] = category_list[2].xpath('string(.)').extract_first().strip()[2:]
        except Exception:
            self.logger.exception('failed to extract category from %s', response.url)

    def __extract_image(self, response, data):
        """Extract the product image URL from the inline background style."""
        try:
            # The style value looks like
            # 'background-image:url(upload/2022-03/22033015019060.jpg)'.
            style = response.xpath(
                "//img[@class='img-rounded img-responsive block-info-img']/@style"
            ).extract_first()
            # Bug fix: the original tested the SelectorList (never None)
            # instead of the extracted value, then crashed into a bare
            # `except: pass` whenever the page had no image.
            if style is None:
                return
            data['image_url'] = self.base_url + style.replace('background-image:url(', '').replace(')', '')
        except Exception:
            self.logger.exception('failed to extract image from %s', response.url)
