# -*- coding: UTF-8 -*-
"""
Created on 2017年12月1日
@author: Leo
"""

# 系统库
import re
from bson import ObjectId
from collections import OrderedDict

# 第三方库
import scrapy


class SchoolLibrary(scrapy.Spider):
    """Spider for the GZCC library OPAC (lib2.gzcc.cn).

    Searches the catalogue by title, follows each result to its detail
    page, and yields one OrderedDict item per book containing the book's
    metadata, borrow/stock counts, and purchase-search URLs.
    """
    name = "school_library"

    def __init__(self, text, page, **kwargs):
        """Initialize the spider.

        Args:
            text: Title keyword to search for (appended as ``title=<text>``).
            page: Result page number; falls back to page 1 when empty/None.
            **kwargs: Passed through to ``scrapy.Spider.__init__``.
        """
        # Entry point of the OPAC site
        self.start_urls = ['http://lib2.gzcc.cn/opac/opac/']

        # Query string fragment for the title search
        self.search_text = "title=" + text

        # Page number fragment.
        # BUG FIX: the original condition used `or`, which is always True
        # (page=None produced "&page=None"); use `and` so empty/None falls
        # back to page 1.
        if page is not None and str(page) != "":
            self.default_page = "&page=" + str(page)
        else:
            self.default_page = "&page=1"

        # Legacy attributes, kept for backward compatibility with any
        # external code that inspects them. Per-response state is no longer
        # accumulated here (see parse()).
        self._data_od = OrderedDict()
        self._borrow_info = []

        super().__init__(**kwargs)

    # Request layer
    def start_requests(self):
        """Issue the initial search request against the OPAC openlink endpoint."""
        req_url = self.start_urls[0] + "openlink.php?" + \
            self.search_text + self.default_page
        yield scrapy.Request(url=req_url, method="GET", callback=self.parse_list)

    # Result-list parsing layer
    def parse_list(self, response):
        """Parse the search-result list and request each book's detail page."""
        # Each <li class="book_list_info"> is one search hit
        for each_title in response.xpath('//li[@class="book_list_info"]'):
            # Borrow/stock counts, e.g. ["5", "3"] = [collections, available]
            book_borrow_info = re.findall(
                r"\d+", each_title.xpath('string(p/span)').extract()[0])

            detail_url = self.start_urls[0] + \
                each_title.xpath("p/a/@href").extract()[0]

            # BUG FIX: the borrow counts used to be appended to a shared
            # list and pop()ed later in parse(). Scrapy handles responses
            # asynchronously and pop() is LIFO, so counts could be attached
            # to the wrong book. Carry them with the request instead.
            yield scrapy.Request(
                url=detail_url,
                method="GET",
                callback=self.parse,
                meta={"borrow_info": book_borrow_info},
            )

    # Book-detail parsing layer
    def parse(self, response):
        """Parse one book detail page and yield a single item dict."""
        dl_tag_list = response.xpath('//div[@id="item_detail"]/dl')

        # No detail table — nothing to yield
        if len(dl_tag_list) == 0:
            return

        # Borrow counts travel with the request (see parse_list). Fall back
        # to the legacy shared list for any externally-crafted requests.
        info = response.meta.get("borrow_info")
        if info is None:
            info = self._borrow_info.pop() if self._borrow_info else []

        # BUG FIX: build a local item instead of mutating the shared
        # self._data_od, which concurrently-parsed responses could clobber.
        item = OrderedDict()
        item['_id'] = ObjectId()

        # Guard against the regex having found fewer than two numbers
        item['bookCollections'] = info[0] if len(info) > 0 else ""
        item['bookSurplus'] = info[1] if len(info) > 1 else ""

        # The page data has no fixed order, so scan every <dl> row and pick
        # the keys we recognize.
        for each_dl in dl_tag_list:
            key = each_dl.xpath('string(dt)').extract()[0]
            value = each_dl.xpath('string(dd)').extract()[0]

            if "题名/责任者" in key:
                # "title/author" — guard against a value with no "/"
                title_writer_tag = value.split("/")
                item['bookName'] = title_writer_tag[0]
                item['bookAuthor'] = (
                    title_writer_tag[1] if len(title_writer_tag) > 1 else "")

            elif "出版发行项" in key:
                # Publication info
                item['bookPubInfo'] = value

            elif "提要文摘" in key:
                # Abstract / summary
                item['bookIntro'] = value

        # Data origin label (user-facing, kept byte-identical)
        item['bookOrigin'] = "广商图书馆"

        # Purchase-search links (JD, Tmall, Dangdang, Taobao, Amazon).
        # BUG FIX: this used to run inside the <dl> loop and raised KeyError
        # whenever the title row had not been seen yet; build it once here.
        book_name = item.get('bookName', "")
        item['bookBuyUrl'] = [
            "https://search.jd.com/Search?keyword=" + book_name +
            "&enc=utf-8&wq=" + book_name,
            "https://list.tmall.com/search_product.htm?q=" + book_name,
            "http://search.dangdang.com/?key=" + book_name,
            "https://s.taobao.com/search?q=" + book_name,
            "https://www.amazon.cn/s/field-keywords=" + book_name,
        ]

        # Echo back the original query and page
        item['searchName'] = self.search_text.replace("title=", "")
        item['searchPage'] = self.default_page.replace("&page=", "")

        # Keep the legacy attribute pointing at the latest item
        self._data_od = item

        yield item
