# -*- coding: UTF-8 -*-
"""
Created on 2017年12月6日
@author: Leo
"""

# Standard library
import re
import json
from bson import ObjectId
from collections import OrderedDict

# Third-party libraries
import scrapy

'''
Book list URL:   http://opac.gzlib.gov.cn/opac/search?q=javascript&rows=20
Book detail URL: http://opac.gzlib.gov.cn/opac/book/3002426103 (?index=1 is optional)
'''


# Spider for the Guangzhou Library (广州图书馆) OPAC site.
class GuangzhouLibrary(scrapy.Spider):
    """Crawl book data from the Guangzhou Library OPAC.

    Request flow: search list page -> per-book holdings JSON
    (``book/holdingPreviews``) -> book detail page, yielding one
    OrderedDict item per book.
    """

    name = "GZ_Lib"

    def __init__(self, text, page, **kwargs):
        """
        :param text: search keyword, placed into the ``q=`` query parameter
        :param page: result page number; falls back to 1 when ``None`` or ``""``
        """
        super().__init__(**kwargs)

        # Site root; every request URL is built from this.
        self.start_urls = ['http://opac.gzlib.gov.cn/opac/']

        # URL connectors: [0] starts the query string, [1] joins parameters.
        self._connect_sign = ['?', '&']

        # OPAC endpoint name.
        self._function_name = "search"

        # Search query parameter.
        self._search_text = "q=" + text

        # Results per page (hard-coded for now).
        self._rows = "rows=10"

        # Page number.
        # BUG FIX: the original condition used `or`, which is always True
        # (`page is not None or page != ""`), so page=None produced the
        # literal "page=None". `and` expresses the intended check.
        if page is not None and page != "":
            self._default_page = "page=" + str(page)
        else:
            self._default_page = "page=1"

        # Holdings info keyed by bookrecno: {recno: (loanable, total copies)}.
        self._borrow_info = {}

        # Kept for backward compatibility (points at the most recent item);
        # parse() now builds a fresh OrderedDict per response — see below.
        self._data_od = OrderedDict()

    # Request layer
    def start_requests(self):
        """Issue the initial search-list request (search?q=<text>&rows=10)."""
        req_url = self.start_urls[0] + \
                  self._function_name + \
                  self._connect_sign[0] + \
                  self._search_text + \
                  self._connect_sign[1] + \
                  self._rows
        yield scrapy.Request(url=req_url, method="GET", callback=self.parse_list)

    # List-page parsing layer
    def parse_list(self, response):
        """For every bookrecno on the result page, request its holdings JSON."""
        for recno in response.xpath('//div[@class="bookmeta"]/@bookrecno').extract():
            # Build the holdings (borrow status) JSON request for this book.
            borrow_info_json_url = "{0}book/holdingPreviews{1}bookrecnos={2}{3}return_fmt=json".format(
                self.start_urls[0], self._connect_sign[0], recno, self._connect_sign[1])

            yield scrapy.Request(url=borrow_info_json_url,
                                 method="GET",
                                 callback=self.parse_borrow_info)

    # Holdings-JSON parsing layer
    def parse_borrow_info(self, response):
        """Sum loanable/total copies across branches, then request the detail page."""
        # Book record number: the first digit run in the URL (no other digits
        # appear in these OPAC URLs).
        bookrecno = str(re.findall(r'\d+', response.url)[0])

        # Holdings JSON. `response.text` handles byte decoding for us
        # (equivalent to the original bytes.decode(response.body)).
        borrow_result = json.loads(response.text)

        # Detail page URL.
        detail_url = self.start_urls[0] + "book/" + bookrecno

        loanable_count = 0
        copy_count = 0
        # When 'previews' is empty the counts stay (0, 0), matching the
        # original else-branch.
        if borrow_result['previews']:
            for library in borrow_result['previews'][bookrecno]:
                # Copies currently available to borrow.
                loanable_count += library['loanableCount']
                # Total copies held by this branch.
                copy_count += library['copycount']
        self._borrow_info[bookrecno] = (loanable_count, copy_count)

        yield scrapy.Request(url=detail_url, method="GET", callback=self.parse)

    # Detail-page parsing layer
    def parse(self, response):
        """Parse the book-info table on the detail page and yield one item."""
        tds = response.xpath('//table[@id="bookInfoTable"]/tr/td[@class="leftTD"]')

        # Holdings tuple (loanable, total) stored earlier, keyed by the recno
        # extracted from this detail URL.
        info = self._borrow_info[str(re.findall(r'\d+', response.url)[0])]

        # BUG FIX: the original reused one instance-level OrderedDict for every
        # response; with Scrapy's concurrent callbacks the same object was
        # mutated and yielded repeatedly, corrupting items. Build a fresh dict
        # per response instead.
        item = OrderedDict()
        item['_id'] = ObjectId()
        # Copies currently loanable.
        item['bookSurplus'] = info[0]
        # Total copies held.
        item['bookCollections'] = info[1]

        for td in tds:
            # The table is key/value styled: leftTD holds the label,
            # the sibling rightTD holds the value.
            try:
                key = td.xpath('string(div)').extract()[0].strip()
            except IndexError:
                continue

            if "题名/责任者" in key:  # title / author row
                title_author = re.sub(r'\s+', '', td.xpath('string(../td[@class="rightTD"])').extract()[0].strip())
                title_author = str(title_author).split("/")
                item['bookName'] = title_author[0].replace("[专著]", "")
                item['bookAuthor'] = title_author[1]

            elif "出版发行" in key:  # publisher / imprint row
                publish = td.xpath('string(../td[@class="rightTD"])').extract()[0]
                publish = re.sub(r'\s+', '', str(publish).replace(u'\xa0', u'').replace("出版日期", "")).split("：")
                location = publish[1].replace("出版社", "")
                publish_year = ",".join(publish[2:])
                item['bookPubInfo'] = location + ":" + publish_year

            elif "摘要" in key:  # abstract row
                item['bookIntro'] = td.xpath('string(../td[@class="rightTD"])').extract()[0].strip()

        # Data source (was redundantly reassigned on every loop iteration).
        item['bookOrigin'] = "广州图书馆"

        # Purchase-search links (JD, Tmall, Dangdang, Taobao, Amazon).
        # ROBUSTNESS FIX: .get() avoids a KeyError when the detail page had no
        # title row (the original crashed on item['bookName']).
        book_name = item.get('bookName', '')
        item['bookBuyUrl'] = [
            "https://search.jd.com/Search?keyword=" + book_name + "&enc=utf-8&wq=" + book_name,
            "https://list.tmall.com/search_product.htm?q=" + book_name,
            "http://search.dangdang.com/?key=" + book_name,
            "https://s.taobao.com/search?q=" + book_name,
            "https://www.amazon.cn/s/field-keywords=" + book_name,
        ]

        # Search keyword (strip the "q=" prefix added in __init__).
        item['searchName'] = self._search_text.replace("q=", "")

        # Search page number (strip the "page=" prefix).
        item['searchPage'] = self._default_page.replace("page=", "")

        # Keep the legacy attribute pointing at the latest item.
        self._data_od = item

        yield item
