#!/usr/bin/env python2.7
# _*_ coding:UTF-8 _*_
"""
__author__ = 'wangshaowei'
"""

import hashlib
import os
import pickle

import requests
from bs4 import BeautifulSoup

latest_books_page_url = "http://www.ituring.com.cn/book?tab=book&sort=new&page={}"  # URL template for the paginated "latest books" listing page
book_info_url = "http://www.ituring.com.cn/book/{}"  # URL template for a single book's detail page


class BookInfoModel(object):
    """Scraped details of a single book.

    Attributes are filled in piecemeal by the crawler; all start as None.
    """

    def __init__(self):
        """Constructor for BookInfoModel"""
        # Identity of the book.
        self.title = None
        self.book_id = None
        self.book_url = None
        # Prices scraped from the detail page (floats, or None when absent).
        self.ebook_price = None
        self.zzs_price = None

    def price(self):
        """Return the e-book price if set, else the paper-book price, else 0."""
        for candidate in (self.ebook_price, self.zzs_price):
            if candidate:
                return candidate
        return 0


def get_pickle_path():
    """Return the absolute path of the pickle file that caches book models.

    :return: str, path to "_books.pickle" in this script's directory
    """
    here = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(here, "_books.pickle")


def get_html_path():
    """Return the absolute path of the directory used to cache downloaded HTML.

    :return: str, path to "_html_cache" in this script's directory
    """
    script_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(script_dir, "_html_cache")


def cache_html(url, content):
    """Store raw HTML in the local cache, keyed by md5 of the URL.

    Creates the cache directory on first use.
    :param url: the URL the HTML was fetched from
    :param content: raw response body to write
    :return:
    """
    cache_dir = get_html_path()
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)

    digest = hashlib.md5(url).hexdigest()
    target = os.path.join(cache_dir, "{}.html".format(digest))
    with open(target, "wb") as out_file:
        out_file.write(content)


def request_html(url):
    """Fetch the HTML for a URL, preferring the local disk cache.

    On a cache miss the page is downloaded, written to the cache, then returned.
    :param url: URL to fetch
    :return: raw HTML content, or None when the server does not answer 200
    """
    cached = get_html_cache(url)
    if cached:
        return cached

    response = requests.get(url)
    if response.status_code != 200:
        return None
    cache_html(url, response.content)
    return response.content


def get_html_cache(url):
    """Look up the locally cached HTML for a URL.

    :param url: URL whose cached page is wanted
    :return: cached HTML content, or None when no cache file exists
    """
    cache_name = "{}.html".format(hashlib.md5(url).hexdigest())
    cache_file = os.path.join(get_html_path(), cache_name)
    if os.path.isfile(cache_file):
        with open(cache_file, "rb") as in_file:
            return in_file.read()
    return None


def get_books(page_url):
    """Scrape the basic info of every book on one listing page.

    :param page_url: URL of a book-list page
    :return: list of (book_id, book_title) tuples, or None when the page
             could not be fetched
    """
    html = request_html(page_url)
    if not html:
        return None

    soup = BeautifulSoup(html, "html5lib")
    books = []
    for cover_div in soup.find_all("div", class_="book-img"):
        anchor = cover_div.a
        # The trailing path segment of the link is the book id.
        books.append((anchor["href"].split("/")[-1], anchor["title"]))
    return books


def get_book_info(book_id):
    """Fetch and parse the detail page for one book.

    :param book_id: book id used in the detail-page URL
    :return: BookInfoModel with prices filled in, or None when the page
             cannot be fetched or has no price section
    """
    # No `global` statement needed: the module-level template is only read,
    # never rebound.
    url = book_info_url.format(book_id)
    file_content = request_html(url)
    if not file_content:
        return None

    soup = BeautifulSoup(file_content, "html5lib")
    div_approaches = soup.find("div", class_="book-approaches")
    if div_approaches is None:
        # Page layout changed or the book has no purchase options; fail soft
        # like the fetch-failure path instead of raising AttributeError.
        return None

    model = BookInfoModel()
    model.book_id = book_id
    model.book_url = url
    # Each plain <dl> holds one purchase option: <dt>type</dt><dd><span>price</span></dd>.
    for tmp_dl in div_approaches.find_all("dl", class_=False):
        type_text = tmp_dl.dt.text.strip()
        price_text = tmp_dl.dd.span.text.strip()
        price = float(price_text[1:])  # drop the leading currency symbol

        if type_text == u"电子书":  # "e-book"
            model.ebook_price = price
        elif type_text == u"纸质书":  # "paper book"
            model.zzs_price = price

    return model


def parse_book_infos(book_info_list):
    """Fetch the detail info of every book in book_info_list.

    Books whose detail page cannot be fetched or parsed are skipped instead of
    aborting the whole run with an AttributeError on None.
    :param book_info_list: iterable of (book_id, book_title) tuples
    :return: list of BookInfoModel, sorted by ascending price
    """
    model_list = []
    for index, (book_id, book_title) in enumerate(book_info_list):
        # Parenthesized single-argument print works under Python 2 and 3;
        # output matches the original space-separated form.
        print("{} .request book info: {}".format(index, book_title))
        model = get_book_info(book_id)
        if model is None:
            continue  # fetch/parse failed -- skip this book
        model.title = book_title
        model_list.append(model)
    return sorted(model_list, key=lambda m: m.price())


def all_latest_book():
    """Crawl every "latest books" listing page, fetch each book's details,
    and pickle the resulting model list to disk.

    Crawling stops at the first listing page that yields no books.
    :return:
    """
    # No `global` statement needed: the URL template is only read, never rebound.
    book_info_list = []
    # range() behaves the same as the old xrange() here and also runs on
    # Python 3; 100 pages is a generous upper bound before the empty-page stop.
    for page_num in range(100):
        print("page: {}".format(page_num))
        tmp_url = latest_books_page_url.format(page_num)
        tmp_list = get_books(tmp_url)
        if not tmp_list:
            break  # an empty page means we ran past the last one
        book_info_list.extend(tmp_list)

    # Fetch details and sort by price.
    model_list = parse_book_infos(book_info_list)

    with open(get_pickle_path(), "wb") as fp:
        pickle.dump(model_list, fp)


def load_pickle_data():
    """Load the pickled book models and print the priced ones, cheapest first.

    :return:
    """
    with open(get_pickle_path(), "rb") as fp:
        model_list = pickle.load(fp)

    # Re-sort defensively in case the cached list predates the sort-on-save step.
    model_list = sorted(model_list, key=lambda m: m.price())

    for index, model in enumerate(model_list):
        if model.price() == 0:
            continue  # no price information was scraped for this book
        # Parenthesized single-argument print works under Python 2 and 3;
        # output matches the original space-separated form.
        print("{} {} {} {} {}".format(
            index, model.ebook_price, model.zzs_price, model.title, model.book_url))


def main():
    """Entry point: print the locally cached book list."""
    load_pickle_data()
    # all_latest_book()  # swap in to re-crawl and rebuild the local cache


if __name__ == '__main__':
    main()
