# coding:utf-8
import os.path
import re

import requests
from flask import request
from app.libs.redprint import Redprint
from app.models.book import Book
from app.models.base import db
from sqlalchemy import or_, and_, not_
from urllib import parse
from bs4 import BeautifulSoup

api = Redprint('spider')

# Shared request headers: a desktop-browser User-Agent so Douban does not
# reject the crawler's requests as an obvious bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
}


@api.route('')
def spider():
    """Landing endpoint of the spider blueprint; crawls nothing itself.

    Example Douban URLs handled by the sibling endpoints:
      search : https://search.douban.com/book/subject_search?search_text=...&cat=1001
      latest : https://book.douban.com/latest?subcat=...&p=1
      detail : https://book.douban.com/subject/25898626/
    """
    return 'get book'


@api.route('/search')
def search_book():
    """Search Douban books by keyword and crawl the result page.

    Reads ``keyword`` from the query string, builds the Douban
    subject-search URL and hands it to get_search_html().

    :return: the literal string 'search book' (Flask response body).
    """
    # https://search.douban.com/book/subject_search?search_text={}&cat=1001
    keyword = request.args.get('keyword')
    if not keyword:
        # Nothing to search for; the original crashed on a missing keyword.
        return 'search book'
    # BUG FIX: URL-encode the keyword before embedding it in the query
    # string (the original interpolated it raw; cf. tag_book which quotes).
    url = 'https://search.douban.com/book/subject_search?search_text={}&cat=1001'.format(
        parse.quote(keyword))
    print(url)
    get_search_html(url)
    return 'search book'


@api.route('/tag')
def tag_book():
    """Crawl the Douban 'latest books by tag' listing page.

    Reads ``tag`` from the query string, fetches
    https://book.douban.com/latest?tag=<tag> and feeds every book's
    douban id into spider_book().

    :return: the literal string 'tag book' (Flask response body).
    """
    # https://book.douban.com/latest?tag=%E5%B0%8F%E8%AF%B4
    tag = request.args.get('tag')
    if not tag:
        # Nothing to crawl; the original crashed on a missing tag.
        return 'tag book'
    url = 'https://book.douban.com/latest?tag={}'.format(parse.quote(tag))
    print(url)
    # Fetch the listing page.
    html = requests.get(url, headers=headers)
    if html.status_code == 200:
        print('正在解析页面...')
        soup = BeautifulSoup(html.content, 'lxml')
        books = soup.select('.chart-dashed-list .media')

        for item in books:
            book_link = item.select('.media__img a')[0]
            book_href = book_link.attrs['href']
            # Detail links look like https://book.douban.com/subject/<id>/
            # -- strip the trailing slash so the id is the last segment.
            # BUG FIX: the original fallback tested the listing *url*
            # instead of the book href and assigned the empty final
            # segment as the id, so it could never work.
            douban_id = book_href.rstrip('/').split('/')[-1]
            # Only spider entries that yielded a non-empty id.
            if douban_id:
                spider_book(douban_id)
    else:
        print('页面{}Error'.format(url))
    return 'tag book'


# @api.route('/book')
def spider_book(douban_id):
    """Crawl one Douban book detail page and persist it as a Book row.

    :param douban_id: Douban subject id as a string, e.g. '25898626'.
    :return: True when a new book was saved; False when the id is empty,
             the book was already crawled, or the page fetch failed.
    """
    # https://book.douban.com/subject/25898626/
    if not douban_id:
        return False

    # Skip subjects that were already crawled.
    book_orm = Book.query.filter_by(douban_id=douban_id).first()
    if book_orm:
        # BUG FIX: the original returned the truthy string 'False' here.
        return False

    douban_url = 'https://book.douban.com/subject/{}/'.format(douban_id)
    html = requests.get(douban_url, headers=headers)
    if html.status_code != 200:
        return False

    soup = BeautifulSoup(html.content, 'lxml')

    # Cover: the #mainpic anchor's href points at the full-size image.
    # BUG FIX: guard against pages without a cover -- the original
    # indexed select_one(...)['href'] directly and raised on None.
    cover_link = soup.select_one('#mainpic .nbg')
    cover_pic = cover_link['href'] if cover_link else ''
    cover_url = download(douban_id, cover_pic) if cover_pic else ''

    # Flatten the #info block onto one line so the field regexes can
    # match across what were originally multiple lines.
    books_info = str(soup.select('#info'))
    html_text = books_info.replace('\n', '')

    # Book title.
    title = soup.select_one('#wrapper h1 span').text.strip().replace(' ', '')

    # Author and publisher use anchor markup, so they keep bespoke patterns.
    author = re.search('作者.*>(.*?)</a></span>', html_text)
    author = author.group(1) if author else ''
    publisher = re.search('出版社:</span><a href="(.*?)">(.*?)</a><br/>', html_text)
    publisher = publisher.group(2) if publisher else ''

    # The remaining plain-text fields all share the
    # '<span class="pl">label</span>value<br/>' shape; one helper extracts
    # them (captures are stripped of surrounding whitespace).
    pubdate = _info_field(html_text, '出版年')            # publication date
    pages = _info_field(html_text, '页数', default=0)     # page count
    price = _info_field(html_text, '定价', default=0)     # list price
    binding = _info_field(html_text, '装帧', default='平装')  # binding type
    isbn = _info_field(html_text, 'ISBN')

    # Table of contents (may be absent); drop the trailing expand link.
    dir_selector = '#dir_' + douban_id + '_full'
    summary = str(soup.select(dir_selector)) if soup.select(dir_selector) else ''
    summary = re.sub("· · · · · ·.*</a>[)]", '', summary)

    # Book introduction; prefer the expanded '.all' variant when present.
    catalogue = soup.select('#link-report .intro')
    if soup.select('#link-report .all'):
        catalogue = soup.select('#link-report .all .intro')
    catalogue = str(catalogue) if catalogue else ''

    # Douban rating and number of raters (0 when the page shows none).
    douban_rating = soup.select_one('.rating_num')
    douban_rating = douban_rating.text.strip().replace(' ', '') if douban_rating else 0
    douban_count = soup.select_one('.rating_people span')
    douban_count = douban_count.text.strip().replace(' ', '') if douban_count else 0

    book_items = Book()
    book_items.douban_id = douban_id
    book_items.douban_url = douban_url
    book_items.douban_rating = douban_rating
    book_items.douban_count = douban_count
    book_items.title = title
    book_items.cover = cover_url
    book_items.author = author
    book_items.publisher = publisher
    # BUG FIX: the original ended the next two assignments with trailing
    # commas, storing 1-element tuples instead of the scalar values.
    book_items.pubdate = pubdate
    book_items.pages = pages
    book_items.price = price
    book_items.binding = binding
    book_items.isbn = isbn
    book_items.status = 1
    book_items.summary = summary
    book_items.catalogue = catalogue
    db.session.add(book_items)
    db.session.commit()
    return True


def _info_field(html_text, label, default=''):
    """Extract one '<span class="pl">label</span>value<br/>' field from the
    flattened #info HTML; return *default* when the field is missing."""
    match = re.search(
        '<span class="pl">.*?{}.*?</span>(.*?)<br/>'.format(label), html_text)
    return match.group(1).strip() if match else default


def get_search_html(url):
    """Fetch a Douban search-results page and parse it when the fetch succeeds.

    :param url: full subject-search URL to request.
    """
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        print('页面{}Error'.format(url))
        return
    print('正在解析搜索结果...')
    parse_search_html(response)


def get_book_html(url):
    """Fetch a book-listing page and parse it when the fetch succeeds.

    :param url: full listing-page URL to request.
    """
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        print('页面{}Error'.format(url))
        return
    print('正在解析页面...')
    print(response)
    parse_html(response)


def parse_search_html(html):
    """Parse a Douban search-results page; currently only logs what it finds.

    NOTE(review): Douban appears to render search results client-side, so
    '.item-root' is often empty in the raw HTML -- presumably why the
    per-item extraction was never finished here.

    :param html: a requests.Response for the search-results page.
    """
    soup = BeautifulSoup(html.content, 'lxml')
    print(soup)
    result_nodes = soup.select('.item-root')
    print(result_nodes)


def parse_html(html):
    """Parse a book-listing page and log title / douban id / link per entry.

    :param html: a requests.Response for the listing page.
    """
    soup = BeautifulSoup(html.content, 'lxml')
    for anchor in soup.select('.media__body h2 .fleft'):
        name = anchor.text.strip().replace(' ', '')
        link = anchor['href']
        # Links end with a trailing slash, so the id is the
        # second-to-last path segment.
        subject_id = link.split('/')[-2]
        print(link.split('/'))
        print(name)
        print(subject_id)
        print(link)


def download(title, url):
    """Download a book cover image into static/images/books.

    :param title: base name for the saved file (callers pass the douban id).
    :param url: absolute image URL, e.g.
        https://img9.doubanio.com/view/subject/l/public/s34300626.jpg
    :return: web path of the saved image ('/static/images/books/<title>.jpg'),
             or '' when the download failed.
    """
    file_path = os.path.abspath('static/images/books')
    print(file_path)
    # BUG FIX: the original only called makedirs when the directory already
    # existed (inverted check), and tested the absolute path
    # '/static/images/books' while writing to the relative one.
    os.makedirs(file_path, exist_ok=True)
    html = requests.get(url, headers=headers)
    # BUG FIX: do not write an error page's body as a .jpg file.
    if html.status_code != 200:
        return ''
    with open(os.path.join(file_path, '{}.jpg'.format(title)), 'wb') as f:
        f.write(html.content)
    return '/static/images/books' + "/{}.jpg".format(title)
