# encoding=utf-8
from spider.base import process
from constant.const import const
from lxml import etree
import re, json
from utils.string import decodeHtml2Chinese
from entity.entity import book_list, book, author, fragment
from utils.string import getUuid
from storages.database import database

# Sample bbiquge.net URLs used by the ad-hoc calls further down:
base_url1 = 'https://www.bbiquge.net/book/104535/'  # book index page (used with get_describe)
base_url2 = 'https://www.bbiquge.net/book/119611/index_16.html'  # one chapter-index page
base_url3 = 'https://www.bbiquge.net/book/98820/30884442.html'  # single chapter page (used with get_content22)


# Scrape a book's description page (intro, author, cover, state, ...).
def get_describe(base_url, header, sort, category):
    """Fetch a bbiquge book index page and build author/book_list entities.

    Returns the generated book_list id so chapters can be linked to it.
    """
    print(base_url)
    page = process(base_url, header)

    writer = author()
    writer.id = getUuid()
    writer.auther = page.xpath("//div[@id='info']/h1/small/a/text()")[0]
    writer.href = "https://www.bbiquge.net" + page.xpath("//div[@id='info']/h1/small/a/@href")[0]

    listing = book_list()
    listing.id = getUuid()
    listing.title = page.xpath("//div[@id='info']/h1/text()")[0]
    listing.intro = page.xpath("//div[@id='intro']/text()")[0]
    listing.auther = writer.id  # link to the author entity above
    listing.update_time = page.xpath("//div[@class='update']/text()[2]")[0]
    listing.url = base_url
    listing.image = page.xpath("//div[@class='img_in']/img/@src")[0]
    listing.sort = sort
    listing.popularity = page.xpath("//p[@class='booktag']/span[position()=1]/text()")[0]
    listing.state = page.xpath("//p[@class='booktag']/span[position()=2]/text()")[0]
    listing.category_book_id = category

    # cache().map(const.book_list_entity, b.id, str(b))
    # cache().map(const.author, a.id, str(a))
    print(listing)
    return listing.id


# How many index pages the book's chapter list is split across.
def get_page_num(base_url, header):
    """Return the total number of chapter-index pages for a book.

    Reads the last <option> of the page selector and extracts the trailing
    integer (the option text presumably contains the page count — TODO
    confirm against live markup).

    Raises:
        ValueError: if the option text contains no digits (previously this
            surfaced as an opaque AttributeError on ``None.group()``).
    """
    text = process(base_url, header)
    number = text.xpath("//select/option[last()]/text()")[0]
    # Raw string avoids the invalid-escape-sequence warning of "(\d+)".
    match = re.search(r"\d+", str(number))
    if match is None:
        raise ValueError("no page count found in option text: %r" % str(number))
    pages = int(match.group())  # compute once instead of twice
    print(pages)
    return pages


# Scrape the body of a single chapter page.
def get_content(base_url, header, page, id):
    """Fetch one chapter page and assemble a ``book`` entity from it.

    Args:
        base_url: full URL of the chapter page.
        header: HTTP headers passed through to ``process``.
        page: chapter-index page number this chapter came from.
        id: the owning book_list id.

    Returns:
        The assembled ``book`` entity (previously the function only printed
        it and returned None; returning it lets callers persist the result).
    """
    text = process(base_url, header)
    title = text.xpath("//*[@id='main']/h1/text()")[0]
    nodes = text.xpath("//div[@id='readbox']/div[@id='content']/*")
    arr = []
    k = 1
    for i, node in enumerate(nodes):  # iterate nodes directly, not range(len(...))
        # Keep even-indexed children after the first; the skipped ones are
        # presumably separator elements — TODO confirm against page markup.
        if i % 2 == 0 and i != 0:
            content = etree.tostring(node, method='html')  # serialize back to HTML
            chinese = decodeHtml2Chinese(str(content, encoding="utf-8"))
            frag = fragment()
            frag.id = k
            frag.content = chinese
            arr.append(frag)
            k += 1
    b = book()
    b.id = getUuid()
    b.title = title
    b.content = str(arr)
    b.book_list_id = id
    b.sort = page
    b.url = base_url
    print(b)
    return b


# Walk one chapter-index page and fetch every chapter it links to.
def get_page_info(base_url, url, header, page, id):
    """Scrape the chapter list at ``url`` and fetch each linked chapter.

    Args:
        base_url: site/book prefix prepended to each relative chapter href.
        url: the chapter-index page to scrape.
        header: HTTP headers passed through to ``process``.
        page: index-page number, forwarded to ``get_content``.
        id: owning book_list id, forwarded to ``get_content``.
    """
    text = process(url, header)
    entries = text.xpath("//div[@class='zjbox']/dl/dd")
    for entry in entries:  # iterate nodes directly instead of range(len(...))
        href = base_url + entry.xpath("./a/@href")[0]
        # print(href)
        get_content(href, header, page, id)


def find_one_book(url, header, sort, fenlei):
    """Scrape an entire book: metadata first, then every chapter page.

    Args:
        url: the book's index page URL (must end with '/').
        header: HTTP headers used for every request.
        sort: sort value stored on the book_list entity.
        fenlei: category id stored on the book_list entity.
    """
    book_list_id = get_describe(url, header, sort, fenlei)  # avoid shadowing builtin `id`
    print(book_list_id)
    # BUG FIX: previously passed const.header here, silently ignoring the
    # caller-supplied `header` for this one request.
    num = get_page_num(url, header)
    print(num)
    for i in range(num):
        page = str(i + 1)
        index_url = url + "index_{}.html".format(page)
        get_page_info(url, index_url, header, page, book_list_id)


# find_one_book(base_url1, const.header, 1, const.fenlei + str(1))
# get_describe(base_url1, const.header, 1, const.fenlei + str(1))

# get_page_info(base_url2, const.header)

def get_content22(base_url, header, page, num, id):
    """Scrape one chapter page and insert it as a row into ``<db>.book.ccc``.

    Like ``get_content`` but serializes fragments as JSON and writes the
    chapter straight to the database.

    Args:
        base_url: full URL of the chapter page.
        header: HTTP headers passed through to ``process``.
        page: chapter-index page number the chapter belongs to.
        num: chapter ordinal within the page.
        id: owning book_list id.
    """
    d = database()
    text = process(base_url, header)
    title = text.xpath("//*[@id='main']/h1/text()")[0]
    nodes = text.xpath("//div[@id='readbox']/div[@id='content']/*")
    arr = []
    k = 1
    for i, node in enumerate(nodes):
        # Keep even-indexed children after the first; the skipped ones are
        # presumably separator elements — TODO confirm against page markup.
        if i % 2 == 0 and i != 0:
            content = etree.tostring(node, method='html')  # serialize back to HTML
            chinese = decodeHtml2Chinese(str(content, encoding="utf-8"))
            chinese = chinese.replace("\'", "")  # drop quotes so the SQL literal below stays valid
            frag = fragment()
            frag.id = k
            frag.content = chinese
            frag.flag = 0
            arr.append(frag)
            k += 1
    b = book()
    b.id = getUuid()
    b.title = title
    # assumes `fragment` is JSON-serializable — TODO confirm entity definition
    b.content = json.dumps(arr, ensure_ascii=False)
    b.book_list_id = id
    b.page = page
    b.num = num
    b.url = base_url
    # cache().map(const.chapter, b.id, str(b))
    # NOTE(security): SQL built by string concatenation from scraped text is
    # injection-prone; switch to a parameterized d.execute if the database
    # wrapper supports placeholders.
    # BUG FIX: only `content` had quotes stripped — a `'` in the title broke
    # the statement. Escape it per SQL convention by doubling.
    safe_title = b.title.replace("'", "''")
    try:
        one_book = ("('" + b.id + "','" + safe_title + "','" + b.content + "','"
                    + b.book_list_id + "','" + b.url + "',"
                    + str(b.page) + "," + str(b.num) + ")")
        # print(base_url)
    except Exception as e:
        print(e.args)
        # BUG FIX: previously fell through and executed
        # "insert into ... values " + "" — a guaranteed SQL syntax error.
        return
    d.execute("insert into " + d.database + ".book.ccc values " + one_book)


# NOTE(review): runs at import time — scrapes one chapter and inserts it into
# the database as a smoke test; consider guarding with `if __name__ == "__main__":`.
get_content22(base_url3, const.header, 1, 1, getUuid())
