# encoding=utf-8
from spider.base import category, process
from constant.const import const
from storages.cache import cache
from lxml import etree
import re, json, time, random
from utils.string import decodeHtml2Chinese
from entity.entity import book_list, book, author, fragment
from utils.string import getUuid
# concurrency helpers (thread pool / process pool)
from concurrent.futures import ThreadPoolExecutor as tpe
from multiprocessing import Pool as pool
from storages.database import database
import asyncio
from lock.lua_lock import lock, unlock
from log.log import log


# Probe a page: the site serves a dedicated "出现错误！" (error) page for
# missing books.
def error(url):
    """Return True when *url* serves a usable page, False when it is the
    site's error page.

    Fix: the original implicitly returned None when the probed text node
    existed but was NOT the error marker — that is a normal page, yet
    callers test ``error(s) is True`` and would skip it.  Such pages now
    correctly return True.
    """
    text = process(url, const.header)
    xpath = text.xpath("/html/body/div[1]/div/div/div[1]/text()")
    if not xpath:
        # Nothing at that position at all -> not the error page.
        return True
    # The error page carries const.error as this first text node.
    return xpath[0] != const.error


# Fetch a book's author, synopsis and other metadata, persist them,
# and return the new book row's id.
def get_describe(base_url, header, sort, category):
    """Scrape the description page at *base_url*, insert the author row
    (if not already present, deduplicated by author name) and the book
    row, and return the generated book id (``b.id``).

    NOTE(review): all SQL below is built by string concatenation from
    scraped text — fragile quoting and open to SQL injection; verify
    whether database().execute supports parameterized queries.
    """
    b = book_list()
    a = author()
    # print(base_url)
    d = database()
    text = process(base_url, header)
    a.id = getUuid()
    a.auther = text.xpath("//div[@id='info']/h1/small/a/text()")[0]
    a.href = "https://www.bbiquge.net" + text.xpath("//div[@id='info']/h1/small/a/@href")[0]

    b.id = getUuid()
    b.title = text.xpath("//div[@id='info']/h1/text()")[0]
    b.intro = text.xpath("//div[@id='intro']/text()")[0]
    # Book references the author by id, not by name.
    b.auther = a.id
    b.update_time = text.xpath("//div[@class='update']/text()[2]")[0]
    b.url = base_url
    b.image = text.xpath("//div[@class='img_in']/img/@src")[0]
    b.sort = sort
    b.popularity = text.xpath("//p[@class='booktag']/span[position()=1]/text()")[0]
    b.state = text.xpath("//p[@class='booktag']/span[position()=2]/text()")[0]
    b.category_book_id = category
    b.is_delete = str(0)
    # cache().map(const.book_list_entity, b.id, str(b))
    # cache().map(const.author, a.id, str(a))
    # Insert the author only if this name is not in the table yet.
    if d.execute("select count(*) from " + d.database + ".book.author where author_name = \'" + a.auther + "\'")[0][
        0] == 0:
        auther = '(\'' + a.id + '\',\'' + a.auther + '\',\'' + a.href + '\')'
        d.execute("insert into " + d.database + ".book." + const.author + " values " + auther)
    else:
        # Reuse the existing author's id for the book's foreign key.
        # NOTE(review): this query hard-codes the "self" schema while the
        # branch above uses d.database — presumably they should match; verify.
        b.auther = d.execute("select id from self.book.author where author_name = \'" + a.auther + "\'")[0][0]
        # d.execute("update " + d.database + ".book." + const.author + " set id = " + a.id + " where author_name = \'" + a.auther + "\'")
    s = str(b.sort)
    str1 = str(b.is_delete)
    create_time = str(int(time.time()))
    # Hand-built VALUES tuple matching the column list in the INSERT below.
    book = '(\'' + b.id + '\',\'' + b.title + '\',\'' + b.intro + '\',\'' + b.auther + '\',\'' + b.update_time + '\',\'' + b.url + '\',\'' + b.image + '\',' + s + ',\'' + b.popularity + '\',\'' + b.state + '\',\'' + b.category_book_id + '\',' + str1 + ',' + create_time + ')'
    d.execute(
        "insert into " + d.database + ".book." + const.book + " (id, title, intro, auther, update_time, url, image, sort, popularity, state, category_book_id, is_delete, create_time) values " + book)
    return b.id


# How many index pages a book's chapter list is split across.
def get_page_num(base_url, header):
    """Return the number of chapter-index pages for the book at
    *base_url*; 1 when the page has no pagination ``<select>``.

    Fixes: the xpath expression was evaluated twice (hoisted into
    ``options``), and the regex is now a raw string.
    """
    text = process(base_url, header)
    options = text.xpath("//select/option[last()]/text()")
    if not options:
        return 1
    # The last <option> text contains the page count as a digit run.
    match = re.search(r"\d+", str(options[0]))
    # print(int(match.group()))
    return int(match.group())


# Scrape the content of one chapter and insert it into the chapter table.
def get_content(base_url, header, page, num, id):
    """Fetch the chapter page at *base_url*, split the body into
    fragments, and insert one row into the chapter table.

    page/num locate the chapter (index page number / position on that
    page); *id* is the parent book-list id.

    Fix: the original executed the INSERT even when building the VALUES
    tuple raised, sending ``... values`` followed by an empty string to
    the database; the INSERT is now skipped in that case.
    """
    d = database()
    text = process(base_url, header)
    title = text.xpath("//*[@id='main']/h1/text()")[0]
    nodes = text.xpath("//div[@id='readbox']/div[@id='content']/*")
    fragments = []
    k = 1
    for i, node in enumerate(nodes):
        # Keep every second child, skipping the first — the content div
        # alternates real paragraphs with filler nodes.
        if i % 2 == 0 and i != 0:
            content = etree.tostring(node, method = 'html')  # serialize element to HTML
            chinese = decodeHtml2Chinese(str(content, encoding = "utf-8"))
            # Strip single quotes so the hand-built SQL below stays valid.
            chinese = chinese.replace("\'", "")
            frag = fragment()
            frag.id = k
            frag.content = chinese
            frag.flag = 0
            fragments.append(frag)
            k += 1
    b = book()
    b.id = getUuid()
    b.title = title
    # NOTE(review): assumes fragment instances are JSON-serializable
    # as-is (e.g. dict subclass) — verify against entity.fragment.
    b.content = json.dumps(fragments, ensure_ascii = False)
    b.book_list_id = id
    b.page = page
    b.num = num
    b.url = base_url
    b.is_delete = 0.00
    b.remark_num = 0
    # cache().map(const.chapter, b.id, str(b))
    one_book = ""
    try:
        s = str(b.page)
        str1 = str(b.num)
        str2 = str(b.is_delete)
        str3 = str(b.remark_num)
        # Random chapter price between 1 and 10.
        price = str(random.randint(1, 10))
        create_time = str(int(time.time()))
        one_book = "(\'" + b.id + "\',\'" + b.title + "\',\'" + b.content + "\',\'" + b.book_list_id + "\',\'" + b.url + "\'," + s + "," + str1 + "," + str2 + "," + str3 + "," + price + "," + create_time + ")"
        # print(base_url)
    except Exception as e:
        print(e.args)
    # Only insert when the VALUES tuple was built successfully.
    if one_book:
        d.execute("insert into " + d.database + ".book." + const.chapter + " values " + one_book)


# Scrape every chapter listed on one chapter-index page of a book.
def get_page_info(base_url, url, header, page, id):
    """Fetch the chapter list at *url* and scrape each linked chapter
    concurrently; *base_url* is the site prefix for the relative hrefs,
    *page* the index-page number, *id* the parent book-list id."""
    listing = process(url, header)
    entries = listing.xpath("//div[@class='zjbox']/dl/dd")
    with tpe(max_workers = 8) as executor:
        for position, entry in enumerate(entries, start = 1):
            chapter_url = base_url + entry.xpath("./a/@href")[0]
            executor.submit(get_content, chapter_url, header, page, str(position), id)


# Scrape one complete book: metadata first, then every chapter-index page.
def find_one_book(url, header, sort, fenlei):
    """Persist the book at *url* (batch *sort*, category *fenlei*) and
    all of its chapters, printing how long the chapter crawl took."""
    book_id = get_describe(url, header, sort, fenlei)
    pages = get_page_num(url, const.header)
    started = time.time()
    with tpe(max_workers = 8) as executor:
        for page_no in range(1, pages + 1):
            index_url = url + "index_{}.html".format(str(page_no))
            executor.submit(get_page_info, url, index_url, header, str(page_no), book_id)
    print(time.time() - started)


# def make_book(d, c, get, k):
#     for (i, m) in get:
#         s = str(i, encoding = "utf-8")
#         print(s)
#         if d.execute("select count(id) from self.book.book where url = '" + s + "'")[0][0] == 1:
#             continue
#         if error(s) is True:
#             url = str(i, encoding = "utf-8")
#             sort = str(c.get('k', None, const.string), encoding = 'utf-8')
#             fenlei = str(m, encoding = "utf-8")
#             find_one_book(url, const.header, sort, fenlei)
#             c.incr("k")


def make_book(d, c, get, k):
    """Endlessly pick random candidate books from the cached map *get*
    and scrape any that are not yet in the database.

    d   -- database handle
    c   -- cache handle (the live batch counter is cache key 'k')
    get -- mapping of book url -> category (both redis bytes)
    k   -- starting batch counter (not read directly here)
    """
    while True:
        # Pick a random candidate url; keys/values are bytes from redis.
        key = random.choice(list(get.keys()))
        s = str(key, encoding = "utf-8")
        m = str(get[key], encoding = "utf-8")
        print(s)
        # Skip books that were already persisted.
        if d.execute("select count(id) from self.book.book where url = '" + s + "'")[0][0] == 1:
            continue
        if error(s) is True:
            # Append a uuid so we never delete someone else's lock.
            # NOTE(review): the uuid is part of the lock *name*, so two
            # workers get different keys for the same url and never
            # actually exclude each other; presumably the uuid should be
            # the lock's value, not part of its key — verify against
            # lock()/unlock() semantics.
            _lock = str(getUuid()) + "-" + s
            try:
                a = lock(_lock)
                if a == 1:
                    url = s
                    # Current batch counter, stored in cache under 'k'.
                    sort = str(c.get('k', None, const.string), encoding = 'utf-8')
                    fenlei = m
                    find_one_book(url, const.header, sort, fenlei)
                    c.incr("k")
            except Exception as e:
                # print(e)
                log("../../log/book.log", e)
            finally:
                unlock(_lock)
                # print()


# Worker entry point: seed the batch counter, then start the crawl loop.
def make(k):
    """Set the cached batch counter 'k' to max(current value, *k*) and
    run make_book in a background thread."""
    db = database()
    ca = cache()
    candidates = ca.get_map(const.book_list)
    # print(candidates.keys())
    with tpe(max_workers = 10) as executor:
        current = int(str(ca.get('k', None, const.string), encoding = 'utf-8'))
        # Keep whichever counter value is larger.
        seed = current if current >= k else k
        ca.string("k", seed, 0, const.px)
        executor.submit(make_book, db, ca, candidates, k)


if __name__ == "__main__":
    # NOTE(review): the process pool is created but never used — make(0)
    # runs in the parent process while the 3 workers sit idle.  Presumably
    # the intent was to fan make() out via p.apply_async/p.map; verify.
    with pool(processes = 3) as p:
        make(0)





# get_describe(base_url1, const.header, 1, const.fenlei + str(1))
# get_content(base_url3, const.header)
# get_page_info(base_url2, const.header)
# make_book(0)
# find_one_book(base_url1, const.header, 1, const.fenlei + str(4))
# print(get_page_num('https://www.bbiquge.net/book/128247/', const.header))
# tppppppext = process('https://www.bbiquge.net/book/3396/', const.header) 211.91.62.19:10480


# print(error('https://www.bbiquge.net/book/128247/')) 185 934 128 813248
# print(error('https://www.bbiquge.net/book/3396/'))
# https://www.bbiquge.net/book/5659/   批次 cjjl 车辆装备信息   附件 fjxx
