# encoding=utf-8
from spider.base import category, process
from constant.const import const
from storages.cache import cache
from entity.entity import left_mid_book, all_six

# Root URL of the bbiquge novel site; every scraper below crawls this page.
base_url = 'https://www.bbiquge.net/'


# Scrape the six featured books at the top-left of the home page.
def get_left_top_book(base_url, header):
    """Fetch *base_url* and cache the six top-left featured books.

    For each featured card the cover image URL, detail-page link, title,
    status flag, author and brief are copied onto an ``all_six`` entity,
    whose string form is appended to the cache list keyed by
    ``const.left_top``; finally an expiry is set on that key.

    :param base_url: URL of the page to fetch (the site home page).
    :param header: HTTP request headers, passed through to ``process()``.
    """
    page = process(base_url, header)
    # The first six <div> children of #container are the featured cards.
    cards = page.xpath(
        "//div[@id='main']/div[@id='mainleft']/div[position()=1]"
        "/div[@id='container']/div[position()<7]"
    )
    for card in cards:  # iterate elements directly instead of range(len(...))
        six = all_six()
        six.img = card.xpath("./div[@class='pic']/a/img/@src")[0]
        six.href = card.xpath("./div[@class='txt']/dl/dt/a/@href")[0]
        six.title = card.xpath("./div[@class='txt']/dl/dt/a/text()")[0]
        dd = card.xpath("./div[@class='txt']/dl/dd")
        six.flag = dd[0].xpath("./span[@class='green']/text()")[0]
        six.author = dd[0].xpath("./span[@class='blue']/text()")[0].strip()
        six.brief = dd[1].xpath("./text()")[0]
        cache().list(const.left_top, str(six), const.list)
    # Expire the cached list after 10000 (unit defined by cache.time — TODO confirm).
    cache().time(const.left_top, 10000, None)


# Scrape the six category columns in the middle-left of the home page.
def get_left_mid_book(base_url, header):
    """Fetch *base_url* and cache each mid-left category column of books.

    Within a column, the first ``<li>`` uses a richer highlighted layout
    (strong title plus author paragraph) while the remaining items are plain
    links. Each column is stored in the cache hash ``const.home_list`` under
    the field ``const.fenlei + <1-based column number>``, then an expiry is
    set on the hash key.

    :param base_url: URL of the page to fetch (the site home page).
    :param header: HTTP request headers, passed through to ``process()``.
    """
    page = process(base_url, header)
    columns = page.xpath("//div[@id='main']/div[@id='mainleft']/div[@class='titletop']")
    # enumerate() replaces the range(len(...)) index loops of the original.
    for col_no, column in enumerate(columns, start=1):
        books = []
        for idx, item in enumerate(column.xpath("./ul/li")):
            book = left_mid_book()
            if idx == 0:
                # Leading item: highlighted layout with <strong> title.
                book.href = item.xpath("./div[@class='text']/strong/a/@href")[0]
                book.title = item.xpath("./div[@class='text']/strong/a/text()")[0]
                book.author = item.xpath("./div[@class='text']/p/text()")[0]
            else:
                book.href = item.xpath("./a/@href")[0]
                book.title = item.xpath("./a/text()")[0]
                book.author = item.xpath("./span/text()")[0]
            print(book)
            books.append(book)
            print("第" + str(idx) + "本")
        # NOTE(review): str(books) renders each element via repr(), not str();
        # if left_mid_book defines only __str__, the cached value will contain
        # default object reprs — confirm against entity.left_mid_book.
        cache().map(const.home_list, const.fenlei + str(col_no), str(books))
        print("-------------------------------------")
    cache().time(const.home_list, 10000, None)


# Scrape the long update list at the bottom-left of the home page.
def get_left_foot_book(base_url, header):
    """Fetch *base_url* and walk the bottom-left update-list rows.

    The row count and each row's latest-chapter text are printed. The other
    extracted fields are currently unused — nothing is written to the cache
    yet (the indexing still enforces that each cell exists).

    :param base_url: URL of the page to fetch (the site home page).
    :param header: HTTP request headers, passed through to ``process()``.
    """
    page = process(base_url, header)
    rows = page.xpath(
        "//div[@id='main']/div[@id='mainleft']/div[@class='uplist']/div[@id='tlist']/ul/*"
    )
    print(len(rows))
    for row in rows:
        lb = row.xpath("./div[@class='lb']/text()")[0]        # category label
        title = row.xpath("./div[@class='zp']/a/text()")[0]   # book title
        href = row.xpath("./div[@class='zp']/a/@href")[0]     # detail-page link
        update = row.xpath("./div[@class='zz']/a/text()")[0]  # latest chapter
        author = row.xpath("./div[@class='author']/text()")[0]
        sj = row.xpath("./div[@class='sj']/text()")[0]        # update time
        print(update)


# Scrape the four ranking columns on the right side of the home page.
def get_rigth_book(base_url, header):
    """Fetch *base_url* and print the four right-hand ranking columns.

    For every list entry the author, category, title and link are printed;
    a divider line is printed after each column. Nothing is cached.

    NOTE(review): "rigth" is a typo for "right" — kept unchanged because the
    module-level call at the bottom of this file depends on the name.

    :param base_url: URL of the page to fetch (the site home page).
    :param header: HTTP request headers, passed through to ``process()``.
    """
    page = process(base_url, header)
    columns = page.xpath("//div[@id='main']/div[@id='mainright']/div[@class='titletop']")
    for column in columns:
        for entry in column.xpath("./ul/*"):
            author = entry.xpath("./small/text()")[0]
            cate = entry.xpath("./em/text()")[0]
            title = entry.xpath("./a/text()")[0]
            href = entry.xpath("./a/@href")[0]
            print(author)
            print(cate)
            print(title)
            print(href)
        print("-----------------------------------------------------")


# Module-level invocation: the network request fires as soon as this module
# is imported. NOTE(review): consider guarding with `if __name__ == "__main__":`
# so importing this file elsewhere does not trigger a scrape.
get_rigth_book(base_url, const.header)
