# encoding=utf-8
from spider.base import category, process
from storages.cache import cache
from constant.const import const

c = cache()

base_url = 'https://www.bbiquge.net/'

# header = {
#     "Accept-Encoding": "gzip, deflate",
#     "Accept-Language": "zh-CN,zh;q=0.9",
#     "Cookie": "HMACCOUNT_BFESS=07B85514E05D22AE; H_WISE_SIDS_BFESS=110085_127969_179349_180636_185638_188749_189755_191527_194085_194519_196428_196527_197471_197711_197948_199572_203518_204916_207236_207697_208721_209568_210321_210732_210757_211953_212295_212726_212739_212797_212867_212912_213039_213059_213094_213289_213357_213484_214094_214205_214379_214596_214652_214791_214884_215070_215127_215280_215333_215554_215730_215859_215892_216043_216253_216297_216335_216342_216353_216368_216446_216596_216631_216634_216645_216843_217183_217320_217410_217439_217453_217514_217760_217868; BAIDUID_BFESS=FD30747E577715F2704ED40BB1AE3016:FG=1; BDUSS_BFESS=VJSY3dVTEpVN34ycjJFUjRjanA0Qy00UWR5allmVWFkTVE0enI3a21iUDduU05rRVFBQUFBJCQAAAAAAAAAAAEAAABJszI0xfS71L3wxaMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPsQ~GP7EPxjO; ZFY=1S3:AvULhrXM1QTkZp:AmNtEmf:AUoPgmjyb8voM0q2j:BE:C",
#     "Host": "hm.baidu.com",
#     "Referer": "https://www.bbiquge.net/",
#     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36",
# }


# Scrape the total page count of a category listing.
def get_book_pagenum(url, header):
    """Return the last page number of a paginated category listing.

    Args:
        url: category listing URL (e.g. a /fenlei/ page).
        header: HTTP request headers forwarded to ``process``.

    Returns:
        The page count as a plain ``str`` (text of the 'last' pagination link).

    Raises:
        LookupError: if the pagination 'last' link is not found on the page.
    """
    print(url)
    html = process(url, header)
    # The <a class='last'> link text holds the final page number.
    nodes = html.xpath(
        "//div[@id='main']/div[@class='leftBox border']/div[@class='uplist']"
        "/div[@class='articlepage']/div[@class='pagelink']/a[@class='last']/text()"
    )
    if not nodes:
        # Explicit error instead of a bare IndexError from nodes[0],
        # so failures name the offending URL.
        raise LookupError("pagination 'last' link not found at %s" % url)
    # str() normalizes lxml's smart-string result to a plain str.
    return str(nodes[0])


# Print the six featured books shown at the top of one category page.
def get_six_book_from_one_category(url, header):
    """Scrape and print the first six featured books of a category page.

    For each book card the cover image URL, title, detail-page href and
    description are printed. Malformed cards (missing any field) are
    skipped instead of aborting the whole scrape.

    Args:
        url: category page URL, e.g. "https://www.bbiquge.net/fenlei/1_1/".
        header: HTTP request headers forwarded to ``process``.
    """
    html = process(url, header)
    # position()<7 limits the selection to the first six book cards.
    cards = html.xpath(
        "//div[3]/div[@id='main']/div[@class='tjbox border']/div/div[position()<7]"
    )
    for card in cards:
        img = card.xpath("./div[1]/a/img/@src")
        title = card.xpath("./div[2]/h4/a/text()")
        href = card.xpath("./div[2]/h4/a/@href")
        describe = card.xpath("./div[2]/p/text()")
        # Skip incomplete cards rather than crash on an unguarded [0].
        if not (img and title and href and describe):
            continue
        print(img[0])
        print(title[0])
        print(href[0])
        print(describe[0])
        print("--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")


# Scrape one page of a category's book list and cache title->href mappings.
def get_book_list(name, url, header):
    """Scrape a single category-listing page and store each book in the cache.

    Each list row yields title, detail href, newest chapter, author and
    update time; the (title, href) pair is written to the cache under
    ``name``. Rows missing the title or href are skipped.

    Args:
        name: cache map name (typically the category identifier).
        url: listing page URL for one page of the category.
        header: HTTP request headers forwarded to ``process``.
    """
    print(url)
    html = process(url, header)
    rows = html.xpath(
        "//div[3]/div[@id='main']/div[@class='leftBox border']"
        "/div[@class='uplist']/div[@id='tlist']/ul/li"
    )
    for row in rows:
        title = row.xpath("./div[1]/a/text()")
        href = row.xpath("./div[1]/a/@href")
        # Title and href are required for the cache entry; skip bad rows.
        if not (title and href):
            continue
        print(title[0])
        # Persist the mapping so detail pages can be resolved later.
        c.map(name, title[0], href[0])
        print("--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
