# !/usr/bin/python
# -*- coding: utf-8 -*-
"""
@FileName:  Get_Book.py
@Time    :  2021-12-30 0:36
@Author  :  Alan_1999
@Version :  1.0
@License :  (C)Copyright 2021-2022
@Desc    :  
"""
"""
爬虫所爬取网页为：https://www.dstiejuan.com/library/0_0_0_1.html
对于上述地址，0_0_0_1.html 是全部 类目， 0_1_0_1.html是 玄幻奇幻 类目， 以此类推！
"""
import random
import re

import pymysql
import requests
from lxml import etree

homepage_html = "https://www.dstiejuan.com"  # Site root URL; every page path below is appended to this.
# Shared HTTP headers sent with every request: a desktop-browser User-Agent
# plus a session cookie, so the site serves the normal HTML pages.
header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                        "Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.34",
          "cookie": "Hm_lvt_3024da0873e04ce5d9af346d8cde7f72=1639491095,1639526584; "
                    "Hm_lpvt_3024da0873e04ce5d9af346d8cde7f72=1639527012 ",
          "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6"
          }


def Library_main():
    """Interactively pick a library category and page, then fetch it.

    Prints the category menu, reads "<category index> <page>" from stdin,
    and returns the book list produced by Library(). Returns None when the
    page is out of range or scraping fails.
    """
    li_lib_cla = ['全部', '玄幻奇幻', '武侠仙侠', '都市生活', '历史军事', '游戏竞技', '科幻未来', '恐怖悬疑', '二次元',
                  '经典网文', '古代言情', '现代言情', '幻想奇缘', '青春校园', '网络情缘', '科幻空间', '鬼怪灵异', 'N次元', '言情美文']
    print("请输入你想查看的排行榜分类的序号：")
    for i, name in enumerate(li_lib_cla):  # enumerate over range(len(...))
        print(f"序号 {i} -- {name}    ", end="")
        if not i % 3:  # break the menu into rows of three
            print()
    t, page = map(int, input("在此输入序号，想要的页数：").split())
    print("请稍后，正在查询>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
    try:
        return Library(t, page)
    except Exception:  # narrowed from a bare except; keep the best-effort UX
        print(">>>>>>>>>>>>>>页面超限，或其他未知错误！！！！<<<<<<<<<<<<<<<<<<")


def Library(t, page):
    """Scrape one page of the site's library listing.

    Args:
        t:    category index (0 = all, 1 = 玄幻奇幻, and so on).
        page: 1-based page number within the category.

    Returns:
        list[dict]: one dict per book with keys id, book_name, author,
        img, introduction, newest.
    """
    page_html = f"/library/0_{t}_0_{page}.html"
    lib_cla_html = homepage_html + page_html  # absolute listing URL
    lib_cla_ym = requests.get(lib_cla_html, headers=header).text
    lib_cla_ym_etree = etree.HTML(lib_cla_ym)  # parse page source with lxml
    li_books = lib_cla_ym_etree.xpath("/html/body/div[3]/div/div[2]/ul[2]/li")  # one <li> per book
    page_info = list()
    li_calo = ["id", "book_name", "author", "img", "introduction", "newest"]
    for item in li_books:
        # r"\d+" (raw string) fixes the invalid "\d" escape-sequence warning;
        # the first number in the detail-page href is the book id.
        li_info = [int(re.findall(r"\d+", item.xpath('./a/@href')[0])[0]), item[1].text, item[3].xpath("./a")[0].text,
                   item[0].xpath("./img/@src")[0], item[4].text, item[5].text]
        page_info.append(dict(zip(li_calo, li_info)))  # pack the fields into a dict
    return page_info


def Book_main():
    """Prompt for a book id and return that book's details from Book().

    Returns None when the id is not an integer or scraping fails.
    """
    try:
        bid = int(input("请输入要查询的book id："))  # sample book id: 3451
        return Book(bid)
    except Exception:  # narrowed from a bare except; bad id or network error
        print(">>>>>>>>>>>>>>>>>>>>>>请检查输入是否为已有书目的id！！<<<<<<<<<<<<<<<<<<<<<<<<<<<<")


def Book(bid):
    page_html = f"/book/{bid}.html"  # 简介页面
    page_html_chap = f"/book/{bid}"  # 所有章节页面
    book_html = homepage_html + page_html  # 拼凑url
    book_html_chap = homepage_html + page_html_chap
    book_html_sc = requests.get(book_html, headers=header).text  # 爬取源代码
    book_html_chap_sc = requests.get(book_html_chap, headers=header).text
    book_html_etree = etree.HTML(book_html_sc)  # 使用etree库进行处理
    book_html_chap_etree = etree.HTML(book_html_chap_sc)
    li_chaps = book_html_chap_etree.xpath("/html/body/div[3]/div/div[2]/div[2]/dl[2]/dd")  # 获取所有章节li标签
    book_info = ["id", "book_name", "author", "introduction", "catalogue", "img"]  # 将要返回的信息
    book_chap_info = ["pid", "chap_name"]
    li_book_chap = list()
    for chap in li_chaps:  # 获得每一个章节的名字，id等信息
        li_chaps_info = [int(chap.xpath("./@chapter-id")[0]), chap[0].text]
        li_book_chap.append(dict(zip(book_chap_info, li_chaps_info)))
    li_book = [bid, book_html_etree.xpath("/html/body/div[3]/div/div[2]/div[1]/div[1]/h1")[0].text,
               book_html_etree.xpath("/html/body/div[3]/div/div[2]/div[1]/div[1]/p[1]/a[1]")[0].text,
               book_html_etree.xpath("/html/body/div[3]/div/div[2]/div[2]/div[2]/p")[0].text, li_book_chap,
               book_html_etree.xpath("/html/body/div[3]/div/div[2]/div[1]/div[1]/a/img/@src")[0]
               ]
    d_book = dict(zip(book_info, li_book))
    return d_book


def Chapter_main():
    """Prompt for a book id and chapter id, fetch and print the chapter.

    Returns None; errors (bad input, missing chapter, network) are
    reported on stdout.
    """
    try:
        bid, pid = map(int, input("请输入要查询的书本id， 章节id：").split())
        chap_info = Chapter(bid, pid)
        print(chap_info)
    except Exception:  # narrowed from a bare except
        print(">>>>>>>>>>>>>>>>>>>>>>请检查书本id和章节id的正确性！！！！<<<<<<<<<<<<<<<<<<<<<<<<<<")


def Chapter(bid, pid):
    """Scrape one chapter's content page for book *bid*, chapter *pid*.

    Returns:
        dict with keys title, content (text re-wrapped in <p> tags),
        book (book name), author, length (word count parsed off the page).
    """
    page_html = f"/book/{bid}/{pid}.html"
    cont_html = homepage_html + page_html
    cont_html_sc = requests.get(cont_html, headers=header).text  # fetch page source
    cont_html_etree = etree.HTML(cont_html_sc)  # parse with lxml
    li_key = ['title', 'content', 'book', 'author', 'length']
    # Build the content with one join instead of quadratic `+=` concatenation.
    contxt = "".join('<p>' + item.text + '</p>'
                     for item in cont_html_etree.xpath('//*[@id="content"]/p'))
    li_value = [cont_html_etree.xpath('//*[@id="chapter"]/div[3]/div/div[2]/h1')[0].text, contxt,
                cont_html_etree.xpath('//*[@id="bookname"]')[0].text,
                cont_html_etree.xpath('//*[@id="author"]')[0].text,
                # r"\d+" (raw string) fixes the invalid "\d" escape warning.
                int(re.findall(r"\d+",
                               cont_html_etree.xpath('//*[@id="chapter"]/div[3]/div/div[2]/div[1]/text()[5]')[0])[0])]
    content = dict(zip(li_key, li_value))
    return content


def Search_main():
    """Interactive entry point for search; not implemented yet."""


def Search(words, page=1):
    """POST a keyword search and return the first result page's books.

    Args:
        words: search keyword, sent as the `searchkey` form field.
        page:  kept for interface compatibility; the request below does
               not use it (single result page).

    Returns:
        list[dict]: one dict per hit with keys id, book_name, author,
        img, introduction, newest.
    """
    # Consistency fix: send the same browser headers as every other request,
    # so the search endpoint sees the same UA/cookie as the GET scrapers.
    res = requests.post("https://www.dstiejuan.com/search.html", {'searchkey': words}, headers=header)
    res_etree = etree.HTML(res.text)
    res = res_etree.xpath("/html/body/div[3]/div/div[1]/ul/li")  # one <li> per hit
    li_calo = ["id", "book_name", "author", "img", "introduction", "newest"]
    page_info = []
    for item in res:
        # r"\d+" (raw string) fixes the invalid "\d" escape warning.
        li_info = [int(re.findall(r"\d+", item.xpath('./a/@href')[0])[0]), item[1].text, item[3].xpath("./a")[0].text,
                   item[0].xpath("./img/@src")[0], item[4].text, item[5].text]
        page_info.append(dict(zip(li_calo, li_info)))
    return page_info


def Rank_main():
    """Show the ranking-board menu, read a choice, and return that board.

    Returns the list produced by Rank(), or None when the choice is
    invalid or scraping fails.
    """
    try:
        rank_mess = ['总点击榜', '月点击榜', '周点击榜', '日点击榜', '总推荐榜', '月推荐榜', '周推荐榜', '日推荐榜', '总收藏榜',
                     '总字数榜', '最新入库', '最近更新', '强推榜', '新书榜']
        for i, name in enumerate(rank_mess):  # enumerate over range(len(...))
            print(f"序号{i} -- {name}     ", end=" ")
            if not i % 3:  # break the menu into rows of three
                print()
        t_rank = int(input("\n请输入数字查询相应的排行榜："))
        return Rank(t_rank)
    except Exception:  # narrowed from a bare except
        print(">>>>>>>>>>>>>>>>>>请检查排行榜信息是否正确！！！<<<<<<<<<<<<<<<<<<<<<<<<<")


def Rank(t):
    """Scrape ranking board *t* (index into the li_page path list).

    Args:
        t: 0-based board index matching the menu in Rank_main().

    Returns:
        list[dict]: one dict per book with keys id, book_name, author,
        img, introduction.
    """
    li_page = ['/top/allvisit.html', '/top/monthvisit.html', '/top/weekvisit.html', '/top/dayvisit.html',
               '/top/allvote.html', '/top/monthvote.html', '/top/weekvote.html', '/top/dayvote.html',
               '/top/goodnum.html', '/top/size.html', '/top/postdate.html', '/top/lastupdate.html', '/top/toptime.html',
               '/top/goodnew.html']
    page_html = homepage_html + li_page[t]
    page_html_sc = requests.get(page_html, headers=header).text
    page_html_etree = etree.HTML(page_html_sc)  # parse page source with lxml
    li_page_infos = page_html_etree.xpath("/html/body/div[3]/div/div[2]/div[2]/ul/li")  # one <li> per book
    li_rank_info = ['id', 'book_name', 'author', 'img', 'introduction']
    rank_info = list()
    for li_page_info in li_page_infos:
        # BUG FIX: the img xpath was missing [0], so 'img' held a one-element
        # list instead of the URL string returned by Library()/Search().
        # r"\d+" (raw string) also fixes the invalid "\d" escape warning.
        li_book_info = [int(re.findall(r"\d+", li_page_info.xpath("./a/@href")[0])[0]), li_page_info[2].text,
                        li_page_info[5][0].text, li_page_info[0].xpath("./img/@src")[0], li_page_info[6].text]
        rank_info.append(dict(zip(li_rank_info, li_book_info)))
    return rank_info


def Insert_SQL(sql, values):
    """Insert the rows in *values* into MySQL using the %-format template *sql*.

    Retries the connection until MySQL is reachable, skips rows whose
    book_id already exists in book_info, and commits once at the end.

    Args:
        sql:    INSERT statement with '%s' placeholders for %-formatting.
        values: iterable of row sequences; item[0] must be the book id.
    """
    while True:  # retry until the database accepts the connection
        try:
            conn = pymysql.Connect(host='127.0.0.1',
                                   port=3306,
                                   user='root',
                                   passwd='lales1999',
                                   db='book_shop',
                                   charset='utf8')
            print("数据库连接成功！")
            break
        except Exception:  # narrowed from a bare except
            print("请检查服务，MySQL正在尝试重连>>>>>>>>>>>>>>>>>")
    cur = conn.cursor()
    count = 0
    for item in values:
        count += 1
        print(item[0])
        # BUG FIX: the original sent the literal text "item[0]" to MySQL;
        # use a parameterized query so the actual id is compared.
        cur.execute("select * from book_info where book_id = %s", (item[0],))
        res = cur.fetchall()
        # BUG FIX: fetchall() returns a (possibly empty) tuple, never None,
        # so the old `res == None` duplicate check could never fire; skip
        # when the query actually returned a matching row.
        if res:
            print(f"{item[0]}已存在！")
            continue
        try:
            # NOTE(review): %-formatting scraped data into SQL is
            # injection-prone; callers should migrate to
            # cur.execute(sql_with_%s_params, item).
            cur.execute(sql % tuple(item))
            print(f">>第{count}条数据插入成功<<")
        except Exception:  # narrowed from a bare except; keep best-effort insert
            print(f"！！！！！第{count}条数据插入失败！！！！！")

    conn.commit()
    conn.close()


def Get_info(t, page):
    """Scrape a trimmed book list (id/name/author/description) for one
    library page — the bulk-import counterpart of Library().

    Args:
        t:    category index (0 = all, 1 = 玄幻奇幻, ...).
        page: 1-based page number within the category.

    Returns:
        list[dict]: one dict per book with keys id, book_name, author, des.
    """
    page_html = f"/library/0_{t}_0_{page}.html"
    lib_cla_html = homepage_html + page_html  # absolute listing URL
    lib_cla_ym = requests.get(lib_cla_html, headers=header).text
    lib_cla_ym_etree = etree.HTML(lib_cla_ym)  # parse page source with lxml
    li_books = lib_cla_ym_etree.xpath("/html/body/div[3]/div/div[2]/ul[2]/li")  # one <li> per book
    page_info = list()
    li_calo = ["id", "book_name", "author", "des"]
    for item in li_books:
        # r"\d+" (raw string) fixes the invalid "\d" escape warning.
        book_id = int(re.findall(r"\d+", item.xpath('./a/@href')[0])[0])
        li_info = [book_id, item[1].text, item[3].xpath("./a")[0].text,
                   item[4].text]
        page_info.append(dict(zip(li_calo, li_info)))  # pack the fields into a dict
    return page_info


def Pass_info(book_id):
    """Fetch the first chapter's text for *book_id*, wrapped in <p> tags.

    Looks up the first chapter id on the book's chapter-list page, then
    scrapes that chapter's content.

    Returns:
        str: the chapter HTML, or "" when the content cannot be parsed.
    """
    page_html_chap = f"/book/{book_id}"  # chapter-list page
    book_html_chap = homepage_html + page_html_chap
    book_html_chap_sc = requests.get(book_html_chap, headers=header).text
    book_html_chap_etree = etree.HTML(book_html_chap_sc)
    li_chaps = book_html_chap_etree.xpath("/html/body/div[3]/div/div[2]/div[2]/dl[2]/dd[1]")  # first chapter <dd> only
    chap_id = int(li_chaps[0].xpath("./@chapter-id")[0])
    page_html = f"/book/{book_id}/{chap_id}.html"
    cont_html = homepage_html + page_html
    cont_html_sc = requests.get(cont_html, headers=header).text  # fetch chapter source
    cont_html_etree = etree.HTML(cont_html_sc)  # parse with lxml
    contxt = str()
    try:
        # item.text may be None for empty <p> tags, which raises TypeError
        # on concatenation — that is what the fallback below catches.
        for item in cont_html_etree.xpath('//*[@id="content"]/p'):
            contxt += '<p>' + item.text + '</p>'
        return contxt
    except Exception:  # narrowed from a bare except; best-effort content
        return ""


if __name__ == '__main__':
    # Bulk-import driver: walk categories 1..7, pages 1..4 each, scrape the
    # book lists plus each book's first chapter, and insert them into MySQL.
    li_lib_cla = ['全部', '玄幻奇幻', '武侠仙侠', '都市生活', '历史军事', '游戏竞技', '科幻未来', '恐怖悬疑', '二次元',
                  '经典网文', '古代言情', '现代言情', '幻想奇缘', '青春校园', '网络情缘', '科幻空间', '鬼怪灵异', 'N次元', '言情美文']
    print("请输入你想查看的排行榜分类的序号：")
    for i, name in enumerate(li_lib_cla):  # enumerate over range(len(...))
        print(f"序号 {i} -- {name}    ", end="")
        if not i % 3:  # break the menu into rows of three
            print()
    t = 1
    page = 0
    while t != 8:
        if page == 4:
            # BUG FIX: reset to 0 (not 1) so the increment below starts the
            # new category at page 1; the original skipped page 1 for every
            # category after the first.
            page = 0
            t += 1
        page += 1
        print(f"t:{t},  page{page}数据获取中>>>>>>>>>>>>>>>>>>>>>>>>>")
        book_info = Get_info(t, page)
        sql = "INSERT INTO book_info(book_id, book_name, author, book_des, book_price, book_count, chap_1, book_clas) " \
              "VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');"
        values = list()
        for item in book_info:
            context = Pass_info(item["id"])
            if not context:  # skip books whose first chapter could not be fetched
                continue
            # Price and sales count are randomized filler for the shop demo.
            li = [item["id"], item["book_name"], item["author"], item["des"],
                  random.randint(5, 50) + 0.1 * random.randint(0, 9),
                  random.randint(10, 1000), context, t]
            values.append(li)
        print("数据获取成功！！！正在插入>>>>>>>>>>>")
        Insert_SQL(sql, values)
