import requests
from lxml import etree
import xlwt
import pymongo
from pymongo.collation import Collation



class Dingdang(object):
    """Scraper for the guxs.net novel ranking pages.

    Fetches listing pages, parses each book entry into a flat dict and
    stores the results in the ``dingdang`` collection of a local MongoDB.
    """

    # Shared client for the local MongoDB server
    # (equivalent to MongoClient(host="127.0.0.1", port=27017)).
    mongo_client = pymongo.MongoClient("mongodb://127.0.0.1:27017")
    # Database that holds the scraped data.
    dingdang_db = mongo_client["dingdang_db"]

    def __init__(self):
        # Request headers mimicking a desktop Chrome browser so the site
        # serves the normal HTML page.
        self.header = {
            "Host": "www.guxs.net",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "Accept-Encoding": "gzip,deflate,br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "User-Agent": "Mozilla/5.0(Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
        }
        # BUGFIX: the original wrapped the database name in a pymongo
        # Collation (a sort-locale descriptor that has no insert_many),
        # which made insert_data() raise AttributeError. Use the actual
        # "dingdang" collection instead.
        self.dingdang = Dingdang.dingdang_db["dingdang"]

    def get_dingdang(self, page):
        """Fetch one listing page and return the book entry <div> nodes.

        :param page: 1-based page number interpolated into the URL.
        :returns: list of lxml elements, or None when the HTTP response
                  is falsy (error status).
        """
        url = "https://www.guxs.net/top/0_0_0_2_0_%s.html" % page  # page selects the listing page
        response = requests.get(url=url, headers=self.header)
        if response:
            html = etree.HTML(response.text)
            return html.xpath("//div[@class='con']/ul/li/div")

    def join_list(self, item):
        """Collapse an xpath text-node list into a single string."""
        return "".join(item)

    def parse_item(self, items):
        """Parse book entry nodes into a list of flat dicts.

        :param items: iterable of lxml <div> elements from get_dingdang().
        :returns: list of dicts with keys name/zt/user/book/last/bbb/time.
        """
        result_list = []
        for item in items:
            name = item.xpath(".//div[@class='font1']/a/text()")  # book title
            zt = item.xpath(".//div[@class='font1']/span/text()")  # book status
            user = item.xpath(".//div[@class='font2']/text()")  # author and category
            book = item.xpath(".//div[@class='font3']/text()")  # synopsis
            last = item.xpath(".//div[@class='font4 clearfix']/span[@class='s1']/text()")  # latest-update note
            bbb = item.xpath(".//div[@class='font4 clearfix']/a/text()")  # latest chapter
            time = item.xpath(".//div[@class='font4 clearfix']/span[@class='s2']/text()")  # latest-update time
            result_list.append(
                {
                    "name": self.join_list(name),
                    "zt": self.join_list(zt),
                    "user": self.join_list(user),
                    "book": self.join_list(book),
                    "last": self.join_list(last),
                    "bbb": self.join_list(bbb),
                    "time": self.join_list(time)
                }
            )
        return result_list

    def insert_data(self, result_list):
        """Bulk-insert parsed entries into the dingdang collection.

        Skips the call for an empty list because pymongo's insert_many
        raises InvalidOperation on empty input.
        """
        if result_list:
            self.dingdang.insert_many(result_list)


def _save_excel(rows, savepath):
    """Write parsed book entries to an .xls workbook via xlwt.

    :param rows: list of dicts as produced by Dingdang.parse_item().
    :param savepath: output filename; xlwt only writes the legacy .xls
                     format, so the path should end in ".xls".
    """
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)
    # BUGFIX: keyword was misspelled "cell_overweite_ok" in the original.
    sheet = book.add_sheet('图书条目', cell_overwrite_ok=True)
    col = ("书名", "书籍状态", "作者以及类别", "小说简介", "最近更新说明", "最近更新章节", "最近更新时间")
    # Dict keys in the same order as the column headers above.
    keys = ("name", "zt", "user", "book", "last", "bbb", "time")
    # BUGFIX: the original looped range(0, 8) over a 7-element tuple
    # (IndexError); enumerate the tuple instead.
    for j, title in enumerate(col):
        sheet.write(0, j, title)
    for i, row in enumerate(rows):
        print("第%d本" % (i + 1))
        for j, key in enumerate(keys):
            sheet.write(i + 1, j, row.get(key, ""))
    book.save(savepath)


def main():
    """Crawl 100 listing pages, store each page in MongoDB, dump the raw
    results to list.txt and export everything to an Excel workbook."""
    d = Dingdang()
    all_rows = []  # accumulate every page's entries for the Excel export
    for page in range(1, 101):
        items = d.get_dingdang(page=page)
        result = d.parse_item(items=items)
        print(result)
        d.insert_data(result_list=result)
        all_rows.extend(result)
        # Save the crawled content to a file. BUGFIX: the original had this
        # note as bare (non-comment) text, which was a SyntaxError, shadowed
        # the builtin `list`, and opened the file without a context manager
        # or an explicit encoding.
        with open('list.txt', 'w', encoding='utf-8') as fl:
            fl.write(str(result))
    # BUGFIX: the original nested saveDate() indexed the non-subscriptable
    # Dingdang instance (d[i]) and saved an .xlsx through xlwt, which only
    # produces .xls files.
    _save_excel(all_rows, "图书条目.xls")



# Run the crawl only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
    # print("爬取完毕！")
