# -*- coding: utf-8 -*-
"""
@Time ： 2021/12/1 17:52
@Auth ： 颜
@File ：xiushibaikespide.py
@IDE ：PyCharm
"""
import requests
from lxml import etree
import xlwt


class Xiushi(object):
    """Scrape text joke posts from qiushibaike.com (pages 1-13) and save them
    to an Excel workbook via xlwt.

    Pipeline: ``run()`` -> ``get_content()`` (fetch + parse every page) ->
    ``save_content()`` (write rows to an .xls file).
    """

    def __init__(self):
        # URL template; the page number is substituted via str.format.
        self.url_temp = "https://www.qiushibaike.com/text/page/{}/"
        # Browser-like User-Agent so the site does not reject the request.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.34"}

    def get_url(self, url):
        """Fetch *url* and return the raw response body as bytes.

        Bug fix: the original called ``requests.get(self.url_temp, self.headers)``,
        which (a) ignored the *url* argument entirely — every request hit the
        literal template URL with the unformatted ``{}`` placeholder — and
        (b) passed the headers dict positionally, where it binds to the
        ``params`` keyword of ``requests.get`` instead of ``headers``, so the
        User-Agent was never sent.
        """
        content = requests.get(url, headers=self.headers).content
        return content

    def get_content(self):
        """Fetch pages 1-13 and extract post fields.

        Returns:
            list[list]: one inner list per matched container, holding ten
            fields in the column order used by :meth:`save_content`:
            content, author gender, author age, best-comment author, comment,
            like count, author, author avatar URL, funny count, comment count.
            Missing fields are ``None``.

        NOTE(review): the xpath ``//*[@id="content"]`` selects the page-level
        container, not individual post divs, so each page likely yields a
        single record holding only the first post's fields — verify against
        the live page structure.
        """
        data_list = []
        for page in range(1, 14):
            html_str = self.get_url(self.url_temp.format(page))
            html = etree.HTML(html_str)
            div_list = html.xpath('//*[@id="content"]')
            for div in div_list:
                content_list = []

                # Post text; strip embedded newlines from every fragment.
                content = div.xpath('.//div[@class="content"]/span/text()')
                content = [line.replace("\n", "") for line in content]
                content_list.append(content)

                # Gender is encoded in the element's CSS class,
                # e.g. "articleGender manIcon" -> "man".
                author_gender = div.xpath(".//div[contains(@class,'articleGender')]/@class")
                author_gender = author_gender[0].split(" ")[-1].replace("Icon", "") if len(
                    author_gender) > 0 else None
                content_list.append(author_gender)

                author_age = div.xpath(".//div[contains(@class,'articleGender')]/text()")
                author_age = author_age[0] if len(author_age) > 0 else None
                content_list.append(author_age)

                comment_author = div.xpath('.//span[@class="cmt-name"]/text()')
                comment_author = comment_author[0] if len(comment_author) > 0 else None
                content_list.append(comment_author)

                comment = div.xpath('.//div[@class="main-text"]/text()')
                comment = comment[0].replace("\n", "") if len(comment) > 0 else None
                content_list.append(comment)

                # NOTE(review): same xpath as `comment` above — `like` probably
                # should target a vote/like element; confirm the intended node.
                like = div.xpath('.//div[@class="main-text"]/text()')
                like = like[0] if len(like) > 0 else None
                content_list.append(like)

                author = div.xpath('.//div[@class="author clearfix"]//img/@alt')
                author = author[0] if len(author) > 0 else None
                content_list.append(author)

                # Avatar src is protocol-relative; prefix the scheme.
                author_img = div.xpath('.//div[@class="author clearfix"]//img/@src')
                author_img = "https:" + author_img[0] if len(author_img) > 0 else None
                content_list.append(author_img)

                stats_vote = div.xpath('.//span[@class="stats-vote"]/i/text()')
                stats_vote = stats_vote[0] + "好笑" if len(stats_vote) > 0 else None
                content_list.append(stats_vote)

                comment_number = div.xpath('.//span[@class="stats-comments"]/a/i/text()')
                comment_number = comment_number[0] + "评论" if len(comment_number) > 0 else None
                content_list.append(comment_number)

                data_list.append(content_list)
        print(data_list)
        return data_list

    def save_content(self, data_list, path_save):
        """Write *data_list* to an .xls workbook at *path_save*.

        Row 0 holds the column titles; record ``i`` occupies row ``i + 1``.
        """
        workbook = xlwt.Workbook(encoding='utf8', style_compression=0)
        work_sheet = workbook.add_sheet('糗事百科', cell_overwrite_ok=True)
        col = ("内容", "作者的性别", '作者年龄', "最佳评论人", '评论内容', '喜欢的人数', '作者', '作者头像', '好笑数', '评论人数')
        # Header row.
        for j in range(0, len(col)):
            work_sheet.write(0, j, col[j])
        for i in range(len(data_list)):
            data = data_list[i]
            print('开始爬取第{}条'.format(i + 1))
            for j in range(0, len(col)):
                # Bug fix: the original wrote write(j + 1, i, ...) — rows and
                # columns were transposed relative to the header row above,
                # so the data came out rotated 90° in the sheet.
                work_sheet.write(i + 1, j, data[j])
        workbook.save(path_save)

    def run(self):
        """Entry point: scrape everything, then save to '糗事百科.xls'."""
        data_list = self.get_content()
        path_save = '糗事百科.xls'
        self.save_content(data_list, path_save)


if __name__ == '__main__':
    # Build the scraper and execute the full fetch-and-save pipeline.
    spider = Xiushi()
    spider.run()
