# -*- coding: utf-8 -*-
"""
@Time ： 2021/11/28 16:15
@Auth ： 颜
@File ：xiushibaike.py
@IDE ：PyCharm
"""
import threading

import requests
from lxml import etree
# import json
import csv
import xlwt
from queue import Queue
import pandas as pd


# Work queues: producer/consumer pipeline between the threads below.

class QiubaiSpidr(object):
    """Multi-threaded scraper for the qiushibaike.com text section.

    Pipeline of three queues handed between daemon worker threads:

        url_queue     -> page URLs to fetch          (filled by get_url_list)
        html_queue    -> raw HTML bytes per page     (filled by parse_url)
        content_queue -> parsed records per page     (filled by get_content,
                                                      drained by save_content)
    """

    def __init__(self):
        # "{}" is filled in with the page number (1..13).
        self.url_temp = "https://www.qiushibaike.com/text/page/{}/"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.34"}
        self.url_queue = Queue()      # URLs waiting to be fetched
        self.html_queue = Queue()     # fetched page bodies (bytes)
        self.content_queue = Queue()  # lists of parsed records, one per page

    def get_url_list(self):
        """Seed the URL queue with list pages 1 through 13."""
        for i in range(1, 14):
            self.url_queue.put(self.url_temp.format(i))

    def parse_url(self):
        """Fetch worker: pull URLs forever and push the response bodies."""
        while True:
            url = self.url_queue.get()
            # timeout so one stalled request cannot hang this worker forever
            response = requests.get(url, headers=self.headers, timeout=10)
            self.html_queue.put(response.content)
            self.url_queue.task_done()

    def get_content(self):
        """Parse worker: turn one page of HTML into a list of records.

        BUG FIX: this worker originally had no loop, so each of the 20
        extractor threads processed a single page and then exited; with
        more pages than threads the pipeline would deadlock on
        html_queue.join().  Loop forever like the other workers.
        """
        while True:
            html_str = self.html_queue.get()
            data_list = []
            html = etree.HTML(html_str)
            div_list = html.xpath('//*[@id="content"]')
            for div in div_list:
                record = []
                content = div.xpath('.//div[@class="content"]/span/text()')
                # BUG FIX: join into one string — xlwt's sheet.write cannot
                # serialize a Python list, so saving a list cell would raise.
                record.append("".join(i.replace("\n", "") for i in content))
                author_gender = div.xpath(".//div[contains(@class,'articleGender')]/@class")
                # class looks like "articleGender manIcon" -> "man"
                author_gender = author_gender[0].split(" ")[-1].replace("Icon", "") if len(
                    author_gender) > 0 else None
                record.append(author_gender)
                author_age = div.xpath(".//div[contains(@class,'articleGender')]/text()")
                record.append(author_age[0] if len(author_age) > 0 else None)
                comment_author = div.xpath('.//span[@class="cmt-name"]/text()')
                record.append(comment_author[0] if len(comment_author) > 0 else None)
                comment = div.xpath('.//div[@class="main-text"]/text()')
                record.append(comment[0].replace("\n", "") if len(comment) > 0 else None)
                # NOTE(review): this xpath is identical to the `comment` one
                # above — almost certainly a copy-paste slip; the correct
                # "likes" selector must be confirmed against the live page.
                like = div.xpath('.//div[@class="main-text"]/text()')
                record.append(like[0] if len(like) > 0 else None)
                author = div.xpath('.//div[@class="author clearfix"]//img/@alt')
                record.append(author[0] if len(author) > 0 else None)
                author_img = div.xpath('.//div[@class="author clearfix"]//img/@src')
                record.append("https:" + author_img[0] if len(author_img) > 0 else None)
                stats_vote = div.xpath('.//span[@class="stats-vote"]/i/text()')
                record.append(stats_vote[0] + "好笑" if len(stats_vote) > 0 else None)
                comment_number = div.xpath('.//span[@class="stats-comments"]/a/i/text()')
                record.append(comment_number[0] + "评论" if len(comment_number) > 0 else None)
                data_list.append(record)
                print(len(data_list))
            self.content_queue.put(data_list)
            self.html_queue.task_done()

    def save_content(self):
        """Save worker: append each page's records to one Excel sheet.

        BUG FIX: the workbook was previously recreated (and the file
        re-saved from scratch) for every queue item, so each page
        OVERWROTE the previous page's rows and only the last page
        survived.  Create the workbook once, keep a running row counter,
        and re-save the cumulative sheet after each page.
        """
        workbook = xlwt.Workbook(encoding='utf-8', style_compression=0)
        sheet = workbook.add_sheet('糗事百科', cell_overwrite_ok=True)
        col = ("内容", "作者的性别", '作者年龄', "最佳评论人", '评论内容', '喜欢的人数', '作者', '作者头像', '好笑数', '评论人数')
        for i, header in enumerate(col):
            sheet.write(0, i, header)
        next_row = 1  # row 0 holds the header
        while True:
            content_list = self.content_queue.get()
            for data in content_list:
                print("开始爬取第{}条数据".format(next_row))
                for j in range(len(col)):
                    sheet.write(next_row, j, data[j])
                next_row += 1
            # re-save the cumulative workbook after every page so progress
            # is on disk even if the process is killed mid-run
            workbook.save("糗事百科内容.xls")
            self.content_queue.task_done()

    def run(self):
        """Start every worker thread, then wait for the queues to drain."""
        thread_list = [threading.Thread(target=self.get_url_list)]
        for _ in range(18):  # fetchers
            thread_list.append(threading.Thread(target=self.parse_url))
        for _ in range(20):  # extractors
            thread_list.append(threading.Thread(target=self.get_content))
        thread_list.append(threading.Thread(target=self.save_content))
        for t in thread_list:
            # daemon threads die with the main thread once the queues join
            t.daemon = True  # setDaemon() is deprecated since Python 3.10
            t.start()
        for q in [self.url_queue, self.html_queue, self.content_queue]:
            q.join()
        print('线程结束')


if __name__ == '__main__':
    # Script entry point: build the spider and run the whole pipeline.
    QiubaiSpidr().run()
