import requests

from bs4 import BeautifulSoup
from lxml import etree
import json

class BtcSpider(object):
    """Scrape forum post titles and comment threads from chainnode.com.

    Workflow: fetch one or more listing pages, collect each post's title
    and detail URL, then fetch every detail page and collect its title
    plus comment texts.  Results are written out as JSON files.
    """

    def __init__(self):
        # Listing-page URL template; {} is the 1-based page number.
        self.url = 'https://www.chainnode.com/forum/61-{}.html'
        self.header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36"
        }
        # Listing-page records: {'title': ..., 'detail_url': ...}
        self.data_list = []
        # Detail-page records: {'title': ..., 'commment_content_list': [...]}
        self.data_detail_list = []

    # 1. Send the request
    def get_response(self, url):
        """GET *url* (with the browser-like header) and return the body
        decoded as UTF-8 text."""
        response = requests.get(url, headers=self.header)
        return response.content.decode('utf-8')

    # 2.1 Parse listing-page data
    def parse_list_data(self, data):
        """Extract every post's title and absolute detail URL from a
        listing-page HTML string and append them to ``self.data_list``."""
        soup = BeautifulSoup(data, 'lxml')
        # Each post link carries these three CSS classes.
        for anchor in soup.select('.link-dark-major.font-bold.bbt-block'):
            self.data_list.append({
                'title': anchor.get_text(),
                # hrefs are site-relative; prepend the host.
                'detail_url': "https://www.chainnode.com" + anchor.get('href'),
            })

    # 2.2 Parse detail-page data
    def parse_detail_data(self, data):
        """Extract the post title and all comment texts from a
        detail-page HTML string and append them to ``self.data_detail_list``."""
        html_data = BeautifulSoup(data, 'lxml')
        # The post title is the page's first <h1>.
        title = html_data.select('h1')[0].get_text()
        comment_list = [
            comment.get_text()
            for comment in html_data.select('#comment .comment__content div')
        ]
        self.data_detail_list.append({
            # NOTE: key "commment_content_list" (sic) kept as-is — it is
            # part of the emitted JSON schema consumers may rely on.
            "title": title,
            "commment_content_list": comment_list,
        })

    # 3. Save the data
    def save_data(self, data, file_path):
        """Serialize *data* to *file_path* as JSON, keeping non-ASCII
        characters readable (``ensure_ascii=False``)."""
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(json.dumps(data, ensure_ascii=False))

    def start(self, pages=1):
        """Run the full scrape.

        pages: number of listing pages to fetch (default 1, matching the
        original behaviour of the single-iteration loop).
        """
        for i in range(1, pages + 1):
            # BUG FIX: the original called self.url.format(1), fetching
            # page 1 on every iteration instead of page i.
            data = self.get_response(self.url.format(i))
            self.parse_list_data(data)
        self.save_data(self.data_list, "02_list.json")

        # Fetch and parse each detail page collected above.
        for item in self.data_list:
            detail_url = item['detail_url']
            print(detail_url)
            detail_data = self.get_response(detail_url)
            self.parse_detail_data(detail_data)

        self.save_data(self.data_detail_list, "02_detail_list.json")




# Guard the entry point so importing this module does not immediately
# fire network requests and write files.
if __name__ == "__main__":
    BtcSpider().start()


# url = 'https://blog.csdn.net/qq_38200548/article/list/{}'
# ret = url.format(1)
# print(ret)
