from lxml import etree
import requests
from bs4 import BeautifulSoup
import json


class BtcSpider:
    """Scrape thread titles and reply bodies from the ChainNode BTC forum.

    Workflow (see ``start``): fetch listing page(s), collect thread
    titles + URLs, then fetch each thread and collect its title and
    reply texts. Results are dumped to two JSON files.
    """

    def __init__(self):
        # Listing base URL; the page number is appended as "-<n>" in start().
        self.url = "https://www.chainnode.com/forum/1"
        # Browser-like UA so the site serves the normal HTML pages.
        self.header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36"
        }
        self.data_list = []    # listing-page results: [{'title': ..., 'url': ...}, ...]
        self.data_detail = []  # detail-page results: [{'title': ..., 'answer': [...]}, ...]

    # 1. Fetch a page and return its body as text.
    def get_response(self, url):
        """GET *url* and return the response body decoded as UTF-8.

        Raises requests.HTTPError on a non-2xx status so we never try to
        parse an error page as forum content.
        """
        response = requests.get(url, headers=self.header)
        response.raise_for_status()
        return response.content.decode('utf-8')

    # 2. Parse the listing page into (title, url) records.
    def parse_list_data(self, data):
        """Extract thread titles and absolute URLs from listing-page HTML
        and append them to ``self.data_list``."""
        soup = BeautifulSoup(data, 'html.parser')
        result_list = soup.find_all(attrs={'class': 'link-dark-major font-bold bbt-block'})
        self.data_list.extend(
            {
                'title': title.get_text(),
                # hrefs are site-relative; prepend the host to make them absolute.
                'url': "https://www.chainnode.com" + title.get('href'),
            }
            for title in result_list
        )

    # Parse a thread detail page into a (title, answers) record.
    def parse_detail_data(self, data):
        """Extract the thread title and all reply texts from detail-page
        HTML and append one record to ``self.data_detail``."""
        soup_html_data = BeautifulSoup(data, 'html.parser')
        title_list = soup_html_data.select('.header-module__box h1')
        # Keep the last match (original loop semantics); fall back to ''
        # instead of raising UnboundLocalError when no title is found.
        strip_title = title_list[-1].get_text().strip() if title_list else ''
        answer_list = [
            answer.get_text().strip()
            for answer in soup_html_data.find_all(attrs={"class": "bbt-ed-text bbt-html"})
        ]

        self.data_detail.append({
            'title': strip_title,
            'answer': answer_list,
        })

    # 3. Persist scraped records as JSON.
    def save_data(self, data, file_name):
        """Serialize *data* to *file_name* as JSON.

        ensure_ascii=False keeps the Chinese text readable instead of
        writing \\uXXXX escapes.
        """
        date_str = json.dumps(data, ensure_ascii=False)
        with open(file_name, mode='w', encoding='utf-8') as file:
            file.write(date_str)

    def start(self, pages=1):
        """Run the full scrape: *pages* listing pages, then every thread
        found on them. Defaults to one page (original behavior)."""
        for i in range(1, pages + 1):
            # Listing pages are addressed as <base>-<page number>.
            link_url = self.url + '-%s' % str(i)
            data = self.get_response(link_url)
            self.parse_list_data(data)
        self.save_data(self.data_list, '列表页.json')

        # Fetch and parse each thread's detail page.
        for data in self.data_list:
            detail_url = data['url']
            detail_data = self.get_response(detail_url)
            self.parse_detail_data(detail_data)
        self.save_data(self.data_detail, '详情页.json')


# Guard the entry point so importing this module does not trigger the scrape.
if __name__ == "__main__":
    BtcSpider().start()
