import requests
from parsel import Selector
import time
import json

COUNT = 1  # global running record id; read for the "id" field and incremented in DetailCrawler.save_json


class DetailCrawler:
    """Fetch a single person-detail page, parse it, and append the record to data1.json."""

    def __init__(self, url):
        self.url = url
        # Desktop Edge user-agent so the site serves the regular HTML page.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0'
        }
        self.html = ''

    def get_html(self):
        """Download the page into self.html; on any request failure leave it empty.

        Best-effort by design: a failed fetch is reported but does not abort the
        crawl (parse_html then produces an all-None record).
        """
        try:
            # timeout so a stalled server cannot hang the whole crawl
            response = requests.get(self.url, headers=self.headers, timeout=10)
            response.raise_for_status()  # treat HTTP 4xx/5xx as a failed fetch
            response.encoding = 'utf-8'
            self.html = response.text
        except requests.RequestException as e:
            # Narrow except: only network/HTTP errors are swallowed, and they
            # are logged instead of silently discarded (was a bare `except: pass`).
            print('request failed:', self.url, e)

    def parse_html(self):
        """Extract name/desc/summary/baike/supplementary text and persist them as one JSON record."""
        selector = Selector(text=self.html)
        try:
            desc = summary = baike = supplementary_introduction = None
            name = selector.css('h1::text').get()
            # Fields are scattered across the child divs of #infos; keep the
            # last non-empty match for each field.
            for content in selector.xpath('//div[@id="infos"]/div'):
                desc = content.css('div.peo_l_desc::text').get() or desc
                summary = content.css('div.peo_summary p::text').get() or summary
                baike_parts = content.css('div.peo_baike p::text').getall()
                if baike_parts:
                    baike = ''.join(baike_parts)
                sup_parts = content.css('div.infos_sup p::text').getall()
                if sup_parts:
                    supplementary_introduction = ''.join(sup_parts)
            self.save_json(name, desc, summary, baike, supplementary_introduction)
        except Exception as e:
            # Boundary handler: a malformed page should not kill the batch run.
            print(e)

    def save_json(self, *args):
        """Append one record to data1.json and bump the global id counter.

        Expects exactly five positional values:
        (name, title, summary, detailed_information, supplementary_instruction).
        """
        global COUNT
        name, title, summary, detailed_information, supplementary_instruction = args
        dct = {
            'id': COUNT,
            'name': name,
            'title': title,
            'summary': summary,
            'detailed_information': detailed_information,
            'supplementary_instruction': supplementary_instruction
        }
        COUNT += 1
        # ensure_ascii=False keeps Chinese characters readable in the output file.
        string = json.dumps(dct, ensure_ascii=False)
        # NOTE(review): the trailing comma makes the file neither valid JSON nor
        # JSON Lines; kept as-is for compatibility with whatever consumes data1.json.
        string = string + ",\n"
        with open('data1.json', 'a', encoding='utf-8') as f:
            f.write(string)

    def run(self):
        """Fetch then parse (and persist) the page."""
        self.get_html()
        self.parse_html()


def main(url_list):
    """Crawl every detail page in url_list, one DetailCrawler per URL."""
    for target in url_list:
        print('【开始爬取】', target)
        time.sleep(0.3)  # polite pause between requests
        DetailCrawler(target).run()


if __name__ == '__main__':
    # Single-URL smoke run. Route through main() — previously main() was
    # defined but never called, and the entry point duplicated its body.
    url = 'https://people.isgoodgood.cn/hunanren/choujifan.html'
    main([url])
