import requests,json
from lxml import etree
class Crawl_article_title():
    """Crawl an article-listing page, extract (author, title, url) records,
    and persist them as JSON.

    The whole pipeline runs from ``__init__``: fetch -> parse -> write.
    Intermediate HTML is cached to ``./8.0text.html`` and the final records
    are written to ``./8.0data.json``.
    """

    def __init__(self, url):
        # Target listing-page URL.
        self.url = url
        # Desktop Firefox UA so the site serves the regular HTML page.
        self.headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'
        }
        # Short-circuit the pipeline: only parse after a successful fetch,
        # only write after a successful parse.
        if self.geturl():
            if self.paredata():
                self.write_data()

    def geturl(self):
        """Fetch the page, cache it to disk, and build the lxml tree.

        Returns:
            True on HTTP 200 (``self.html`` is populated), False otherwise.
            (Fix: the original returned True even on failure and then parsed
            a possibly stale or missing cache file.)
        """
        res = requests.get(url=self.url, headers=self.headers)
        if res.status_code != 200:
            print('失败')
            return False
        with open('./8.0text.html', 'w', encoding='utf-8') as book:
            book.write(res.content.decode('utf-8'))
        self.html = etree.parse('./8.0text.html', etree.HTMLParser())
        return True

    def paredata(self):
        """Extract author names, article titles and links from ``self.html``.

        Stores the records in ``self.data`` as a list of dicts with keys
        ``author``, ``title`` and ``titleurl``.

        Returns:
            True when at least one record was parsed, False otherwise.
        """
        base = '//div[contains(@class,"container")]//div[contains(@class,"list-group")]//div[contains(@class,"flex-fill")]'
        author = self.html.xpath(base + '//div/text()')
        title = self.html.xpath(base + '//strong/a/text()')
        titleurl = self.html.xpath(base + '/a[@target="_blank"]/@href')
        # zip() stops at the shortest list, so a missing field on one card
        # cannot raise IndexError.  (Fixes the original, which returned from
        # inside the loop after one record and swapped author/title keys.)
        self.data = [
            {'author': a, 'title': t, 'titleurl': u}
            for a, t, u in zip(author, title, titleurl)
        ]
        return bool(self.data)

    def write_data(self):
        """Dump the parsed records to ``./8.0data.json``.

        ``ensure_ascii=False`` plus an explicit UTF-8 encoding keep the
        Chinese titles human-readable in the output file.
        """
        with open('./8.0data.json', 'w', encoding='utf-8') as book:
            json.dump(self.data, book, ensure_ascii=False)
if __name__ == '__main__':
    # Entry point: crawl the "essence" article listing of lmonkey.com.
    crawler = Crawl_article_title('https://www.lmonkey.com/essence')
