import  requests
import pandas as pd
from lxml import  etree

import jsonpath
import json
class Tieba(object):
    """Scraper that collects post titles and links from a Baidu Tieba forum.

    Crawls every listing page of the forum given by *name* and appends each
    page's posts to '<name>.csv'.
    """

    def __init__(self, name):
        self.name = name
        # First listing page of the forum.
        self.url = 'http://tieba.baidu.com/f?kw={}'.format(self.name)
        # Desktop UA so Baidu serves the full (non-mobile) page.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
        }

    def get_data(self, url):
        """GET *url* and return the raw response body as bytes.

        The body is also dumped to temp.html as a debugging aid.
        """
        # Timeout keeps the crawler from hanging forever on a dead connection.
        response = requests.get(url, headers=self.headers, timeout=10)
        with open('temp.html', 'wb') as f:
            f.write(response.content)
        return response.content

    def parse_data(self, data):
        """Parse one post-listing page.

        :param data: raw bytes of the listing-page response
        :return: (list of {'title': ..., 'link': ...} dicts,
                  absolute next-page url, or None on the last page)
        """
        # Baidu ships the post list inside HTML comments; strip the comment
        # markers so lxml parses the real markup.
        text = data.decode().replace("<!--", "").replace("-->", "")
        html = etree.HTML(text)
        el_list = html.xpath(
            '//li[@class=" j_thread_list clearfix"]/div/div[2]/div[1]/div[1]/a')

        data_list = [
            {
                'title': el.xpath("./text()")[0],
                'link': 'http://tieba.baidu.com' + el.xpath("./@href")[0],
            }
            for el in el_list
        ]

        try:
            next_url = 'https:' + html.xpath(
                '//*[contains(text(),"下一页>")]/@href')[0]
        except IndexError:
            # No "next page" anchor: we reached the last listing page.
            next_url = None
        return data_list, next_url

    def save_data(self, data_list):
        """Append one page of posts to '<forum name>.csv'.

        Fixes of the original version: the CSV was rewritten (mode 'w') for
        every page, so only the last page survived a full crawl; rows were
        accumulated one at a time with DataFrame.append, which was removed
        in pandas 2.0; and the filename was hard-coded instead of derived
        from the forum name the class is parameterized with.

        :param data_list: list of {'title': ..., 'link': ...} dicts
        """
        frame = pd.DataFrame(data_list, columns=['title', 'link'])
        # Open in append mode; emit the header only when the file is empty.
        with open('{}.csv'.format(self.name), 'a', newline='', encoding='utf-8') as f:
            frame.to_csv(f, sep=',', index=True, header=f.tell() == 0)

    def run(self):
        """Crawl listing pages starting at self.url until there is no next page."""
        next_url = self.url
        while next_url is not None:
            # Fetch the listing page, extract its posts and the next-page url,
            # then persist this page's posts.
            data = self.get_data(next_url)
            data_list, next_url = self.parse_data(data)
            self.save_data(data_list)


if __name__ == '__main__':
    # Crawl the whole forum end to end.
    spider = Tieba("河南工程学院")
    spider.run()