import requests
from lxml import etree
from pymysql import connect


class Spider(object):
    """Crawl article lists from hnbitebi.com and store title/link/body rows in MySQL."""

    # Template for the paginated list pages.  Page 1 is also kept in
    # ``self.url`` for backward compatibility with existing callers.
    LIST_URL = 'http://www.hnbitebi.com/hlist-7-{}.html'

    def __init__(self):
        # First list page (kept for compatibility; run() uses LIST_URL).
        self.url = 'http://www.hnbitebi.com/hlist-7-1.html'

        # Browser-like User-Agent so the site does not reject the crawler.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'}
        # Open the database connection.
        self.connect = connect(
            user='root',
            password='123456',
            host='127.0.0.1',
            port=3306,
            database='demotext',
            charset='utf8'
        )
        # Cursor used for all subsequent statements.
        self.cs = self.connect.cursor()
        try:
            # IF NOT EXISTS keeps reruns of the script from raising; the
            # try/except stays as a guard against other setup failures
            # (missing privileges, unreachable schema, ...).
            create_sqli = ("create table if not exists textben "
                           "(id int, title varchar(255),link varchar(255),content longtext);")
            self.cs.execute(create_sqli)
        except Exception as e:
            print("创建数据表失败:", e)
        else:
            print("创建数据表成功;")

    def get_html(self, url):
        """Fetch *url* and return the decoded response body as text.

        A timeout prevents a dead/slow server from hanging the crawl, and the
        encoding is sniffed from the body because requests falls back to
        ISO-8859-1 when the Content-Type header omits a charset, which would
        garble the site's Chinese text.
        """
        response = requests.get(url, headers=self.headers, timeout=10)
        response.encoding = response.apparent_encoding
        return response.text

    def parse_data(self, data):
        """Parse one list page: extract every article title + link, fetch the
        detail page for each, and persist the row."""
        xml = etree.HTML(data)

        title_links = xml.xpath('//ul[@class="list2"]/li//a/@href')
        titles = xml.xpath('//ul[@class="list2"]/li//a/text()')
        # zip() pairs each title with its href; truncates safely if the two
        # XPath result lists ever differ in length.
        for title, title_link in zip(titles, title_links):
            print(title)
            print(title_link)
            contents = self.parse_two_data(title_link)
            print(contents)
            self.save_data(title, title_link, contents)
            print('===============')

    def save_data(self, t, tl, cont):
        """Insert one scraped article into ``textben``.

        :param t: article title
        :param tl: article detail-page URL
        :param cont: full article body text
        """
        # Parameterized query — never interpolate scraped text into SQL.
        sql = 'insert into textben(title,link, content) values(%s, %s, %s)'
        self.cs.execute(sql, [t, tl, cont])
        self.connect.commit()

    def parse_two_data(self, url):
        """Fetch a detail page and return its article body as one string.

        NOTE(review): assumes *url* is absolute — the list-page hrefs appear
        to be full URLs; confirm against the live site.
        """
        content_data = self.get_html(url)
        xml = etree.HTML(content_data)
        # All text nodes inside the article container.
        contents = xml.xpath('//div[@class="con"]/div//text()')
        # Join the fragments into one string and strip Windows line breaks.
        con = ''.join(contents).replace('\r\n', '')
        return con

    def run(self):
        """Crawl list pages 1-4, persisting every article found."""
        try:
            for i in range(1, 5):
                print('正在爬取第{}页'.format(i))
                url = self.LIST_URL.format(i)
                data = self.get_html(url)
                self.parse_data(data)
        finally:
            # Always release DB resources, even when a request or parse
            # fails mid-crawl (the original leaked them on any exception).
            self.cs.close()
            self.connect.close()


if __name__ == '__main__':
    # Entry point: build the spider and crawl all configured pages.
    Spider().run()