# -*- coding: utf-8 -*-
# 创建时间：2021/7/16 15:31
from bs4 import BeautifulSoup
import requests
import time, math, random, json
import pymysql
import re
from hhy import FileUtil,DateUtil,ToolUtil,HttpUtil
__author__ = 'LuckyHhy'

#爬取古诗词存入数据库

# 获取网页内容
# Fetch a web page and return its body as text.
def get_content(url):
    """Download *url* and return the response HTML as a string.

    A randomized User-Agent is sent to reduce the chance of being
    blocked.  Redirects are NOT followed (an anti-crawler redirect page
    comes back verbatim) and the request times out after 20 seconds.
    """
    headers = {"User-Agent": HttpUtil.AgentRandom()}
    # The original built a fresh Session per call; since no cookies or
    # connection state were ever reused, a plain GET is equivalent.
    resp = requests.get(url, headers=headers, timeout=20, allow_redirects=False)
    return resp.text



def grab_data(keys):
    """Scrape every result page for the keyword *keys* and insert each
    quote into the `ky_mingju` table.

    One row per quote: (type, content, author, unix-timestamp).  Commits
    after every page so a mid-run failure keeps the pages already done;
    the connection is always closed, even on error.
    """
    all_page = get_all_page(keys)
    url = "https://so.gushiwen.cn/mingjus/default.aspx?page={0}&tstr={1}"
    query = "insert into ky_mingju (type, content, author,createtime) values (%s,%s,%s,%s)"
    db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='root', db='pay', charset='utf8')
    try:
        with db.cursor() as cursor:
            # get_all_page() returns page_count + 1, so this covers 1..page_count.
            for i in range(1, all_page):
                last_url = url.format(i, keys)
                page_content = get_content(last_url)
                print("正在爬取第{}页数据".format(i))
                soup = BeautifulSoup(page_content, 'html.parser')
                page_text = soup.find(class_="main3").find(class_="left").find(class_="sons").find_all(class_="cont")
                for z in page_text:
                    all_a = z.find_all("a")
                    # First <a> is the quote text; the second (when present)
                    # is the source/author attribution.
                    content = all_a[0].get_text()
                    co = all_a[1].get_text() if len(all_a) > 1 else ''
                    cursor.execute(query, (keys, content, co, int(time.time())))
                # Commit per page so a crash doesn't lose everything scraped so far.
                db.commit()
                print("第{}页数据爬取完成".format(i))
    finally:
        # The original never closed the connection; always release it.
        db.close()


# 保存到文本
# Append text fragments to a log file.
def save_txt(*args, filename='mingju.txt'):
    """Append every string in *args* to *filename* (UTF-8, append mode).

    The file is opened once for all fragments instead of once per
    fragment as before.  *filename* is keyword-only and defaults to the
    original hard-coded log name, so existing callers are unaffected.
    """
    with open(filename, 'a', encoding='utf-8') as f:
        for piece in args:
            f.write(piece)


# 获取总页码
# Determine how many result pages exist for a keyword.
def get_all_page(keys):
    """Return the exclusive upper bound of the result-page range for *keys*.

    Scrapes the pager span (e.g. "共3页") from the first results page and
    returns page_count + 1 so the value can be used directly as a
    ``range()`` stop.
    """
    base_url = "https://so.gushiwen.cn/mingjus/default.aspx?tstr={}".format(keys)
    html = get_content(base_url)  # fixed local-name typo `htnl`
    soup = BeautifulSoup(html, 'html.parser')
    page_text = soup.find(class_="main3").find(class_="left").find(class_="pagesright").find("span").string
    # Raw string avoids the DeprecationWarning for the '\d' escape.
    num = re.findall(r'\d+', page_text)
    return int(num[0]) + 1


# 获取关键词列表
# Fetch the list of available quote-category keywords from the site.
def get_key():
    """Return the category names listed on the mingju index page."""
    url = "https://so.gushiwen.cn/mingjus/default.aspx"
    html = get_content(url)
    soup = BeautifulSoup(html, 'html.parser')
    # Renamed from `type`, which shadowed the builtin of the same name.
    anchors = soup.find(class_="main3").find(class_="left").find(class_="titletype").find(class_="sright").find_all("a")
    return [a.get_text() for a in anchors]


# 单个关键词输入爬取
# Interactively scrape a single keyword chosen by the user.
def grab_sigle():
    """Prompt for one category keyword and scrape it.

    Re-prompts until the input matches a known keyword.  The original
    re-prompted only once and then proceeded with whatever the user
    typed, even if still invalid; the needless ``global keys`` is gone.
    """
    list_str = get_key()
    print("请输入如下关键词：")
    print(list_str)
    keys = str(input("关键词："))
    while keys not in list_str:
        keys = str(input("输入错误请重新输入："))
    # 执行逻辑代码
    grab_data(keys)


# 一次性全部爬取
# Scrape every available keyword in one run.
def grab_all():
    """Fetch the full keyword list and scrape each category in turn."""
    for keyword in get_key():
        print("正在爬取{}关键词数据.....".format(keyword))
        grab_data(keyword)
        print("{}关键词数据爬取完成".format(keyword))

    print("所有的关键词数据爬取完成，感谢使用！！！")


# Program entry: pick which scraping mode to run.
def main():
    """Run the full scrape of every keyword.

    Alternative modes are kept below, commented out.
    """
    grab_all()
    # grab_sigle()    # 单个关键词输入爬取
    # test("励志")    # 爬取测试


# Run the scraper only when this file is executed as a script.
if __name__ == '__main__':
    main()

