# -*- coding:utf-8 -*-
import requests
import time
import re
import datetime
from bs4 import BeautifulSoup
import  string
import pymysql
# One shared HTTP session so connections (and cookies) are reused across requests.
# requests.Session() is the documented constructor; requests.session() is a
# deprecated lowercase alias that just returns Session().
session = requests.Session()

def getNewsDetail(newsurl):
    """Fetch one article page and extract its fields.

    Parameters
    ----------
    newsurl : str
        Absolute URL of the article detail page.

    Returns
    -------
    tuple[str, str, str, str]
        (title, publish time as "YYYY-MM-DD HH:MM:SS", source/author line,
        article body HTML with single quotes replaced by double quotes).
    """
    result = session.get(url=newsurl)
    # 'gbk' is a superset of gb2312; pages declared as gb2312 frequently
    # contain GBK-only characters, which would decode as mojibake otherwise.
    result.encoding = 'gbk'
    soup = BeautifulSoup(result.text, 'html.parser')

    # Article title lives in <div class="arttitle"><h3>...</h3></div>.
    title = soup.find('div', 'arttitle').find('h3').get_text()

    # Publish time: pull the six numeric groups (Y M D h m s) out of the
    # info line and rebuild a MySQL DATETIME-style string.
    digits = re.findall(r"\d+", soup.find('div', 'info').get_text())
    newstime = "{}-{}-{} {}:{}:{}".format(*digits[:6])

    # Source / author line.
    beform = soup.find('span', 'where').get_text()

    # Body: raw HTML of the content divs; single quotes are swapped for
    # double quotes because downstream code embeds this text in SQL.
    newstext = str(soup.select('.articlemain div')).replace("'", '"')
    return title, newstime, beform, newstext

# ---------连接MYSQL数据库-----------
# ---------连接MYSQL数据库-----------
def save_mysql(title, newstime, beform, newstext, classid, classname):
    """Insert one article row into table s_cj20211219.

    Parameters
    ----------
    title, newstime, beform, newstext : str
        Article title, publish time, source line and body HTML.
    classid : int
        Numeric category id.
    classname : str
        Human-readable category name.

    Commits on success; rolls back and logs the error on failure. The
    connection is always closed, even on unexpected exceptions.
    """
    db = pymysql.connect(host='localhost', user='root', password='123456', db='sql_python')
    try:
        cursor = db.cursor()
        # Parameterized query: the driver escapes quotes and other special
        # characters itself. The previous %-formatted SQL broke on embedded
        # quotes and was open to SQL injection.
        sql = ("INSERT INTO s_cj20211219 "
               "(title,date,beform,newstext,classid,classname) "
               "VALUES (%s,%s,%s,%s,%s,%s)")
        try:
            cursor.execute(sql, (title, newstime, beform, newstext, classid, classname))
            db.commit()
            print("write success")
        except Exception as e:
            db.rollback()
            print("write fail")
            print(e)
    finally:
        db.close()


def spider():
    """Crawl the list pages of column ID=1160 and store every article.

    Walks pages 1..857 of the listing, extracts each article link,
    fetches the detail page via getNewsDetail() and writes the result to
    MySQL via save_mysql() as classid=1 / "本地新闻".
    """
    # Request headers never change; build them once instead of per page.
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36',
    }

    for page in range(1, 858):
        # Listing URL for this page.
        url = "http://www.yugan.com.cn/Item/list.asp?ID=1160&page=" + str(page)
        print(url)

        result = session.get(url=url, headers=headers).content
        soup = BeautifulSoup(result, 'html.parser', from_encoding="gb2312")

        # News list lives in <div class="articlelist"><ul>...</ul></div>.
        # BeautifulSoup() itself never returns None, so the useful
        # end-of-data check is the missing list container: stop crawling
        # when a page no longer carries one.
        articlelist = soup.find('div', attrs={'class': 'articlelist'})
        if articlelist is None:
            break
        result_div = articlelist.find('ul')

        # Flatten the markup so the link regexes can match across lines;
        # drop 'amp;' so hrefs with &amp; become plain & URLs.
        result_replace = str(result_div).replace('\n', '').replace('\r', '').replace('\t', '').replace('amp;', '')

        # Each list item looks like "</span> <a href=...>title</a></li>".
        result_list = re.findall('</span> (.*?)</li>', result_replace)

        for item in result_list:
            links = re.findall('href="(.*?)" target="_blank">', item)
            if not links:
                # Malformed entry: skip it instead of crashing on [0].
                continue
            news_url = links[0]
            print(news_url)

            # Be polite: pause between article requests.
            time.sleep(1)

            title, newstime, beform, newstext = getNewsDetail(news_url)
            save_mysql(title, newstime, beform, newstext, 1, "本地新闻")
spider()

# --------存储数据库 -----------
# --------存储数据库 -----------
def save_data():
    """Persist every article returned by read_article_info().

    NOTE(review): read_article_info() and get_content() are not defined
    anywhere in this file — calling save_data() raises NameError unless
    they are supplied by another module. Confirm where they are meant to
    come from (or whether this block is leftover scaffolding).
    """
    # Renamed from `dict` to avoid shadowing the builtin type.
    articles = read_article_info()
    for key, value in articles.items():
        get_content(key, value)
save_data()