import requests
from bs4 import BeautifulSoup
import pymysql
from datetime import  datetime

# Press Shift+F10 to run, or replace this with your own code.
# Double-press Shift to search classes, files, tool windows, actions and settings.
"""
Scraper notes:
- list container CSS class on the index page: list_txtsbg
- dependencies: pip install requests bs4 pymysql lxml
- sample article URL: http://www.kmdc.gov.cn/c/2025-09-06/7050279.shtml
- sample relative image path rewritten through the forwarder:
  path=/upload/resources/image/2025/09/06/3586474_500x500.png
"""
# Origin site that hosts the news list and article detail pages.
host = "http://www.kmdc.gov.cn"

# Forwarding proxy endpoint; a site-relative resource path is appended after
# "path=" so images/links inside saved content resolve through this forwarder.
base_url = "https://kmdc.org.cn:9102/api/news/forward?path="

# Browser-like User-Agent so the site serves the normal HTML pages.
headers = {
    "User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36'
}

def get_db_connect():
    """Create and return a new MySQL connection.

    Connection settings are read from the environment variables DB_HOST,
    DB_USER, DB_PORT, DB_PASSWORD and DB_NAME so credentials are not
    hard-coded in the script. Each falls back to the original placeholder
    value, so behavior is unchanged when the variables are unset.

    :return: an open ``pymysql`` connection with autocommit disabled
             (callers commit explicitly, see ``save_db``).
    """
    import os  # local import: keeps the module-level import block untouched

    connection = pymysql.connect(
        host=os.environ.get('DB_HOST', ''),          # database address
        user=os.environ.get('DB_USER', ''),          # user name
        port=int(os.environ.get('DB_PORT', '3306')),
        password=os.environ.get('DB_PASSWORD', ''),  # password
        database=os.environ.get('DB_NAME', ''),      # database name
        charset='utf8mb4',   # utf8mb4 so emoji and full Unicode are supported
        autocommit=False     # transactions are committed manually
    )
    return connection


def fetch_list():
    """Scrape the news list page and persist every article found.

    Fetches the '/xwdt/dcyw/' listing, parses each <li> row into a record
    via fetch_item(), and stores each record with save_db(). The database
    connection is always closed, even if a row fails mid-loop.

    :raises requests.HTTPError: if the list page request fails.
    :raises RuntimeError: if the expected list container is missing.
    """
    path = '/xwdt/dcyw/'
    response = requests.get(host + path, headers=headers)
    response.raise_for_status()  # fail fast instead of parsing an error page
    contents = response.content.decode(encoding='utf8')
    soup = BeautifulSoup(contents, 'lxml')
    container = soup.find('div', class_="list_txtsbg")
    if container is None:
        # Without this guard the original crashed with an opaque AttributeError.
        raise RuntimeError('list container "list_txtsbg" not found; page layout may have changed')
    rows = container.find_all('li')
    conn = get_db_connect()
    db_data = []
    try:
        for item in rows:
            data = fetch_item(item)
            save_db(data, conn)
            db_data.append(data)
    finally:
        conn.close()  # fixes the original connection leak
    print(db_data)

def fetch_item(row):
    """Parse one list row, fetch its article page, and build a DB record.

    :param row: a <li> Tag from the list page containing a date <span>
                (bracketed, e.g. "[2025-09-06]") and a link <a> to the
                article detail page.
    :return: a tuple matching the column order of save_db()'s INSERT.
    :raises requests.HTTPError: if the article page request fails.
    """
    author = '宣传部'
    category = "生活"
    # Strip the brackets around the publish date, e.g. "[2025-09-06]".
    atr_time = row.find('span').getText().replace('[', '').replace(']', '')
    title = row.find('a').getText()
    href = row.find('a').get('href')
    # Article id is the filename stem, e.g. ".../7050279.shtml" -> "7050279".
    rowid = href.split('/')[-1].split('.')[0]
    print(f'获取[{title}],地址：{host + href}')
    response = requests.get(host + href, headers=headers)
    response.raise_for_status()  # fail fast on HTTP errors
    contents = response.content.decode(encoding='utf8')
    soup = BeautifulSoup(contents, 'lxml')
    container = soup.find('div', class_="txtcen")
    paragraphs = container.find_all('p')
    # The first paragraph doubles as the summary; tolerate articles with
    # no <p> at all (the original crashed on container.find('p').getText()).
    summary = paragraphs[0].getText() if paragraphs else ''
    content = ''.join(str(p) for p in paragraphs)
    imgs = container.find_all('img')
    # Use the first image, routed through the forwarder, as the cover.
    cover = base_url + imgs[0].get('src') if imgs else ''
    # Rewrite site-relative src/href attributes to go through the forwarder.
    content = content.replace('src="/', f'src="{base_url}/')
    content = content.replace('href="/', f'href="{base_url}/')
    data = (rowid, title, title, content, category,
            cover, '昆明市东川区人民政府', author, atr_time, datetime.now(), 0,
            10, 20, summary, 1, datetime.now(), datetime.now(),
            1300000000111, 'python')

    return data

def save_db(data, conn):
    """Insert one news record unless a row with the same Id already exists.

    :param data: tuple in base_news column order; data[0] is the Id.
    :param conn: open pymysql connection (autocommit off; committed here).
    """
    # Parameterized query — the original f-string interpolated data[0]
    # directly into the SQL, an injection risk that also breaks on quotes.
    check_sql = "select count(1) from base_news where Id = %s"
    with conn.cursor() as cursor:  # ensure the cursor is closed
        cursor.execute(check_sql, (data[0],))
        (count,) = cursor.fetchone()
        if count:  # original tested == 1, so >1 existing copies slipped through
            print(f'数据已存在！{data[0]}')
            return

        sql = """
        INSERT INTO `base_news` (`Id`, `Title`, `SubTitle`, `Content`, `Category`, 
        `Cover`, `Source`, `Author`, `PublishTime`, `LastUpdatedTime`,   `IsPinned`, 
        `ViewCount`, `LikeCount`, `Summary`, `IsEnabled`, `CreateTime`, `UpdateTime`, 
        `CreateUserId`, `CreateUserName` ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
        """
        cursor.execute(sql, data)

    conn.commit()
    print('保存成功')

# Script entry point: crawl the list page and store any new articles.
if __name__ == '__main__':
    fetch_list()


