import hashlib
import json
import re
import smtplib
import sqlite3
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
ua = UserAgent()

# Load runtime configuration (SMTP settings, URLs, poll interval) from
# config.json.  encoding="utf-8" is explicit because the default locale
# encoding (e.g. on Windows) would garble non-ASCII config values.
with open("config.json", "r", encoding="utf-8") as f:
    config = json.load(f)

# Open (or create) the SQLite database that remembers already-seen links.
conn = sqlite3.connect("gonggao_links.db")
cursor = conn.cursor()

# Create the links table on first run; url is UNIQUE so duplicates are
# rejected at the database level (INSERT OR IGNORE relies on this).
cursor.execute(
    """
CREATE TABLE IF NOT EXISTS links (
    id INTEGER PRIMARY KEY,
    url TEXT UNIQUE
)
"""
)
conn.commit()


# 存储链接到数据库
def store_link(url):
    """Record *url* in the links table; duplicates are silently skipped."""
    # Using the connection as a context manager commits on success.
    with conn:
        conn.execute("INSERT OR IGNORE INTO links (url) VALUES (?)", (url,))


# 检查链接是否已存在
def link_exists(url):
    """Return True if *url* has already been stored in the links table."""
    row = cursor.execute("SELECT 1 FROM links WHERE url = ?", (url,)).fetchone()
    return row is not None


# (superseded) The index page URL is now read from config.json["index_url"]:
# url = "https://rlsbj.cq.gov.cn/zwxx_182/sydw/sydwgkzp2024/index.html"


def get_all_links(content):
    """Extract announcement links from the index page HTML.

    Scans every <ul class="rsj-list1"> for anchor hrefs, stores any link
    not yet in the database, and returns only the newly seen links.

    :param content: HTML text of the index page
    :return: list of href strings not previously stored
    """
    print("正在抓取链接...")
    newlinks = []
    soup = BeautifulSoup(content, "html.parser")
    for ul in soup.find_all("ul", class_="rsj-list1"):
        for a in ul.find_all("a"):
            href = a.get("href")
            # BUG FIX: removed a stray debug print() that emitted a blank
            # line per anchor, and skip anchors without an href attribute
            # (a.get returns None) instead of storing None in the DB.
            if not href:
                continue
            if link_exists(href):
                print(f"链接已存在: {href}")
            else:
                store_link(href)
                print(f"新链接已存储: {href}")
                newlinks.append(href)
    return newlinks


# Hash of the previously fetched page content.
# NOTE(review): this module-level value appears unused — main() reads its
# own local last_hash from last_hash.txt; consider removing.
last_hash = ""


def get_file_link(content):
    """Find the job-table attachment link and the article title in a detail page.

    :param content: HTML text of an announcement detail page
    :return: (file_link, file_title) tuple; either element is "" when the
             corresponding markup is not found.

    BUG FIX: the failure branches previously did a bare ``return`` (None),
    which crashed the caller's tuple unpacking with a TypeError.  The
    function now always returns a 2-tuple.
    """
    print("正在抓取文件链接...")
    file_link = ""
    file_title = ""

    # The article title lives in a <meta name="ArticleTitle"> tag.
    match = re.search(r'meta name="ArticleTitle" content="(.*)">', content)
    if not match:
        print("未找到匹配项")
        return file_link, file_title
    file_title = match.group(1)
    print(file_title)

    # Attachment markup follows the literal marker 'hasFJ' in the page source.
    match = re.search(r"hasFJ(.*)", content)
    if not match:
        print("未找到匹配项")
        return file_link, file_title
    href = match.group(1)
    print(href)

    # Parse the attachment fragment and pick the anchor whose text names
    # the job listing table.
    soup = BeautifulSoup(href, "html.parser")
    for a in soup.find_all("a"):
        if "岗位一览表" in a.text:
            file_link = a.get("href")
            print(file_link)
    return file_link, file_title


def fetch_content(url):
    """Fetch *url* and return its body decoded as UTF-8, or None on failure.

    A random User-Agent header is sent to reduce the chance of being blocked.

    :param url: page to download
    :return: response text, or None on any request error
    """
    try:
        headers = {"User-Agent": ua.random}
        # BUG FIX: headers was passed positionally, which requests.get()
        # interprets as query *params* — the User-Agent was never sent.
        # A timeout also prevents the poll loop from hanging forever.
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()  # raise on HTTP error status
        response.encoding = "utf-8"  # site serves UTF-8 Chinese text
        return response.text
    except requests.RequestException as e:
        print(f"请求错误: {e}")
        return None


def get_html_msg(file_title, file_link, article_link):
    """Build the HTML email body linking to the article and its job table.

    :param file_title: article title, used as page title and heading
    :param file_link: URL of the attachment ("岗位一览表")
    :param article_link: URL of the announcement article
    :return: complete HTML document as a string
    """
    return f"""
    <!DOCTYPE html>
    <html lang="en">
    <head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>{file_title}</title>
</head>
<body>
    <div class="container">
        <div class="header">
            <h1>{file_title}</h1>
        </div>
        <div class="content">
            <a href="{article_link}">{file_title}</a>
            <br>
            <br>
            <br>
            <br>
            <a href="{file_link}">岗位一览表</a>
            <br>
        </div>
        <div class="footer">
            <p>祝你有美好的一天！</p>
        </div>
    </div>
</body>
</html>
    """


def send_html_email(title, html_content):
    """Send *html_content* as an HTML email with subject *title*.

    SMTP connection details and addresses come from config.json.

    :param title: subject line of the message
    :param html_content: HTML body to send
    """
    smtp_server = config["smtp_server"]  # SMTP server host
    smtp_port = config["smtp_port"]      # SMTP SSL port
    from_email = config["from_email"]    # sender (login) address
    password = config["smtp_password"]   # password or app authorization code
    to_email = config["to_email"]        # recipient address

    # Build the message.
    msg = MIMEMultipart("alternative")
    msg["Subject"] = title
    msg["From"] = from_email
    msg["To"] = to_email
    msg.attach(MIMEText(html_content, "html"))

    try:
        # BUG FIX: the sendmail() call was commented out, so the function
        # reported success without ever sending anything.  The context
        # manager also guarantees the connection is closed even when
        # login/sendmail raises (server.quit() was previously skipped).
        with smtplib.SMTP_SSL(smtp_server, smtp_port) as server:
            server.login(from_email, password)
            server.sendmail(from_email, to_email, msg.as_string())
        print("邮件发送成功")
    except Exception as e:
        print(f"邮件发送失败: {e}")


def compare_content(new_content, last_hash):
    """Detect index-page changes and email each newly found announcement.

    :param new_content: freshly fetched index page HTML (may be falsy)
    :param last_hash: MD5 hex digest of the previously seen content
    :return: the new digest when the content changed, else *last_hash*
    """
    if not new_content:
        return last_hash
    new_hash = hashlib.md5(new_content.encode("utf-8")).hexdigest()
    if new_hash == last_hash:
        return last_hash

    print("内容已更新!")
    for link in get_all_links(new_content):
        # Index links are relative, e.g. "./202408/t20240808_13484246.html".
        # BUG FIX: lstrip("./") strips *characters*, not a prefix, so it
        # would also mangle links such as "../x"; test the prefix instead.
        if link.startswith("./"):
            link = link[2:]
        link = config["content_base_url"] + link

        content = fetch_content(link)
        if content is None:  # network failure — skip this announcement
            continue
        # BUG FIX: get_file_link may return None (no title/attachment
        # markup found); unpacking that directly raised a TypeError.
        result = get_file_link(content)
        if not result:
            continue
        file_link, file_title = result
        if not file_link:  # no job-table attachment on this page
            continue
        # Attachment hrefs are relative to the article's directory.
        file_link = link.rsplit("/", 1)[0] + file_link.lstrip(".")
        html_msg = get_html_msg(
            file_link=file_link, file_title=file_title, article_link=link
        )
        send_html_email(file_title, html_msg)
    return new_hash


def main():
    """Run one poll cycle: load the saved hash, fetch, compare, persist."""
    # BUG FIX: a missing last_hash.txt (e.g. on the very first run) raised
    # FileNotFoundError; treat that as "no previous hash".
    try:
        with open("last_hash.txt", "r") as file:
            last_hash = file.read().strip()
    except FileNotFoundError:
        last_hash = ""
    url = config["index_url"]
    content = fetch_content(url)
    if content:
        last_hash = compare_content(content, last_hash)
        with open("last_hash.txt", "w") as file:
            file.write(last_hash)


# Entry point: poll the index page forever, sleeping between checks.
if __name__ == "__main__":
    while True:
        main()
        time.sleep(int(config["cycle_time"]))  # interval in seconds, from config.json ("cycle_time")
