'''
Author: duliang thinktanker@163.com
Date: 2025-07-27 22:09:09
LastEditors: duliang thinktanker@163.com
LastEditTime: 2025-08-21 22:48:08
FilePath:
Description: Detect updates on the administration office website (检测管理处网站是否更新)
'''
import asyncio
import httpx
import time
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import os
import re
import json
import sqlite3
# schedule: third-party library used to run the scrape job on an hourly timetable
import schedule
from threading import Thread
# project-local module: sends WeChat subscription notifications
import wechatsubscribe

# URL path prefixes of the four monitored sections:
# tender notices, award notices, grassroots news, headline news.
links_type = ('/xxgk/zbtb/cgxx', '/xxgk/zbtb/cjgs', '/lysl/xwzx/jcdt',
              '/lysl/xwzx/lyyw')
# Per-category notice count, read once at import time from the shared
# settings file. NOTE(review): `version` is read but never used in this
# file — confirm it is needed elsewhere.
with open('./config/setting.json', 'r', encoding='utf8') as f:
    settings = json.load(f)
    version = settings['version']
    mainpagecount = settings['mainpagecount']
limit_per_type = mainpagecount - 1
# department = '刘老涧'
# Run main() at the top of every hour from 08:00 through 22:00.
# The lambda defers the lookup of main(), which is defined further down
# in this file, so referencing it here at import time is safe.
for hour in range(8, 23):
    schedule.every().day.at(f"{hour:02d}:00").do(lambda: asyncio.run(main()))


async def fetch_page(url):
    """
    Fetch a single URL asynchronously.

    Returns a (url, body_text) tuple; body_text is None when the request
    failed or the server answered with an error status (a message is
    printed in both cases).
    """
    async with httpx.AsyncClient() as client:
        try:
            resp = await client.get(url, timeout=10.0)
            resp.raise_for_status()  # surface non-2xx answers as errors
        except httpx.RequestError as exc:
            print(f"请求失败: {url}, 错误: {exc}")
            return url, None
        except httpx.HTTPStatusError as exc:
            print(f"HTTP错误: {url}, 错误: {exc}")
            return url, None

        print(f"访问成功: {url}, 状态码: {resp.status_code}")
        print(f"响应内容长度: {len(resp.text)} 字符")
        return url, resp.text


def parse_html_content(url, html_content):
    """
    Extract link targets and their captions from an HTML page.

    Returns a list of {'href': absolute_url, 'title': caption} dicts for
    every <a href=...> whose title attribute or visible text is non-empty.
    Relative hrefs are resolved against *url*. Empty input yields [].
    """
    if not html_content:
        return []

    soup = BeautifulSoup(html_content, 'html.parser')
    extracted = []

    # Only anchors that actually carry an href attribute
    for anchor in soup.find_all('a', href=True):
        href = anchor.get('href')
        # Prefer the title attribute; fall back to the anchor's text
        caption = anchor.get('title') or anchor.get_text(strip=True)
        if not caption:
            continue
        extracted.append({
            'href': urljoin(url, href) if href else '',
            'title': caption,
        })

    return extracted


def html_to_miniprogram_nodes(html_content):
    """
    Convert an article HTML page into the nodes format consumed by the
    WeChat mini-program rich-text component.

    Only a whitelist of tags is kept; class/style attributes are copied,
    <img> sources are prefixed with the site origin and forced to full
    width, and <a> keeps its href. Returns a list of node dicts
    (possibly empty).
    """
    if not html_content:
        return []

    # Parse the page with BeautifulSoup
    soup = BeautifulSoup(html_content, 'html.parser')

    # Locate the article body container; fall back to broader scopes
    content_div = soup.find('div', class_='bt-box-main-txt')
    if not content_div:
        content_div = soup.find('div', id='bt-box-zoom')

    if not content_div:
        # No known article container — fall back to the whole <body>
        content_div = soup.find('body')

    if not content_div:
        return []

    def convert_element(element):
        """
        Recursively convert a BeautifulSoup element's children into
        mini-program node dicts.
        """
        nodes = []

        for child in element.children:
            if child.name is None:  # text node
                text = str(child).strip()
                if text:
                    # Drop embedded newlines before emitting the text node
                    text = text.replace('\n', '').replace('\r', '')
                    if text:
                        nodes.append({'type': 'text', 'text': text})
            else:  # element node
                # Whitelist of tags the rich-text component supports
                supported_tags = {
                    'p', 'div', 'span', 'strong', 'b', 'em', 'i', 'u', 's',
                    'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'br', 'hr', 'a', 'img',
                    'ul', 'ol', 'li', 'dl', 'dt', 'dd', 'table', 'tr', 'td',
                    'th', 'thead', 'tbody', 'tfoot', 'caption'
                }

                if child.name.lower() in supported_tags:
                    node = {'type': 'node', 'name': child.name.lower()}

                    # Copy over the attributes we care about
                    attrs = {}
                    if child.attrs:
                        # class may be a list in BeautifulSoup; join it
                        if 'class' in child.attrs:
                            if isinstance(child.attrs['class'], list):
                                attrs['class'] = ' '.join(child.attrs['class'])
                            else:
                                attrs['class'] = child.attrs['class']

                        # Inline style is passed through unchanged
                        if 'style' in child.attrs:
                            attrs['style'] = child.attrs['style']

                        # <img>: absolutize src and force full-width display.
                        # NOTE(review): the site prefix is prepended
                        # unconditionally — an already-absolute src would be
                        # mangled; confirm the site only emits relative paths.
                        if child.name.lower() == 'img':
                            if 'src' in child.attrs:
                                attrs[
                                    'src'] = "https://ly.jswater.org.cn/" + child.attrs[
                                        'src']
                            if 'alt' in child.attrs:
                                attrs['alt'] = child.attrs['alt']
                            # Full-width style; overrides any width/height
                            attrs['style'] = 'width: 100%; height: auto;'
                            if 'width' in child.attrs:
                                # Original width attribute deliberately dropped
                                pass
                            if 'height' in child.attrs:
                                # Original height attribute deliberately dropped
                                pass

                        # <a>: keep the hyperlink target
                        if child.name.lower() == 'a' and 'href' in child.attrs:
                            attrs['href'] = child.attrs['href']

                    if attrs:
                        node['attrs'] = attrs

                    # Recurse into children.
                    # NOTE(review): `child.children` is an iterator and is
                    # always truthy, so this condition never skips anything;
                    # the recursion simply returns [] for childless elements.
                    if child.children:
                        children = convert_element(child)
                        if children:
                            node['children'] = children

                    nodes.append(node)
        return nodes

    # Convert the located container into the final node list
    nodes = convert_element(content_div)
    return nodes


async def fetch_multiple_pages(urls):
    """
    Fetch every URL concurrently.

    Returns a list of (url, body_text_or_None) tuples in the same order
    as *urls* (asyncio.gather preserves ordering).
    """
    return await asyncio.gather(*(fetch_page(target) for target in urls))


def sanitize_filename(filename):
    """
    Make a string safe to use as a filename.

    Characters that are illegal on common filesystems (< > : " / \\ | ? *)
    are replaced with underscores and the result is capped at 100 chars.
    """
    cleaned = re.sub(r'[<>:"/\\|?*]', '_', filename)
    # Slicing is a no-op for strings already within the limit
    return cleaned[:100]


def init_database(db_path='./data/tender.db'):
    """
    Create the SQLite database and its tables if they do not exist yet.

    Args:
        db_path: Path to the database file; parent directories are
            created as needed.

    Fix: the original declared created_at as UNIQUE, which made any two
    notices sharing a timestamp collide — and since inserts use
    INSERT OR IGNORE, the second notice was silently dropped.
    """
    parent = os.path.dirname(db_path)
    if parent:  # dirname is '' for bare filenames; makedirs('') would raise
        os.makedirs(parent, exist_ok=True)
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    # Notice table; notice_type distinguishes the four monitored sections
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS tender_notices (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            url TEXT UNIQUE,
            title TEXT,
            nodes_json TEXT,
            notice_type TEXT,  -- e.g. '招标公告', '成交公告', '基层动态', '骆运要闻'
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )
    ''')

    # Storage for rendered title node lists
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS title_nodes (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            nodes_data TEXT,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )
    ''')

    conn.commit()
    conn.close()
    print(f"数据库初始化完成: {db_path}")


def save_tender_to_db(url,
                      title,
                      nodes,
                      pub_date=None,
                      notice_type=None,
                      db_path='./data/tender.db'):
    """
    Insert one notice into the tender_notices table.

    Fixes over the original:
      * the no-pub_date branch listed 3 columns but used 4 '?'
        placeholders, so every insert without a pub_date raised a
        binding error and returned False;
      * the url argument was accepted but never stored, although the
        table has a UNIQUE url column.

    Args:
        url: Source page URL (duplicates are skipped via the UNIQUE
            constraint together with INSERT OR IGNORE).
        title: Notice title.
        nodes: Mini-program rich-text nodes; stored as JSON text.
        pub_date: Optional publication date used as created_at; when
            omitted the column default (CURRENT_TIMESTAMP) applies.
        notice_type: Category label such as '招标公告'.
        db_path: SQLite database file.

    Returns:
        True when a new row was inserted, False when the row already
        existed or the insert failed.
    """
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    try:
        nodes_json = json.dumps(nodes, ensure_ascii=False)
        if pub_date:
            # Use the page's publication date as created_at
            cursor.execute(
                '''
                INSERT OR IGNORE INTO tender_notices
                    (url, title, nodes_json, notice_type, created_at)
                VALUES (?, ?, ?, ?, ?)
                ''', (url, title, nodes_json, notice_type, pub_date))
        else:
            # Let the column default fill created_at
            cursor.execute(
                '''
                INSERT OR IGNORE INTO tender_notices
                    (url, title, nodes_json, notice_type)
                VALUES (?, ?, ?, ?)
                ''', (url, title, nodes_json, notice_type))

        conn.commit()
        if cursor.rowcount > 0:
            print(f"成功保存{notice_type}公告到数据库: {title}")
            return True
        else:
            print(f"公告已存在，跳过保存: {title}")
            return False
    except Exception as e:
        print(f"保存公告到数据库失败: {e}")
        return False
    finally:
        conn.close()


def save_title_nodes_to_db(title_nodes, db_path='./data/tender.db'):
    """
    Store one serialized title-nodes structure in the title_nodes table.

    Returns True on success; on any failure the error is printed and
    False is returned. The connection is always closed.
    """
    db = sqlite3.connect(db_path)

    try:
        payload = json.dumps(title_nodes, ensure_ascii=False)
        db.execute(
            '''
            INSERT INTO title_nodes (nodes_data)
            VALUES (?)
        ''', (payload, ))

        db.commit()
        print("成功保存标题节点到数据库")
        return True
    except Exception as e:
        print(f"保存标题节点到数据库失败: {e}")
        return False
    finally:
        db.close()


def get_recent_titles_by_type(db_path='./data/tender.db', limit_per_type=5):
    """
    Collect the newest rows for each of the four monitored categories.

    Returns a flat list of (title, notice_type, created_at, url) tuples,
    grouped in the fixed category order (招标公告, 成交公告, 基层动态,
    骆运要闻) and newest-first within each category. On any database
    error an empty list is returned.
    """
    connection = sqlite3.connect(db_path)
    cur = connection.cursor()

    try:
        collected = []
        # Fixed category order mirrors the home-page layout
        for category in ('招标公告', '成交公告', '基层动态', '骆运要闻'):
            cur.execute(
                '''
                SELECT title, notice_type, created_at, url
                FROM tender_notices 
                WHERE notice_type = ?
                ORDER BY created_at DESC 
                LIMIT ?
            ''', (category, limit_per_type))
            collected += cur.fetchall()

        return collected
    except Exception as e:
        print(f"获取公告标题失败: {e}")
        return []
    finally:
        connection.close()


def check_title_exists(title, db_path='./data/tender.db'):
    """
    Return True when a notice with exactly this title is already stored.

    Any database error is reported on stdout and treated as "not
    present" (False).
    """
    connection = sqlite3.connect(db_path)
    cur = connection.cursor()

    try:
        cur.execute(
            '''
            SELECT COUNT(*) FROM tender_notices WHERE title = ?
            ''', (title, ))

        (hits, ) = cur.fetchone()
        return hits > 0
    except Exception as e:
        print(f"检查标题存在性失败: {e}")
        return False
    finally:
        connection.close()


async def main():
    """
    Run one complete scrape-and-publish cycle.

    1. Ensure the SQLite schema exists (and migrate older databases).
    2. Fetch the portal home page and collect links belonging to the
       four monitored sections (links_type).
    3. Download each not-yet-stored notice, extract its publication
       date, convert the body to mini-program rich-text nodes and
       save it to the database.
    4. Rebuild slot 0 of ./config/nodes.json with the newest titles
       per category; when the content changed, notify subscribed
       WeChat users in background threads.
    """
    # Initialize the database (creates tables when missing)
    init_database()

    # In-place schema migration for databases created by older versions
    conn = sqlite3.connect('./data/tender.db')
    cursor = conn.cursor()
    # Add the created_at column when it is missing
    cursor.execute("PRAGMA table_info(tender_notices)")
    columns = [column[1] for column in cursor.fetchall()]
    if 'created_at' not in columns:
        cursor.execute(
            "ALTER TABLE tender_notices ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP"
        )
    # Add the notice_type column when it is missing
    if 'notice_type' not in columns:
        cursor.execute(
            "ALTER TABLE tender_notices ADD COLUMN notice_type TEXT")
    conn.close()

    urls = [
        "https://ly.jswater.org.cn/",
        # "https://ly.jswater.org.cn/lysl/xwzx/lyyw/index.html",
        # "https://ly.jswater.org.cn/lysl/xwzx/jcdt/index.html"
    ]

    print("开始同时访问页面...")
    start_time = time.time()

    results = await fetch_multiple_pages(urls)

    end_time = time.time()
    print(f"访问完成，耗时: {end_time - start_time:.2f} 秒")

    # Collect all <a> links from the pages that loaded successfully
    all_links = []
    for url, content in results:
        if content:
            print(f"{url} 页面访问成功")
            # Extract href/title pairs from the HTML
            links = parse_html_content(url, content)
            all_links.extend(links)
            print(f"从 {url} 中提取到 {len(links)} 个链接")
        else:
            print(f"{url} 页面访问失败")

    # Keep only the links that point into a monitored section
    tender_links = []
    for link_info in all_links:
        # links_type holds the four section URL path prefixes
        if any(key in link_info['href'] for key in links_type):
            tender_links.append(link_info)

    print(f"\n找到 {len(tender_links)} 个链接")

    # Download every new notice page and persist it
    print(f"\n开始获取 {len(tender_links)} 个内容...")
    success_count = 0

    for i, link_info in enumerate(tender_links, 1):
        # Target page for this link
        url = link_info['href']
        title = link_info['title']
        # Skip pure navigation entries ("更多>" is a "more" link; the
        # other two are section headers, not articles)
        if "更多>" == title or '成交公示' == title or '采购信息' == title:
            continue

        # Skip titles already stored — avoids re-downloading known notices
        if check_title_exists(title):
            print(f"标题已存在，跳过访问: {title}")
            continue

        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(url, timeout=10.0)
                response.raise_for_status()
                html_content = response.text

                # Publication date from the page's PubDate meta tag
                pub_date = None
                soup = BeautifulSoup(html_content, 'html.parser')
                pub_date_meta = soup.find('meta', {'name': 'PubDate'})
                if pub_date_meta and pub_date_meta.get('content'):
                    pub_date = pub_date_meta.get('content')

                # Classify the notice by its URL path prefix.
                # NOTE(review): there is no else branch — if no prefix
                # matched, notice_type would be unbound (NameError, caught
                # by the outer except). tender_links is pre-filtered by
                # these same prefixes, so one branch always matches.
                if '/xxgk/zbtb/cgxx' in url:
                    notice_type = '招标公告'
                elif '/xxgk/zbtb/cjgs' in url:
                    notice_type = '成交公告'
                elif '/lysl/xwzx/jcdt' in url:
                    notice_type = '基层动态'
                elif '/lysl/xwzx/lyyw' in url:
                    notice_type = '骆运要闻'

                # Convert the article body to mini-program rich-text nodes
                nodes = html_to_miniprogram_nodes(html_content)

                # Persist, including publication date and category
                if save_tender_to_db(url, title, nodes, pub_date, notice_type):
                    success_count += 1
                    print(f"成功处理{notice_type}: {title}")
                else:
                    print(f"处理失败: {title}")

        except Exception as e:
            print(f"获取或处理 {title} 失败: {e}")

    print(
        f"\n处理完成: 成功 {success_count} 个，失败 {len(tender_links) - success_count} 个"
    )
    # if success_count:
    # Build a <ul> node tree from the scraped titles. Legacy output:
    # the save call further down is commented out, so this structure is
    # currently built but not persisted.
    title_nodes = {"name": "ul", "children": []}

    for i, link_info in enumerate(tender_links, 1):
        title_text = link_info['title']
        if i == 1:
            title_font_size = "20px"  # first entry rendered as a heading
            color = "blue"
            text = f"{title_text}"
        else:
            title_font_size = "15px"
            color = "black"
            text = f"{i-1:02d}. {title_text}"

        # Each <li> carries a data attribute so the mini-program's touch
        # handler can recover the target link
        title_nodes["children"].append({
            "name":
            "li",
            "attrs": {
                "style": f"color:{color};font-size:{title_font_size};",
                # data-id holds the href for the touch event handler
                "data-id": link_info['href']
            },
            "children": [{
                "type": "text",
                "text": f"{text}"
            }]
        })

    # Legacy persistence of the flat title list (disabled):
    # save_title_nodes_to_db(title_nodes)
    # print(f"\n已将 {len(tender_links)} 个标题保存到数据库")

    # Re-read the per-category count from settings (it may have changed
    # since module import), then load the newest titles of each category
    with open('./config/setting.json', 'r', encoding='utf8') as f:
        settings = json.load(f)
        mainpagecount = settings['mainpagecount']
    limit_per_type = mainpagecount - 1
    recent_titles = get_recent_titles_by_type(limit_per_type=limit_per_type)

    # Group the rows by notice type
    grouped_titles = {}
    for title, notice_type, created_at, url in recent_titles:
        if notice_type not in grouped_titles:
            grouped_titles[notice_type] = []
        grouped_titles[notice_type].append(
            (title, notice_type, created_at, url))

    # Each notice type gets its own standalone nodes rather than being
    # nested under one shared parent

    # Accent colors per section (darkened for contrast)
    type_styles = {
        '招标公告': {
            'color': '#0056b3',  # darker blue
            'border_color': '#0056b3'
        },
        '成交公告': {
            'color': '#0d781e',  # darker green
            'border_color': '#0d781e'
        },
        '基层动态': {
            'color': '#e67f00',  # darker orange
            'border_color': '#e67f00'
        },
        '骆运要闻': {
            'color': '#5a1a92',  # darker purple
            'border_color': '#5a1a92'
        }
    }

    # Flat list of section headers followed by their notice cards
    nodes_list = []
    title_list = []
    nodes_id = 0
    # Emit the sections in the fixed type_styles order
    for notice_type in list(type_styles.keys()):
        if notice_type in grouped_titles and grouped_titles[notice_type]:
            # Section header styled with the section's accent color
            type_title_node = {
                "name": "div",
                "attrs": {
                    "style":
                    f"display: flex; align-items: center; margin: 20px 0 15px 0; padding: 12px 20px; background: linear-gradient(90deg, {type_styles[notice_type]['color']} 0%, rgba(255,255,255,0) 100%); color: {type_styles[notice_type]['color']}; border-left: 6px solid {type_styles[notice_type]['border_color']}; border-radius: 8px; font-weight: bold; font-size: 20px; box-shadow: 0 2px 8px rgba(0,0,0,0.1);"
                },
                "children": [{
                    "type": "text",
                    "text": notice_type
                }]
            }

            # Append the section header before its notices
            nodes_list.append(type_title_node)

            # One card node per notice in this section
            for i, (title, _, created_at,
                    url) in enumerate(grouped_titles[notice_type], 1):
                # Render the stored timestamp in a friendly format
                try:
                    # Handles both plain ISO strings and trailing-'Z' UTC
                    from datetime import datetime
                    if created_at:
                        dt = datetime.fromisoformat(
                            created_at.replace('Z', '+00:00')
                        ) if 'Z' in created_at else datetime.fromisoformat(
                            created_at)
                        formatted_date = dt.strftime('%Y-%m-%d %H:%M')
                    else:
                        formatted_date = "未知时间"
                except Exception:
                    formatted_date = created_at if created_at else "未知时间"

                # Card node: numbered badge, wrapping title, timestamp row
                # and a chevron arrow; "id" links the card to touch events
                notice_node = {
                    "name":
                    "div",
                    "attrs": {
                        "style":
                        "margin: 5px 0; padding: 5px 5px; background: #ffffff; border-left: 4px solid #ddd; border-radius: 10px; transition: all 0.3s ease; cursor: pointer; box-shadow: 0 2px 6px rgba(0,0,0,0.05); display: flex; align-items: center;"
                    },
                    "children": [
                        {
                            "name": "div",
                            "attrs": {
                                "style":
                                f"display: inline-flex; width: 20px; height: 20px; line-height: 20px; text-align: center; background: {type_styles[notice_type]['color']}; color: white; border-radius: 50%; margin-right: 15px; font-size: 14px; justify-content: center; align-items: center; flex-shrink: 0;"
                            },
                            "children": [{
                                "type": "text",
                                "text": str(i)
                            }]
                        },
                        {
                            "name":
                            "div",
                            "attrs": {
                                "style": "flex-grow: 1; overflow: hidden;"
                            },
                            "children": [
                                {
                                    "name":
                                    "div",
                                    "attrs": {
                                        # Wrap long titles instead of clipping
                                        "style":
                                        "color: #333; font-size: 18rpx; font-weight: 500; white-space: normal; word-wrap: break-word; word-break: break-all;"
                                    },
                                    "children": [{
                                        "type": "text",
                                        "text": f"{title}"
                                    }]
                                },
                                {
                                    "name":
                                    "div",
                                    "attrs": {
                                        "style":
                                        "color: #999; font-size: 12px; margin-top: 5px; display: flex; align-items: center;"
                                    },
                                    "children": [{
                                        "name":
                                        "span",
                                        "attrs": {
                                            "style": "margin-right: 10px;"
                                        },
                                        "children": [{
                                            "type":
                                            "text",
                                            "text":
                                            f"🕒 {formatted_date}"
                                        }]
                                    }]
                                }
                            ]
                        },
                        {
                            "name":
                            "div",
                            "attrs": {
                                "style": "margin-left: 10px; flex-shrink: 0;"
                            },
                            "children": [{
                                "name": "div",
                                "attrs": {
                                    "style":
                                    "width: 8px; height: 8px; border-top: 2px solid #999; border-right: 2px solid #999; transform: rotate(45deg);"
                                }
                            }]
                        }
                    ],
                    "id":
                    nodes_id
                }
                # Collect the card
                nodes_list.append(notice_node)
                # Collect (title, date) for the notification diff below.
                # (An earlier version only notified for 公告 categories.)
                # if '公告' in notice_type:
                #     title_list.append((title, formatted_date))
                title_list.append((title, formatted_date))
                nodes_id += 1

    # Load the current nodes.json; slot 0 holds the home-page node list
    nodes_file_path = './config/nodes.json'
    if os.path.exists(nodes_file_path):
        with open(nodes_file_path, 'r', encoding='utf-8') as f:
            try:
                nodes_data = json.load(f)
            except json.JSONDecodeError:
                nodes_data = []
    else:
        nodes_data = []

    # Normalize to a list so slot 0 can be assigned
    if not isinstance(nodes_data, list):
        nodes_data = []

    # Reserve slot 0 when the file was empty
    if len(nodes_data) == 0:
        nodes_data.append({})

    # Update slot 0 and notify only when the content actually changed
    new_title = []
    # Compare the freshly built list against what is stored in slot 0
    if nodes_data[0] != nodes_list:
        for _title in title_list:
            if _title[0] not in str(nodes_data[0]):
                new_title.append(_title)
        print(" nodes_data[0] 和 nodes_list 不相同，准备更新...")
        print("新标题:", new_title)
        conn = sqlite3.connect('./data/users.db')
        cursor = conn.cursor()
        # Collect the openids of users subscribed to tender notifications
        cursor.execute(
            "SELECT openid FROM users WHERE template_id LIKE '%ozzXbRVRDzQcIzY0AsyxpTF8B7DutU7-YnGA72T1it0%'"  # template id of the tender subscription
        )
        openid_list = cursor.fetchall()
        conn.close()
        if openid_list:
            # Template fields: "项目名称{{thing1.DATA}}地区{{thing2.DATA}}发布日期{{date3.DATA}}"
            for _title in new_title:
                for openid in openid_list:
                    # One fire-and-forget daemon thread per recipient.
                    # NOTE(review): the query parameter interpolates the
                    # (title, date) tuple's repr — confirm the mini program
                    # expects that format.
                    th = Thread(target=wechatsubscribe.send_notification,
                                args=(openid[0], "招标",
                                      (_title[0][0:10], _title[0][0:3],
                                       _title[1]),
                                      f"pages/index/index?query={_title}"),
                                daemon=True)
                    th.start()
                    # th.join()
        # Store the rebuilt list in slot 0
        nodes_data[0] = nodes_list
        print("nodes_data[0]已更新为nodes_list")
    else:
        print("nodes_data[0]和nodes_list相同，无需更新")

    # Write the file back
    os.makedirs(os.path.dirname(nodes_file_path), exist_ok=True)
    with open(nodes_file_path, 'w', encoding='utf-8') as f:
        json.dump(nodes_data, f, ensure_ascii=False, indent=2)

    print(f"\n已将每种类型最近{limit_per_type}个公告标题保存到 {nodes_file_path} 的第0个位置")


def loop():
    """
    Block forever, firing any due scheduled jobs.

    Polls the `schedule` registry once per minute; never returns.
    """
    print("定时任务已启动，将在每天早上8点到晚上10点每隔一小时整点执行数据读取...")
    banner = "\033[4;33;45m每分钟检查一次定时任务...\033[0m"
    while True:
        print(banner, end='\r')
        schedule.run_pending()
        time.sleep(60)  # wake up once per minute to poll


def getTitleByid(id):
    '''
    Resolve a node id (assigned when nodes.json was built) to the
    headline text stored in that node, reading ./config/nodes.json.
    Returns None when the id is unknown or the file is missing/corrupt.
    '''
    try:
        with open("./config/nodes.json", "r", encoding="utf-8") as f:
            cached = json.load(f)

        # Slot 0 holds the list of home-page nodes; scan it for the id
        if cached and len(cached) > 0:
            for entry in cached[0]:
                if 'id' in entry and entry['id'] == id:
                    # The headline text lives at a fixed depth inside the
                    # card node structure
                    return entry["children"][1]["children"][0]["children"][
                        0]["text"]

        # No node carries this id
        return None
    except FileNotFoundError:
        print("nodes.json文件未找到")
        return None
    except json.JSONDecodeError:
        print("nodes.json文件格式错误")
        return None
    except Exception as e:
        print(f"查询过程中发生错误: {e}")
        return None


def getnodesjsonByTitle(title, db_path='./data/tender.db'):
    '''
    Return the stored nodes_json string for the notice with the given
    title, or None (with a message) when no matching row exists.

    Args:
        title: Exact notice title to look up.
        db_path: SQLite database file (default matches the original
            hard-coded path, so existing callers are unaffected).

    Fix: the original never closed the connection, leaking one sqlite
    handle per lookup.
    '''
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT nodes_json FROM tender_notices WHERE title=?",
                       (title, ))
        row = cursor.fetchone()
    finally:
        conn.close()

    if row:
        return row[0]
    print("未找到匹配的记录")
    return None


def getnodesjsonByid(id):
    # Resolve an id from nodes.json to its title, then fetch that
    # notice's nodes_json from the database. Returns None when either
    # lookup fails (a None title simply matches no row).
    return getnodesjsonByTitle(getTitleByid(id))


if __name__ == "__main__":
    from mqttser import mqtt_run
    Thread(target=mqtt_run, args=(
        '骆运水利网',
        ("software", 0),
    ), daemon=True).start()
    loop()
    # print(getnodesjsonByid(17))
