from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
import requests
from ..model import New
from ..auth import get_current_user
from ..sqllite.database import sessionlocal
from bs4 import BeautifulSoup
import re

# Router for jwzx (academic-affairs system) announcement endpoints; mounted
# by the application elsewhere.
jwzx = APIRouter()

# FastAPI dependency: yield a database session and guarantee it is closed.
def get_db():
    """Yield a SQLAlchemy session, closing it when the request is done.

    The session is created *before* the try block: in the original code a
    failure inside ``sessionlocal()`` would leave ``db`` unbound, so the
    ``finally`` clause raised a NameError that masked the real error.
    """
    db = sessionlocal()
    try:
        yield db
    finally:
        db.close()



def saveData(dict_data, db: Session):
    """Persist announcement entries, skipping rows already stored.

    Each entry is a dict with "title", "fileId" and "totalCount" keys; a row
    whose fileId already exists in the table is skipped.  Returns a short
    status string (kept byte-identical to the original Chinese messages).
    """
    if not dict_data:
        return "数据为空"
    for entry in dict_data:
        # Deduplicate first so we don't build a model object only to drop it
        # (the original constructed New before checking for an existing row).
        if db.query(New).filter(New.fileId == entry["fileId"]).first():
            continue
        db.add(New(
            title=entry["title"],
            fileId=entry["fileId"],
            totalCount=entry["totalCount"],
        ))
    # One commit for the whole batch instead of one per inserted row.
    db.commit()
    return "数据保存成功"



def extract_article_content(html_content):
    """Extract title, publish info and body text from a jwzx announcement page.

    The returned ``content`` does not include the title.  Returns a dict with
    keys ``title``, ``publish_info`` and ``content``; placeholder strings are
    returned when a section cannot be located.
    """
    soup = BeautifulSoup(html_content, 'html.parser')

    # Locate the main content area.
    main_panel = soup.find('div', id='mainPanel')
    if not main_panel:
        return {"title": "未找到主要内容区域", "publish_info": "", "content": "未找到主要内容区域"}

    # Title.
    title_element = main_panel.find('h3')
    title = title_element.get_text(strip=True) if title_element else "未找到标题"

    # Publish info line (class "sj").
    info_div = main_panel.find('div', class_='sj')
    publish_info = info_div.get_text(strip=True) if info_div else ""

    if not title_element:
        return {"title": title, "publish_info": publish_info, "content": "未找到标题元素"}

    # Everything after the title; the body runs from the first real
    # paragraph/div up to (but excluding) the attachment list.
    siblings = list(title_element.next_siblings)

    # Find where the body starts (skip bare text nodes, <hr> separators and
    # the publish-info div).
    content_start = 0
    for i, sibling in enumerate(siblings):
        if isinstance(sibling, str):
            continue
        if sibling.name == 'hr':
            continue
        if sibling.name == 'div' and sibling.get('class') == ['sj']:
            continue
        if sibling.name in ['p', 'div']:
            content_start = i
            break

    # Find where the attachment section starts, if any.
    attachment_start = len(siblings)
    for i, sibling in enumerate(siblings):
        if isinstance(sibling, str):
            continue
        if sibling.name == 'p' and ('附件：' in sibling.get_text() or '附件:' in sibling.get_text()):
            attachment_start = i
            break

    # Collect the body paragraphs.
    paragraphs = []
    for sibling in siblings[content_start:attachment_start]:
        if isinstance(sibling, str):
            continue
        text = sibling.get_text(strip=False)
        # BUG FIX: the original first ran re.sub(r'\s+', ' ', ...), which
        # collapsed newlines too, so the blank-line squeeze that followed was
        # dead code and intra-paragraph line breaks were lost.  Collapse only
        # horizontal whitespace, then squeeze runs of blank lines.
        text = re.sub(r'[^\S\n]+', ' ', text)
        text = re.sub(r'\n\s*\n', '\n', text)
        paragraphs.append(text.strip())

    # Join paragraphs with a blank line between them.
    content = "\n\n".join(paragraphs)

    return {
        "title": title,
        "publish_info": publish_info,
        "content": content
    }

# Refresh the announcement list from jwzx and persist any unseen entries.
@jwzx.get("/refresh_data")
async def refresh_data(current_user = Depends(get_current_user), db: Session = Depends(get_db)):
    """Fetch the announcement JSON feed and store new rows via saveData."""
    url = "http://jwzx.cqupt.edu.cn/data/json_files.php?"

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
    }

    params = {
        "mdq4jDUd": "AGOmIGlqEDKSP7L4.PL8M4WsnKxc7lyHnJw64ljZDnvrLan7UHVtAcUyfPtYvuCZACkmvokB5Krm8FCiVylCwbzxd_e0I2d6GT.iPVIJ9xas1IMj01bOjba4d1FbEbXysquN40T9bkg"
    }

    # NOTE(review): `requests` is a blocking call inside an async endpoint and
    # stalls the event loop while the upstream responds — consider httpx or
    # run_in_executor.  The timeout (added) keeps a hung upstream from
    # blocking the worker forever.
    res = requests.get(url, params=params, headers=headers, timeout=10)
    res.encoding = "utf-8"

    dict_data = res.json()

    # dict_data["data"] is the list of announcement dicts.
    result = saveData(dict_data["data"], db)

    # NOTE(review): "mas" looks like a typo of "msg" (cf. get_data), but the
    # key is kept for client compatibility — confirm before renaming.
    return {"mas": result, "code": 200}


# Return the 10 most recent announcements (highest ids first).
@jwzx.get("/get_data")
async def get_data(current_user = Depends(get_current_user), db: Session = Depends(get_db)):
    """Query the latest 10 announcement rows and return them to the client."""
    try:
        data = db.query(New).order_by(New.id.desc()).limit(10).all()
    except Exception:
        # Keep the original "message" key for existing clients, but add a
        # non-200 code so failures are programmatically distinguishable
        # (the original error dict had no "code" at all).
        return {"message": "查询失败", "code": 500}

    return {"data": data, "code": 200, "msg": "查询成功"}


@jwzx.get("/get_data_by_fileId")
async def get_data_by_fileId(fileId: int, current_user = Depends(get_current_user), db: Session = Depends(get_db)):
    """Fetch one announcement page by fileId and return the parsed article.

    The article HTML is downloaded from jwzx and reduced to a
    {title, publish_info, content} dict by extract_article_content.
    """
    url = "http://jwzx.cqupt.edu.cn/fileShowContent.php"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
    }
    params = {
        "id": fileId
    }

    # NOTE(review): blocking `requests` inside an async endpoint — same
    # caveat as refresh_data.  Timeout added so a hung upstream cannot block
    # the worker forever.
    res = requests.get(url, params=params, headers=headers, timeout=10)
    res.encoding = "utf-8"

    article = extract_article_content(res.text)

    return {"data": article, "code": 200, "msg": "查询成功"}
