import re
import time
import random
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from urllib.robotparser import RobotFileParser


# Root of the MOT (Ministry of Transport) "jigou" disclosure section being scraped.
BASE_URL = "https://xxgk.mot.gov.cn/2020/jigou/"
# First page of the article listing.
LIST_URL = f"{BASE_URL}list.html"
# Browser-like headers so responses match what a normal visitor would receive.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
}
# Inclusive (min, max) seconds for the random politeness delay between requests.
DELAY_RANGE = (3, 8)
# Maximum number of retries after a failed request (exponential backoff).
MAX_RETRIES = 3
# Per-request timeout in seconds.
TIMEOUT = 15
# robots.txt location consulted before each fetch.
ROBOTS_URL = "https://xxgk.mot.gov.cn/robots.txt"


def can_fetch(url):
    """Return True if robots.txt permits fetching *url*.

    Best-effort check: if robots.txt cannot be retrieved or parsed,
    crawling is assumed to be allowed rather than blocked.
    """
    parser = RobotFileParser()
    parser.set_url(ROBOTS_URL)
    try:
        parser.read()
        marker = "/2020/jigou/"
        suffix = url.split(marker)[1] if marker in url else url
        target = f"https://xxgk.mot.gov.cn/2020/jigou/{suffix}"
        return parser.can_fetch(HEADERS["User-Agent"], target)
    except Exception:
        # robots.txt unreachable/unreadable -> assume permitted.
        return True

def random_delay():
    """Sleep for a random interval within DELAY_RANGE to pace requests politely."""
    time.sleep(random.uniform(*DELAY_RANGE))


def safe_request(url, retry=0):
    """Fetch *url* politely and return its HTML text, or None on failure.

    Honors robots.txt, sleeps a random delay before each attempt, and
    retries transient request errors with exponential backoff (2**attempt
    seconds) up to MAX_RETRIES. Non-HTML responses and disallowed URLs
    yield None; any unexpected error also yields None.
    """
    attempt = retry
    while True:
        try:
            if not can_fetch(url):
                return None
            random_delay()
            resp = requests.get(url, headers=HEADERS, timeout=TIMEOUT, allow_redirects=True)
            resp.raise_for_status()
            # Only article/listing pages are useful; skip binary or other types.
            if "text/html" not in resp.headers.get("Content-Type", ""):
                return None
            return resp.text
        except requests.RequestException:
            if attempt >= MAX_RETRIES:
                return None
            time.sleep(2 ** attempt)
            attempt += 1
        except Exception:
            # Anything outside the requests error hierarchy aborts immediately.
            return None


def extract_articles(html):
    """Parse a listing page and return article dicts (title/url/date/source).

    Items missing a link are skipped; items whose parsing raises are
    skipped individually so one bad entry cannot drop the whole page.
    """
    soup = BeautifulSoup(html, 'html.parser')
    container = soup.find('ul', class_='right_list')
    # Truthiness (not identity) check: an empty bs4 Tag is falsy too.
    if not container:
        return []
    results = []
    for item in container.find_all('li', class_='fl w100'):
        try:
            link = item.find('a')
            if not link:
                continue
            href = link.get('href', '')
            heading = link.get('title', link.text.strip())
            date_tag = item.find('font')
            results.append({
                'title': heading,
                'url': urljoin(BASE_URL, href),
                'date': date_tag.text.strip() if date_tag else "未知日期",
                'source': "交通运输部",
            })
        except Exception:
            continue
    return results


def extract_title(soup):
    """Best-effort article title.

    Tries, in order: the ArticleTitle meta tag, the heading's inner
    <span>, then the page <title> (text before the first '-'). Returns
    a placeholder when nothing is found.
    """
    meta = soup.find('meta', {'name': 'ArticleTitle'})
    if meta and meta.get('content'):
        return meta.get('content').strip()
    heading = soup.find('h1', class_='fl w100 gkzn_right_tit')
    if heading:
        inner = heading.find('span')
        if inner:
            return inner.text.strip()
    if soup.title:
        return soup.title.text.split('-')[0].strip()
    return "无标题"

def extract_content(soup):
    """Extract normalized body text from the article's #Zoom container."""
    body = soup.find('div', id='Zoom')
    if not body:
        return "未找到正文内容"
    # Drop non-content elements before pulling the text.
    for junk in body.find_all(['script', 'style', 'iframe']):
        junk.decompose()
    raw = body.get_text(separator='\n', strip=True)
    # Collapse runs of blank lines into a single paragraph break.
    collapsed = re.sub(r'\n\s*\n', '\n\n', raw)
    return collapsed.strip()


def extract_attachments(soup, base_url):
    """Collect attachment {'name', 'url'} dicts from the attachments box.

    Relative hrefs are resolved against *base_url*; the display name falls
    back to the link's download attribute, then to the URL's last segment.
    """
    found = []
    box = soup.find('div', class_='fl w100 gksqxz_fj')
    if not box:
        return found
    for entry in box.find_all('li'):
        link = entry.find('a')
        if not (link and link.get('href')):
            continue
        href = link['href']
        if not href.startswith('http'):
            href = urljoin(base_url, href)
        label = link.text.strip() or link.get('download', '') or href.split('/')[-1]
        found.append({'name': label, 'url': href})
    return found


def extract_article_details(html, url):
    """Parse an article page into (details, error).

    Returns ({'title', 'content', 'attachments'}, None) on success,
    (None, None) for empty input, and (None, message) when parsing raises.
    """
    if not html:
        return None, None
    try:
        soup = BeautifulSoup(html, 'html.parser')
        details = {
            'title': extract_title(soup),
            'content': extract_content(soup),
            'attachments': extract_attachments(soup, url),
        }
        return details, None
    except Exception as exc:
        return None, str(exc)


def get_total_pages(html):
    """Determine how many listing pages exist, defaulting to 1.

    Tries, in order:
    1. The total-page count from the CMS-emitted `createPageHTML(N, ...)` call.
    2. The last pagination link, assuming page files named `list_N.html`
       (N is zero-based, hence the +1).

    Returns 1 for empty/None input or when neither strategy succeeds.
    """
    # Explicit guard replaces a broad try/except around the regex: with a
    # falsy html the original only deferred the crash to BeautifulSoup(None).
    if not html:
        return 1
    match = re.search(r'createPageHTML\((\d+),', html)
    if match:
        return int(match.group(1))
    soup = BeautifulSoup(html, 'html.parser')
    pagination = soup.find('ul', class_='pagination')
    if pagination:
        page_links = pagination.find_all('a')
        if page_links:
            try:
                last_href = page_links[-1]['href']
                return int(last_href.split('_')[1].split('.')[0]) + 1
            except (KeyError, IndexError, ValueError):
                # Narrowed from a bare `except:` (which also swallowed
                # SystemExit/KeyboardInterrupt); unexpected href shapes
                # fall through to the default.
                pass
    return 1
