import os
import requests
from dotenv import load_dotenv
from smolagents import tool
from bs4 import BeautifulSoup
import pytesseract
from PIL import Image
from io import BytesIO
import re
import pandas as pd
from io import StringIO

load_dotenv()

# 1. Generic shared helper functions

def fetch_and_parse(url, parse_func, *args, **kwargs):
    """
    Generic fetch-and-parse entry point.

    url: page address to download
    parse_func: parser callable invoked with the HTML text
    *args, **kwargs: forwarded verbatim to parse_func

    Returns the parser's result, or an {"error": ...} dict when the
    request fails or anything raises.
    """
    try:
        resp = requests.get(url, timeout=10)
        # Let requests guess the real charset (these pages are often GBK).
        resp.encoding = resp.apparent_encoding
        if resp.status_code != 200:
            return {"error": f"请求失败，状态码：{resp.status_code}"}
        return parse_func(resp.text, *args, **kwargs)
    except Exception as e:
        return {"error": f"请求异常：{str(e)}"}


def parse_table_html(html, table_index=0, min_cols=3):
    """
    Generic table parser: return one table as a list of row dicts.

    html: HTML text containing one or more <table> elements.
    table_index: index among the tables that have at least ``min_cols``
        columns.  The previous implementation accepted this parameter but
        never used it; the default of 0 (first qualifying table) preserves
        the old behavior.
    min_cols: minimum column count for a table to qualify.

    Returns [] when no table qualifies.  ``ValueError`` from
    ``pandas.read_html`` (no table in the HTML at all) propagates,
    as before.
    """
    # Wrap in StringIO: passing a literal HTML string to read_html is
    # deprecated since pandas 2.1.
    tables = pd.read_html(StringIO(html))
    qualifying = [df for df in tables if df.shape[1] >= min_cols]
    if not (-len(qualifying) <= table_index < len(qualifying)):
        return []
    df = qualifying[table_index]
    headers = list(df.columns)
    return [
        {h: str(row[h]).strip() for h in headers}
        for _, row in df.iterrows()
    ]


def parse_list_items_html(html, container_tag, container_class, item_tag, item_map):
    """
    Generic list-page parser.

    html: page HTML
    container_tag/container_class: the outer container element
    item_tag: tag name of each list entry
    item_map: field mapping, e.g.
        {"name": lambda tag: tag.get_text(), "url": lambda tag: tag['href']}

    Returns a list of dicts, one per entry; extractor failures yield "".
    """
    soup = BeautifulSoup(html, "html.parser")
    container = soup.find(container_tag, class_=container_class)
    if container is None:
        return []
    entries = []
    for node in container.find_all(item_tag):
        record = {}
        for field, extractor in item_map.items():
            try:
                record[field] = extractor(node)
            except Exception:
                # Tolerate missing attributes/child tags on an entry.
                record[field] = ""
        entries.append(record)
    return entries

# 2. Campus-service / life-service tools

@tool
def parse_life_service_html(html: str) -> dict:
    """
    name: Parse the campus life-services web page.

    Args:
        html (str): Raw HTML of the life-services page.
    
    description:
        dict: Structured life-service info (campus canteens, repair
        services, frequently used phone numbers).
    """
    soup = BeautifulSoup(html, "html.parser")
    result = {}

    # Section 1: campus canteens — div.fl01 holds the schedule table.
    canteen_div = soup.find("div", class_="fl01")
    if canteen_div:
        table = canteen_div.find("table")
        result["校园餐饮"] = parse_canteen_table(table) if table else []

    # Section 2: repair services — div.fl02, fields headed by <h3> labels.
    repair_div = soup.find("div", class_="fl02")
    if repair_div:
        repair_info = {}
        emg = repair_div.find("h3", string="紧急维修电话")
        if emg:
            tel_p = emg.find_next("p")
            repair_info["紧急维修电话"] = tel_p.get_text(strip=True) if tel_p else ""
        regular = repair_div.find("h3", string="常规维修")
        if regular:
            con_div = regular.find_next("div", class_="con")
            repair_info["常规维修"] = con_div.get_text(strip=True) if con_div else ""
        sup = repair_div.find("h3", string=lambda x: x and "服务监督电话" in x)
        if sup:
            repair_info["服务监督电话"] = sup.get_text(strip=True)
        result["维修服务"] = repair_info

    # Section 3: frequently used phone numbers — div.fl03 table.
    phone_div = soup.find("div", class_="fl03")
    if phone_div:
        phone_info = []
        table = phone_div.find("table")
        if table:
            headers = [th.get_text(strip=True) for th in table.find_all("th")]
            for tr in table.find_all("tr")[1:]:  # skip the header row
                cells = [td.get_text(strip=True) for td in tr.find_all("td")]
                if cells:
                    phone_info.append(dict(zip(headers, cells)))
        result["常用电话"] = phone_info

    return result

def parse_canteen_table(table):
    """
    Parse the canteen <table> into a list of row dicts.

    Rows that omit the opening-hours cell (3 <td>s instead of 4 —
    a rowspan-style merged layout) inherit the hours from the most
    recent row that carried one.

    table: a BeautifulSoup <table> Tag (or anything with find_all).
    Returns a list of dicts keyed 食堂名称/位置/营业时间/订餐电话-订水电话.

    Fixes vs. the previous version: the unused ``headers`` list and the
    unused ``enumerate`` index are gone, and an empty table now yields
    [] instead of raising IndexError on ``rows[0]``.
    """
    canteen_info = []
    last_time = None  # carries opening hours across merged rows
    for row in table.find_all("tr")[1:]:  # skip the header row
        cols = row.find_all("td")
        if len(cols) == 4:
            # Full row: has its own opening-hours cell.
            name, loc, time, tel = [td.get_text(strip=True) for td in cols]
            last_time = time
        elif len(cols) == 3:
            # Hours cell merged away: inherit from the previous row.
            name, loc, tel = [td.get_text(strip=True) for td in cols]
            time = last_time
        else:
            # Unexpected shape: keep the row but leave fields blank.
            name, loc, time, tel = "", "", "", ""
        canteen_info.append({
            "食堂名称": name,
            "位置": loc,
            "营业时间": time,
            "订餐电话/订水电话": tel
        })
    return canteen_info

def parse_school_news_html(html: str) -> list:
    """
    name: Parse the school headline-news web page.

    Args:
        html (str): HTML of the headline-news page.
    
    description:
        list: News items with 时间/标题/梗概 keys; entries without a
        title are skipped.
    """
    soup = BeautifulSoup(html, "html.parser")
    items = []
    for block in soup.find_all("div", class_="con"):
        heading = block.find("h3")
        title = heading.get_text(strip=True) if heading else ""
        if not title:
            continue  # a card without a headline is not a news item
        date_span = block.find("span", class_="flex")
        summary_p = block.find("p")
        items.append({
            "时间": date_span.get_text(strip=True) if date_span else "",
            "标题": title,
            "梗概": summary_p.get_text(strip=True) if summary_p else "",
        })
    return items

@tool
def school_news(pages: int = 1) -> list:
    """
    name: Fetch school headline news.

    Args:
        pages (int): Number of listing pages to fetch, defaults to 1.
    
    description:
        list: News items with 时间/标题/梗概 keys.
    """
    import time
    base = os.getenv('BASE_URL')
    # BUG FIX: the old code did base_url.format(i) on a URL that
    # contained no "{}" placeholder, so every iteration re-fetched the
    # same first page.  Mirror the pagination used by academic_events:
    # page 1 is xxyw.htm, archive pages are xxyw/<n>.htm counting down
    # from 464 (the old loop's start value).
    # NOTE(review): archive numbering assumed from the original loop
    # bounds — confirm against the live site.
    urls = [f"{base}/index/xxyw.htm"]
    for i in range(464, 464 - (pages - 1), -1):
        urls.append(f"{base}/index/xxyw/{i}.htm")
    all_news = []
    for url in urls:
        try:
            resp = requests.get(url, timeout=10)
            resp.encoding = resp.apparent_encoding
            if resp.status_code != 200:
                break
            all_news.extend(parse_school_news_html(resp.text))
        except Exception:
            break
        time.sleep(0.5)  # throttle to avoid hammering the server
    return all_news

def parse_school_profile_html(html: str) -> str:
    """
    name: Parse the school-overview web page.

    Args:
        html (str): HTML of the school overview page.
    
    description:
        str: All text under <div class="inner wl">, newline-separated,
        or a fallback message when the container is missing.
    """
    container = BeautifulSoup(html, "html.parser").find("div", class_="inner wl")
    if container is None:
        return "未找到学校概况内容。"
    return container.get_text(separator="\n", strip=True)

@tool
def school_profile() -> str:
    """
    name: Get the school overview.

    description:
        str: Full text under <div class="inner wl"> on the school
        overview page of Guangzhou Nanfang College.
    """
    url = f"{os.getenv('BASE_URL')}/gywm/xxgk.htm"
    try:
        resp = requests.get(url, timeout=10)
        resp.encoding = resp.apparent_encoding
        if resp.status_code != 200:
            return f"请求失败，状态码：{resp.status_code}"
        return parse_school_profile_html(resp.text)
    except Exception as e:
        return f"请求异常：{str(e)}"

def parse_president_message_html(html: str) -> str:
    """
    name: Parse the president's-message web page.

    Args:
        html (str): HTML of the president's message page.
    
    description:
        str: All text under <div class="cont">, newline-separated, or a
        fallback message when the container is missing.
    """
    container = BeautifulSoup(html, "html.parser").find("div", class_="cont")
    if container is None:
        return "未找到校长寄语内容。"
    return container.get_text(separator="\n", strip=True)

@tool
def president_message() -> str:
    """
    name: Get the president's message.

    description:
        str: Full text under <div class="cont"> on the president's
        message page of Guangzhou Nanfang College.
    """
    url = f"{os.getenv('BASE_URL')}/gywm/xzjy.htm"
    try:
        resp = requests.get(url, timeout=10)
        resp.encoding = resp.apparent_encoding
        if resp.status_code != 200:
            return f"请求失败，状态码：{resp.status_code}"
        return parse_president_message_html(resp.text)
    except Exception as e:
        return f"请求异常：{str(e)}"

def parse_service_orgs_html(html: str) -> list:
    """
    name: Parse the service-organisations web page.

    Args:
        html (str): HTML of the service-organisations page.
    
    description:
        list: Non-empty organisation names found under <ul class="flex">.
    """
    ul = BeautifulSoup(html, "html.parser").find("ul", class_="flex")
    if ul is None:
        return []
    return [
        name
        for name in (li.get_text(strip=True) for li in ul.find_all("li"))
        if name
    ]

@tool
def service_orgs() -> list:
    """
    name: Get the service organisations.

    description:
        list: Names of all organisations under <ul class="flex"> on the
        service-organisations page of Guangzhou Nanfang College.
    """
    url = f"{os.getenv('BASE_URL')}/gywm/fwjg.htm"
    try:
        resp = requests.get(url, timeout=10)
        resp.encoding = resp.apparent_encoding
        if resp.status_code != 200:
            # Error reported inside the list to keep the return type stable.
            return [f"请求失败，状态码：{resp.status_code}"]
        return parse_service_orgs_html(resp.text)
    except Exception as e:
        return [f"请求异常：{str(e)}"]

def school_song_ocr() -> str:
    """
    name: Get the school-song lyrics (OCR).
    description:
        str: Downloads the school-song image and OCRs the lyric text on
        its right half, returning the recognised text.
    """
    url = f"{os.getenv('BASE_URL')}/images/xgtu.png"
    try:
        resp = requests.get(url, timeout=10)
        if resp.status_code != 200:
            return f"请求失败，状态码：{resp.status_code}"
        img = Image.open(BytesIO(resp.content))
        # The lyrics are assumed to occupy roughly the right half of the
        # image — TODO confirm against the actual artwork.
        width, height = img.size
        lyrics_region = img.crop((width // 2, 0, width, height))
        recognised = pytesseract.image_to_string(lyrics_region, lang='chi_sim').strip()
        return recognised if recognised else "未识别到校歌文本。"
    except Exception as e:
        return f"请求异常：{str(e)}"

school_song = tool(school_song_ocr)

def parse_academic_events_html(html: str) -> list:
    """
    name: Parse the academic-events web page.

    Args:
        html (str): HTML of the academic-events listing page.
    
    description:
        list: Event dicts with 时间/标题/内容 keys; entries missing any
        of the three fields are skipped.
    """
    soup = BeautifulSoup(html, "html.parser")
    events = []
    for item in soup.find_all("li"):
        span = item.find("span")
        raw_date = span.get_text(strip=True) if span else ""
        # Dates appear as "23/ 2025-05"; normalise to "2025-05-23".
        match = re.match(r"(\d{1,2})/\s*(\d{4})-(\d{2})", raw_date)
        if match:
            day, year, month = match.groups()
            date = f"{year}-{month}-{int(day):02d}"
        else:
            date = raw_date
        link = item.find("a")
        title = link.get_text(strip=True) if link else ""
        # The anchor text sometimes repeats the raw date prefix; drop it.
        if match and title.startswith(raw_date):
            title = title[len(raw_date):].lstrip(' ：:')
        para = item.find("p")
        content = para.get_text(strip=True) if para else ""
        if date and title and content:
            events.append({"时间": date, "标题": title, "内容": content})
    return events

@tool
def academic_events(pages: int = 1) -> list:
    """
    name: Fetch academic events.

    Args:
        pages (int): Number of listing pages to fetch, defaults to 1.
    
    description:
        list: Event dicts with 时间/标题/内容 keys.
    """
    import time
    base_url = f"{os.getenv('BASE_URL')}/index/xykd/xsdt"
    # Page 1 has no number (xsdt.htm); pages 2..N are xsdt/<n>.htm.
    urls = [f"{base_url}.htm"] + [f"{base_url}/{i}.htm" for i in range(2, pages + 1)]
    collected = []
    for url in urls:
        try:
            resp = requests.get(url, timeout=10)
            resp.encoding = resp.apparent_encoding
            if resp.status_code != 200:
                break
            collected.extend(parse_academic_events_html(resp.text))
        except Exception:
            break
        time.sleep(0.5)  # throttle between requests
    return collected

@tool
def medical_service() -> dict:
    """
    name: Get medical-service information.

    description:
        dict: Clinic hours, emergency phone numbers and the student
        medical-insurance office phone, scraped from the medical-service
        page of Guangzhou Nanfang College.
    """
    url = f"{os.getenv('BASE_URL')}/xyfw/syxx/ylfw.htm"
    def parse_medical(html):
        # The page has no stable ids/classes, so every field is located
        # by searching for its Chinese label text and then walking to
        # neighbouring elements.  This is order-sensitive by design.
        soup = BeautifulSoup(html, "html.parser")
        result = {}
        # Clinic hours: collect the sibling lines that follow the label,
        # stopping at a blank line or one containing "电话" (phone).
        time_tag = soup.find(string=lambda t: t and "医务室诊病时间" in t)
        if time_tag:
            times = []
            for sib in time_tag.parent.find_next_siblings():
                txt = sib.get_text(strip=True)
                if not txt or "电话" in txt:
                    break
                times.append(txt)
            result["医务室诊病时间"] = times
        # Clinic emergency phone: the element right after the label.
        emg_tag = soup.find(string=lambda t: t and "医务室急诊电话" in t)
        if emg_tag:
            emg_tel = emg_tag.parent.find_next_sibling()
            if emg_tel:
                result["医务室急诊电话"] = emg_tel.get_text(strip=True)
        # Hospital / insurance-office phones: the number sits after a
        # full-width colon on the same line; keep the part after the
        # last "：".  If several matches exist, the last one wins.
        for tag in soup.find_all(string=lambda t: t and "温泉医院急诊电话" in t):
            line = tag.parent.get_text(strip=True)
            result["温泉医院急诊电话"] = line.split("：")[-1]
        for tag in soup.find_all(string=lambda t: t and "学生医保办公室电话" in t):
            line = tag.parent.get_text(strip=True)
            result["学生医保办公室电话"] = line.split("：")[-1]
        return result
    return fetch_and_parse(url, parse_medical)

@tool
def transport_service() -> dict:
    """
    name: Get transport information.

    description:
        dict: Shuttle-bus timetable and campus-bus details scraped from
        the transport page of Guangzhou Nanfang College.
    """
    url = f"{os.getenv('BASE_URL')}/xyfw/syxx/jtcx.htm"

    def parse_bus_info(html):
        # Each field is labelled in the page text; the value is the text
        # of the element that follows the label node.
        soup = BeautifulSoup(html, "html.parser")
        bus_info = {}
        for label in ("运营时间", "乘车方式", "司机电话", "票价及路线"):
            # Bind the label as a default arg to avoid late-binding bugs.
            node = soup.find(string=lambda t, lab=label: t and lab in t)
            if node:
                bus_info[label] = node.parent.find_next().get_text(strip=True)
        return bus_info

    return {
        "班车服务": fetch_and_parse(url, parse_table_html),
        "校园巴士服务": fetch_and_parse(url, parse_bus_info),
    }

class CampusServiceTools:
    """
    Aggregated entry point for the campus/life-service tools, so other
    modules can use a single object instead of importing each tool.
    """

    def __init__(self):
        # Root URL of the school site, read once from the environment.
        self.base_url = os.getenv('BASE_URL')

    def fetch_and_parse(self, url, parse_func, *args, **kwargs):
        """Fetch *url* and run *parse_func* on the HTML.

        Delegates to the module-level ``fetch_and_parse`` helper, which
        the previous implementation duplicated line for line.
        """
        return fetch_and_parse(url, parse_func, *args, **kwargs)

    def life_service(self) -> str:
        """Return life-service info plus headline news as a JSON string."""
        # BUG FIX: the old code tested ``if not url`` on an already-built
        # f-string (always truthy — dead branch) and its message named a
        # LIFE_SERVICE_URL env var that is never read anywhere.  Guard
        # the actual configuration value instead.
        if not self.base_url:
            return "未配置BASE_URL环境变量。"
        url = f"{self.base_url}/xyfw/syxx/shfw.htm"
        try:
            response = requests.get(url, timeout=10)
            response.encoding = response.apparent_encoding
            if response.status_code == 200:
                data = parse_life_service_html(response.text)
                data["学校要闻"] = school_news(1)
                import json
                return json.dumps(data, ensure_ascii=False, indent=2)
            else:
                return f"请求失败，状态码：{response.status_code}"
        except Exception as e:
            return f"请求异常：{str(e)}"

    def school_news(self, pages: int = 1) -> list:
        """Delegate to the module-level school_news tool."""
        return school_news(pages)

    def medical_service(self) -> dict:
        """Delegate to the module-level medical_service tool."""
        return medical_service()

    def transport_service(self) -> dict:
        """Delegate to the module-level transport_service tool."""
        return transport_service()

    def service_orgs(self) -> list:
        """Delegate to the module-level service_orgs tool."""
        return service_orgs()

    def school_profile(self) -> str:
        """Delegate to the module-level school_profile tool."""
        return school_profile()

    def president_message(self) -> str:
        """Delegate to the module-level president_message tool."""
        return president_message()

    def academic_events(self, pages: int = 1) -> list:
        """Delegate to the module-level academic_events tool."""
        return academic_events(pages)

    def school_song(self) -> str:
        """Delegate to the module-level school_song tool."""
        return school_song()

# 5. Main entry point (manual smoke test)
if __name__ == "__main__":
    # print(login(os.getenv("USERNAME"), os.getenv("PASSWORD")))  # Expected: Login successful!
    # print(login("test", "test"))
    print(transport_service())