#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
教室查询客户端
负责网络请求和HTML解析
"""

import re
from typing import List, Dict, Any, Set, Optional
from urllib.parse import urlencode

import requests
from bs4 import BeautifulSoup


class ClassroomClient:
    """Classroom-availability query client.

    Wraps an already-authenticated ``requests.Session`` to fetch the
    classroom occupancy pages from the university portal and parse the
    returned HTML tables into structured per-slot records.
    """

    # The site cycles back to page 1 past its real last page, so we probe
    # at most this many pages per query.
    MAX_PAGES = 4

    def __init__(self, session: requests.Session):
        """
        Initialize the client.

        Args:
            session: A session object that has already been logged in.
        """
        self.session = session
        self.query_url = "https://aa.bjtu.edu.cn/classroomtimeholdresult/room_view/"
        self.query_headers = {
            'Referer': 'https://aa.bjtu.edu.cn/',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        }

    def query_classroom(self, query_params: Dict[str, str]) -> str:
        """
        Query classroom information, following paginated results.

        Pages are fetched until a page repeats the first page's classroom
        set (the server wraps around to page 1 once the real page count is
        exceeded), a request fails, or ``MAX_PAGES`` is reached.

        Args:
            query_params: Query-string parameters (without ``page``).

        Returns:
            The concatenated HTML of all collected pages, or a (Chinese)
            error-message string on failure — callers must check for the
            error strings, this method does not raise.
        """
        try:
            all_html_contents: List[str] = []
            first_page_classrooms: Set[str] = set()

            for page in range(1, self.MAX_PAGES + 1):
                # Never mutate the caller's dict; add the page number on a copy.
                params = dict(query_params, page=str(page))

                # Log the fully resolved URL once per page for debugging.
                # (Previously the "querying page N" line was printed twice.)
                full_url = self.query_url + '?' + urlencode(params)
                print(f"[DEBUG] 正在查询第 {page} 页，请求URL: {full_url}")

                response = self.session.get(
                    self.query_url,
                    params=params,
                    headers=self.query_headers,
                    timeout=15,
                )
                print(f"[DEBUG] 第 {page} 页响应状态码: {response.status_code}")

                if response.status_code != 200:
                    print(f"[DEBUG] 第 {page} 页请求失败，状态码: {response.status_code}")
                    if page == 1:
                        # A failed first page is fatal; later failures just
                        # end pagination and we keep what we already have.
                        return f"教室查询失败: {response.status_code}"
                    break

                # A login page (or a redirect to one) means the session expired.
                if "登录" in response.text or "login" in response.url:
                    print(f"[DEBUG] 第 {page} 页检测到登录状态失效")
                    return "教室查询需要登录"

                # Extract this page's classroom names to detect wrap-around.
                current_page_classrooms = self._extract_classroom_names(response.text)
                print(f"[DEBUG] 第 {page} 页提取到 {len(current_page_classrooms)} 个教室: {list(current_page_classrooms)[:5]}...")

                if page == 1:
                    first_page_classrooms = current_page_classrooms
                    all_html_contents.append(response.text)
                    print(f"[DEBUG] 第一页教室信息已保存作为基准")
                elif current_page_classrooms == first_page_classrooms:
                    # Identical to page 1: the server cycled back, stop here.
                    print(f"[DEBUG] 第 {page} 页教室信息与第一页完全重复，说明已经循环回第一页，停止查询")
                    break
                else:
                    new_classrooms = current_page_classrooms - first_page_classrooms
                    if new_classrooms:
                        print(f"[DEBUG] 第 {page} 页发现新教室: {list(new_classrooms)[:3]}...")
                        all_html_contents.append(response.text)
                    else:
                        # BUGFIX: the old log claimed this page was collected
                        # ("继续收集"/"内容已添加") although it never was
                        # appended; say explicitly that it is skipped.
                        print(f"[DEBUG] 第 {page} 页没有新教室，跳过该页内容")

                print(f"[DEBUG] 当前共收集 {len(all_html_contents)} 页内容")

            print(f"[DEBUG] 总共获取了 {len(all_html_contents)} 页内容")
            # Merge every collected page into one HTML blob for parsing.
            combined_html = "".join(all_html_contents)
            print(f"[DEBUG] 合并后HTML长度: {len(combined_html)}")

            return combined_html

        except Exception as e:
            # Network/parse failures are reported as a message string rather
            # than raised, matching the rest of this method's contract.
            print(f"[ERROR] 教室查询异常: {e}")
            import traceback
            traceback.print_exc()
            return f"教室查询出错: {e}"

    def _extract_classroom_names(self, html_content: str) -> Set[str]:
        """
        Extract the set of classroom names from one page of HTML.

        Args:
            html_content: Raw HTML of a result page.

        Returns:
            The classroom names found in the first column of the results
            table; an empty set on any parse failure.
        """
        try:
            soup = BeautifulSoup(html_content, "html.parser")
            classroom_names: Set[str] = set()

            table = soup.find("table", class_="table-bordered")
            if table:
                # The first two <tr> rows are header rows; skip them.
                for tr in table.find_all("tr")[2:]:
                    tds = tr.find_all("td")
                    if tds:
                        room = tds[0].get_text(strip=True)
                        if room:
                            classroom_names.add(room)

            return classroom_names
        except Exception as e:
            # Best-effort helper: a broken page simply contributes no names.
            print(f"[ERROR] 提取教室名称时出错: {e}")
            return set()

    def parse_html_content(self, html_content: str) -> List[Dict[str, Any]]:
        """
        Parse (possibly merged multi-page) HTML into per-slot records.

        Args:
            html_content: HTML content, possibly several pages concatenated.

        Returns:
            One dict per (room, day, section) cell with keys ``room``,
            ``day`` (1-7, Mon-Sun), ``section`` (1-7), ``is_free``,
            ``color`` and ``table_source`` (1-based table index).

        Raises:
            ValueError: If no results table is found or parsing fails.
        """
        try:
            soup = BeautifulSoup(html_content, "html.parser")

            # Merged HTML may contain one table per collected page.
            tables = soup.find_all("table", class_="table-bordered")
            if not tables:
                raise ValueError("未找到教室表格，可能需要重新登录")

            print(f"[DEBUG] 在合并的HTML中找到 {len(tables)} 个教室表格")

            all_rooms_info: List[Dict[str, Any]] = []

            for table_idx, table in enumerate(tables):
                # The first two <tr> rows are header rows; skip them.
                rows = table.find_all("tr")[2:]
                print(f"[DEBUG] 处理第 {table_idx + 1} 个表格，找到 {len(rows)} 行教室数据")

                for tr in rows:
                    tds = tr.find_all("td")
                    if len(tds) < 2:
                        continue

                    room = tds[0].get_text(strip=True)
                    if not room:
                        continue

                    # Cells 1..49 map to 7 days x 7 sections, day-major:
                    # offset // 7 -> day, offset % 7 -> section. A single
                    # bounded slice also fixes the old nested-loop break
                    # that only exited the inner (section) loop.
                    for offset, cell in enumerate(tds[1:50]):
                        style = cell.get("style", "")

                        # The background colour encodes occupancy;
                        # white means the slot is free.
                        color = "#fff"  # default: free
                        color_match = re.search(r'background-color:\s*([^;]+)', style)
                        if color_match:
                            color = color_match.group(1).strip()

                        # BUGFIX: compare case-insensitively so "#FFF" /
                        # "#FFFFFF" are also recognized as free.
                        is_free = color.lower() in ("#fff", "#ffffff")

                        all_rooms_info.append({
                            'room': room,
                            'day': offset // 7 + 1,
                            'section': offset % 7 + 1,
                            'is_free': is_free,
                            'color': color,
                            'table_source': table_idx + 1,
                        })

            print(f"[DEBUG] 解析完成：总共 {len(all_rooms_info)} 个教室时段信息")
            return all_rooms_info

        except Exception as e:
            print(f"[ERROR] 解析HTML时出错: {e}")
            import traceback
            traceback.print_exc()
            raise ValueError(f"解析数据时出错: {e}")

    def extract_rooms_from_text(self, text: str) -> List[str]:
        """
        Extract classroom identifiers from free-form text.

        Args:
            text: Text that may contain classroom names (e.g. "SY201(60)").

        Returns:
            A de-duplicated list of matched room names, in arbitrary order;
            an empty list on failure.
        """
        try:
            # Heuristic match: letters (incl. CJK) followed by digits, with
            # an optional "(capacity)" suffix that is not captured.
            room_pattern = r'([A-Z]+\d+|\w+\d+|[A-Za-z\u4e00-\u9fff]+\d+)(?:\(\d+\))?'
            return list(set(re.findall(room_pattern, text)))
        except Exception as e:
            print(f"[ERROR] 提取教室信息失败: {e}")
            return []