from lxml import etree
import logging
from utils import CommonUtils
from enum import Enum

logging.basicConfig(level=logging.INFO,  # log level: INFO and above are recorded (not DEBUG, despite the old note)
                    format='%(asctime)s - %(levelname)s - %(message)s')  # log line format: timestamp - level - message
# Substrings used to flag language-test requirements (matched inside requirement text)
LANGUAGE_KEYWORD = ['IELTS', 'TOEFL', 'CET', 'GMAT', 'GRE']
# Words identifying course sections by title (case variants listed explicitly)
CourseKeywords = ["course", "courses", "Course", "Courses"]


# Enum of the XPath expressions locating each supported page type's content root.
class HtmlXPath(Enum):
    """XPath selectors for the three page layouts handled by HTMLTREE."""
    # Navigation page: the program table.
    PROGRAM_NAVIGATION_XPATH = "//table[contains(@class, 'table')]"
    # Program page: all direct children of the article body.
    PROGRAM_XPATH = "//*[@id='page-content']//article/*"
    # Degree page: the "view-content" container (class matched with normalized spaces).
    DEGREE_XPATH = '//*[@id="block-w3css-subtheme-content"]//*[contains(concat(" ", normalize-space(@class), " "), ' \
                   '" view-content ")] '


class HTMLTREE:
    def __init__(self, html_file_path, target_xpath):
        # html页面路径
        self.html_file_path = html_file_path
        # 目标xpath
        self.target_xpath = target_xpath
        with open(html_file_path, "rb") as f:
            html_content = f.read()
        # 解析html
        self.html_tree = etree.HTML(html_content)
        # 获取目标xpath的元素
        try:
            # 注意:
            self.root_content = self.html_tree.xpath(target_xpath.value)
        except etree.XPathEvalError as e:
            logging.error("XPath Error: %s" % e)
            self.root_content = []

    def parse_html(self):
        if self.target_xpath == HtmlXPath.PROGRAM_XPATH:
            self.parse_program_html()
        elif self.target_xpath == HtmlXPath.DEGREE_XPATH:
            print("parse_degree_html")
            self.parse_degree_html()
        elif self.target_xpath == HtmlXPath.PROGRAM_NAVIGATION_XPATH:
            self.parse_program_navigation_html()
        else:
            logging.error("The target xpath: {} is not valid", self.target_xpath)
            return

    def parse_program_html(self):
        """
        解析program(专业)的html页面
        :return: {
            "program_title": "",
            "content_info_list": [],
            "language_info_list": [],
            "program_requirements": []
        }
        """
        # todo 判断html是否是program的html
        if self.target_xpath != HtmlXPath.PROGRAM_XPATH:
            logging.error("The target xpath is not program xpath")
            return

        # 提取标题
        page_title_node_list = self.html_tree.xpath('//h1[contains(@class, "page-title")]')
        page_title_node = page_title_node_list[0]
        page_title = CommonUtils.HtmlUtils.extract_text_from_tag_node(page_title_node)

        # 提取课程页面
        degree_url_ul_list = self.html_tree.xpath(
            "//*[contains(concat(' ', normalize-space(@class), ' '), ' list-unstyled ')]")
        degree_url_ul = degree_url_ul_list[0]
        a_tag = degree_url_ul.xpath("li[1]//a")[0]
        degree_url = a_tag.get("href")

        # 3. 解析html文件
        tree_structure = HTMLTREE.recursion_process(self.root_content, 2)

        # 4. 提取信息
        content_info_list = []
        language_info_list = []
        program_list = []

        # 过滤与初步提取数据
        for item in tree_structure:
            title = item.get("title")
            if title == "Quick Facts":
                program_list = item.get("children")
                break
            if title == "Program Overview":
                content_info_list = item.get("children")
            else:
                pass

        # 信息提取重新写 -> 去测试类中写
        # 提取Minimum Admission Requirements
        min_admission_requirement_list = []
        for program in program_list:
            title_program = program.get("title")
            if "Doctor" in title_program:
                break
            min_admission_requirement_list.append(program)
        try:
            min_admission_requirement = min_admission_requirement_list[0]
        except IndexError as e:
            print("----------------分割线开始--------------------")
            print(CommonUtils.FormatUtils.format_object_to_json(tree_structure))
            print("----------------分割线结束--------------------")
        min_admission_requirements = min_admission_requirement.get("children")
        program_requirements = []
        for item in min_admission_requirements:
            try:
                title = item.get("title")
            except AttributeError as e:
                print("***************************")
                print(CommonUtils.FormatUtils.format_object_to_json(tree_structure))
                print("***************************")
                print(CommonUtils.FormatUtils.format_object_to_json(item))
                print("***************************")
                break
            if title == "Minimum Admission Requirements":
                min_requirements = CommonUtils.ListUtils.flatten_list(item.get("children"))
                for requirement in min_requirements:
                    contains_keyword = any(keyword in requirement for keyword in LANGUAGE_KEYWORD)
                    if contains_keyword:
                        language_info_list.append(requirement)
                    content_info_list.append(requirement)
            if title == "Program Requirements":
                program_requirements = CommonUtils.ListUtils.flatten_list(item.get("children"))
        return {
            "program_title": page_title,
            "degree_url": degree_url,
            "content_info_list": content_info_list,
            "language_info_list": language_info_list,
            "program_requirements": program_requirements
        }

    def parse_degree_html(self):
        """
        解析学位的课程
        return: [{
            "title": "",
            "content": ""
            }]
        """
        # todo 判断html是否是degree的html
        if self.target_xpath != HtmlXPath.DEGREE_XPATH:
            logging.error("The target xpath is not degree xpath")
            return

        # 输出找到的元素
        view_content_list = self.root_content[0]

        # 获取所有直接子元素
        sub_elements = view_content_list.getchildren()
        course_list = []
        for sub_element in sub_elements:
            # 获取子标题节点与子内容节点
            sub_children_list = sub_element.getchildren()
            sub_title_node = sub_children_list[0]
            sub_content_node = sub_children_list[1]
            # 获取子标题与子内容
            sub_title = CommonUtils.HtmlUtils.extract_text_from_tag_node(sub_title_node)
            if CommonUtils.TextUtils.contains_word_list(sub_title, CourseKeywords):
                course_list.append({
                    "title": sub_title,
                    "content": sub_content_node
                })
        course_info_list = []
        for course in course_list:
            course_node = course.get("content")
            table_list = course_node.xpath(".//table")
            courses = []
            for table in table_list:
                table_value = CommonUtils.HtmlUtils.extract_data_from_table(table)
                tbody_value_list = table_value.get("tbody")
                for tbody_value in tbody_value_list:
                    courses.append(tbody_value[1])
            course_info_list.append({
                "title": course.get("title"),
                "courses": courses
            })
        return course_info_list

    def parse_program_navigation_html(self):
        if self.target_xpath != HtmlXPath.PROGRAM_NAVIGATION_XPATH:
            logging.error("The target xpath is not program navigation xpath")
            return

        target_table = self.root_content[0]
        table_value = CommonUtils.HtmlUtils.extract_data_from_table(target_table)
        table_tbody = table_value.get("tbody")
        program_list = []
        for item in table_tbody:
            program_list.append(item[0])
        return program_list

    def parse_program_navigation_html_v2(self):
        if self.target_xpath != HtmlXPath.PROGRAM_NAVIGATION_XPATH:
            logging.error("The target xpath is not program navigation xpath")
            return
        target_table = self.root_content[0]
        a_tag_list = target_table.xpath(".//tbody//tr//td//a")
        program_and_url_list = []
        for a_tag in a_tag_list:
            a_value = CommonUtils.HtmlUtils.extract_text_from_tag_node(a_tag)
            a_url = a_tag.get("href")
            # todo 去除末尾/操作可以封装为工具类
            if a_url.endswith("/"):
                # a_url = a_url[:-1]
                pass
            else:
                a_url = a_url + "/"
            program_and_url_list.append({
                "program": a_value,
                "url": a_url
            })
        return program_and_url_list

    # region 工具类
    @staticmethod
    def divide_node_list_by_tag(node_list, tag_name):
        """
        将节点列表按照[标签名称]进行分组
        :param node_list: 节点列表
        :param tag_name: 标签名称
        """
        groups = []
        # 初始化一个变量来跟踪[当前组]的开始位置
        start_index = 0
        # 遍历所有节点，找到tag_name标签的位置
        for i, node in enumerate(node_list):
            # 如果当前节点是tag_name
            if node.tag == tag_name:
                # 收集从上一个tag_name节点(即:位置为start_index的节点)之后的所有节点到当前节点（不包括当前节点）
                if i > start_index:
                    group = node_list[start_index:i]
                    groups.append(group)
                # 设置当前组的起始位置为当前节点的下一个位置
                start_index = i

        # 收集从最后一个tag_name到列表末尾的所有节点（如果有的话）
        if start_index < len(node_list):
            group = node_list[start_index:]
            groups.append(group)
            # 返回分组后的结果
        return groups

    @staticmethod
    # 递归
    def recursion_process(node_list, h_layer):
        """
        递归处理节点列表
        :param node_list: 节点列表
        :param h_layer: 当前节点所在的层级
        """
        divide_tag_name = "h{}".format(h_layer)
        groups = HTMLTREE.divide_node_list_by_tag(node_list, divide_tag_name)
        if len(groups) == 1:
            first_group = groups[0]
            if len(first_group) >= 1 and first_group[0].tag.startswith("h"):
                return {
                    "title": CommonUtils.TextUtils.remove_consecutive_spaces(first_group[0].text),
                    "layer": h_layer,
                    "children": HTMLTREE.classify_process_node(first_group[1:len(first_group)])
                }
        if len(groups) <= 1 and groups is not None:
            return groups
        else:
            res = []
            for group in groups:
                # 递归处理下一层
                tag_value = group[0]
                if type(tag_value) is etree._Element and tag_value.tag.startswith("h"):
                    pass
                else:
                    logging.info("特殊节点{}".format(tag_value))
                    continue

                value = HTMLTREE.recursion_process(group[1:len(group)], h_layer + 1)
                if len(value) == 1:
                    title = CommonUtils.TextUtils.remove_consecutive_spaces(group[0].text)
                    if title is not None and len(title) > 0 and title is not ' ':
                        res.append({
                            "title": title,
                            "layer": h_layer,
                            "children": HTMLTREE.classify_process_node(value)
                        })
                if value is not None and len(value) > 1:
                    title = CommonUtils.TextUtils.remove_consecutive_spaces(group[0].text)
                    if title is not None and len(title) > 0 and title is not ' ':
                        res.append({
                            "title": title,
                            "layer": h_layer,
                            "children": HTMLTREE.classify_process_node(value)
                        })
            return res

    @staticmethod
    def classify_process_node(node_list):
        """
        根据节点类型进行分类处理
        :param node_list: 节点列表
        """
        if type(node_list) is list:
            item_list = []
            for item_node in node_list:
                value = HTMLTREE.classify_process_node(item_node)
                item_list.append(value)
            return item_list
        elif type(node_list) is etree._Element:
            if node_list.tag == "ul":
                return CommonUtils.HtmlUtils.extract_data_from_ul_li_to_list(node_list)
            else:
                return CommonUtils.HtmlUtils.extract_text_from_tag_node(node_list)
        else:
            return node_list
    # endregion
