#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
from tj.util.html_tool import read_page
from tj.baike.baike_triplet import CategoryTriplet, CategoryTriplets, InstanceTriplets, InstanceTriplet
from tj.baike.category_node import CategoryNode, CategoryNodes

# Base URL of Baidu Baike; relative category/entry paths are joined onto it.
main_url = "http://baike.baidu.com"
# The ten top-level categories shown on the Baidu Baike homepage:
# people, nature, culture, sports, society, history, geography,
# technology, entertainment, daily life.
main_category = ['人物', '自然', '文化', '体育', '社会', '历史', '地理', '科技', '娱乐', '生活']


# Extract the first-level subcategories of one main category from the
# Baidu Baike homepage.
def extract_common_category(main_category_name):
    """Return a list of CategoryNode for the given main category.

    Fetches the Baike homepage, locates the <dd> section belonging to
    main_category_name, and builds one CategoryNode per category link
    found inside it. Returns an empty list when the section is missing.
    """
    page_content = read_page(main_url)
    main_regex = "<dt><h2><a\\s*href=\"([\\S]*?)\"\\s*target=\"_blank\">%s</a></h2></dt>\\s+<dd>([\\s\\S]*?)</dd>" % main_category_name
    section = re.search(main_regex, page_content, re.S)
    if section is None:
        return []

    link_regex = "<a\\s*href=\"(/fenlei/([\\S]*?))\"\\s*target=\"_blank\">([\\S]*?)</a>"
    # Each match is (relative_path, path_tail, link_text); the link text is
    # the category name and the relative path is joined to the site root.
    return [CategoryNode(text, main_url + path, main_category_name)
            for path, _tail, text in re.findall(link_regex, section.group(2), re.S)]


# Extract the related categories, subordinate categories, and entry terms
# from one category page.
def extract_xiangguan_and_xiaji(category_node):
    """Return (related_list, subordinate_list) for a category page.

    Also extracts the page's entry terms as a side effect (they are
    imported into the store by extract_instances, not returned).
    """
    html = read_page(category_node.url)
    xiangguan_regex = "<div\\s*class=\"category-title\\s*\"><span\\s*class=\"title\">相关分类</span>([\\s\\S]*?)</div>"
    xiaji_regex = "<div\\s*class=\"category-title\\s*\"><span\\s*class=\"title\">下级分类</span>([\\s\\S]*?)</div>"

    # Related categories are siblings, so they keep this node's parent;
    # subordinate categories are children, so their parent is this node.
    related = extract_xiangguan_or_jiaji(html, xiangguan_regex, category_node.parent_name)
    subordinate = extract_xiangguan_or_jiaji(html, xiaji_regex, category_node.name)

    extract_instances(category_node.name, html)

    return related, subordinate


# Extract either the related or the subordinate categories, depending on
# which section regex the caller passes in.
def extract_xiangguan_or_jiaji(content, regex, parent_name):
    """Return CategoryNode objects for every category link in the section.

    `regex` selects the section of `content` to scan (related or
    subordinate block); `parent_name` is recorded as the parent of every
    node built. Returns an empty list when the section is not present.
    """
    category_regex = "<a\\s*href=\"(/fenlei/([\\S]*?))\"\\s*class=\"child-link([\\s\\S]*?)\"\\s*target=\"_blank\">([\\S]*?)</a>"

    section = re.search(regex, content, re.S)
    if section is None:
        return []

    # Match tuples are (relative_path, path_tail, class_suffix, title);
    # only the path and the link title are used.
    return [CategoryNode(title, main_url + path, parent_name)
            for path, _tail, _cls, title in re.findall(category_regex, section.group(1), re.S)]


# Build the category taxonomy under one main category with a breadth-first
# traversal, then import the collected triplets into Cayley.
def extract_common_taxonomy(category_name):
    """Crawl the subordinate-category hierarchy rooted at category_name."""
    visited = CategoryNodes()   # every node seen so far (dedup set)
    frontier = CategoryNodes()  # nodes whose pages still need crawling
    triplets = CategoryTriplets()

    # Seed the traversal with the first-level categories on the homepage.
    for seed in extract_common_category(category_name):
        visited.append(seed)
        frontier.append(seed)
        triplets.append(CategoryTriplet(category_name, seed.name))

    while not frontier.is_empty():
        current = frontier.first_and_remove()
        # Related (xiangguan) categories are deliberately not traversed;
        # only the subordinate (xiaji) hierarchy is expanded.
        _related, subordinate = extract_xiangguan_and_xiaji(current)

        for child in subordinate:
            # NOTE(review): assumes CategoryNodes.append() reports whether
            # the node was newly added, so each page is queued once — confirm.
            if visited.append(child):
                frontier.append(child)
            triplets.append(CategoryTriplet(current.name, child.name))

    triplets.import_to_cayley()


# Extract encyclopedia entry terms from a category page and store them as
# (category, instance) triplets.
def extract_instances(category_name, content):
    """Extract entry (instance) links from a category page.

    Matches both plain "/view/<id>.htm" and disambiguation
    "/subview/<id>/<id>.htm" entry URLs, records each link's title
    attribute as an InstanceTriplet under category_name, and imports the
    result into Cayley.
    """
    instance_triplets = InstanceTriplets()

    # Bugfix: the dot before "htm" is now escaped; the previous bare "."
    # also matched URLs like "/view/123Xhtm".
    instance_regex1 = "<a([\\s\\S]*?)href=\"(/view/(\\d+)\\.htm)\"([\\s\\S]*?)title=\"([\\S]*?)\"([\\s\\S]*?)>"
    instance_regex2 = "<a([\\s\\S]*?)href=\"(/subview/(\\d+)/(\\d+)\\.htm)\"([\\s\\S]*?)title=\"([\\S]*?)\"([\\s\\S]*?)>"

    for match in re.findall(instance_regex1, content, re.S):
        # Group 5 (index 4) is the title attribute of the entry link.
        instance_triplets.append(InstanceTriplet(category_name, match[4]))

    for match in re.findall(instance_regex2, content, re.S):
        # The subview pattern has one extra id group, so the title is
        # group 6 (index 5).
        instance_triplets.append(InstanceTriplet(category_name, match[5]))

    instance_triplets.import_to_cayley()


if __name__ == "__main__":
    # Crawl the taxonomy for the "体育" (sports) main category and load it
    # into Cayley; any entry of main_category could be substituted here.
    extract_common_taxonomy("体育")