#!/usr/bin/env python3
# coding: utf-8

# ## Extract dataset from ccks_7_1_competition_data.
# 1. Training set: read `entity_name` and `label` from `entity_type.txt`; extract the introduction text from `entity_pages_*.xml`.
# 2. Test set: read `entity_name` from `entity_validation.txt`; extract the introduction text from `entity_pages_*.xml`.

# In[8]:


import os
import csv
import numpy
import time
import re
from lxml import etree


# In[9]:


# Dataset locations and file names for the CCKS 7.1 competition data.
traini_dir = r'./ccks_7_1_competition_data/训练集'  # training-set directory ("训练集" = training set)
test_dir = r'./ccks_7_1_competition_data/验证集'  # validation-set directory ("验证集" = validation set)
training_file = "entity_type.txt"  # TSV: entity name <TAB> label
testing_file = "entity_validation.txt"  # one entity name per line, no labels
# Page dumps: 1 = Baidu Baike, 2 = Hudong Baike, 3 = MediaWiki, 4 = A+ hospital pages.
entity_pages = ["entity_pages_1.xml", "entity_pages_2.xml", "entity_pages_3.xml", "entity_pages_4.xml"]

out_train = "entity_desc_label.csv"  # training output: name, description, keywords, label
out_test = "entity_desc.csv"  # test output: same columns, label empty
out_exceptions = "entity_exception.txt"  # entities found in no page dump


# In[11]:


# Remove output files left over from a previous run so every execution
# starts from a clean slate.
for _out_dir, _out_name in (
    (traini_dir, out_train),
    (test_dir, out_test),
    (traini_dir, out_exceptions),
    (test_dir, out_exceptions),
):
    _stale = os.path.join(_out_dir, _out_name)
    if os.path.exists(_stale):
        os.remove(_stale)


# In[5]:


def parse_baidubaike(content, entity_label, f_csv, train_or_test):
    """Parse one Baidu Baike ``<page>`` record and append a CSV row.

    Parameters
    ----------
    content : str
        Raw text of a single ``<page>...</page>`` record: a ``<title>`` line
        followed by an embedded ``<html>...</html>`` document.
    entity_label : dict
        Maps entity name -> label; records whose title is not a key are skipped.
    f_csv : csv.writer
        Receives one row ``[name, description, keywords, label]``.
    train_or_test : int
        1 -> training set (label looked up in ``entity_label``);
        any other value -> the label column is written as ``None``.
    """
    title_pttn = re.compile(r"[\s]*<page>[\s]*<title>([\s\S]+?)</title>")
    html_pttn = re.compile(r'<page>[\s\S]*?(<html[\s\S]*?</html>)')

    # Extract the title first and bail out early: records we do not care
    # about (or malformed ones) are skipped before the comparatively
    # expensive HTML parse below.
    title_match = title_pttn.match(content)
    if title_match is None:  # malformed record without a <title> — skip
        return
    entity_name = title_match.group(1)
    if entity_name not in entity_label:
        return

    html_match = html_pttn.match(content)
    if html_match is None:  # malformed record without embedded <html> — skip
        return
    selector = etree.HTML(html_match.group(1))
    # The lemma-summary div holds the short introduction paragraph(s).
    description = selector.xpath(r'//div[@class="lemma-summary"]//text()')
    keywords = selector.xpath(r'/html/head/meta[@name="keywords"]/@content')

    label = entity_label[entity_name] if train_or_test == 1 else None
    f_csv.writerow([entity_name, "".join(description), "".join(keywords), label])


def parse_hudongbaike(content, entity_label, f_csv, train_or_test):
    """Parse one Hudong Baike ``<page>`` record and append a CSV row.

    Parameters
    ----------
    content : str
        Raw text of a single ``<page>...</page>`` record containing an
        embedded ``<html>...</html>`` document.
    entity_label : dict
        Maps entity name -> label; records whose title is not a key are skipped.
    f_csv : csv.writer
        Receives one row ``[name, description, keywords, label]``.
    train_or_test : int
        1 -> training set (label looked up in ``entity_label``);
        any other value -> the label column is written as ``None``.
    """
    title_pttn = re.compile(r"[\s]*<page>[\s]*<title>([\s\S]+?)</title>")
    html_pttn = re.compile(r'<page>[\s\S]*?(<html[\s\S]*?</html>)')

    # Cheap title extraction first: unknown or malformed pages are skipped
    # before the expensive HTML parse.
    title_match = title_pttn.match(content)
    if title_match is None:  # no <title> — malformed record, skip
        return
    entity_name = title_match.group(1)
    if entity_name not in entity_label:
        return

    html_match = html_pttn.match(content)
    if html_match is None:  # no embedded <html> — malformed record, skip
        return
    selector = etree.HTML(html_match.group(1))
    # Hudong pages keep the summary text inside div#anchor.
    description = selector.xpath(r'//div[@id="anchor"]//text()')
    keywords = selector.xpath(r'/html/head/meta[@name="keywords"]/@content')

    label = entity_label[entity_name] if train_or_test == 1 else None
    f_csv.writerow([entity_name, "".join(description), "".join(keywords), label])


def parse_aplushospital(content, entity_label, f_csv, train_or_test):
    """Parse one A+ hospital-wiki ``<page>`` record and append a CSV row.

    The description is the run of ``<p>`` elements that follows the first
    navigation table inside ``div#bodyContent``.

    Parameters
    ----------
    content : str
        Raw text of a single ``<page>...</page>`` record containing an
        embedded ``<html>...</html>`` document.
    entity_label : dict
        Maps entity name -> label; records whose title is not a key are skipped.
    f_csv : csv.writer
        Receives one row ``[name, description, keywords, label]``; this source
        exposes no keyword metadata, so that column is always empty.
    train_or_test : int
        1 -> training set (label looked up in ``entity_label``);
        any other value -> the label column is written as ``None``.
    """
    title_pttn = re.compile(r"[\s]*<page>[\s]*<title>([\s\S]+?)</title>")
    html_pttn = re.compile(r'<page>[\s\S]*?(<html[\s\S]*?</html>)')

    # Cheap title extraction first: unknown or malformed pages are skipped
    # before the expensive HTML parse.
    title_match = title_pttn.match(content)
    if title_match is None:  # no <title> — malformed record, skip
        return
    entity_name = title_match.group(1)
    if entity_name not in entity_label:
        return

    html_match = html_pttn.match(content)
    if html_match is None:  # no embedded <html> — malformed record, skip
        return
    selector = etree.HTML(html_match.group(1))

    siblings = selector.xpath(r'//div[@id="bodyContent"]/table[@class="nav"][1]/following-sibling::*')

    # Collect consecutive <p> siblings; stop at the first non-<p> element
    # encountered after the paragraph run has started.
    paragraphs = []
    seen_paragraph = False
    for element in siblings:
        tag = element.xpath("name()").strip()
        if tag == 'p':
            seen_paragraph = True
            paragraphs.append(''.join(element.xpath(".//text()")))
        elif seen_paragraph:
            break
    description = ''.join(paragraphs)

    label = entity_label[entity_name] if train_or_test == 1 else None
    f_csv.writerow([entity_name, description, "", label])


def parse_mediawiki(content, entity_label, f_csv, train_or_test):
    """Parse one MediaWiki XML ``<page>`` record and append a CSV row.

    Parameters
    ----------
    content : str
        Raw XML text of a single ``<page>...</page>`` record
        (``/page/revision/text`` carries the wiki markup).
    entity_label : dict
        Maps entity name -> label; records whose title is not a key are skipped.
    f_csv : csv.writer
        Receives one row ``[name, description, "", label]``; MediaWiki dumps
        carry no keyword metadata, so that column is always empty.
    train_or_test : int
        1 -> training set (label looked up in ``entity_label``);
        any other value -> the label column is written as ``None``.
    """
    title_pttn = re.compile(r"[\s]*<page>[\s]*<title>([\s\S]+?)</title>")

    # Cheap title extraction first: unknown or malformed pages are skipped
    # before the XML parse.
    title_match = title_pttn.match(content)
    if title_match is None:  # no <title> — malformed record, skip
        return
    entity_name = title_match.group(1)
    if entity_name not in entity_label:
        return

    selector = etree.XML(content)
    texts = selector.xpath(r'/page/revision/text/text()')
    if not texts:  # page has no revision text — nothing to extract
        return
    description = texts[0]

    label = entity_label[entity_name] if train_or_test == 1 else None
    f_csv.writerow([entity_name, description, "", label])
    
    
def load_dataset_from_pages(page_file, entity_label, f_csv, parse, train_or_test):
    """Stream a pages file, accumulate each ``<page>...</page>`` record, and
    hand it to ``parse``.

    Parameters
    ----------
    page_file : str
        Path to a dump file containing ``<page>`` records, one tag per line.
    entity_label : dict
        Passed through to ``parse``.
    f_csv : csv.writer
        Passed through to ``parse``.
    parse : callable
        ``parse(content, entity_label, f_csv, train_or_test)`` — invoked once
        per complete record.
    train_or_test : int
        Passed through to ``parse`` (1 = training set).
    """
    content = ""
    in_page = False

    print("load data from ", page_file, " ... ")
    # NOTE(review): the file is opened with the platform default encoding;
    # confirm whether the dumps are UTF-8 and pin encoding= explicitly.
    with open(page_file, 'r') as f:
        count = 0
        for line in f:
            stripped = line.strip()
            if stripped == "":
                continue
            # BUGFIX: the original used `stripped in "<page>"`, a reversed
            # substring test that also matched content lines such as "<",
            # ">", or "p" and corrupted record boundaries. Exact equality
            # is what was intended.
            if stripped == "<page>":
                content = line
                in_page = True
            elif stripped == "</page>":
                content += line
                in_page = False
                parse(content, entity_label, f_csv, train_or_test)
                count += 1
                print("\rprocessed %d " % count, end="")
            elif in_page:
                content += line
    print(" done. ")


# In[ ]:


# ## Extract the training set.
print("[Load Train]: ")

# Build the entity -> label mapping from the annotation TSV.
# NOTE(review): opened with the platform default encoding — confirm UTF-8.
entity_label = {}
with open(os.path.join(traini_dir, training_file), 'r') as f_in:
    for line in f_in:
        line = line.strip()
        if line == "":
            continue
        entity, label = line.split("\t")
        entity_label[entity] = label

# Extract descriptions from the page dumps and write the training CSV.
# newline='' is required for csv.writer: without it the csv module emits
# extra blank lines on Windows. The `with` block also guarantees the file
# is closed even if a parser raises.
with open(os.path.join(traini_dir, out_train), 'w', newline='') as f_out:
    f_csv = csv.writer(f_out)

    load_dataset_from_pages(os.path.join(traini_dir, entity_pages[0]), entity_label, f_csv, parse_baidubaike, 1)
    load_dataset_from_pages(os.path.join(traini_dir, entity_pages[1]), entity_label, f_csv, parse_hudongbaike, 1)
    # load_dataset_from_pages(os.path.join(traini_dir, entity_pages[2]), entity_label, f_csv, parse_mediawiki, 1)
    load_dataset_from_pages(os.path.join(traini_dir, entity_pages[3]), entity_label, f_csv, parse_aplushospital, 1)

# ## Report which labelled entities never appeared in any page dump.
with open(os.path.join(traini_dir, out_train), 'r') as f:
    entities = {row[0] for row in csv.reader(f)}

train_except = set(entity_label.keys()).difference(entities)
print("train_except: ", train_except)
with open(os.path.join(traini_dir, out_exceptions), 'w') as f_except:
    for item in train_except:
        f_except.write(item + "\n")


# In[ ]:


# ## Extract the test set.
print("[Load Test]: ")

# Read the test entity names; labels are unknown, so every value is "".
# NOTE(review): opened with the platform default encoding — confirm UTF-8.
entity_label = {}
with open(os.path.join(test_dir, testing_file), 'r') as f_in:
    for line in f_in:
        line = line.strip()
        if line == "":
            continue
        entity_label[line] = ""

# Extract descriptions from the page dumps and write the test CSV.
# newline='' is required for csv.writer (avoids blank lines on Windows);
# the `with` block guarantees the file is closed even if a parser raises.
# NOTE(review): the page dumps below are read from traini_dir even though
# this builds the test set — confirm the validation entities' pages really
# live in the training-set directory.
with open(os.path.join(test_dir, out_test), 'w', newline='') as f_out:
    f_csv = csv.writer(f_out)

    load_dataset_from_pages(os.path.join(traini_dir, entity_pages[0]), entity_label, f_csv, parse_baidubaike, 2)
    load_dataset_from_pages(os.path.join(traini_dir, entity_pages[1]), entity_label, f_csv, parse_hudongbaike, 2)
    # load_dataset_from_pages(os.path.join(traini_dir, entity_pages[2]), entity_label, f_csv, parse_mediawiki, 2)
    load_dataset_from_pages(os.path.join(traini_dir, entity_pages[3]), entity_label, f_csv, parse_aplushospital, 2)

# Report which test entities never appeared in any page dump.
with open(os.path.join(test_dir, out_test), 'r') as f:
    entities = {row[0] for row in csv.reader(f)}

test_except = set(entity_label.keys()).difference(entities)
print("test_except: ", test_except)
with open(os.path.join(test_dir, out_exceptions), 'w') as f_except:
    for item in test_except:
        f_except.write(item + "\n")
