
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 13 16:20:27 2017

@author: BaiYunfei
"""

from extrator.TPR import TPR
from cleaner import HTMLCleaner
from util import TreeUtil,TextUtil,ListUtil
from sklearn import cluster
from deepdiff import DeepDiff
from entity.Comment import Comment
import re
import bs4
import pandas as pd

# Path of the pre-processed HTML page to analyse.
file = "../output/63.after"

with open(file, 'rt', encoding='utf-8') as f:
    html_text = f.read()

# Strip <br> tags (any spacing / self-closing form) before parsing so they
# do not split logical text nodes.  BUG FIX: the pattern was a non-raw
# string containing the invalid escapes '\<' and '\>' (DeprecationWarning
# since Python 3.6); '<' and '>' need no escaping in a regex.
html_text = re.sub(r'<\s*br\s*/?\s*>', '', html_text)

dom = bs4.BeautifulSoup(html_text, 'lxml')
#print('title: %s' % dom.title.text)
if not dom.body.div:
    print('没有获取到主帖部分')
# Clean the DOM, e.g. remove <script> tags.
HTMLCleaner.clean(dom)

# Score every DOM node with the TPR (text/punctuation/path) extractor.
tpr = TPR(dom.body)

# Rank nodes by tpr score, highest first, and re-index them 0..n-1.
data = tpr.data.sort_values('tpr', ascending=False)
data.index = range(len(data))

# NOTE(review): neither `threshold` nor `length` is used later in this
# script; they look like leftovers from an earlier cut-off strategy.
threshold = data.tpr.std() * 1.7
length = len(data[data.tpr > threshold])

# Split the nodes into two clusters on the raw text features; the cluster
# with the higher average tpr is assumed to contain the content nodes.
model = cluster.KMeans(n_clusters=2)
fitted = model.fit(data[['textNum', 'puncNum', 'pathNum', 'tpr']])
data['label'] = fitted.labels_
mean_by_label = data.groupby('label')['tpr'].mean()
tpr_label = mean_by_label.sort_values(ascending=False).index.values[0]

# Dump the winning cluster's nodes for inspection.
for idx in data[data.label == tpr_label].index:
    row = data.loc[idx]
    print('----------%(index)d - %(tpr).2f ----------' % {'index': idx, 'tpr': row.tpr})
    print(row.node.nodes)
    print(row.node.tag)
    
# Walk down from <body> through single-child wrapper elements until we
# reach a node with several children — the container of the repeated
# record (floor) nodes.
parent = dom.body
children = list(parent.children)
while len(children) == 1:
    parent = children[0]
    children = list(parent.children)

# Keep only real element children; drop whitespace-only text nodes.
node_list = [child for child in children
             if child != '\n' and isinstance(child, bs4.element.Tag)]
#count_dic = {}
#for node in node_list:
#    # tag = '%(name)s - %(attrs)s' % {'name':node.name,'attrs':list(node.attrs.keys())}
#    tag = node.name
#    if count_dic.get(tag):
#        count_dic[tag] += 1
#    else:
#        count_dic[tag] = 1
#
## 找出最大的
#max_count = 1
#for k in count_dic:
#    if count_dic[k] > max_count:
#        max_tag = k
#        max_count = count_dic[k]
#print(max_tag)
#
#data_node_list = []
#data_node_text_list = []
#for node in node_list:
#    # tag = '%(name)s - %(attrs)s' % {'name':node.name,'attrs':list(node.attrs.keys())}
#    tag = node.name
#    if (tag == max_tag) & (node.text.strip() != '') :
#        data_node_list.append(node)
#        data_node_text_list.append(node.text)
    
# Pairwise structural similarity between all candidate record nodes.
sim = pd.DataFrame([[TreeUtil.sim(a, b, 1) for b in node_list]
                    for a in node_list])

def _node_counts(node):
    # Tag-count feature vector for one node; 'TextNode' is pre-seeded so
    # the column always exists, even for nodes without text descendants.
    counts = {'TextNode': 0}
    TreeUtil.count_node(node, counts)
    return counts

sim_data = pd.DataFrame([_node_counts(n) for n in node_list]).fillna(0)
sim_data['means'] = sim.mean()   # average similarity of each node to all others
sim_data['nodes'] = node_list

# Discard nodes that are similar to nothing else (zero mean similarity),
# and drop the matching rows of the similarity matrix.
isolated = [i for i in sim_data.index if sim_data.loc[i, 'means'] == 0]
sim_data = sim_data.drop(isolated)
sim = sim.drop(isolated)
node_list = sim_data.nodes

# Cluster on the raw tag-count columns only.
excluded = {'nodes', 'means', 'TextNode'}
feature = [c for c in sim_data.columns if c not in excluded]

model = cluster.MiniBatchKMeans(n_clusters=2).fit(sim_data[feature])
sim_data['label'] = model.labels_
sim['label'] = model.labels_
sim_data['label'] = sim_data.label.fillna('*')

# Pick the cluster whose members are, on average, the most similar to each
# other: for each label, average the intra-cluster block of the similarity
# matrix and keep the label with the highest score.
label = None   # stays None if every cluster scores 0 (then no data nodes are kept)
max_s = 0
for l in sim.label.unique():
    i = sim[sim.label == l].index
    s = sim.loc[i, i].mean().mean()
    if s > max_s:
        max_s = s   # BUG FIX: the running maximum was never updated, so the
        label = l   # loop kept the LAST label with s > 0, not the best one.

# Collect the record nodes of the winning cluster, skipping form controls
# and whitespace-only nodes.
data_nodes = []
for i in sim_data[sim_data.label == label].index:
    text = re.sub(r'\n+', '\n', node_list[i].text)
    if node_list[i].name not in ['input'] and len(re.findall(r'^\s*$', node_list[i].text)) == 0:
        print('===============')
        data_nodes.append(node_list[i])
        print(text)
#
# Match the TPR-extracted text fragments back to the clustered record
# nodes to assemble each floor's (post's) textual content.
comments = [Comment() for _ in range(len(data_nodes))]
for i in range(len(data[data.label == tpr_label])):
    nodes = data.loc[i].node.nodes
    for n in nodes:
        for d in range(len(data_nodes)):
            try:
                # BUG FIX: the original tested `data_nodes[d].text.index(n)`,
                # which is falsy (0) when the fragment sits at the very start
                # of the node text, silently dropping those matches.
                if n in data_nodes[d].text:
                    # Time-like fragments are handled separately below.
                    if len(list(TextUtil.find_time(n))) != 0:
                        continue
                    comments[d].content += n.strip()
                    break
            except Exception:
                # Best effort: skip fragments that cannot be matched.
                continue

# Attach a publish time to each comment: use the last time-like string in
# the node's text (when several are present, the later one is the post
# time; with a single match, last == first).
for idx, node in enumerate(data_nodes):
    times = list(TextUtil.find_time(node.text))
    if times:
        comments[idx].publish_date = times[-1]

for c in comments:
    print(c.__dict__)

# Candidate author rows: TPR rows outside the content cluster whose path
# count equals the number of record nodes (i.e. exactly one entry per floor).
floor_data = data[( data.pathNum == len(data_nodes)) & (data.label != tpr_label)]

auth_list = []
for i in floor_data.index:
    nodes = floor_data.loc[i].node.nodes
    find_time = False
#    if ListUtil.cal_sim(nodes) > 0.5:
#        continue
    # Reject the whole row as soon as any fragment is not "useful" text.
    is_useful = True
    for n in nodes:
        if not TextUtil.useful(n):
            is_useful = False            
            break
    if not is_useful:
        continue
    
    # Rows containing time-like strings are timestamp columns, not authors.
    for n in nodes:
        if len(list(TextUtil.find_time(n))) > 0:
            find_time = True
            break
    if find_time:
        continue
    # Confirm the row is an author column: locate each fragment back in the
    # DOM and ask TreeUtil.is_auth whether its element looks like an author
    # node.  NOTE(review): re.compile(n) treats the raw fragment as a regex;
    # fragments containing metacharacters raise and are skipped by the bare
    # except — consider re.escape(n) here.
    is_auth = False
    for n in nodes:
        if n in ['*']:
            continue
        try:
            auth_nodes = dom.findChildren(text=re.compile(n))
        except:
            continue
        for node in auth_nodes:
            is_auth = TreeUtil.is_auth(node)
            if is_auth:
                break
        if is_auth:
            break
    # First row that passes all checks is taken as the list of author names.
    if is_auth:
        auth_list = nodes
        break

# Fallback: no author row with exactly one entry per floor was found.
# Relax the constraint to rows whose path count is a MULTIPLE of the floor
# count, then take every stride-th fragment.  The extra `len(data_nodes) > 0`
# guard prevents a ZeroDivisionError in the modulo filter below.
if len(auth_list) == 0 and len(data_nodes) > 0:
    floor_data = data[(data.pathNum % len(data_nodes) == 0) & (data.label != tpr_label)]

    auth_list = []
    for i in floor_data.index:
        nodes = floor_data.loc[i].node.nodes
        # Skip rows containing time-like strings (timestamp columns).
        find_time = False
        for n in nodes:
            if len(list(TextUtil.find_time(n))) > 0:
                find_time = True
                break
        if find_time:
            continue
        # Verify at least one fragment maps back to an author-looking DOM node.
        is_auth = False
        for n in nodes:
            if n in ['*']:
                continue
            try:
                # NOTE(review): the fragment is used as a raw regex; fragments
                # with metacharacters raise and are skipped here.
                auth_nodes = dom.findChildren(text=re.compile(n))
            except Exception:
                continue
            for node in auth_nodes:
                is_auth = TreeUtil.is_auth(node)
                if is_auth:
                    break
            if is_auth:
                break
        if is_auth:
            auth_list = nodes
            break
    # Drop fragments that are not useful text.
    auth_list = [a for a in auth_list if TextUtil.useful(a)]

    if len(auth_list) > 0:
        # BUG FIX: the original computed `n = len(auth_list) / len(data_nodes)`,
        # which is FLOAT division in Python 3 (this is Python-2-era code), so
        # `(i % n) == 0` misbehaved whenever the ratio was not exact.  Integer
        # division restores the intended "one fragment per floor" stride.
        stride = len(auth_list) // len(data_nodes)
        if stride > 0:
            auth_list = [auth_list[i].strip()
                         for i in range(0, len(auth_list), stride)]
print(auth_list)

if len(auth_list) > 0:
    # Assign authors positionally: the i-th author fragment belongs to the
    # i-th floor.  BUG FIX: the original indexed loop `comments[i].author =
    # auth_list[i]` raised IndexError whenever auth_list was shorter than
    # comments; zip() truncates to the shorter list instead.
    for comment, author in zip(comments, auth_list):
        comment.author = author

# Legacy note (translated): the first comment is the main post, the rest
# are replies.
print('\n=================================\n')
for c in comments:
    print(c.__dict__)

#    if ListUtil.cal_sim(nodes).corr().mean().mean() < 0.5:
#        print(nodes)
