import json
from bs4 import BeautifulSoup
import re
from selenium import webdriver
import time
from queue import Queue
import os


# 去除文本中无用信息
def text_filter(element_tag):
    # 去引用标签
    inter_1 = element_tag.findAll('sup')
    if inter_1:
        for j in inter_1:
            j.decompose()
    # 去“编辑”标签
    inter_2 = element_tag.findAll('a', {'class': re.compile('.*edit-icon.*')})
    if inter_2:
        for j in inter_2:
            j.decompose()
    # 去“图片”标签
    inter_3 = element_tag.findAll('div', {'class': re.compile('.*lemma-picture.*')})
    if inter_3:
        for j in inter_3:
            j.decompose()
    # 去“故事名称”标签
    inter_4 = element_tag.findAll('span', {'class': 'title-prefix'})
    if inter_4:
        for j in inter_4:
            j.decompose()


def get_property(html):
    """
    Extract the attribute name/value pairs from a lemma's basic-info box.

    :param html: raw HTML of a Baidu Baike lemma page
    :return: dict mapping attribute name to attribute value (empty when
             the page has no basic-info box)
    """
    propertyDict = {}
    soup = BeautifulSoup(html, 'lxml')
    inter_1 = soup.findAll('div', {'class': 'basic-info cmn-clearfix'})
    if inter_1:
        text_filter(inter_1[0])
        for dt in inter_1[0].findAll('dt', {'class': 'basicInfo-item name'}):
            # The value lives in the <dd> that follows the <dt>.
            # find_next_sibling skips intervening whitespace text nodes and
            # cannot raise AttributeError, unlike the fragile
            # next_sibling.next_sibling chain (which assumed exactly one
            # whitespace node between <dt> and <dd>).
            dd = dt.find_next_sibling('dd')
            if dd is None:
                continue  # malformed row: a name with no value
            property_name = dt.get_text(strip=True).replace('\xa0', '').replace(' ', '').replace(',', '，')
            property_value = dd.get_text(strip=True).replace('\xa0', '').replace(' ', '').replace(',', '，')
            propertyDict[property_name] = property_value
    return propertyDict


def get_abstract(html):
    """
    Extract the summary (abstract) paragraph of a lemma page.

    :param html: raw HTML of a Baidu Baike lemma page
    :return: cleaned summary string, '' when the page has no summary block
    """
    soup = BeautifulSoup(html, 'lxml')
    summary_div = soup.find('div', {'class': 'lemma-summary', 'label-module': 'lemmaSummary'})
    if summary_div is None:
        return ''
    text_filter(summary_div)
    cleaned = summary_div.get_text(strip=True)
    # Normalize: drop NBSP/spaces/newlines/tabs, use full-width commas.
    for old, new in (('\xa0', ''), (' ', ''), (',', '，'), ('\n', ''), ('\t', '')):
        cleaned = cleaned.replace(old, new)
    return cleaned


def get_text(html):
    """
    Extract the body text of a lemma page, one paragraph per line.

    :param html: raw HTML of a Baidu Baike lemma page
    :return: cleaned body text with each non-empty paragraph followed by
             '\\n'; '' when the page has no paragraphs
    """
    soup = BeautifulSoup(html, 'lxml')
    paragraphs = []
    for para_div in soup.findAll('div', {'class': 'para', 'label-module': 'para'}):
        text_filter(para_div)
        paragraph = para_div.get_text(strip=True).replace('\xa0', '').replace(' ', '')\
            .replace(',', '，').replace('\n', '').replace('\t', '')
        if paragraph:  # skip paragraphs that become empty after cleaning
            paragraphs.append(paragraph)
    # join + trailing '\n' reproduces the original text+paragraph+'\n' loop
    # without quadratic string concatenation.
    return ('\n'.join(paragraphs) + '\n') if paragraphs else ''


def get_name(html):
    """
    Extract the lemma title, plus the synonym qualifier when present.

    :param html: raw HTML of a Baidu Baike lemma page
    :return: h1 text concatenated with h2 text; '' when no title block exists
    """
    soup = BeautifulSoup(html, 'lxml')
    title_dd = soup.find('dd', {'class': 'lemmaWgt-lemmaTitle-title'})
    if title_dd is None:
        return ''
    text_filter(title_dd)
    parts = []
    if title_dd.h1:
        parts.append(title_dd.h1.get_text(strip=True))  # lemma title
    if title_dd.h2:
        parts.append(title_dd.h2.get_text(strip=True))  # synonym qualifier
    return ''.join(parts)


def get_tongyi(html):
    """
    Collect the polysemant (synonym / disambiguation) entry URLs of a lemma.

    :param html: raw HTML of a Baidu Baike lemma page
    :return: list of absolute "https://baike.baidu.com/item/..." URLs
    """
    tongyiList = []
    soup = BeautifulSoup(html, 'lxml')
    inter_1 = soup.findAll('ul', {'class': 'polysemantList-wrapper cmn-clearfix'})
    if inter_1:
        text_filter(inter_1[0])
        # Query the already-parsed tag directly; the original round trip of
        # str() + a second BeautifulSoup parse was pure overhead.
        for link in inter_1[0].findAll('a', href=re.compile("/item/")):
            # Prepend the site root to turn the relative href into a full URL.
            tongyiList.append('https://baike.baidu.com' + link.get('href'))
    return tongyiList


# Currently unused.
def get_tag(html):
    """
    Collect the tag labels attached to a lemma page.

    :param html: raw HTML of a Baidu Baike lemma page
    :return: list of cleaned tag strings
    """
    tagList = []
    soup = BeautifulSoup(html, 'lxml')
    inter_1 = soup.findAll('div', {'class': 'entry-tags-item'})
    if inter_1:
        # Query the already-parsed tag directly; the original round trip of
        # str() + a second BeautifulSoup parse was pure overhead.
        for link in inter_1[0].findAll('a', {'target': '_blank'}):
            tagList.append(link.get_text(strip=True).replace('\xa0', '').replace(' ', '')
                           .replace(',', '，').replace('\n', '').replace('\t', ''))
    return tagList